Python ipdb module: set_trace() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use ipdb.set_trace().
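
Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below): execution pauses at the ipdb.set_trace() call and drops into an interactive debugger in that frame, where locals can be inspected, 'n' steps to the next line, and 'c' continues.

def divide(a, b):
    import ipdb
    ipdb.set_trace()  # execution pauses here; inspect a and b, then 'c' to continue
    return a / b

divide(10, 2)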

Project: django-corenlp    Author: arunchaganty    | Project source | File source
def test_annotate_filter(self):
        """
        Filter out mentions whose type is 'O'.
        The remaining mentions should correctly identify:
            - character offsets
            - glosses
            - links to canonical_mentions
            - links to parent_mentions
        """
        sentences, mentions = annotate_document(self._doc, self._client,
                                                mention_filter=lambda mentions:[m for m in mentions if m.type != 'O'])

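        # drop into the interactive debugger to inspect the annotated sentences and mentions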
        ipdb.set_trace()

        # Just assert counts.
        self.assertEqual(3, len(sentences))
        self.assertEqual(19, len(mentions))

        for m in mentions:
            self.assertTrue(m.ner != 'O')
Project: Multi-channel-speech-extraction-using-DNN    Author: zhr1201    | Project source | File source
def loss(self, inf_targets, inf_vads, targets, vads, mtl_fac):
        '''
        Loss definition
        Only the speech inference loss is defined, and it works quite well
        Add VAD cross entropy loss if you want
        '''
        loss_v1 = tf.nn.l2_loss(inf_targets - targets) / self.batch_size
        loss_o = loss_v1
        reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # ipdb.set_trace()
        loss_v = loss_o + tf.add_n(reg_loss)
        tf.scalar_summary('loss', loss_v)
        # loss_merge = tf.cond(
        #     is_val, lambda: tf.scalar_summary('val_loss_batch', loss_v),
        #     lambda: tf.scalar_summary('loss', loss_v))
        return loss_v, loss_o
        # return tf.reduce_mean(tf.nn.l2_loss(inf_targets - targets))
Project: Multi-channel-speech-extraction-using-DNN    Author: zhr1201    | Project source | File source
def transform(audio_data, save_image_path, nFFT=256, overlap=0.75):
    '''audio_data: signals to convert
    save_image_path: path to store the image file'''
    # spectrogram
    freq_data = stft(audio_data, nFFT, overlap)
    freq_data = np.maximum(np.abs(freq_data),
                           np.max(np.abs(freq_data)) / 10000)
    log_freq_data = 20. * np.log10(freq_data / 1e-4)
    N_samples = log_freq_data.shape[0]
    # log_freq_data = np.maximum(log_freq_data, max_m - 70)
    # print(np.max(np.max(log_freq_data)))
    # print(np.min(np.min(log_freq_data)))
    log_freq_data = np.round(log_freq_data)
    log_freq_data = np.transpose(log_freq_data)
    # ipdb.set_trace()

    assert np.max(np.max(log_freq_data)) < 256, 'spectrogram value too large'
    # save the image
    spec_imag = Image.fromarray(log_freq_data)
    spec_imag = spec_imag.convert('RGB')
    spec_imag.save(save_image_path)
    return N_samples
Project: tweets_analyzer    Author: x0rz    | Project source | File source
def print_stats(dataset, top=5):
    """ Displays top values by order """
    sum = numpy.sum(list(dataset.values()))
    i = 0
    if sum:
        sorted_keys = sorted(dataset, key=dataset.get, reverse=True)
        max_len_key = max([len(x) for x in sorted_keys][:top])  # used to adjust column width
        for k in sorted_keys:
            try:
                print(("- \033[1m{:<%d}\033[0m {:>6} {:<4}" % max_len_key)
                      .format(k, dataset[k], "(%d%%)" % ((float(dataset[k]) / sum) * 100)))
            except:
                import ipdb
                ipdb.set_trace()
            i += 1
            if i >= top:
                break
    else:
        print("No data")
    print("")
Project: Automatic_Group_Photography_Enhancement    Author: Yuliang-Zou    | Project source | File source
def extract(self, data_path, session, saver):
        saver.restore(session, data_path)
        scopes = ['conv1_1','conv1_2','conv2_1','conv2_2','conv3_1','conv3_2','conv3_3','conv4_1','conv4_2','conv4_3','conv5_1','conv5_2','conv5_3','rpn_conv/3x3','rpn_cls_score','rpn_bbox_pred','fc6','fc7','cls_score','bbox_pred']
        data_dict = {}
        for scope in scopes:
            # Frozen layers
            if scope in ['conv1_1','conv1_2','conv2_1','conv2_2']:
                [w, b] = tf.get_collection(tf.GraphKeys.VARIABLES, scope=scope)
            # We don't need momentum variables
            else:
                [w, b] = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
            data_dict[scope] = {'weights':w.eval(), 'biases':b.eval()}
        file_name = data_path[0:-5]
        np.save(file_name, data_dict)
        ipdb.set_trace()       
        return file_name + '.npy'
Project: chainer-deconv    Author: germanRos    | Project source | File source
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t = inputs
        log_y = cupy.log(x + 1e-5)
        self.y = x

        if self.debug:
            ipdb.set_trace()

        if getattr(self, 'normalize', True):
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        ret = cuda.reduce(
            'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
            't == -1 ? 0 : log_y[_j * n_channel + t]',
            'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
        )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
        return ret,
Project: chainer-deconv    Author: germanRos    | Project source | File source
def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t = inputs
        log_y = cupy.log(x + 1e-5)
        self.y = x

        if self.debug:
            ipdb.set_trace()

        if getattr(self, 'normalize', True):
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        ret = cuda.reduce(
            'S t, raw T log_y, int32 n_channel, raw T coeff, raw T weights', 'T out',
            't == -1 ? 0 : log_y[_j * n_channel + t] * weights[t]',
            'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
        )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff, self.weights.reduced_view())
        return ret,
Project: LM_GANS    Author: anirudh9119    | Project source | File source
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True):
    probs = []

    n_done = 0

    for x in iterator:
        n_done += len(x)

        x, x_mask = prepare_data(x, n_words=options['n_words'])

        pprobs = f_log_probs(x, x_mask)
        for pp in pprobs:
            probs.append(pp)

        if numpy.isnan(numpy.mean(probs)):
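            # break into ipdb when a NaN shows up in the running log-probs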
            ipdb.set_trace()

        if verbose:
            print >>sys.stderr, '%d samples computed' % (n_done)

    return numpy.array(probs)
Project: proteusisc    Author: diamondman    | Project source | File source
def __init__(self, insname, *args, execute=True, read_status=False,
                 loop=0, delay=0, **kwargs):
        super(RunInstruction, self).__init__(*args, **kwargs)
        self.isBH = False
        if (self.data or self.read) and not self.bitcount:
            desc = self.dev._desc
            regname = desc._instruction_register_map.get(insname)
            self.bitcount = desc._registers.get(regname)
            if self.bitcount is None:
                #print("Dealing with a Blackhole Register")
                self.isBH = True
                self.bitcount = len(self.data)
        if not self.data and self.bitcount:
            self.data = NoCareBitarray(self.bitcount)
        if self.data is not None and len(self.data) != self.bitcount:
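            # pause to inspect the data/bitcount mismatch before raising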
            import ipdb
            ipdb.set_trace()
            raise ValueError("")
        self.read_status = read_status
        self.insname = insname
        self.execute = execute
        self.delay = delay
        self.loop = loop
Project: fileserver-chat    Author: tanmaydatta    | Project source | File source
def index():
    global curr_time
    global syspass
    if request.method == 'GET':
        return render_template('index.html')
    elif request.method == 'POST':
        # ipdb.set_trace()
        enroll = request.form['enroll']
        passwd = request.form['pass']
        syspass = request.form['syspass']
        name = request.form['name']
        session['name'] = name
        a = soldier.run('sudo mount -t cifs //fileserver2/' + enroll + ' /mnt -o user='+enroll+',password='+passwd+',workgroup=workgroup,ip=172.16.68.30', sudo=syspass)
        # a = soldier.run()
        if os.path.isfile('/mnt/chat.txt') == False:
            a = soldier.run('sudo touch /mnt/chat.txt', sudo=syspass)
        curr_time = time.time()
        session['user']=1
        # print session['curr']
        return redirect('/chat')
Project: rmn    Author: orhanf    | Project source | File source
def pred_probs(self, stream, f_log_probs, prepare_data, verbose=True):

        options = self.options
        probs = []
        n_done = 0

        for x in stream:
            n_done += len(x)

            x, x_mask = prepare_data(x, n_words=options['n_words'])

            pprobs = f_log_probs(x, x_mask)
            for pp in pprobs:
                probs.append(pp)

            if numpy.isnan(numpy.mean(probs)):
                ipdb.set_trace()

            if verbose:
                print >>sys.stderr, '%d samples computed' % (n_done)

        return numpy.array(probs)
Project: deep-clustering    Author: zhr1201    | Project source | File source
def __init__(self, data_dir, batch_size):
        '''preprocess the training data
        data_dir: dir containing the training data
                  format:root_dir + speaker_dir + wavfiles'''
        # get dirs for each speaker
        self.speakers_dir = [os.path.join(data_dir, i)
                             for i in os.listdir(data_dir)]
        self.n_speaker = len(self.speakers_dir)
        self.batch_size = batch_size
        self.speaker_file = {}
        self.epoch = 0

        # get the files in each speakers dir
        for i in range(self.n_speaker):
            wav_dir_i = [os.path.join(self.speakers_dir[i], file)
                         for file in os.listdir(self.speakers_dir[i])]
            for j in wav_dir_i:
                if i not in self.speaker_file:
                    self.speaker_file[i] = []
                self.speaker_file[i].append(j)
        # ipdb.set_trace()
        # self.reinit()
Project: additions_mxnet    Author: eldercrow    | Project source | File source
def inspect_weight_dist(prefix_net, epoch):
    #
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix_net, epoch)

    quantize_bit = 5

    err_log = {}
    err_uni = {}

    err_diff = []

    for k in sorted(arg_params):
        if not k.endswith('_weight'):
            continue
        v = arg_params[k].asnumpy().ravel()

        err_log[k] = measure_log_quantize_error(v, quantize_bit)
        err_uni[k] = measure_uni_quantize_error(v, quantize_bit)

        err_diff.append(err_log[k] - err_uni[k])

    plt.plot(range(len(err_diff)), err_diff)

    import ipdb
    ipdb.set_trace()
Project: additions_mxnet    Author: eldercrow    | Project source | File source
def generate_batch(im):
    """
    preprocess image, return batch
    :param im: cv2.imread returns [height, width, channel] in BGR
    :return:
    data_batch: MXNet input batch
    data_names: names in data_batch
    im_scale: float number
    """
    import ipdb
    ipdb.set_trace()
    im_array, im_scale = resize(im, SHORT_SIDE, LONG_SIDE, stride=config.IMAGE_STRIDE)
    im_array = transform(im_array, PIXEL_MEANS)
    im_info = np.array([[im_array.shape[2], im_array.shape[3], im_scale]], dtype=np.float32)
    data = [mx.nd.array(im_array), mx.nd.array(im_info)]
    data_shapes = [('data', im_array.shape), ('im_info', im_info.shape)]
    data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None)
    return data_batch, DATA_NAMES, im_scale
Project: nature_methods_multicut_pipeline    Author: ilastik    | Project source | File source
def __call__(self, inp=None):
        """Apply preptrain to input `inp`."""
        # Parse
        inp = (self.x if inp is None else inp)
        # Instantiate an interloop container
        itc = inp

        # Loop
        for coach in self.train:
            try:
                itc = coach(itc)
            except Exception as e:
                if self._debug:
                    print("Exception raised, entering debugger. Hit 'q' followed by 'return' to exit.")
                    import ipdb
                    ipdb.set_trace()
                else:
                    raise e

        # Assign and return
        self.y = itc
        return self.y
Project: rnn_benchmarks    Author: caglar    | Project source | File source
def pred_probs(f_log_probs, options, iterator, verbose=True):
    probs = []
    n_done = 0
    for x, y in iterator:
        n_done += len(x)

        pprobs = f_log_probs(x)
        for pp in pprobs:
            probs.append(pp)

        if numpy.isnan(numpy.mean(probs)):
            ipdb.set_trace()

        if verbose:
            print >>sys.stderr, '%d samples computed' % (n_done)

    return numpy.array(probs)
Project: mcnPyTorch    Author: albanie    | Project source | File source
def forward(self, x):
        # mini = list(self.features.children())[:4]
        # mini_f = torch.nn.modules.Sequential(*mini) ;
        # y = mini_f(x)
        # ipdb.set_trace()
        # mini = list(self.features.children())

        x = self.features(x)
        if self.flatten_loc == 'classifier':
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
        elif self.flatten_loc == 'end':
            x = self.classifier(x)
            x = x.view(x.size(0), -1)
        else:
            msg = 'unrecognised flatten_loc: {}'.format(self.flatten_loc)
            raise ValueError(msg)
        return x
Project: tf_fcn    Author: Yuliang-Zou    | Project source | File source
def get_original_size(mask, max_size=(640,640)):
    row = None
    col = None
    for i in range(max_size[0]-1, -1, -1):
        if mask[i,0,0] == 1:
            row = i + 1
            break

    for i in range(max_size[1]-1, -1, -1):
        if mask[0,i,0] == 1:
            col = i + 1
            break

    if row is None or col is None:
        ipdb.set_trace()
    return row, col
Project: hackerbot    Author: omergunal    | Project source | File source
def print_stats(dataset, top=5):
    """ Displays top values by order """
    sum = numpy.sum(list(dataset.values()))
    i = 0
    if sum:
        sorted_keys = sorted(dataset, key=dataset.get, reverse=True)
        max_len_key = max([len(x) for x in sorted_keys][:top])  # used to adjust column width
        for k in sorted_keys:
            try:
                print(("- \033[1m{:<%d}\033[0m {:>6} {:<4}" % max_len_key)
                      .format(k, dataset[k], "(%d%%)" % ((float(dataset[k]) / sum) * 100)))
            except:
                import ipdb
                ipdb.set_trace()
            i += 1
            if i >= top:
                break
    else:
        print("No data")
    print("")
Project: dl4mt-multi    Author: nyu-dl    | Project source | File source
def do(self, callback_name, *args):
        probs = {}
        print ''
        logger.info(" Computing log-probs...")
        start = time.time()
        for cg_name, stream in self.streams.iteritems():
            probs[cg_name] = list()
            src_id, trg_id = p_(cg_name)

            # handle multi-source stream
            src_idx = self.enc_ids.index(src_id)
            trg_idx = self.dec_ids.index(trg_id)

            for i, batch in enumerate(stream.get_epoch_iterator()):
                batch_size = batch[0].shape[0]
                src_sel = numpy.zeros(
                    (batch_size, self.num_encs)).astype(theano.config.floatX)
                src_sel[:, src_idx] = 1.
                trg_sel = numpy.zeros(
                    (batch_size, self.num_decs)).astype(theano.config.floatX)
                trg_sel[:, trg_idx] = 1.

                inps = [batch[0].T, batch[1].T, batch[2].T, batch[3].T,
                        src_sel, trg_sel]

                pprobs = self.f_log_probs[cg_name](*inps)
                probs[cg_name].append(pprobs.tolist())

                if numpy.isnan(numpy.mean(probs[cg_name])):
                    import ipdb
                    ipdb.set_trace()

            print 'logprob for CG [{}]: {}'.format(
                cg_name, numpy.mean(probs[cg_name]))

        print "took {} seconds.".format(time.time()-start)
        records = [('logprob_' + k, numpy.mean(v))
                   for k, v in probs.iteritems()]
        self.add_records(self.main_loop.log, records)
Project: Multi-channel-speech-extraction-using-DNN    Author: zhr1201    | Project source | File source
def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope or type(self).__name__):
            c, h = state

            # change bias argument to False since LN will add bias via shift
            concat = tf.nn.rnn_cell._linear(
                [inputs, h], 4 * self._num_units, False)
            # ipdb.set_trace()

            i, j, f, o = tf.split(1, 4, concat)

            # add layer normalization to each gate
            i = ln(i, scope='i/')
            j = ln(j, scope='j/')
            f = ln(f, scope='f/')
            o = ln(o, scope='o/')

            new_c = (c * tf.nn.sigmoid(f + self._forget_bias) +
                     tf.nn.sigmoid(i) * self._activation(j))

            # add layer_normalization in calculation of new hidden state
            new_h = self._activation(
                ln(new_c, scope='new_h/')) * tf.nn.sigmoid(o)
            new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_h)
            return new_h, new_state
Project: CNN-for-single-channel-speech-enhancement    Author: zhr1201    | Project source | File source
def inference(self, images, is_train):
        '''Net configuration as the original paper'''
        image_input = tf.reshape(images, [-1, self.N_IN, self.NEFF, 1])
        # ipdb.set_trace()
        with tf.variable_scope('con1') as scope:
            h_conv1 = self._conv_layer_wrapper(image_input, 12, 13, is_train)
        with tf.variable_scope('con2') as scope:
            h_conv2 = self._conv_layer_wrapper(h_conv1, 16, 11, is_train)
        with tf.variable_scope('con3') as scope:
            h_conv3 = self._conv_layer_wrapper(h_conv2, 20, 9, is_train)
        with tf.variable_scope('con4') as scope:
            h_conv4 = self._conv_layer_wrapper(h_conv3, 24, 7, is_train)
        with tf.variable_scope('con5') as scope:
            h_conv5 = self._conv_layer_wrapper(h_conv4, 32, 7, is_train)
        with tf.variable_scope('con6') as scope:
            h_conv6 = self._conv_layer_wrapper(h_conv5, 24, 7, is_train)
        with tf.variable_scope('con7') as scope:
            h_conv7 = self._conv_layer_wrapper(h_conv6, 20, 9, is_train)
        with tf.variable_scope('con8') as scope:
            h_conv8 = self._conv_layer_wrapper(h_conv7, 16, 11, is_train)
        with tf.variable_scope('con9') as scope:
            h_conv9 = self._conv_layer_wrapper(h_conv8, 12, 13, is_train)
        with tf.variable_scope('con10') as scope:
            f_w = h_conv9.get_shape()[1].value
            i_fm = h_conv9.get_shape()[-1].value
            W_con10 = weight_variable(
                [f_w, 129, i_fm, 1])
            b_conv10 = bias_variable([1])
            h_conv10 = conv2d(h_conv9, W_con10) + b_conv10
        return tf.reshape(h_conv10, [-1, self.NEFF])
Project: CNN-for-single-channel-speech-enhancement    Author: zhr1201    | Project source | File source
def __init__(self,
                 audio_dir,
                 noise_dir,
                 coord,
                 N_IN,
                 frame_length,
                 frame_move,
                 is_val):
        '''coord: tensorflow coordinator
        N_IN: number of input frames presented to DNN
        frame_move: hopsize'''
        self.audio_dir = audio_dir
        self.noise_dir = noise_dir
        self.coord = coord
        self.N_IN = N_IN
        self.frame_length = frame_length
        self.frame_move = frame_move
        self.is_val = is_val
        self.sample_placeholder_many = tf.placeholder(
            tf.float32, shape=(None, self.N_IN, 2, frame_length))
        # queues to store the data
        if not is_val:
            self.q = tf.RandomShuffleQueue(
                200000, 5000, tf.float32, shapes=(self.N_IN, 2, frame_length))
        else:
            self.q = tf.FIFOQueue(
                200000, tf.float32, shapes=(self.N_IN, 2, frame_length))
        self.enqueue_many = self.q.enqueue_many(
            self.sample_placeholder_many + 0)
        self.audiofiles = find_files(audio_dir)
        self.noisefiles = find_files(noise_dir)
        print('%d speech found' % len(self.audiofiles))
        print('%d noise found' % len(self.noisefiles))
        # ipdb.set_trace()
Project: factoriommo-agent    Author: factoriommo    | Project source | File source
def main_loop(self):
        logger.debug("In main loop")
        while True:
            sleeptime = 0.1
            if self.options.debug:
                import ipdb
                ipdb.set_trace()

            try:
                logdata = self.log.q.get(False)
                self.parse_logdata(logdata)
                sleeptime = 0.1
            except Empty:
                sleeptime = 0.5
            except:
                logger.exception("Something went wrong handling some log data")

            try:
                chatdata = self.log.chat.get(False)
                self.parse_chatdata(chatdata)
                sleeptime = 0.1
            except Empty:
                if sleeptime != 0.1:
                    sleeptime = 0.5
            except:
                logger.exception("Something went wrong handling some chat data")

            try:
                wsdata = self.ws.from_server.get(False)
                self.parse_wsdata(wsdata)
                sleeptime = 0.1
            except Empty:
                if sleeptime != 0.1:
                    sleeptime = 0.5
            except:
                logger.exception("Something went wrong handling some ws data")

            sleep(sleeptime)
Project: nematus    Author: hlt-mt    | Project source | File source
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True, normalize=False, alignweights=False):
    probs = []
    n_done = 0

    alignments_json = []

    for x, y in iterator:
        n_done += len(x)

        x, x_mask, y, y_mask = prepare_data(x, y,
                                            n_words_src=options['n_words_src'],
                                            n_words=options['n_words'])

        ### in optional save weights mode.
        if alignweights:
            pprobs, attention = f_log_probs(x, x_mask, y, y_mask)
            for jdata in get_alignments(attention, x_mask, y_mask):
                alignments_json.append(jdata)
        else:
            pprobs = f_log_probs(x, x_mask, y, y_mask)

        # normalize scores according to output length
        if normalize:
            lengths = numpy.array([numpy.count_nonzero(s) for s in y_mask.T])
            pprobs /= lengths

        for pp in pprobs:
            probs.append(pp)

        if numpy.isnan(numpy.mean(probs)):
            ipdb.set_trace()

        if verbose:
            print >>sys.stderr, '%d samples computed' % (n_done)

    return numpy.array(probs), alignments_json


# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
Project: DLink_Harvester    Author: MikimotoH    | Project source | File source
def parse_date(txt):
    if not txt:
        return None
    try:
        return datetime.strptime(txt, '%Y-%m-%d %H:%M:%S')
    except ValueError:
        ipdb.set_trace()
Project: DLink_Harvester    Author: MikimotoH    | Project source | File source
def guess_date(ftp_url):
    import re
    m = re.search(r'_\d{6,8}', ftp_url.split('/')[-1])
    if not m:
        return None
    m = m.group(0).strip('_')
    if len(m)==6:
        return datetime.strptime(m,'%y%m%d')
    elif len(m)==8:
        return datetime.strptime(m,'%Y%m%d')
    else:
        ipdb.set_trace()
Project: solr_presentation    Author: avolkov    | Project source | File source
def search_phrase(text):
    print()
    print()
    print("Searching for: '%s'" % text)
    res = solr.search(text)
    print("Search results object ", res)
    print("The number of results: %d " % len(res.docs))
    print("The best result ", res.docs[0])
    import ipdb
    ipdb.set_trace()

# Exact phrase
Project: nicfit.py    Author: nicfit    | Project source | File source
def debugger():
    """If called in the context of an exception, calls post_mortem; otherwise
    set_trace.
    ``ipdb`` is preferred over ``pdb`` if installed.
    """
    e, m, tb = sys.exc_info()
    if tb is not None:
        _debugger.post_mortem(tb)
    else:
        _debugger.set_trace()
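
A usage sketch for the helper above (assuming the surrounding module binds _debugger to ipdb, or pdb as a fallback): inside an except block the active traceback gets a post-mortem session; with no active exception it behaves like a plain set_trace().

try:
    1 / 0
except ZeroDivisionError:
    debugger()  # tb is not None: opens a post-mortem session on this traceback

debugger()      # no active exception: falls through to _debugger.set_trace()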
Project: vrptw-pgss-2016    Author: conwayje    | Project source | File source
def plot_for_truck(k):
  global depot, customers
  n = len(customers)
  x = [element.x for element in customers[1:]]
  y = [element.y for element in customers[1:]]
  plt.scatter(x, y)
  plt.scatter(depot.x, depot.y, c="r")


  truck = trucks[k]
  x2 = []
  y2 = []

  x2.append( depot.x )
  y2.append( depot.y )
  cs = truck.ordered_customers
  for c in cs:
    x2.append( c.x )
    y2.append( c.y )

  x2.append( depot.x )
  y2.append( depot.y )

  colors = ["b","g","r","c","m","k"]

  ipdb.set_trace()

  plt.plot(x2, y2, c=colors[k], linewidth=3)

  # plt.show()
Project: chainer-deconv    Author: germanRos    | Project source | File source
def getBatch_(self, indices):
        # format NxCHxWxH
        batchRGB = np.zeros((len(indices), self.CH, self.W, self.H), dtype='float32')
        batchLabel = np.zeros((len(indices), self.W, self.H), dtype='int32')

        k = 0
        for i in indices:
            (rgbname, gtname) = self.flist[i]

            # format: HxWxCH
            rgb =  misc.imread(rgbname)

            if(gtname.endswith('.png')):
                gt = misc.imread(gtname)
            else:
                gt = np.loadtxt(gtname)
            gt = gt.astype('uint8')

            if(self.data_transformer is not None):
                rgb = self.data_transformer.transformData(rgb)
                gt = self.data_transformer.transformLabel(gt)
            #^ data_transformer outputs in format HxWxCH

            # conversion from HxWxCH to CHxWxH
            batchRGB[k,:,:,:] = rgb.astype(np.float32).transpose((2,1,0))
            batchLabel[k,:,:] = gt.astype(np.int32).transpose((1,0))

            k += 1

            #ipdb.set_trace()

        if(self.weights_classes_flag):
            return (batchRGB, batchLabel, self.weights_classes)
        else:
            return (batchRGB, batchLabel)
Project: chainer-deconv    Author: germanRos    | Project source | File source
def saveInfo(self, model, optimizer, smanager, epoch, outputFolder, saveEach):
        #ipdb.set_trace()
        if(epoch % saveEach == 0):
            if(not os.path.exists(outputFolder)):
                os.makedirs(outputFolder)
            bname = outputFolder + '/' + model.getName() + '_' + str(epoch)
            serializers.save_npz(bname + '.model', model)
            serializers.save_npz(bname + '.state', optimizer)
            smanager.save(bname + '.stats')
Project: chainer-deconv    Author: germanRos    | Project source | File source
def __call__(self, input_blob, test_mode=False):
        # explicit and very flexible DAG!
        #################################
        data = input_blob[0]
        labels = input_blob[1]

        if(len(input_blob) >= 3):
            weights_classes = input_blob[2]
        else:
            weights_classes = chainer.Variable(cuda.cupy.ones((self.classes, 1), dtype='float32'))

        # ---- CONTRACTION BLOCKS ---- #
        blob_b0  = self.bnorm0(data)
        (blob_b1, indices_b1, size_b1)  = F.max_pooling_2dIndices(self.bnorm1(F.relu(self.conv1(blob_b0)), test=test_mode), (2, 2), stride=(2,2), pad=(0, 0))
        (blob_b2, indices_b2, size_b2)  = F.max_pooling_2dIndices(self.bnorm2(F.relu(self.conv2(blob_b1)), test=test_mode), (2, 2), stride=(2,2), pad=(0, 0))
        (blob_b3, indices_b3, size_b3)  = F.max_pooling_2dIndices(self.bnorm3(F.relu(self.conv3(blob_b2)), test=test_mode), (2, 2), stride=(2,2), pad=(0, 0))
        (blob_b4, indices_b4, size_b4)  = F.max_pooling_2dIndices(self.bnorm4(F.relu(self.conv4(blob_b3)), test=test_mode), (2, 2), stride=(2,2), pad=(0, 0))

        # ---- EXPANSION BLOCKS ---- #
        blob_b5  = self.bnorm5(F.relu(self.conv5(F.unpooling_2d(blob_b4, indices_b4, size_b4))), test=test_mode)
        blob_b6  = self.bnorm6(F.relu(self.conv6(F.unpooling_2d(blob_b5, indices_b3, size_b3))), test=test_mode)
        blob_b7  = self.bnorm7(F.relu(self.conv7(F.unpooling_2d(blob_b6, indices_b2, size_b2))), test=test_mode)
        blob_b8  = self.bnorm8(F.relu(self.conv8(F.unpooling_2d(blob_b7, indices_b1, size_b1))), test=test_mode)

        #ipdb.set_trace()

        # ---- SOFTMAX CLASSIFIER ---- #
        self.blob_class = self.classi(blob_b8)
        self.probs = F.softmax(self.blob_class)

        # ---- CROSS-ENTROPY LOSS ---- #
        #ipdb.set_trace()
        self.loss = F.weighted_cross_entropy(self.probs, labels, weights_classes, normalize=True)
        self.output_point = self.probs

        return self.loss
Project: deep_portfolio    Author: deependersingla    | Project source | File source
def get_rescaled_value_from_model(model, data):
    try:
        predicted_value = get_data_from_model(model, data)
    except:
        ipdb.set_trace()
    return (predicted_value - data.mean())/data.std()
Project: deep_portfolio    Author: deependersingla    | Project source | File source
def __init__(self, assets, look_back, episode_length, look_back_reinforcement, price_series, train):
        # think about it whether its needed or not
        self.action_repeat = 2

        self.gym_actions = range(len(assets) + 1)

        self.look_back = look_back
        total_data = pd.read_csv("../data/all_data.csv")
        cut_index = int(total_data.shape[0] * 0.8)
        if train:
            data = total_data[0:cut_index]
        else:
            data = total_data[cut_index:-1]
        self.look_back = look_back
        self.assets_index = range(0, (len(self.gym_actions)) * 4, 4)[1:]
        self.look_ahead = 1
        self.batch_size = 50
        self.look_back_reinforcement = look_back_reinforcement
        self.total_data = pandas_split_series_into_list(data, self.look_back + episode_length + 1)
        # ipdb.set_trace();
        # self.numpy_data = self.data.as_matrix()
        self.price_series = price_series
        self.episode_length = episode_length
        self.models = make_asset_input(assets, look_back, self.look_ahead, self.batch_size)
        # self.models = [0,1]
        self.assets = assets
Project: python_for_linux_system_administration    Author: lalor    | Project source | File source
def sum_nums(n):
    s=0
    for i in range(n):
        ipdb.set_trace()
        s += i
        print(s)
Project: rsmtool    Author: EducationalTestingService    | Project source | File source
def test_filter_on_flag_column_nothing_left():
    bad_df = pd.DataFrame({'spkitemid': ['a1', 'b1', 'c1', 'd1'],
                           'sc1': [1, 2, 1, 3],
                           'feature': [2, 3, 4, 5],
                           'flag1': [1, 0, 20, 14],
                           'flag2': [1, 1.0, 'TD', '03']})

    flag_dict = {'flag1': [1, 0, 14], 'flag2': ['TD']}

    df_new, df_excluded = filter_on_flag_columns(bad_df, flag_dict)
    import ipdb
    ipdb.set_trace()
Project: fileserver-chat    Author: tanmaydatta    | Project source | File source
def poll():
    # global curr_time
    # ipdb.set_trace()
    global curr_time
    # curr = session['curr']
    while os.path.isfile('/mnt/lock'):
        pass
    a = soldier.run('sudo touch /mnt/lock', sudo=syspass)
    resp = []
    f = open('/mnt/chat.txt', 'r')
    lines = f.readlines()
    print lines
    for line in lines:
        tm = line.split("$$$")[0]
        print str(curr_time) + "   $$$   " + str(tm) 
        print int(tm) > int(curr_time)
        if int(tm) > int(curr_time):
            tt = datetime.datetime.fromtimestamp(int(tm)/1000).strftime('%Y-%m-%d %H:%M:%S')
            try:
                resp.append(tt + ' : ' + line.split("$$$")[1] + ' : ' + line.split("$$$")[2] )
                curr_time = int(tm)
            except:
                pass
            try:
                curr_time = int(tm)
            except:
                pass
    f.close()
    a = soldier.run('sudo rm /mnt/lock', sudo=syspass)
    # session['curr'] = curr
    return resp
    # return 'hello'
Project: fileserver-chat    Author: tanmaydatta    | Project source | File source
def send(msg,tm):
    # ipdb.set_trace()
    curr = tm
    while os.path.isfile('/mnt/lock'):
        pass
    a = soldier.run('sudo touch /mnt/lock', sudo=syspass)
    resp = []
    f = open('/mnt/chat.txt', 'a')
    f.write(str(curr) + '$$$' + str(session['name']) + '$$$' + msg + '\n')
    f.close()
    a = soldier.run('sudo rm /mnt/lock', sudo=syspass)
    return "success"
Project: frappuccino    Author: Carreau    | Project source | File source
def params_compare(old_ps, new_ps):
    try:
        from itertools import zip_longest
        for (o, ov), (n, nv) in zip_longest(old_ps.items(), new_ps.items(), fillvalue=(None, None)):
            if o == n and ov == nv:
                continue
            param_compare(ov, nv)
    except:
        import ipdb
        ipdb.set_trace()
Project: Attentive_reader    Author: caglar    | Project source | File source
def ipdb_breakpoint(x):
    """A simple hook function for :func:`put_hook` that runs ipdb.

    Parameters
    ----------
    x : :class:`~numpy.ndarray`
        The value of the hooked variable.

    """
    import ipdb
    ipdb.set_trace()
Project: Keras-GAN    Author: Shaofanl    | Project source | File source
def feature(aegan, filename):
    import ipdb
    with ipdb.launch_ipdb_on_exception():
        aegan.load(prefix='./samples/reid_aegan/aegan/50')

        paths = map(lambda x: x.strip(), open('protocol/cuhk01-all.txt').readlines())
        x = transform( np.array([load_image(path, (64, 128)) for path in paths]) )
        code = aegan.autoencoder.encoder.predict(x)

    ipdb.set_trace()
Project: Keras-GAN    Author: Shaofanl    | Project source | File source
def test(aegan, prefix):
    import ipdb
    with ipdb.launch_ipdb_on_exception():
        aegan.load(prefix=prefix)

        from GAN.utils.vis import vis_grid
        vis_grid(inverse_transform(aegan.generator.random_generate(128)), (2, 20), 'random_generate.png')

        paths = map(lambda x: x.strip(), open('protocol/cuhk01-all.txt').readlines())
        from load import load_image
        sample = transform( np.array([load_image(path, (64, 128)) for path in paths[:128]]) )

        vis_grid(inverse_transform(sample), (2, 20), 'sample.png')
        vis_grid(inverse_transform(aegan.autoencoder.autoencoder.predict(sample)), (2, 20), 'reconstruct.png')


        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
#       codes = aegan.autoencoder.encoder.predict(sample)
#       codes = aegan.generator.sample(128)
        codes = aegan.autoencoder.encoder.predict(aegan.generator.random_generate(128))


        for ind, code in enumerate(codes):
            n, bins, patches = plt.hist(code, 50, normed=1, facecolor='green', alpha=0.75)
            plt.savefig('test/{}.pdf'.format(ind))
            plt.clf()

    ipdb.set_trace()
Project: Keras-GAN    Author: Shaofanl    | Project source | File source
def feature_aegan(aegan, modelname, protoname):
    with ipdb.launch_ipdb_on_exception():
        aegan.load(prefix=modelname)

        x = transform(load_all(protoname, (npxw, npxh)))
        code = aegan.autoencoder.encoder.predict(x)

    ipdb.set_trace()
Project: lmkit    Author: jiangnanhugo    | Project source | File source
def apply(self, inputs, gate_inputs, mask=None):
        def step(inputs, gate_inputs, states, state_to_gates, state_to_state):
            #import ipdb
            #ipdb.set_trace()
            gate_values = self.gate_activation.apply(
                states.dot(self.state_to_gates) + gate_inputs)
            update_values = gate_values[:, :self.dim]
            reset_values = gate_values[:, self.dim:]
            states_reset = states * reset_values
            next_states = self.activation.apply(
                states_reset.dot(self.state_to_state) + inputs)
            next_states = (next_states * update_values +
                           states * (1 - update_values))
            return next_states

        def step_mask(inputs, gate_inputs, mask_input, states, state_to_gates, state_to_state):
            next_states = step(inputs, gate_inputs, states, state_to_gates, state_to_state)
            if mask_input:
                next_states = (mask_input[:, None] * next_states +
                               (1 - mask_input[:, None]) * states)
            return next_states


        if mask:
            func = step_mask
            sequences = [inputs, gate_inputs, mask]
        else:
            func = step
            sequences = [inputs, gate_inputs]
        #[dict(input=inputs), dict(input=gate_inputs), dict(input=mask)]
        output = tensor.repeat(self.params[2].dimshuffle('x',0), inputs.shape[1], axis=0)
        states_output, _ = theano.scan(fn=func,
                sequences=sequences,
                outputs_info=[output],
                non_sequences=[self.state_to_gates, self.state_to_state],
                strict=True,
                #allow_gc=False)
                )

        return states_output
Project: lmkit    Author: jiangnanhugo    | Project source | File source
def apply(self, inputs, update_inputs, reset_inputs, mask=None):
        def step(inputs, update_inputs, reset_inputs, states, state_to_update, state_to_reset, state_to_state):
            #import ipdb
            #ipdb.set_trace()
            reset_values = self.gate_activation.apply(
                    states.dot(self.state_to_reset) + reset_inputs)

            update_values = self.gate_activation.apply(
                    states.dot(self.state_to_update) + update_inputs)

            next_states_proposed = self.activation.apply(
                (states * reset_values).dot(self.state_to_state) + inputs)

            next_states = (next_states_proposed * update_values +
                           states * (1 - update_values))
            return next_states

        def step_mask(inputs, update_inputs, reset_inputs, mask_input, states, state_to_update, state_to_reset, state_to_state):
            next_states = step(inputs, update_inputs, reset_inputs, states, state_to_update, state_to_reset, state_to_state)
            if mask_input:
                next_states = (mask_input[:, None] * next_states +
                               (1 - mask_input[:, None]) * states)
            return next_states


        if mask:
            func = step_mask
            sequences = [inputs, update_inputs, reset_inputs, mask]
        else:
            func = step
            sequences = [inputs, update_inputs, reset_inputs]
        #[dict(input=inputs), dict(input=gate_inputs), dict(input=mask)]
        #output = tensor.repeat(self.params[2].dimshuffle('x',0), inputs.shape[1], axis=0)
        states_output, _ = theano.scan(fn=func,
                sequences=sequences,
                outputs_info=[self.initial_state('initial_state', inputs.shape[1])],
                non_sequences=[self.state_to_reset, self.state_to_update, self.state_to_state],
                strict=True,
                allow_gc=False)

        return states_output
Project: packyou    Author: llazzaro    | Project source | File source
def get_or_create_module(self, fullname):
        """
            Given a name and a path it will return a module instance
            if found.
            When the module could not be found it will raise ImportError
        """
        LOGGER.info('Loading module {0}'.format(fullname))
        parent, _, module_name = fullname.rpartition('.')
        if fullname in modules:
            LOGGER.info('Found cache entry for {0}'.format(fullname))
            return modules[fullname]

        module = modules.setdefault(fullname, imp.new_module(fullname))
        if len(fullname.strip('.')) > 3:
            absolute_from_root = fullname.split('.', 3)[-1]
            modules.setdefault(absolute_from_root, module)
        if len(fullname.split('.')) == 4:
            # add the root of the project
            modules[fullname.split('.')[-1]] = module
        # required by PEP 302
        module.__file__ = self.get_filename(fullname)
        LOGGER.info('Created module {0} with fullname {1}'.format(self.get_filename(fullname), fullname))
        module.__name__ = fullname
        module.__loader__ = self
        module.__path__ = self.path
        if self.is_package(fullname):
            module.__path__ = self.path
            module.__package__ = fullname
        else:
            module.__package__ = fullname.rpartition('.')[0]

        LOGGER.debug('loading file {0}'.format(self.get_filename(fullname)))
        source = self.get_source(fullname)
        try:
            exec(source, module.__dict__)
        except Exception as ex:
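            # drop into ipdb with the failing module's namespace and source in scope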
            ipdb.set_trace()
        return module
Project: gail-driver    Author: sisl    | Project source | File source
def sample(self, dist_info):
        samples = self._f_sample(dist_info["prob"])
        import ipdb
        ipdb.set_trace()