Python numpy.minimum() Usage Examples

The following code examples show how to use numpy.minimum(). They are extracted from open source Python projects. Unless a snippet imports it explicitly, assume numpy has already been imported as np (a few examples use import numpy instead).
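
As a quick refresher before the extracted examples (a minimal sketch of the core behavior, not taken from any of the projects below): numpy.minimum() compares two arrays element-wise, broadcasting as needed, and propagates NaN.

import numpy as np

np.minimum([2, 3, 4], [1, 5, 2])   # array([1, 3, 2])  -- element-wise minimum
np.minimum(np.arange(5), 3)        # array([0, 1, 2, 3, 3])  -- a broadcast scalar clamps values
np.minimum(np.nan, 1.0)            # nan  -- NaNs propagate (np.fmin ignores them)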

Example 1

def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections."""
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    for i in range(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show() 

Example 2

def batch_iou(boxes, box):
  """Compute the Intersection-Over-Union of a batch of boxes with another
  box.

  Args:
    boxes: 2D array of boxes, each of the form [cx, cy, width, height].
    box: a single array of [cx, cy, width, height].
  Returns:
    ious: array of floats in the range [0, 1].
  """
  lr = np.maximum(
      np.minimum(boxes[:,0]+0.5*boxes[:,2], box[0]+0.5*box[2]) - \
      np.maximum(boxes[:,0]-0.5*boxes[:,2], box[0]-0.5*box[2]),
      0
  )
  tb = np.maximum(
      np.minimum(boxes[:,1]+0.5*boxes[:,3], box[1]+0.5*box[3]) - \
      np.maximum(boxes[:,1]-0.5*boxes[:,3], box[1]-0.5*box[3]),
      0
  )
  inter = lr*tb
  union = boxes[:,2]*boxes[:,3] + box[2]*box[3] - inter
  return inter/union 
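
A quick sanity check of batch_iou with hand-picked boxes (toy input of my own, not from the original project):

import numpy as np

boxes = np.array([[2., 2., 2., 2.],    # identical to `box` -> IoU 1.0
                  [4., 2., 2., 2.]])   # touches `box` only at an edge -> IoU 0.0
box = np.array([2., 2., 2., 2.])
batch_iou(boxes, box)                  # array([1., 0.])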

Example 3

def imin(arrays, axis, ignore_nan = False):
    """ 
    Minimum of a stream of arrays along an axis.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    axis : int or None, optional
        Axis along which the minimum is found. The default
        is to find the minimum along the 'stream axis', as if all arrays in ``arrays``
        were stacked along a new dimension. If ``axis = None``, arrays in ``arrays`` are flattened
        before reduction.
    ignore_nan : bool, optional
        If True, NaNs are ignored. Default is propagation of NaNs.

    Yields
    ------
    online_min : ndarray
        Cumulative minimum.
    """
    ufunc = np.fmin if ignore_nan else np.minimum
    yield from ireduce_ufunc(arrays, ufunc, axis) 
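
The ignore_nan switch relies on the difference between the two ufuncs; a minimal illustration:

import numpy as np

a = np.array([1., np.nan, 3.])
b = np.array([2., 2., np.nan])
np.minimum(a, b)   # array([ 1., nan, nan])  -- NaNs propagate
np.fmin(a, b)      # array([1., 2., 3.])     -- NaNs are ignored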

Example 4

def test_train(self):
    model, fetches_ = self._test_pipeline(tf.contrib.learn.ModeKeys.TRAIN)
    predictions_, loss_, _ = fetches_

    target_len = self.sequence_length + 10 + 2
    max_decode_length = model.params["target.max_seq_len"]
    expected_decode_len = np.minimum(target_len, max_decode_length)

    np.testing.assert_array_equal(predictions_["logits"].shape, [
        self.batch_size, expected_decode_len - 1,
        model.target_vocab_info.total_size
    ])
    np.testing.assert_array_equal(predictions_["losses"].shape,
                                  [self.batch_size, expected_decode_len - 1])
    np.testing.assert_array_equal(predictions_["predicted_ids"].shape,
                                  [self.batch_size, expected_decode_len - 1])
    self.assertFalse(np.isnan(loss_)) 

Example 5

def eval_one_dataset(self, sess, dataset, save_dir, subset='train'):
        count = 0
        print('num_examples:', dataset._num_examples)
        while count < dataset._num_examples:
            start = count % dataset._num_examples
            images, embeddings_batchs, filenames, _ =\
                dataset.next_batch_test(self.batch_size, start, 1)
            print('count = ', count, 'start = ', start)
            for i in range(len(embeddings_batchs)):
                samples_batchs = []
                # Generate up to 16 images for each sentence,
                # with randomness from noise z and conditioning augmentation.
                for j in range(np.minimum(16, cfg.TRAIN.NUM_COPY)):
                    samples = sess.run(self.fake_images,
                                       {self.embeddings: embeddings_batchs[i]})
                    samples_batchs.append(samples)
                self.save_super_images(images, samples_batchs,
                                       filenames, i, save_dir,
                                       subset)

            count += self.batch_size 

Example 6

def custom_crop(img, bbox):
    # bbox = [x-left, y-top, width, height]
    imsiz = img.shape  # [height, width, channel]
    # if box[0] + box[2] >= imsiz[1] or\
    #     box[1] + box[3] >= imsiz[0] or\
    #     box[0] <= 0 or\
    #     box[1] <= 0:
    #     box[0] = np.maximum(0, box[0])
    #     box[1] = np.maximum(0, box[1])
    #     box[2] = np.minimum(imsiz[1] - box[0] - 1, box[2])
    #     box[3] = np.minimum(imsiz[0] - box[1] - 1, box[3])
    center_x = int((2 * bbox[0] + bbox[2]) / 2)
    center_y = int((2 * bbox[1] + bbox[3]) / 2)
    R = int(np.maximum(bbox[2], bbox[3]) * 0.75)
    y1 = np.maximum(0, center_y - R)
    y2 = np.minimum(imsiz[0], center_y + R)
    x1 = np.maximum(0, center_x - R)
    x2 = np.minimum(imsiz[1], center_x + R)
    img_cropped = img[y1:y2, x1:x2, :]
    return img_cropped 

Example 7

def append(self, x):
        self._count += 1

        if self._count == 1:
            self.m = x
            self.last_m = x
            self.last_s = 0.0
            self.min = x
            self.max = x
        else:
            self.m = self.last_m + (x - self.last_m) / self._count
            self.s = self.last_s + (x - self.last_m) * (x - self.m)

            self.last_m = self.m
            self.last_s = self.s

            self.min = numpy.minimum(self.min, x)
            self.max = numpy.maximum(self.max, x) 

Example 8

def __init__(self, card, skill_up=0):
		skill = card.skill
		if skill is None: 
			self.trigger_type = None
			return
		# Skill type
		self.trigger_type = skill.trigger_type
		self.effect_type = skill.effect_type
		# Skill data
		self.cooldown = skill.trigger_count
		self.prob = np.minimum(100, (1+skill_up) * skill.odds) / 100
		self.reward = skill.reward
		self.duration = skill.reward if self.effect_type in ['Weak Judge', 'Strong Judge'] else 0
		# Skill gem
		self.score_boost, self.heal_boost = 1, 0
		for gem in card.equipped_gems:
			if gem.effect == 'score_boost':
				self.score_boost = gem.value
			elif gem.effect == 'heal_boost':
				self.heal_boost = gem.value
		self.init_state() 

Example 9

def to_LLTB(self, filename='cards.666', rare=True):
		def gen_row(index, c):
			card = raw_card_dict[str(c['card_id'])].copy()
			card.idolize(c['idolized'])
			card.level_up(skill_level=c['skill'].level, slot_num=c['slot_num'])
			# name = str(index)+':'+card.card_name if card.card_name != ' ' else 'NOTSET'
			name = str(index)+':'+card.member_name if card.card_name != ' ' else 'NOTSET'
			info = [TB_member_dict[card.member_name], name] + adjusted_card_stat(card) + \
					get_skill_stat(card.skill, card.skill.level) + get_cskill_stat(card.cskill) + [card.slot_num]
			return '\t'.join([str(x) for x in info])+'\t'
		df = self.owned_card.copy()
		df = df[df.apply(lambda x: x.member_name in list(TB_member_dict.keys()), axis=1)]
		if rare:
			df = df[df.apply(lambda x: not x.promo and (x.rarity in ['UR','SSR'] or (x.rarity == 'SR' and x.idolized)), axis=1)]
		df = df[['card_id', 'idolized', 'skill', 'slot_num']]
		card_info = '\n'.join([gen_row(i,c) for i, c in df.iterrows()])
		gem_info = '-2 ' + ' '.join([str(np.minimum(self.owned_gem[x],9)) for x in TB_gem_skill_list])
		with codecs.open(filename, 'w', encoding='utf-16') as fp:
			fp.write('\n\n'.join([card_info, gem_info]))
		print('file saved to', filename) 

Example 10

def get_IOU(rec1, rec2):
    """
    rec1&2 are both np.arrays with x_center, y_center, width, height
    should work with any dimension as long as the last dimension is 4
    """

    rec1_xy_max = rec1[..., :2] + (rec1[..., 2:4] - 1) / 2
    rec1_xy_min = rec1[..., :2] - (rec1[..., 2:4] - 1) / 2

    rec2_xy_max = rec2[..., :2] + (rec2[..., 2:4] - 1) / 2
    rec2_xy_min = rec2[..., :2] - (rec2[..., 2:4] - 1) / 2

    intersec_max = np.minimum(rec1_xy_max, rec2_xy_max)
    intersec_min = np.maximum(rec1_xy_min, rec2_xy_min)

    intersec_wh = np.maximum(intersec_max - intersec_min + 1, 0)

    intersec_area = intersec_wh[..., 0] * intersec_wh[..., 1]

    area1 = rec1[..., 2] * rec1[..., 3]
    area2 = rec2[..., 2] * rec2[..., 3]

    union = area1 + area2 - intersec_area

    return intersec_area / union 
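
A one-line sanity check (toy input of my own): a box compared with itself should give an IoU of exactly 1.

import numpy as np

rec = np.array([5., 5., 4., 4.])   # x_center, y_center, width, height
get_IOU(rec, rec)                  # 1.0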

Example 11

def overlap_ratio(boxes1, boxes2):
  # find intersection bbox
  x_int_bot = np.maximum(boxes1[:, 0], boxes2[0])
  x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2], boxes2[0] + boxes2[2])
  y_int_bot = np.maximum(boxes1[:, 1], boxes2[1])
  y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3], boxes2[1] + boxes2[3])

  # find intersection area
  dx = x_int_top - x_int_bot
  dy = y_int_top - y_int_bot
  area_int = np.where(np.logical_and(dx>0, dy>0), dx * dy, np.zeros_like(dx))

  # find union
  area_union = boxes1[:,2] * boxes1[:,3] + boxes2[2] * boxes2[3] - area_int

  # find overlap ratio
  ratio = np.where(area_union > 0, area_int/area_union, np.zeros_like(area_int))
  return ratio


###########################################################################
#                          overlap_ratio of two bboxes                    #
########################################################################### 

Example 12

def overlap_ratio_pair(boxes1, boxes2):
  # find intersection bbox
  x_int_bot = np.maximum(boxes1[:, 0], boxes2[:, 0])
  x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2], boxes2[:, 0] + boxes2[:, 2])
  y_int_bot = np.maximum(boxes1[:, 1], boxes2[:, 1])
  y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3], boxes2[:, 1] + boxes2[:, 3])

  # find intersection area
  dx = x_int_top - x_int_bot
  dy = y_int_top - y_int_bot
  area_int = np.where(np.logical_and(dx>0, dy>0), dx * dy, np.zeros_like(dx))

  # find union
  area_union = boxes1[:,2] * boxes1[:,3] + boxes2[:, 2] * boxes2[:, 3] - area_int

  # find overlap ratio
  ratio = np.where(area_union > 0, area_int/area_union, np.zeros_like(area_int))
  return ratio 

Example 13

def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
    """
    TensorFlow implementation for apply perturbations to input features based
    on salency maps
    :param i: index of first selected feature
    :param j: index of second selected feature
    :param X: a matrix containing our input features for our sample
    :param increase: boolean; true if we are increasing pixels, false otherwise
    :param theta: delta for each feature adjustment
    :param clip_min: mininum value for a feature in our sample
    :param clip_max: maximum value for a feature in our sample
    : return: a perturbed input feature matrix for a target class
    """

    # perturb our input sample
    if increase:
        X[0, i] = np.minimum(clip_max, X[0, i] + theta)
        X[0, j] = np.minimum(clip_max, X[0, j] + theta)
    else:
        X[0, i] = np.maximum(clip_min, X[0, i] - theta)
        X[0, j] = np.maximum(clip_min, X[0, j] - theta)

    return X 
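
A small worked example (values chosen by me) showing how np.minimum caps the increased features at clip_max:

import numpy as np

X = np.array([[0.2, 0.9, 0.4]])
apply_perturbations(0, 1, X, increase=True, theta=0.3, clip_min=0.0, clip_max=1.0)
# array([[0.5, 1. , 0.4]])  -- feature 1 would reach 1.2 but saturates at clip_max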

Example 14

def iou_loss(p, t):
    # print "pass"
    tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
    overlaps_t0 = T.maximum(tp[:, 0, :], tt[:, 0, :])
    overlaps_t1 = T.minimum(tp[:, 1, :], tt[:, 1, :])
    intersection = overlaps_t1 - overlaps_t0
    bool_overlap = T.min(intersection, axis=1) > 0
    intersection = intersection[:, 0] * intersection[:, 1]
    intersection = T.maximum(intersection, np.float32(0.))
    dims_p = tp[:, 1, :] - tp[:, 0, :]
    areas_p = dims_p[:, 0] * dims_p[:, 1]
    dims_t = tt[:, 1, :] - tt[:, 0, :]
    areas_t = dims_t[:, 0] * dims_t[:, 1]
    union = areas_p + areas_t - intersection
    loss = 1. - T.minimum(
        T.exp(T.log(T.abs_(intersection)) -
              T.log(T.abs_(union) + np.float32(1e-5))),
        np.float32(1.)
    )
    # return loss
    return T.mean(loss) 

Example 15

def iou_loss_val(p, t):
    tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
    overlaps = np.zeros_like(tp, dtype=np.float32)
    overlaps[:, 0, :] = np.maximum(tp[:, 0, :], tt[:, 0, :])
    overlaps[:, 1, :] = np.minimum(tp[:, 1, :], tt[:, 1, :])
    intersection = overlaps[:, 1, :] - overlaps[:, 0, :]
    bool_overlap = np.min(intersection, axis=1) > 0
    intersection = intersection[:, 0] * intersection[:, 1]
    intersection = np.maximum(intersection, 0.)
    # print "bool", bool_overlap
    # print "Int", intersection
    dims_p = tp[:, 1, :] - tp[:, 0, :]
    areas_p = dims_p[:, 0] * dims_p[:, 1]
    dims_t = tt[:, 1, :] - tt[:, 0, :]
    areas_t = dims_t[:, 0] * dims_t[:, 1]
    union = areas_p + areas_t - intersection
    # print "un", union
    loss = 1. - np.minimum(
        np.exp(np.log(np.abs(intersection)) - np.log(np.abs(union) + 1e-5)),
        1.
    )
    # print loss
    return np.mean(loss) 

Example 16

def _exp_single(x):
    """Sanitized exponential function.
    Since this method internally calls np.exp, which can easily
    overflow, it suppresses all warnings.

    #XXX: at some point we might want to let ``suppress_warnings``
    # specify exactly which types of warnings it should filter.

    Parameters
    ----------

    x : float or int
        The number to exponentiate.


    Returns
    -------

    val : float
        the exp of x
    """
    val = np.minimum(__max_exp__, np.exp(x))
    return val 
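
__max_exp__ is a module-level constant defined elsewhere in the source; a plausible stand-in (my assumption, not the project's actual value) is the largest finite float64, so that overflowed exponentials are capped instead of returning inf:

import numpy as np

__max_exp__ = np.finfo(np.float64).max   # hypothetical stand-in for the real constant

np.minimum(__max_exp__, np.exp(710))     # ~1.798e308 instead of inf (np.exp warns on overflow)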

Example 17

def reshape_to_yolo_size(img):
    input_width, input_height = img.size
    min_pixel = 320.0
    #max_pixel = 608
    max_pixel = 1024.0

    min_edge = np.minimum(input_width, input_height)
    if min_edge < min_pixel:
        input_width *= min_pixel / min_edge
        input_height *= min_pixel / min_edge
    max_edge = np.maximum(input_width, input_height)
    if max_edge > max_pixel:
        input_width *= max_pixel / max_edge
        input_height *= max_pixel / max_edge

    input_width = int(input_width / 32.0 + round(input_width % 32 / 32.0)) * 32
    input_height = int(input_height / 32.0 + round(input_height % 32 / 32.0)) * 32
    img = img.resize((input_width, input_height))

    return img 

Example 18

def test_reduce(self):
        dflt = np.typecodes['AllFloat']
        dint = np.typecodes['AllInteger']
        seq1 = np.arange(11)
        seq2 = seq1[::-1]
        func = np.minimum.reduce
        for dt in dint:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 0)
            assert_equal(func(tmp2), 0)
        for dt in dflt:
            tmp1 = seq1.astype(dt)
            tmp2 = seq2.astype(dt)
            assert_equal(func(tmp1), 0)
            assert_equal(func(tmp2), 0)
            tmp1[::2] = np.nan
            tmp2[::2] = np.nan
            assert_equal(func(tmp1), np.nan)
            assert_equal(func(tmp2), np.nan) 
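
The behavior under test, in isolation: np.minimum.reduce folds the ufunc over an array (equivalent to arr.min()) and, unlike np.fmin.reduce, propagates NaN.

import numpy as np

np.minimum.reduce(np.array([3, 1, 2]))         # 1
np.minimum.reduce(np.array([3., np.nan, 2.]))  # nan
np.fmin.reduce(np.array([3., np.nan, 2.]))     # 2.0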

Example 19

def test_truth_table_logical(self):
        # 2, 3 and 4 serves as true values
        input1 = [0, 0, 3, 2]
        input2 = [0, 4, 0, 2]

        typecodes = (np.typecodes['AllFloat']
                     + np.typecodes['AllInteger']
                     + '?')     # boolean
        for dtype in map(np.dtype, typecodes):
            arg1 = np.asarray(input1, dtype=dtype)
            arg2 = np.asarray(input2, dtype=dtype)

            # OR
            out = [False, True, True, True]
            for func in (np.logical_or, np.maximum):
                assert_equal(func(arg1, arg2).astype(bool), out)
            # AND
            out = [False, False, False, True]
            for func in (np.logical_and, np.minimum):
                assert_equal(func(arg1, arg2).astype(bool), out)
            # XOR
            out = [False, True, True, False]
            for func in (np.logical_xor, np.not_equal):
                assert_equal(func(arg1, arg2).astype(bool), out) 
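
The identity being tested: on truth values, np.minimum behaves like logical AND and np.maximum like logical OR. A standalone illustration:

import numpy as np

a = np.array([0, 0, 3, 2])
b = np.array([0, 4, 0, 2])
np.minimum(a, b).astype(bool)   # [False False False  True]  -- same as np.logical_and
np.maximum(a, b).astype(bool)   # [False  True  True  True]  -- same as np.logical_or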

Example 20

def test_wrap(self):

        class with_wrap(object):
            def __array__(self):
                return np.zeros(1)

            def __array_wrap__(self, arr, context):
                r = with_wrap()
                r.arr = arr
                r.context = context
                return r

        a = with_wrap()
        x = ncu.minimum(a, a)
        assert_equal(x.arr, np.zeros(1))
        func, args, i = x.context
        self.assertTrue(func is ncu.minimum)
        self.assertEqual(len(args), 2)
        assert_equal(args[0], a)
        assert_equal(args[1], a)
        self.assertEqual(i, 0) 

Example 21

def test_minimummaximum_func(self):
        a = np.ones((2, 2))
        aminimum = minimum(a, a)
        self.assertTrue(isinstance(aminimum, MaskedArray))
        assert_equal(aminimum, np.minimum(a, a))

        aminimum = minimum.outer(a, a)
        self.assertTrue(isinstance(aminimum, MaskedArray))
        assert_equal(aminimum, np.minimum.outer(a, a))

        amaximum = maximum(a, a)
        self.assertTrue(isinstance(amaximum, MaskedArray))
        assert_equal(amaximum, np.maximum(a, a))

        amaximum = maximum.outer(a, a)
        self.assertTrue(isinstance(amaximum, MaskedArray))
        assert_equal(amaximum, np.maximum.outer(a, a)) 
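
For reference, np.minimum.outer(a, b) forms the matrix of pairwise minima, min(a[i], b[j]):

import numpy as np

np.minimum.outer([1, 5], [2, 3, 4])
# array([[1, 1, 1],
#        [2, 3, 4]])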

Example 22

def make_sampling_table(size, sampling_factor=1e-5):
    '''This generates an array where the ith element
    is the probability that a word of rank i would be sampled,
    according to the sampling distribution used in word2vec.

    The word2vec formula is:
        p(word) = min(1, sqrt(word.frequency/sampling_factor) / (word.frequency/sampling_factor))

    We assume that the word frequencies follow Zipf's law (s=1) to derive
    a numerical approximation of frequency(rank):
       frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))
        where gamma is the Euler-Mascheroni constant.

    # Arguments
        size: int, number of possible words to sample.
        sampling_factor: float, the sampling factor used in the word2vec formula.
    '''
    gamma = 0.577
    rank = np.array(list(range(size)))
    rank[0] = 1
    inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1./(12.*rank)
    f = sampling_factor * inv_fq

    return np.minimum(1., f / np.sqrt(f)) 
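
A quick look at the resulting table (my own invocation): frequent words get tiny keep probabilities, while the tail is clipped to 1 by np.minimum:

import numpy as np

table = make_sampling_table(int(1e6))
table[1]        # ~0.003 -- the most frequent word is rarely kept
table[999999]   # 1.0    -- rare words are always kept (clipped by np.minimum)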

Example 23

def batch_iou(proposals, gt):
    bboxes = np.transpose(proposals).reshape((4, -1, 1))
    bboxes_x1 = bboxes[0]
    bboxes_x2 = bboxes[0]+bboxes[2]
    bboxes_y1 = bboxes[1]
    bboxes_y2 = bboxes[1]+bboxes[3]

    gt = np.transpose(gt).reshape((4, 1, -1))
    gt_x1 = gt[0]
    gt_x2 = gt[0]+gt[2]
    gt_y1 = gt[1]
    gt_y2 = gt[1]+gt[3]

    widths = np.maximum(0, np.minimum(bboxes_x2, gt_x2) -
                        np.maximum(bboxes_x1, gt_x1))
    heights = np.maximum(0, np.minimum(bboxes_y2, gt_y2) -
                         np.maximum(bboxes_y1, gt_y1))
    intersection = widths*heights
    union = bboxes[2]*bboxes[3] + gt[2]*gt[3] - intersection
    return (intersection / union) 

Example 24

def decode_bboxes(tcoords, anchors):
    var_x, var_y, var_w, var_h = config['prior_variance']
    t_x = tcoords[:, 0]*var_x
    t_y = tcoords[:, 1]*var_y
    t_w = tcoords[:, 2]*var_w
    t_h = tcoords[:, 3]*var_h
    a_w = anchors[:, 2]
    a_h = anchors[:, 3]
    a_x = anchors[:, 0]+a_w/2
    a_y = anchors[:, 1]+a_h/2
    x = t_x*a_w + a_x
    y = t_y*a_h + a_y
    w = tf.exp(t_w)*a_w
    h = tf.exp(t_h)*a_h

    x1 = tf.maximum(0., x - w/2)
    y1 = tf.maximum(0., y - h/2)
    x2 = tf.minimum(1., w + x1)
    y2 = tf.minimum(1., h + y1)
    return tf.stack([y1, x1, y2, x2], axis=1) 

Example 25

def plot(self, image, filename, save_sample):
        """ Plot an image."""
        image = np.minimum(image, 1)
        image = np.maximum(image, -1)
        image = np.squeeze(image)
        # Scale to 0..255.
        imin, imax = image.min(), image.max()
        image = (image - imin) * 255. / (imax - imin) + .5
        image = image.astype(np.uint8)
        if save_sample:
            try:
                Image.fromarray(image).save(filename)
            except Exception as e:
                print("Warning: could not sample to ", filename, ".  Please check permissions and make sure the path exists")
                print(e)
        GlobalViewer.update(image) 

Example 26

def sample_output(self, val):
        vocabulary = self.get_vocabulary()
        if self.one_hot:
            vals = [ np.argmax(r) for r in val ]
            ox_val = [vocabulary[obj] for obj in list(vals)]
            string = "".join(ox_val)
            return string
        else:
            val = np.reshape(val, [-1])
            val *= len(vocabulary)/2.0
            val += len(vocabulary)/2.0
            val = np.round(val)

            val = np.maximum(0, val)
            val = np.minimum(len(vocabulary)-1, val)

            ox_val = [self.get_character(obj) for obj in list(val)]
            string = "".join(ox_val)
            return string 

Example 27

def iterate(self, x, eps=32, alp=1.0):
        num_iter = min(eps + 4, 1.25 * eps)
        loss = 1.0
        x = np.copy(x)
        while loss > 0 and num_iter > 0:
            inp = x.reshape((1,) + inp_size)
            outs = self.f_outputs([inp, 0])
            loss = outs[0]
            print('Loss: ', loss)
            grads = np.array(outs[1:]).reshape(inp_size)
            s_grads = np.sign(grads)
            adv_x = x - alp * s_grads
            sub_x = np.minimum(x + eps, np.maximum(x - eps, adv_x))
            next_x = preprocess_img(np.clip(deprocess_img(sub_x), 0.0, 255.0))
            x = next_x
            confidence = self.mdl.predict(x.reshape((1,) + inp_size))[0][0]
            print('Current confidence value: ', confidence) #'minval =', min_val)
            yield (deprocess_img(x), confidence)
            num_iter -= 1 
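
The np.minimum/np.maximum pair on the sub_x line is the usual idiom for projecting an adversarial sample back into the L-infinity ball of radius eps around x; in isolation:

import numpy as np

x = np.array([10., 20., 30.])
adv_x = np.array([50., 15., 30.])   # first element has drifted far from x
eps = 8.0
np.minimum(x + eps, np.maximum(x - eps, adv_x))   # array([18., 15., 30.])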

Example 28

def calc_stoi_from_spec(clean_spec, degraded_spec, analysis_len=30):
    freq_bins = np.size(clean_spec, 0)
    frames = np.size(clean_spec, 1)
    n_segments = frames - analysis_len + 1
    x = np.zeros((freq_bins, n_segments, analysis_len), dtype=np.float32)
    y = np.zeros((freq_bins, n_segments, analysis_len), dtype=np.float32)
    for j in range(0, freq_bins):
        for m in range(analysis_len - 1, frames, 1):
            seg = m - analysis_len + 1  # segment index; indexing by m alone would overrun the array
            x[j, seg] = clean_spec[j, seg:m + 1]
            y[j, seg] = degraded_spec[j, seg:m + 1]
            y[j, seg] = np.minimum(np.linalg.norm(x[j, seg, :])/np.linalg.norm(y[j, seg, :])*y[j, seg, :],
                                   (1.+np.power(10., 15./20.))*x[j, seg, :])  # y is normalized and clipped
    x_mean = np.mean(x, axis=(0, 1))
    y_mean = np.mean(y, axis=(0, 1))
    score = 0.
    for j in range(0, freq_bins):
        for seg in range(0, n_segments):
            score += np.dot(x[j, seg, :] - x_mean, y[j, seg, :] - y_mean) / \
                     (np.linalg.norm(x[j, seg, :] - x_mean) * np.linalg.norm(y[j, seg, :] - y_mean))
    score /= (freq_bins * analysis_len)
    return score

Example 29

def get_fft_mel_mat(nfft, sr=8000, nfilts=None, width=1.0, minfrq=20, maxfrq=None, constamp=0):
    if nfilts is None:
        nfilts = nfft
    if maxfrq is None:
        maxfrq = sr // 2
    wts = np.zeros((nfilts, nfft//2+1))
    fftfrqs = np.arange(0, nfft//2+1) / (1. * nfft) * (sr)
    minmel = hz2mel(minfrq)
    maxmel = hz2mel(maxfrq)
    binfrqs = mel2hz(minmel + np.arange(0, nfilts+2) / (nfilts+1.) * (maxmel - minmel))
    # binbin = np.round(binfrqs / maxfrq * nfft)
    for i in range(nfilts):
        fs = binfrqs[[i+0, i+1, i+2]]
        fs = fs[1] + width * (fs - fs[1])
        loslope = (fftfrqs - fs[0]) / (fs[1] - fs[0])
        hislope = (fs[2] - fftfrqs) / (fs[2] - fs[1])
        wts[i, :] = np.maximum(0, np.minimum(loslope, hislope))
    return wts 
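
The np.maximum(0, np.minimum(loslope, hislope)) pattern builds one triangular mel filter per row: the rising and falling slopes cross at the peak, and the minimum of the two, clipped at zero, traces the triangle. A reduced illustration:

import numpy as np

f = np.linspace(0., 10., 11)
lo = (f - 2.) / 3.           # rises through 0 at f=2, reaches 1 at f=5
hi = (8. - f) / 3.           # falls through 1 at f=5, reaches 0 at f=8
np.maximum(0, np.minimum(lo, hi))   # triangle: zero outside [2, 8], peak 1 at f=5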

Example 30

def preprocess(self, strokes):
    """Remove entries from strokes having > max_seq_length points."""
    raw_data = []
    seq_len = []
    count_data = 0

    for i in range(len(strokes)):
      data = strokes[i]
      if len(data) <= (self.max_seq_length):
        count_data += 1
        # removes large gaps from the data
        data = np.minimum(data, self.limit)
        data = np.maximum(data, -self.limit)
        data = np.array(data, dtype=np.float32)
        data[:, 0:2] /= self.scale_factor
        raw_data.append(data)
        seq_len.append(len(data))
    seq_len = np.array(seq_len)  # nstrokes for each sketch
    idx = np.argsort(seq_len)
    self.strokes = []
    for i in range(len(seq_len)):
      self.strokes.append(raw_data[idx[i]])
    print("total images <= max_seq_len is %d" % count_data)
    self.num_batches = int(count_data / self.batch_size) 
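
The np.minimum/np.maximum pair here is equivalent to np.clip(data, -self.limit, self.limit); a minimal check:

import numpy as np

data = np.array([-2000., 3., 999999.])
limit = 1000
np.maximum(np.minimum(data, limit), -limit)   # array([-1000., 3., 1000.])
# identical to np.clip(data, -limit, limit)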

Example 31

def flow(self, Kc, Ks, Kz, Ka, numexpr):
        zeros = np.zeros
        where = np.where
        min = np.minimum
        max = np.maximum
        abs = np.absolute
        arctan = np.arctan
        sin = np.sin

        center = (slice(   1,   -1,None),slice(   1,  -1,None))
        rock = self.center
        ds = self.scour[center]    
        rcc = rock[center]
        rcc = rcc - ds * Kz
        # there isn't really a bottom to the rock, but negative values look ugly
        rock[center] = where(rcc < 0, 0, rcc)

Example 32

def _init_grid(self):
        """ Initializes the grid. Currently works best for multiples of 3 which
        are also odd. For now let's only test on 9x9 grids. """

        self.grid.fill(OPEN)
        w1 = np.maximum(self.length // 3, 1)
        w2 = np.minimum(2 * (self.length // 3), self.length)
        self.grid[:, w1:w2].fill(WALL)
        self.grid[self.length // 2, :].fill(OPEN)

        sx = np.random.randint(0, self.length)
        sy = np.random.randint(0, w1)
        gx = np.random.randint(0, self.length)
        gy = np.random.randint(w2, self.length)
        s_agent = (sx,sy)
        s_goal = (gx,gy)

        assert s_agent != s_goal
        assert self.grid[s_agent] != WALL
        assert self.grid[s_goal] != WALL
        self.grid[s_agent] = AGENT
        self.grid[s_goal] = GOAL
        s_start = s_agent
        return s_start, s_agent, s_goal 

Example 33

def sample_weights(sizeX, sizeY, sparsity, scale, rng):
    """
    Initialization that fixes the largest singular value.
    """
    sizeX = int(sizeX)
    sizeY = int(sizeY)
    sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in range(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.uniform(low=-scale, high=scale, size=(sparsity,))
        vals_norm = numpy.sqrt((new_vals**2).sum())
        new_vals = scale*new_vals/vals_norm
        values[dx, perm[:sparsity]] = new_vals
    _,v,_ = numpy.linalg.svd(values)
    values = scale * values/v[0]
    return values.astype(theano.config.floatX) 

Example 34

def sample_weights_orth(sizeX, sizeY, sparsity, scale, rng):
    sizeX = int(sizeX)
    sizeY = int(sizeY)

    assert sizeX == sizeY, 'for orthogonal init, sizeX == sizeY'

    if sparsity < 0:
        sparsity = sizeY
    else:
        sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in range(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals

    u,s,v = numpy.linalg.svd(values)
    values = u * scale

    return values.astype(theano.config.floatX) 

Example 35

def find_null_offset(xpts, powers, default=0.0):
    """Finds the offset corresponding to the minimum power using a fit to the measured data"""
    def model(x, a, b, c):
        return a*(x - b)**2 + c
    powers = np.power(10, powers/10.)
    min_idx = np.argmin(powers)
    try:
        fit = curve_fit(model, xpts, powers, p0=[1, xpts[min_idx], powers[min_idx]])
    except RuntimeError:
        logger.warning("Mixer null offset fit failed.")
        return default, np.zeros(len(powers))
    best_offset = np.real(fit[0][1])
    best_offset = np.minimum(best_offset, xpts[-1])
    best_offset = np.maximum(best_offset, xpts[0])
    xpts_fine = np.linspace(xpts[0],xpts[-1],101)
    fit_pts = np.array([np.real(model(x, *fit[0])) for x in xpts_fine])
    if min(fit_pts)<0: fit_pts-=min(fit_pts)-1e-10 #prevent log of a negative number
    return best_offset, xpts_fine, 10*np.log10(fit_pts) 

Example 36

def quadratic_polynomial_coefficients(self, t):
    """The posterior mean ``mu`` of this GP is piece-wise cubic. Return the
    coefficients of the **quadratic** polynomial that is the **derivative** of
    ``mu`` at ``t``.
    
    This is used to find the minimum of the cubic polynomial in
    ``gp.find_minima()``."""
    
    assert isinstance(t, (float, np.float32, np.float64))
    assert t not in self.ts # at the observations, polynomial is ambiguous
    
    d1, d2, d3 = self.dmu(t), self.d2mu(t), self.d3mu(t)
    a = .5*d3
    b = d2 - d3*t
    c = d1 - d2*t + 0.5*d3*t**2
    
    return (a, b, c) 
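
For reference, the coefficients follow from the second-order Taylor expansion of the derivative around ``t``, where d_k denotes the k-th derivative of mu at t (a, b, c match the code above):

\mu'(s) = d_1 + d_2 (s - t) + \tfrac{1}{2} d_3 (s - t)^2
        = \underbrace{\tfrac{1}{2} d_3}_{a}\, s^2
          + \underbrace{(d_2 - d_3 t)}_{b}\, s
          + \underbrace{\bigl(d_1 - d_2 t + \tfrac{1}{2} d_3 t^2\bigr)}_{c}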

Example 37

def vis_detections(im, class_name, dets, thresh=0.5):
    """Visual debugging of detections."""

    im = im[:, :, (2, 1, 0)]
    for i in xrange(np.minimum(5, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]

        if score > thresh:
            
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show() 

Example 38

def getPosteriorMeanAndVar(self, diagKTestTest, KtrainTest, post, intercept=0):
		L = post['L']
		if (np.size(L) == 0): raise Exception('L is an empty array') #possible to compute it here
		Lchol = np.all((np.all(np.tril(L, -1)==0, axis=0) & (np.diag(L)>0)) & np.isreal(np.diag(L)))
		ns = diagKTestTest.shape[0]
		nperbatch = 5000
		nact = 0
		
		#allocate mem
		fmu = np.zeros(ns)	#column vector (of length ns) of predictive latent means
		fs2 = np.zeros(ns)	#column vector (of length ns) of predictive latent variances
		while (nact<(ns-1)):
			id = np.arange(nact, np.minimum(nact+nperbatch, ns))
			kss = diagKTestTest[id]		
			Ks = KtrainTest[:, id]
			if (len(post['alpha'].shape) == 1):
				try: Fmu = intercept[id] + Ks.T.dot(post['alpha'])
				except: Fmu = intercept + Ks.T.dot(post['alpha'])
				fmu[id] = Fmu
			else:
				try: Fmu = intercept[id][:, np.newaxis] + Ks.T.dot(post['alpha'])
				except: Fmu = intercept + Ks.T.dot(post['alpha'])
				fmu[id] = Fmu.mean(axis=1)
			if Lchol:
				V = la.solve_triangular(L, Ks*np.tile(post['sW'], (id.shape[0], 1)).T, trans=1, check_finite=False, overwrite_b=True)
				fs2[id] = kss - np.sum(V**2, axis=0)                       #predictive variances						
			else:
				fs2[id] = kss + np.sum(Ks * (L.dot(Ks)), axis=0)		   #predictive variances
			fs2[id] = np.maximum(fs2[id],0)  #remove numerical noise i.e. negative variances		
			nact = id[-1]    #set counter to index of last processed data point
			
		return fmu, fs2 

Example 39

def adjust_pvalue_bh(p):
    """ Multiple testing correction of p-values using the Benjamini-Hochberg procedure """
    descending = np.argsort(p)[::-1]
    # q = p * N / k where p = p-value, N = # tests, k = p-value rank
    scale = float(len(p)) / np.arange(len(p), 0, -1)
    q = np.minimum(1, np.minimum.accumulate(scale * p[descending]))

    # Return to original order
    return q[np.argsort(descending)] 
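
A worked example (toy p-values of my own): with N = 4 tests, the three small p-values all share the adjusted value 0.01 * 4 / 1 = 0.04 after the cumulative-minimum step.

import numpy as np

p = np.array([0.01, 0.02, 0.03, 0.5])
adjust_pvalue_bh(p)   # array([0.04, 0.04, 0.04, 0.5 ])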