Python numpy module: array_equal() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.array_equal().
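
As a quick standalone reference before the project excerpts (this snippet is ours, not drawn from the projects below): numpy.array_equal(a1, a2) returns a plain Python bool that is True only when both arrays have the same shape and all corresponding elements compare equal.

import numpy as np

a = np.array([1, 2, 3])
print(np.array_equal(a, [1, 2, 3]))                  # True: same shape, same elements
print(np.array_equal(a, [1, 2]))                     # False: shapes differ, no broadcasting
print(np.array_equal(a, np.array([1.0, 2.0, 3.0])))  # True: dtypes may differ if values compare equal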

Project: Deep360Pilot-optical-flow    Author: yenchenlin    | project source | file source
def gen_pruned_features(name):
    print name
    feature_dir = 'data/feature_' + args.domain + \
        '_' + str(args.n_boxes) + 'boxes/' + name + '/'
    n_clips = len(glob.glob(feature_dir + BOX_FEATURE + '*.npy'))
    for clip in xrange(1, n_clips+1):
        pruned_boxes = np.load(feature_dir + BOX_FEATURE + '{:04d}.npy'.format(clip)) # (50, args.n_boxes, 4)
        roisavg = np.load(feature_dir + 'roisavg{:04d}.npy'.format(clip)) # (50, args.n_boxes, 512)

        pruned_roisavg = np.zeros((50, args.n_boxes, 512))
        for frame in xrange(50):
            for box_id in xrange(args.n_boxes):
                if not np.array_equal(pruned_boxes[frame][box_id], np.zeros((4))):
                    pruned_roisavg[frame][box_id] = roisavg[frame][box_id]

        np.save('{}pruned_roisavg{:04d}'.format(feature_dir, clip), pruned_roisavg)
Project: postgis-t    Author: postgis-t    | project source | file source
def test_intersection_com_mock_2(self):
        ls = LineString([(1, 1, 9.48024060e+08), (2, 2, 9.49363260e+08),
                         (3, 1, 9.51868860e+08)])

        poly = Polygon([(1, 1), (1, 3), (4, 3), (4, 1), (1, 1)])

        self.traj2.intersection_shapely = MagicMock(return_value=ls)
        response = self.traj2.intersection_shapely(poly)

        ls = np.array(ls)

        trajMock = self.traj2.to_Trajectory(response)
        traj = Trajectory(ls[:, 0], ls[:, 1], ls[:, 2])

        assert (np.array_equal(trajMock.getX(), traj.getX()))
        assert (np.array_equal(trajMock.getY(), traj.getY()))
        assert (np.array_equal(trajMock.getTime(), traj.getTime()))
Project: core-framework    Author: RedhawkSDR    | project source | file source
def _test_FileSink(self, format):
        filename = self._tempfileName('sink_%s' % format)

        complexData = format.startswith('C')
        typecode = format[1]
        dataFormat, dataType = self.TYPEMAP[typecode]
        indata = [dataType(x) for x in xrange(16)]

        source = sb.DataSource(dataFormat=dataFormat)
        sink = sb.FileSink(filename, midasFile=True)
        source.connect(sink)
        sb.start()
        source.push(indata, complexData=complexData, EOS=True)
        sink.waitForEOS()

        hdr, outdata = bluefile.read(filename)
        self.assertEqual(hdr['format'], format)
        if complexData:
            if dataFormat in ('double', 'float'):
                outdata = list(self._flatten(outdata))
            else:
                outdata = outdata.flatten()
        self.assertTrue(numpy.array_equal(indata, outdata), msg="Format '%s' %s != %s" % (format, indata, outdata))
Project: core-framework    Author: RedhawkSDR    | project source | file source
def _test_FileSource(self, format):
        filename = self._tempfileName('source_%s' % format)

        complexData = format.startswith('C')
        typecode = format[1]
        dataFormat, dataType = self.TYPEMAP[typecode]

        indata = self._generateSourceData(format, 16)
        hdr = bluefile.header(1000, format)
        bluefile.write(filename, hdr, indata)

        source = sb.FileSource(filename, midasFile=True, dataFormat=dataFormat)
        sink = sb.DataSink()
        source.connect(sink)
        sb.start()
        outdata = sink.getData(eos_block=True)
        if complexData:
            self.assertEqual(sink.sri().mode, 1)
            if dataFormat in ('float', 'double'):
                outdata = bulkio_helpers.bulkioComplexToPythonComplexList(outdata)
            else:
                outdata = numpy.reshape(outdata, (len(outdata)/2,2))
        else:
            self.assertEqual(sink.sri().mode, 0)
        self.assertTrue(numpy.array_equal(indata, outdata), msg='%s != %s' % (indata, outdata))
Project: Lattice-Based-Signatures    Author: krishnacharya    | project source | file source
def Verify(**kwargs):
    '''
        Verification for the signature
        i/p:
        msg: the string sent by the sender
        (z,c): vectors in Zq, the signature
        A : numpy array, the verification key of dimension n x m
        T : the matrix AS mod q; used in the verification of the signature
    '''
    msg, z, c, A, T, sd, eta, m, k, q = kwargs['msg'], kwargs['z'], kwargs['c'], kwargs['A'], kwargs['T'], kwargs['sd'], kwargs['eta'], kwargs['m'], kwargs['k'], kwargs['q']
    norm_bound = eta * sd * np.sqrt(m)
    # checks for norm of z being small and that H(Az-Tc mod q,msg) hashes to c
    vec = util.vector_to_Zq(np.array(np.matmul(A,z) - np.matmul(T,c)), q)
    hashedList = util.hash_to_baseb(vec, msg, 3, k)
    print hashedList, c             
    if np.sqrt(z.dot(z)) <= norm_bound and np.array_equal(c, hashedList):
        return True
    else:
        return False
Project: cloud-volume    Author: seung-lab    | project source | file source
def test_non_aligned_read():
    delete_layer()
    cv, data = create_layer(size=(128,64,64,1), offset=(0,0,0))

    # the last dimension is the number of channels
    assert cv[31:65,0:64,0:64].shape == (34,64,64,1) 
    assert np.all(cv[31:65,0:64,0:64] == data[31:65,:64,:64,:])

    # read a single pixel
    delete_layer()
    cv, data = create_layer(size=(64,64,64,1), offset=(0,0,0))
    # the last dimension is the number of channels
    assert cv[22:23,22:23,22:23].shape == (1,1,1,1) 
    assert np.all(cv[22:23,22:23,22:23] == data[22:23,22:23,22:23,:])

    # Test steps (negative steps are not supported)
    img1 = cv[::2, ::2, ::2, :]
    img2 = cv[:, :, :, :][::2, ::2, ::2, :]
    assert np.array_equal(img1, img2)
Project: cloud-volume    Author: seung-lab    | project source | file source
def __setitem__(self, slices, img):
    imgshape = list(img.shape)
    if len(imgshape) == 3:
      imgshape = imgshape + [ self.num_channels ]

    maxsize = list(self.bounds.maxpt) + [ self.num_channels ]
    minsize = list(self.bounds.minpt) + [ 0 ]
    slices = generate_slices(slices, minsize, maxsize)
    bbox = Bbox.from_slices(slices)

    slice_shape = list(bbox.size3()) + [ slices[3].stop - slices[3].start ]

    if not np.array_equal(imgshape, slice_shape):
      raise ValueError("Illegal slicing, Image shape: {} != {} Slice Shape".format(imgshape, slice_shape))

    if self.path.protocol == 'boss':
      self.upload_boss_image(img, bbox.minpt)
    else:
      self.upload_image(img, bbox.minpt)
Project: cxflow-tensorflow    Author: Cognexa    | project source | file source
def test_dense_to_sparse(self):
        """ Test if `dense_to_sparse` works properly."""

        with tf.Session().as_default():
            dense = tf.constant([[1., 2., 0.], [0., 0., 3.]], dtype=tf.float32)

            sparse = dense_to_sparse(dense)

            self.assertTrue(np.array_equal(sparse.indices.eval(), np.array([[0, 0], [0, 1], [1, 2]])))
            self.assertTrue(np.array_equal(sparse.values.eval(), np.array([1., 2., 3.])))

            mask = tf.constant([[0, 1, 0], [1, 0, 0]], dtype=tf.int32)

            masked = dense_to_sparse(dense, mask)
            self.assertTrue(np.array_equal(masked.indices.eval(), np.array([[0, 1], [1, 0]])))
            self.assertTrue(np.array_equal(masked.values.eval(), np.array([2., 0.])))
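
The test pins down the contract of dense_to_sparse: without a mask it keeps the nonzero entries, and with a mask it keeps exactly the masked positions, even when their value is 0. A minimal sketch consistent with those assertions (not necessarily the library's actual implementation), written against the TF1-style graph API used above:

import tensorflow as tf

def dense_to_sparse_sketch(dense, mask=None):
    # Keep nonzero entries, or exactly the positions selected by `mask`.
    keep = tf.not_equal(dense, 0) if mask is None else tf.cast(mask, tf.bool)
    indices = tf.where(keep)                # int64 coordinates in row-major order
    values = tf.gather_nd(dense, indices)   # the kept entries, masked zeros included
    return tf.SparseTensor(indices, values, tf.shape(dense, out_type=tf.int64))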
Project: question-classification-cnn-rnn-attention    Author: sefira    | project source | file source
def __init__(self, test_model=False, verify_model=True):
        model = Word2Vec.load(modelfile)

        if(test_model):
            acc = model.accuracy(questionfile)
            logger.info("Test model " + modelfile + " in " + questionfile)

        self.vector_size = model.vector_size
        self.vocab_size = len(model.wv.vocab) + 1
        self.word2index = self.GetWord2Index(model)
        self.index2word = self.GetIndex2Word(model)
        self.wordvector = self.GetWordVector(model)

        if(verify_model):
            logger.info("Verifing imported word2vec model")
            random_state = check_random_state(12)
            check_index = random_state.randint(low=0, high=self.vocab_size-2,size=1000)
            for index in check_index:
                word_wv = model.wv.index2word[index]
                word_our = self.index2word[index+1]
                #print(index, word_wv, word_our)
                assert word_wv == word_our
                assert model.wv.vocab[word_our].index == self.word2index[word_our] - 1
                assert np.array_equal(model.wv[word_our], self.wordvector[self.word2index[word_our]])
            logger.info("Imported word2vec model is verified")
Project: search-MjoLniR    Author: wikimedia    | project source | file source
def test_vectorized_jaccard_sim():
    # The vectorized version of jaccard similarity is 20x faster, but it is
    # harder to understand. Compute it the simple way and compare to the
    # vectorized version
    def jaccard_sim(X, Y):
        assert len(X) == len(Y)
        a = np.sum((X == 1) & (Y == 1))
        d = np.sum((X == 0) & (Y == 0))
        return a / float(len(X) - d)

    def binary_sim(mat):
        n_rows = mat.shape[0]
        out = np.empty((n_rows, n_rows), dtype=np.float64)
        for i in range(n_rows):
            out[i][i] = 1.
            for j in range(0, i):
                out[i][j] = jaccard_sim(mat[i], mat[j])
                out[j][i] = out[i][j]
        return out

    # Simulate 200 queries with 100 shared page ids
    matrix = np.random.rand(200, 100) > 0.7
    simple = binary_sim(matrix)
    vectorized = mjolnir.norm_query._binary_sim(matrix)
    assert np.array_equal(simple, vectorized)
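
The vectorized counterpart, mjolnir.norm_query._binary_sim, is not reproduced on this page. As a rough sketch only (the usual matrix-product formulation, not necessarily the project's actual code), the agreement counts a and d can be computed for all row pairs at once:

import numpy as np

def binary_sim_vectorized(mat):
    """Pairwise Jaccard similarity between the rows of a boolean matrix."""
    m = mat.astype(np.float64)
    both_one = m.dot(m.T)                   # a: columns where both rows are 1
    both_zero = (1.0 - m).dot((1.0 - m).T)  # d: columns where both rows are 0
    return both_one / (mat.shape[1] - both_zero)  # a / (n - d), as in jaccard_sim

Since every count is a small integer represented exactly in float64, this should match the loop version bitwise whenever no row is all zeros, which is why the test can assert np.array_equal rather than np.allclose.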
Project: pycoal    Author: capstone-coal    | project source | file source
def test_classify_image_in_memory():

    # create mineral classifier instance with image loading enabled
    mc = mineral.MineralClassification(libraryFilenames[0], in_memory=True)

    # for each of the test images
    for image_file_name in test_classifyImage_testFilenames:

        # classify the test image
        classified_file_name = image_file_name[:-4] + "_class_test.hdr"
        mc.classify_image(image_file_name, classified_file_name)
        actual = spectral.open_image(classified_file_name)

        # classified image for comparison
        expected = spectral.open_image(image_file_name[:-4] + "_class.hdr")

        # verify that every pixel has the same classification
        assert numpy.array_equal(expected.asarray(), actual.asarray())

# test files for classify image threshold and subset tests
Project: DBCV    Author: christopherjenness    | project source | file source
def _core_dist(point, neighbors, dist_function):
    """
    Computes the core distance of a point.
    Core distance is the inverse density of an object.

    Args:
        point (np.array): array of dimensions (n_features,)
            point to compute core distance of
        neighbors (np.ndarray): array of dimensions (n_neighbors, n_features):
            array of all other points in object class
        dist_function (func): function to determine distance between objects
            func args must be [np.array, np.array] where each array is a point

    Returns: core_dist (float)
        inverse density of point
    """
    n_features = np.shape(point)[0]
    n_neighbors = np.shape(neighbors)[1]

    numerator = 0
    for row in neighbors:
        if not np.array_equal(point, row):
            numerator += (1/dist_function(point, row))**n_features
    core_dist = (numerator / (n_neighbors)) ** (-1/n_features)
    return core_dist
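
A hypothetical usage sketch (the Euclidean dist_function and the toy data are inventions for illustration, not part of the DBCV source). Note that the function above reads n_neighbors from np.shape(neighbors)[1], which under the documented (n_neighbors, n_features) layout is the number of features rather than the number of neighbors, so treat the absolute scale as implementation-specific:

import numpy as np

def euclidean(a, b):
    # Straight-line distance between two points.
    return np.linalg.norm(a - b)

point = np.array([0.0, 0.0])
neighbors = np.array([[1.0, 0.0], [0.0, 2.0], [3.0, 4.0]])
print(_core_dist(point, neighbors, euclidean))  # inverse-density core distance of `point`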
Project: postgis-t    Author: postgis-t    | project source | file source
def test_intersection_com_mock(self):
        ls = LineString([(1.5, 1, 9.48024060e+08), (2, 2, 9.49363260e+08),
                         (3, 2, 9.51868860e+08), (4, 3, 9.53208060e+08)])

        poly = Polygon([(1, 1), (1, 3), (4, 3), (4, 1), (1, 1)])

        self.traj.intersection_shapely = MagicMock(return_value=ls)
        response = self.traj.intersection_shapely(poly)

        ls = np.array(ls)
        trajMock = self.traj.to_Trajectory(response)
        traj = Trajectory(ls[:, 0], ls[:, 1], ls[:, 2])

        assert (np.array_equal(trajMock.getX(), traj.getX()))
        assert (np.array_equal(trajMock.getY(), traj.getY()))
        assert (np.array_equal(trajMock.getTime(), traj.getTime()))
Project: radar    Author: amoose136    | project source | file source
def test_array_equal(self):
        res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
        assert_(res)
        assert_(type(res) is bool)
        res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
        assert_(not res)
        assert_(type(res) is bool)
        res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
        assert_(not res)
        assert_(type(res) is bool)
        res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
        assert_(not res)
        assert_(type(res) is bool)
        res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))
        assert_(res)
        assert_(type(res) is bool)
        res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),
                             np.array([('a', 1)], dtype='S1,u4'))
        assert_(res)
        assert_(type(res) is bool)
Project: psp    Author: cmap    | project source | file source
def test_slice_metadata_using_already_sliced_data_df(self):
        data = pd.DataFrame([[2, 3], [5, 6], [11, 12]],
                            index=["a", "b", "d"],
                            columns=["f", "g"])
        row_meta = pd.DataFrame([["rm1", "rm2"],["rm3", "rm4"],
                                 ["rm5", "rm6"],["rm7", "rm8"]],
                                index=["a", "b", "c", "d"],
                                columns=["row_field1", "row_field2"])
        col_meta = pd.DataFrame([["cm1", "cm2"],["cm3", "cm4"],["cm5", "cm6"]],
                                index=["e", "f", "g"],
                                columns=["col_field1", "col_field2"])
        e_row_meta = pd.DataFrame([["rm1", "rm2"],["rm3", "rm4"],["rm7", "rm8"]],
                                index=["a", "b", "d"],
                                columns=["row_field1", "row_field2"])
        e_col_meta = pd.DataFrame([["cm3", "cm4"],["cm5", "cm6"]],
                                index=["f", "g"],
                                columns=["col_field1", "col_field2"])

        out_gct = dry.slice_metadata_using_already_sliced_data_df(data, row_meta, col_meta)
        self.assertTrue(np.array_equal(out_gct.row_metadata_df, e_row_meta),
                        "row_metadata_df is wrong: \n{}".format(out_gct.row_metadata_df))
        self.assertTrue(np.array_equal(out_gct.col_metadata_df, e_col_meta),
                        "col_metadata_df is wrong: \n{}".format(out_gct.col_metadata_df))
Project: psp    Author: cmap    | project source | file source
def test_make_norm_ndarray(self):
        ROW_SUBSET_FIELD = "pr_probe_normalization_group"
        COL_SUBSET_FIELD = "det_normalization_group_vector"
        row_df = pd.DataFrame(np.array([["8350", "1"], ["8350", "1"],
                                        ["8350", "2"], ["8350", "2"]]),
                              index=["r1", "r2", "r3", "r4"],
                              columns=["pr_gene_id", "pr_probe_normalization_group"])
        col_df = pd.DataFrame(np.array([["G-0022", "1,1"], ["G-0022", "1,1"], ["G-0022", "1,2"],
                                        ["G-0022", "2,2"], ["G-0022", "2,2"]]),
                              index=["c1", "c2", "c3", "c4", "c5"],
                              columns=["det_plate", "det_normalization_group_vector"])
        e_norm_ndarray = np.array([[1, 1, 1, 2, 2],
                                   [1, 1, 1, 2, 2],
                                   [1, 1, 2, 2, 2],
                                   [1, 1, 2, 2, 2]])
        norm_ndarray = tear.make_norm_ndarray(row_df, col_df, ROW_SUBSET_FIELD, COL_SUBSET_FIELD)
        self.assertTrue(np.array_equal(norm_ndarray, e_norm_ndarray),
                        ("\nExpected out:\n{} " +
                         "\nActual out:\n{}").format(e_norm_ndarray, norm_ndarray))
Project: shoelace    Author: rjagerman    | project source | file source
def test_save_txt_and_load_txt():

    # Get sample data set
    dataset = get_dataset()

    # Get in-memory string handle
    with StringIO() as handle:

        # Save text to handle
        dataset.save_txt(handle)
        handle.seek(0)

        # Load text from handle
        dataset2 = LtrDataset.load_txt(handle)

        # Assert that everything loaded correctly
        assert_true(np.array_equal(dataset.feature_vectors,
                                   dataset2.feature_vectors))
        assert_true(np.array_equal(dataset.relevance_scores,
                                   dataset2.relevance_scores))
        assert_true(np.array_equal(dataset.query_pointer,
                                   dataset2.query_pointer))
        assert_true(np.array_equal(dataset.query_ids, dataset2.query_ids))
Project: shoelace    Author: rjagerman    | project source | file source
def test_save_and_load():

    # Get sample data set
    dataset = get_dataset()

    # Get in-memory binary handle
    with BytesIO() as handle:

        # Save binary to handle
        dataset.save(handle)
        handle.seek(0)

        # Load binary from handle
        dataset2 = LtrDataset.load(handle)

        # Assert that everything loaded correctly
        assert_true(np.array_equal(dataset.feature_vectors,
                                   dataset2.feature_vectors))
        assert_true(np.array_equal(dataset.relevance_scores,
                                   dataset2.relevance_scores))
        assert_true(np.array_equal(dataset.query_pointer,
                                   dataset2.query_pointer))
        assert_true(np.array_equal(dataset.query_ids, dataset2.query_ids))
Project: shoelace    Author: rjagerman    | project source | file source
def test_backward():

    # Construct test data
    x = Variable(np.array([5., 3., 3., 1., 0.]))
    g = Variable(np.ones(5))
    expected_result = np.array([0.7717692057972512, 0.562087881852882,
                                1.4058826163342215, 0.9213241007090265,
                                1.3389361953066183])

    # Generate object
    lcse = LogCumsumExp()

    # Run forward and backward pass
    lcse.forward((x.data,))
    result = lcse.backward((x.data, ), (g.data, ))

    # Assert that the result equals the expected result
    assert_true(np.array_equal(result[0], expected_result))
Project: ababe    Author: unkcpz    | project source | file source
def d2_at_Z(self, z=15.0):
        def is_z(ax):
            for axis in np.array([[z,0,0],[0,z,0],[0,0,z]]):
                if np.array_equal(ax, axis):
                    return True
            return False

        if is_z(self._lattice[0]):
            self.swap_axis((2,1,0))
            return None
        elif is_z(self._lattice[1]):
            self.swap_axis((0,2,1))
            return None
        else:
            # print("DO NOTHING...")
            return None
Project: brainiak    Author: brainiak    | project source | file source
def test_prepare_fcma_data():
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    mask = io.load_boolean_mask(mask_file)
    conditions = io.load_labels(epoch_file)
    raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
    expected_raw_data = np.load(expected_dir / 'expected_raw_data.npy')
    assert len(raw_data) == len(expected_raw_data), \
        'numbers of epochs do not match in test_prepare_fcma_data'
    for idx in range(len(raw_data)):
        assert np.allclose(raw_data[idx], expected_raw_data[idx]), \
            'raw data do not match in test_prepare_fcma_data'
    assert np.array_equal(labels, expected_labels), \
        'the labels do not match in test_prepare_fcma_data'
    from brainiak.fcma.preprocessing import RandomType
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    random_raw_data, _, _ = prepare_fcma_data(images, conditions, mask,
                                              random=RandomType.REPRODUCIBLE)
    assert len(random_raw_data) == len(expected_raw_data), \
        'numbers of epochs do not match in test_prepare_fcma_data'
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    random_raw_data, _, _ = prepare_fcma_data(images, conditions, mask,
                                              random=RandomType.UNREPRODUCIBLE)
    assert len(random_raw_data) == len(expected_raw_data), \
        'numbers of epochs do not match in test_prepare_fcma_data'
Project: brainiak    Author: brainiak    | project source | file source
def test_weighted_var():
    es = brainiak.eventseg.event.EventSegment(2)

    D = np.zeros((8, 4))
    for t in range(4):
        D[t, :] = (1/np.sqrt(4/3)) * np.array([-1, -1, 1, 1])
    for t in range(4, 8):
        D[t, :] = (1 / np.sqrt(4 / 3)) * np.array([1, 1, -1, -1])
    mean_pat = D[[0, 4], :].T

    weights = np.zeros((8, 2))
    weights[:, 0] = [1, 1, 1, 1, 0, 0, 0, 0]
    weights[:, 1] = [0, 0, 0, 0, 1, 1, 1, 1]
    assert np.array_equal(
        es.calc_weighted_event_var(D, weights, mean_pat), [0, 0]),\
        "Failed to compute variance with 0/1 weights"

    weights[:, 0] = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5]
    weights[:, 1] = [0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1]
    true_var = (4 * 0.5 * 12)/(6 - 5/6) * np.ones(2) / 4
    assert np.allclose(
        es.calc_weighted_event_var(D, weights, mean_pat), true_var),\
        "Failed to compute variance with fractional weights"
Project: deep-learning-nd    Author: RyanCCollins    | project source | file source
def test_sentence_to_seq(sentence_to_seq):
    sentence = 'this is a test sentence'
    vocab_to_int = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, 'this': 3, 'is': 6, 'a': 5, 'sentence': 4}

    output = sentence_to_seq(sentence, vocab_to_int)

    assert len(output) == 5,\
        'Wrong length. Found a length of {}'.format(len(output))

    assert output[3] == 2,\
        'Missing <UNK> id.'

    assert np.array_equal(output, [3, 6, 5, 2, 4]),\
        'Incorrect output. Found {}'.format(output)

    _print_success_message()
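
For reference, a one-line implementation that satisfies these assertions (the course's reference solution may differ, for example in how it normalizes case):

def sentence_to_seq_sketch(sentence, vocab_to_int):
    # Map each word to its id, falling back to the <UNK> id for unknown words.
    return [vocab_to_int.get(word, vocab_to_int['<UNK>']) for word in sentence.lower().split()]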
Project: uncover-ml    Author: GeoscienceAustralia    | project source | file source
def test_CentreTransform_caching(make_random_data):

    # Generate an initial set of data
    x, mu, std = make_random_data

    # Apply the CentreTransform to the first dataset to preserve the mean
    x_copy = x.copy()
    center_transformer = CentreTransform()
    center_transformer(x_copy)

    # Now apply the center transform to a matrix that has been translated
    x_translated = x + 3.0 * mu
    x_expected = x_translated - mu
    x_produced = center_transformer(x_translated)

    # Check that the transformer used the cached mean mu instead of the mean
    # of the translated data, which is 4 * mu in this case
    assert np.array_equal(x_expected, x_produced)
Project: uncover-ml    Author: GeoscienceAustralia    | project source | file source
def test_StandardiseTransform_caching(make_random_data):

    # Generate an initial set of data
    x, mu, std = make_random_data

    # Apply the CentreTransform to the first dataset to preserve the mean
    x_copy = x.copy()
    center_transformer = CentreTransform()
    center_transformer(x_copy)

    # Now apply the center transform to a matrix translated by 3 * mu
    x_translated = x + 3.0 * mu
    x_expected = x_translated - mu
    x_produced = center_transformer(x_translated)

    # Check that the transformer used the cached mean mu instead of the mean
    # of the translated data, which is 4 * mu in this case
    assert np.array_equal(x_expected, x_produced)
Project: describe    Author: SINGROUP    | project source | file source
def test_atoms_to_system(self):
        """Tests that an ASE Atoms is succesfully converted to a System object.
        """
        class NaClFactory(SimpleCubicFactory):
            "A factory for creating NaCl (B1, Rocksalt) lattices."

            bravais_basis = [[0, 0, 0], [0, 0, 0.5], [0, 0.5, 0], [0, 0.5, 0.5],
                            [0.5, 0, 0], [0.5, 0, 0.5], [0.5, 0.5, 0],
                            [0.5, 0.5, 0.5]]
            element_basis = (0, 1, 1, 0, 1, 0, 0, 1)

        nacl = NaClFactory()(symbol=["Na", "Cl"], latticeconstant=5.6402)
        system = System.from_atoms(nacl)

        self.assertTrue(np.array_equal(nacl.get_positions(), system.get_positions()))
        self.assertTrue(np.array_equal(nacl.get_initial_charges(), system.get_initial_charges()))
        self.assertTrue(np.array_equal(nacl.get_atomic_numbers(), system.get_atomic_numbers()))
        self.assertTrue(np.array_equal(nacl.get_chemical_symbols(), system.get_chemical_symbols()))
        self.assertTrue(np.array_equal(nacl.get_cell(), system.get_cell()))
        self.assertTrue(np.array_equal(nacl.get_pbc(), system.get_pbc()))
        self.assertTrue(np.array_equal(nacl.get_scaled_positions(), system.get_scaled_positions()))
Project: describe    Author: SINGROUP    | project source | file source
def test_matrix(self):
        desc = CoulombMatrix(n_atoms_max=5, flatten=False)
        cm = desc.create(H2O)

        # Test against assumed values
        q = H2O.get_initial_charges()
        p = H2O.get_positions()
        norm = np.linalg.norm
        assumed = np.array(
            [
                [0.5*q[0]**2.4,              q[0]*q[1]/(norm(p[0]-p[1])),  q[0]*q[2]/(norm(p[0]-p[2]))],
                [q[1]*q[0]/(norm(p[1]-p[0])), 0.5*q[1]**2.4,               q[1]*q[2]/(norm(p[1]-p[2]))],
                [q[2]*q[0]/(norm(p[2]-p[0])), q[2]*q[1]/(norm(p[2]-p[1])), 0.5*q[2]**2.4],
            ]
        )
        zeros = np.zeros((5, 5))
        zeros[:3, :3] = assumed
        assumed = zeros

        self.assertTrue(np.array_equal(cm, assumed))
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_cputensor_add():
    """TODO."""
    Y = ng.make_axis(length=2)
    M = ng.make_axis(length=2)
    N = ng.make_axis(length=2)

    a = ng.constant(np.array([3, 5], dtype=np.float32), [Y])
    b = ng.constant(np.array([3, 5], dtype=np.float32), [Y])
    c = a + b
    with executor(c) as ex:
        result = ex()
    assert np.array_equal(result, [6, 10])

    np_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
    np_b = np.array([[1, 2], [3, 4]], dtype=np.float32)
    np_c = np_a + np_b

    a = ng.constant(np_a, [M, N])
    b = ng.constant(np_b, [M, N])
    c = a + b
    with executor(c) as ex:
        result = ex()
    assert np.array_equal(result, np_c)
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_cputensor_dot():
    Y = ng.make_axis(length=2)
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_b = np.array([[1, 2], [2, 3], [3, 4]], dtype=np.float32)
    np_c = np.dot(np_a, np_b)

    a = ng.constant(np_a, [M, N]).named('a')
    b = ng.constant(np_b, [N, Y]).named('b')
    c = ng.dot(a, b)

    with executor(c) as ex:
        result = ex()

    assert np.array_equal(result, np_c)
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_cputensor_multiply_constant():
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_c = np.multiply(np_a, 2)

    a = ng.constant(np_a, [M, N])
    b = ng.constant(2)
    c = ng.multiply(a, b)

    with executor(c) as ex:
        result = ex()
    print(result)
    assert np.array_equal(result, np_c)
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_cputensor_mlp():
    """TODO."""
    D = ng.make_axis(length=3)
    H = ng.make_axis(length=2)
    N = ng.make_axis(length=1)

    np_x = np.array([[1, 2, 3]], dtype=np.float32)
    np_w = np.array([[1, 1], [1, 1], [1, 1]], dtype=np.float32)
    np_b = np.array([1, 2], dtype=np.float32)
    np_c = np.dot(np_x, np_w) + np_b

    x = ng.constant(np_x, [N, D])
    w = ng.constant(np_w, [D, H])
    b = ng.constant(np_b, [H])
    wx = ng.dot(x, w)
    c = wx + b
    with executor(c) as ex:
        result = ex()
    print(result)
    print(np_c)
    assert np.array_equal(result, np_c)
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_idempotent_axes_a():
    """
    Test test axes transformations with autodiff, case a, reference test
    """
    with ExecutorFactory() as ex:
        axes = ng.make_axes([ng.make_axis(3), ng.make_axis(1)])

        w = ng.variable(axes, initial_value=np.ones((3, 1)))
        result = w + w

        result = ng.cast_axes(result, axes)
        cost = ng.sum(result, reduction_axes=axes)
        grad = ng.deriv(cost, w)

        grad_comp = ex.executor(grad)
        cost_comp = ex.executor(cost)

        cost_comp_val = cost_comp()
        grad_comp_val = grad_comp()
        grad_comp_np = np.ones((3, 1)) * 2.

        assert cost_comp_val == 6.0
        assert np.array_equal(grad_comp_val, grad_comp_np)
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_idempotent_axes_b():
    """
    Test test axes transformations with autodiff, case b, with broadcast applied
    to the same tensor
    """
    with ExecutorFactory() as ex:
        axes = ng.make_axes([ng.make_axis(3), ng.make_axis(1)])

        w = ng.variable(axes, initial_value=np.ones((3, 1)))
        l = ng.broadcast(w, axes)
        r = ng.broadcast(w, axes)
        result = ng.add(l, r)

        result = ng.cast_axes(result, axes)
        cost = ng.sum(result, reduction_axes=axes)
        grad = ng.deriv(cost, w)

        grad_comp = ex.executor(grad)
        cost_comp = ex.executor(cost)

        assert cost_comp() == 6.0
        assert np.array_equal(grad_comp(), np.ones((3, 1)) * 2.)
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_rolling_window(input_seq, batch_size, seq_len, strides):
    # This test checks if the rolling window works
    # We check if the first two samples in each batch are strided by strides

    # Truncate the input sequence so that the trailing section that does not
    # fit into a full batch is thrown away
    input_seq = input_seq[:seq_len * batch_size * (len(input_seq) // seq_len // batch_size)]
    data_array = {'X': input_seq,
                  'y': np.roll(input_seq, axis=0, shift=-1)}
    time_steps = seq_len
    it_array = SequentialArrayIterator(data_arrays=data_array, time_steps=time_steps,
                                       stride=strides, batch_size=batch_size, tgt_key='y',
                                       shuffle=False)
    for idx, iter_val in enumerate(it_array):
        # Start of the array needs to be time_steps * idx
        assert np.array_equal(iter_val['X'][0, strides:time_steps],
                              iter_val['X'][1, :time_steps - strides])
        assert np.array_equal(iter_val['y'][0, strides:time_steps],
                              iter_val['y'][1, :time_steps - strides])
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_np_layout_shuffle():
    # set up
    bsz = 8
    C, H, W, N = 3, 28, 28, bsz
    C, R, S, K = 3, 5, 5, 32

    # image dim-shuffle
    np_tf_image = np.random.randn(N, H, W, C)
    np_ng_image = np_layout_shuffle(np_tf_image, "NHWC", "CDHWN")
    np_tf_image_reverse = np_layout_shuffle(np_ng_image, "CDHWN", "NHWC")
    assert np.array_equal(np_tf_image, np_tf_image_reverse)

    # filter dim-shuffle
    np_tf_weight = np.random.randn(R, S, C, K)
    np_ng_weight = np_layout_shuffle(np_tf_weight, "RSCK", "CTRSK")
    np_tf_weight_reverse = np_layout_shuffle(np_ng_weight, "CTRSK", "RSCK")
    assert np.array_equal(np_tf_weight, np_tf_weight_reverse)
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_reduce_max_keepdims():
    data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)

    assert np.array_equal(import_and_compute('ReduceMax', data), np.max(data, keepdims=True))
    assert np.array_equal(import_and_compute('ReduceMax', data, axes=(0,)),
                          np.max(data, keepdims=True, axis=(0,)))
    assert np.array_equal(import_and_compute('ReduceMax', data, axes=(1,)),
                          np.max(data, keepdims=True, axis=(1,)))
    assert np.array_equal(import_and_compute('ReduceMax', data, axes=(2,)),
                          np.max(data, keepdims=True, axis=(2,)))

    assert np.array_equal(import_and_compute('ReduceMax', data, axes=(0, 1)),
                          np.max(data, keepdims=True, axis=(0, 1)))
    assert np.array_equal(import_and_compute('ReduceMax', data, axes=(0, 2)),
                          np.max(data, keepdims=True, axis=(0, 2)))
    assert np.array_equal(import_and_compute('ReduceMax', data, axes=(1, 2)),
                          np.max(data, keepdims=True, axis=(1, 2)))

    assert np.array_equal(import_and_compute('ReduceMax', data, axes=(0, 1, 2)),
                          np.max(data, keepdims=True, axis=(0, 1, 2)))
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_reduce_min():
    data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)

    assert np.array_equal(import_and_compute('ReduceMin', data), np.min(data, keepdims=True))
    assert np.array_equal(import_and_compute('ReduceMin', data, keepdims=0),
                          np.min(data, keepdims=False))

    assert np.array_equal(import_and_compute('ReduceMin', data, axes=(1,)),
                          np.min(data, keepdims=True, axis=(1,)))
    assert np.array_equal(import_and_compute('ReduceMin', data, axes=(1,), keepdims=0),
                          np.min(data, keepdims=False, axis=(1,)))

    assert np.array_equal(import_and_compute('ReduceMin', data, axes=(0, 2)),
                          np.min(data, keepdims=True, axis=(0, 2)))
    assert np.array_equal(import_and_compute('ReduceMin', data, axes=(0, 2), keepdims=0),
                          np.min(data, keepdims=False, axis=(0, 2)))

    assert np.array_equal(import_and_compute('ReduceMin', data, axes=(0, 1, 2)),
                          np.min(data, keepdims=True, axis=(0, 1, 2)))
    assert np.array_equal(import_and_compute('ReduceMin', data, axes=(0, 1, 2), keepdims=0),
                          np.min(data, keepdims=False, axis=(0, 1, 2)))
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_reduce_mean():
    data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)

    assert np.array_equal(import_and_compute('ReduceMean', data), np.mean(data, keepdims=True))
    assert np.array_equal(import_and_compute('ReduceMean', data, keepdims=0),
                          np.mean(data, keepdims=False))

    assert np.array_equal(import_and_compute('ReduceMean', data, axes=(1,)),
                          np.mean(data, keepdims=True, axis=(1,)))
    assert np.array_equal(import_and_compute('ReduceMean', data, axes=(1,), keepdims=0),
                          np.mean(data, keepdims=False, axis=(1,)))

    assert np.array_equal(import_and_compute('ReduceMean', data, axes=(0, 2)),
                          np.mean(data, keepdims=True, axis=(0, 2)))
    assert np.array_equal(import_and_compute('ReduceMean', data, axes=(0, 2), keepdims=0),
                          np.mean(data, keepdims=False, axis=(0, 2)))

    assert np.array_equal(import_and_compute('ReduceMean', data, axes=(0, 1, 2)),
                          np.mean(data, keepdims=True, axis=(0, 1, 2)))
    assert np.array_equal(import_and_compute('ReduceMean', data, axes=(0, 1, 2), keepdims=0),
                          np.mean(data, keepdims=False, axis=(0, 1, 2)))
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_reduce_sum():
    data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)

    assert np.array_equal(import_and_compute('ReduceSum', data), np.sum(data, keepdims=True))
    assert np.array_equal(import_and_compute('ReduceSum', data, keepdims=0),
                          np.sum(data, keepdims=False))

    assert np.array_equal(import_and_compute('ReduceSum', data, axes=(1,)),
                          np.sum(data, keepdims=True, axis=(1,)))
    assert np.array_equal(import_and_compute('ReduceSum', data, axes=(1,), keepdims=0),
                          np.sum(data, keepdims=False, axis=(1,)))

    assert np.array_equal(import_and_compute('ReduceSum', data, axes=(0, 2)),
                          np.sum(data, keepdims=True, axis=(0, 2)))
    assert np.array_equal(import_and_compute('ReduceSum', data, axes=(0, 2), keepdims=0),
                          np.sum(data, keepdims=False, axis=(0, 2)))

    assert np.array_equal(import_and_compute('ReduceSum', data, axes=(0, 1, 2)),
                          np.sum(data, keepdims=True, axis=(0, 1, 2)))
    assert np.array_equal(import_and_compute('ReduceSum', data, axes=(0, 1, 2), keepdims=0),
                          np.sum(data, keepdims=False, axis=(0, 1, 2)))
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_reduce_prod():
    data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)

    assert np.array_equal(import_and_compute('ReduceProd', data), np.prod(data, keepdims=True))
    assert np.array_equal(import_and_compute('ReduceProd', data, keepdims=0),
                          np.prod(data, keepdims=False))

    assert np.array_equal(import_and_compute('ReduceProd', data, axes=(1,)),
                          np.prod(data, keepdims=True, axis=(1,)))
    assert np.array_equal(import_and_compute('ReduceProd', data, axes=(1,), keepdims=0),
                          np.prod(data, keepdims=False, axis=(1,)))

    assert np.array_equal(import_and_compute('ReduceProd', data, axes=(0, 2)),
                          np.prod(data, keepdims=True, axis=(0, 2)))
    assert np.array_equal(import_and_compute('ReduceProd', data, axes=(0, 2), keepdims=0),
                          np.prod(data, keepdims=False, axis=(0, 2)))

    assert np.array_equal(import_and_compute('ReduceProd', data, axes=(0, 1, 2)),
                          np.prod(data, keepdims=True, axis=(0, 1, 2)))
    assert np.array_equal(import_and_compute('ReduceProd', data, axes=(0, 1, 2), keepdims=0),
                          np.prod(data, keepdims=False, axis=(0, 1, 2)))
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_reduce_argmin():
    def argmin(ndarray, axis, keepdims=False):
        res = np.argmin(ndarray, axis=axis)
        if keepdims:
            res = np.expand_dims(res, axis=axis)
        return res

    data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)

    assert np.array_equal(import_and_compute('ArgMin', data, axis=0),
                          argmin(data, keepdims=True, axis=0))
    assert np.array_equal(import_and_compute('ArgMin', data, axis=0, keepdims=0),
                          argmin(data, keepdims=False, axis=0))
    assert np.array_equal(import_and_compute('ArgMin', data, axis=1),
                          argmin(data, keepdims=True, axis=1))
    assert np.array_equal(import_and_compute('ArgMin', data, axis=1, keepdims=0),
                          argmin(data, keepdims=False, axis=1))
    assert np.array_equal(import_and_compute('ArgMin', data, axis=2),
                          argmin(data, keepdims=True, axis=2))
    assert np.array_equal(import_and_compute('ArgMin', data, axis=2, keepdims=0),
                          argmin(data, keepdims=False, axis=2))
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_reduce_argmax():
    def argmax(ndarray, axis, keepdims=False):
        res = np.argmax(ndarray, axis=axis)
        if keepdims:
            res = np.expand_dims(res, axis=axis)
        return res

    data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)

    assert np.array_equal(import_and_compute('ArgMax', data, axis=0),
                          argmax(data, keepdims=True, axis=0))
    assert np.array_equal(import_and_compute('ArgMax', data, axis=0, keepdims=0),
                          argmax(data, keepdims=False, axis=0))
    assert np.array_equal(import_and_compute('ArgMax', data, axis=1),
                          argmax(data, keepdims=True, axis=1))
    assert np.array_equal(import_and_compute('ArgMax', data, axis=1, keepdims=0),
                          argmax(data, keepdims=False, axis=1))
    assert np.array_equal(import_and_compute('ArgMax', data, axis=2),
                          argmax(data, keepdims=True, axis=2))
    assert np.array_equal(import_and_compute('ArgMax', data, axis=2, keepdims=0),
                          argmax(data, keepdims=False, axis=2))
Project: ngraph    Author: NervanaSystems    | project source | file source
def execute_calculation(operands, first_operand, const_executor):
    iterations = len(operands) != 1
    for i in operands:
        _operands, expected_result, description = unpack_list(*i)
        if description:
            print("Description: ", description)
        print("Operands: ", _operands)
        print("Expected result: ", expected_result)
        flex_result = const_executor(*_operands)
        try:
            print("flex_result: {0:.30}".format(float(flex_result)))
        except TypeError:
            # exception for arrays
            np.set_printoptions(precision=30)
            print("flex_result: {}".format(flex_result))
        print("difference: ", flex_result - expected_result)
        if iterations:
            assert_allclose(flex_result, expected_result)
        elif not isinstance(first_operand, np.ndarray):
            assert flex_result == expected_result
        else:
            assert np.array_equal(flex_result, expected_result)
Project: rca-evaluation    Author: sieve-microservices    | project source | file source
def _kshape(x, k, initial_clustering=None):
    """
    >>> from numpy.random import seed; seed(0)
    >>> _kshape(np.array([[1,2,3,4], [0,1,2,3], [-1,1,-1,1], [1,2,2,3]]), 2)
    (array([0, 0, 1, 0]), array([[-1.2244258 , -0.35015476,  0.52411628,  1.05046429],
           [-0.8660254 ,  0.8660254 , -0.8660254 ,  0.8660254 ]]))
    """
    m = x.shape[0]

    if initial_clustering is not None:
        assert len(initial_clustering) == m, "Initial assignment does not match the number of rows"
        idx = initial_clustering
    else:
        idx = randint(0, k, size=m)

    print(idx)

    centroids = np.zeros((k,x.shape[1]))
    distances = np.empty((m, k))

    for _ in range(100):
        old_idx = idx
        for j in range(k):
            centroids[j] = _extract_shape(idx, x, j, centroids[j])

        for i in range(m):
             for j in range(k):
                 distances[i,j] = 1 - max(_ncc_c(x[i], centroids[j]))
        idx = distances.argmin(1)
        if np.array_equal(old_idx, idx):
            break

    print(idx)

    return idx, centroids
Project: core-framework    Author: RedhawkSDR    | project source | file source
def _test_FileSourceType2000(self, format, subsize):
        filename = self._tempfileName('source_2000_%s' % format)

        complexData = format.startswith('C')
        typecode = format[1]
        dataFormat, dataType = self.TYPEMAP[typecode]

        frames = 4
        indata = [self._generateSourceData(format, subsize) for x in xrange(frames)]
        hdr = bluefile.header(2000, format, subsize=subsize)
        bluefile.write(filename, hdr, indata)

        source = sb.FileSource(filename, midasFile=True, dataFormat=dataFormat)
        sink = sb.DataSink()
        source.connect(sink)
        sb.start()
        outdata = sink.getData(eos_block=True)
        if complexData:
            if format == 'CF':
                outdata = numpy.array(outdata, dtype=numpy.float32).view(numpy.complex64)
                outdata = numpy.reshape(outdata, (-1, subsize))
            elif format == 'CD':
                outdata = numpy.array(outdata, dtype=numpy.float64).view(numpy.complex128)
                outdata = numpy.reshape(outdata, (-1, subsize))
            else:
                outdata = numpy.reshape(outdata, (-1, subsize, 2))
            self.assertEqual(sink.sri().mode, 1)
        else:
            self.assertEqual(sink.sri().mode, 0)

        self.assertTrue(numpy.array_equal(indata, outdata), msg="Format '%s' %s != %s" % (format, indata, outdata))
Project: cellranger    Author: 10XGenomics    | project source | file source
def merge_h5(in_filenames, out_filename):
    """ Merge a list of h5 files """
    out_h5 = h5.File(out_filename, 'a')
    for filename in in_filenames:
        if filename is None:
            continue
        in_h5 = h5.File(filename, 'r')
        for name in in_h5.keys():
            # If the dataset already exists, the two copies must be equal
            # or one must be all-zero.
            if name in out_h5.keys():
                src_data, dest_data = in_h5[name][()], out_h5[name][()]
                if src_data.dtype.kind != 'S' and dest_data.dtype.kind != 'S':
                    # Both numeric
                    if not np.any(src_data):
                        # Source is all zero. Do nothing.
                        continue
                    elif not np.any(dest_data):
                        # Dest is all zero. Overwrite.
                        del out_h5[name]
                        h5.h5o.copy(in_h5.id, name, out_h5.id, name)
                    else:
                        # Both non-zero. Assert equality and do nothing.
                        assert np.array_equal(src_data, dest_data)
                else:
                    # Either are non-numeric. Assert equality and do nothing.
                    assert np.array_equal(src_data, dest_data)
            else:
                # Only exists in src. Copy to dest.
                h5.h5o.copy(in_h5.id, name, out_h5.id, name)

    out_h5.flush()
    out_h5.close()
Project: Lattice-Based-Signatures    Author: krishnacharya    | project source | file source
def Verify(**kwargs):
    msg, A, m, n, sd, q, eta, z, c, kappa = kwargs['msg'], kwargs['A'], kwargs['m'], kwargs['n'], kwargs['sd'], kwargs['q'], kwargs['eta'], kwargs['z'], kwargs['c'], kwargs['kappa']
    B2 = eta*sd*np.sqrt(m)
    reduced_prod = util.vector_to_Zq(np.matmul(A,z) + q*c, 2*q)
    #print np.sqrt(z.dot(z)),B2
    #print LA.norm(z,np.inf),float(q)/4
    if np.sqrt(z.dot(z)) > B2  or LA.norm(z,np.inf) >= float(q)/4:      
        return False    
    if np.array_equal(c, hash_iterative(np.array_str(reduced_prod)+msg, n, kappa)):
        return True
    return False
Project: alchemy    Author: voidrank    | project source | file source
def test_main(self):

        # set up
        mask = np.asarray([
            [0, 0, 0, 1, 1],
            [0, 0, 0, 1, 0],
            [0, 1, 1, 1, 1],
            [0, 0, 0, 0, 0]
        ])
        mask2 = np.ones((4, 5))
        masks = np.array([mask])

        # test area
        assert area(encode(mask)) == 7
        assert area(encode(masks)[0]) == 7
        assert np.array_equal(decode(encode(mask)), mask)

        # test iou
        assert isinstance(iou(encode(masks), encode(masks), [0]), np.ndarray)
        assert iou(encode(mask), encode(mask), [0]) == 1
        assert equal(iou(encode(np.array([mask, mask])), encode(mask2), [0]), 7.0/20).all()

        # test toBbox
        assert isinstance(toBbox(masks), np.ndarray)
        assert np.equal(toBbox(encode(mask)), np.array([1, 0, 4, 3])).all()
        assert np.equal(toBbox(encode(mask2)), np.array([0, 0, 5, 4])).all()