Python cv2 module, kmeans() example source code

The following 14 code examples, extracted from open-source Python projects, illustrate how to use cv2.kmeans().
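
Before the individual project snippets, here is a minimal, self-contained sketch of the calling pattern they all share. It uses the OpenCV 3+ signature (bestLabels passed explicitly as None) and a synthetic image in place of real input:

import cv2
import numpy as np

# Synthetic 100x100 BGR image standing in for real input data.
img = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)

# Flatten to an Nx3 float32 sample matrix, as cv2.kmeans() requires.
samples = np.float32(img.reshape((-1, 3)))

# Stop after 10 iterations or once the centers move by less than epsilon = 1.0.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 4

compactness, labels, centers = cv2.kmeans(
    samples, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

# Map every pixel to its cluster center and restore the original image shape.
quantized = np.uint8(centers)[labels.flatten()].reshape(img.shape)
print(quantized.shape)

The returned compactness value is the sum of squared distances from each sample to its assigned center; most of the examples below discard it.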

Project: OpenAI_Challenges    Author: AlwaysLearningDeeper
def k(screen):
        Z = screen.reshape((-1,3))

        # convert to np.float32
        Z = np.float32(Z)

        # define criteria, number of clusters(K) and apply kmeans()
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        K = 2
        ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)

        # Now convert back into uint8, and make original image
        center = np.uint8(center)
        res = center[label.flatten()]
        res2 = res.reshape((screen.shape))
        return res2
Project: Machine-Learning    Author: Jegathis
def color_quant(input,K,output):
    img = cv2.imread(input)
    Z = img.reshape((-1,3))
    # convert to np.float32
    Z = np.float32(Z)
    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)

    ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)

    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))

    cv2.imshow('res2',res2)
    cv2.waitKey(0)
    cv2.imwrite(output, res2)
    cv2.destroyAllWindows()
Project: action-recoginze    Author: WeiruZ
def k_means(self, a_frame, K=2):
        """
        :param a_frame:
        :param K:
        :return: np.ndarray draw the frame use K color's centers
        """
        Z = a_frame.reshape((-1, 1))
        Z = np.float32(Z)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        center = np.uint8(center)
        res = center[label.flatten()]
        res2 = res.reshape((a_frame.shape))

        return res2
Project: action-recoginze    Author: WeiruZ
def cluster(frame_matrix):
    new_frame_matrix = []
    i = 0
    for frame in frame_matrix:
        print "reader {} frame".format(i)
        i += 1
        Z = frame.reshape((-1, 1))
        Z = np.float32(Z)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        K = 2

        ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        center = np.uint8(center)
        res = center[label.flatten()]
        res2 = res.reshape((frame.shape))

        new_frame_matrix.append(res2)
        cv2.imshow('res2', res2)
        cv2.waitKey(1)
    cv2.destroyAllWindows()
    return new_frame_matrix
Project: object-classification    Author: HenrYxZ
def gen_codebook(dataset, descriptors, k = 64):
    """
    Generate a codebook of k codewords for the dataset.

    Args:
        dataset (Dataset object): An object that stores information about the dataset.
        descriptors (list of integer arrays): The descriptors for every class.
        k (integer): The number of clusters that are going to be calculated.

    Returns:
        numpy array of float32: The k cluster centers (codewords) for the dataset.
    """
    iterations = 10
    epsilon = 1.0
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, iterations, epsilon)
    descriptors = np.float32(descriptors)  # cv2.kmeans() requires float32 samples
    compactness, labels, centers = cv2.kmeans(descriptors, k, None, criteria, iterations, cv2.KMEANS_RANDOM_CENTERS)
    return centers
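
A codebook like this is normally used to vector-quantize new descriptors. The sketch below is illustrative rather than part of the project (the quantize name and the variables in the commented usage are assumptions); it assigns each descriptor to its nearest codeword with plain NumPy:

import numpy as np

def quantize(descriptors, centers):
    """Return the index of the nearest codeword for each descriptor."""
    # Pairwise squared distances between descriptors (N x D) and centers (K x D).
    d2 = ((descriptors[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    return d2.argmin(axis=1)

# Example: bag-of-words histogram of one image over a 64-word codebook.
# words = quantize(np.float32(image_descriptors), centers)
# hist, _ = np.histogram(words, bins=np.arange(65))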
Project: pyku    Author: dubvulture
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a Hough transform.
        :param grid: binary image
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1 degree for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)
        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape((lines.size // 2), 2)
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta to [-pi, pi] and negate rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            if self.debug:
                self.save_hough(lines, clmap)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Project: pyku    Author: dubvulture
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a Hough transform.
        :param grid: binary image
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1 degree for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)

        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape((lines.size // 2), 2)
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta to [-pi, pi] and negate rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Project: ATX    Author: NetEaseGame
def test_kmeans(img):
    ## K-means clustering
    z = img.reshape((-1, 3))
    z = np.float32(z)
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, label, center = cv2.kmeans(z, 20, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))
    cv2.imshow('preview', res2)
    cv2.waitKey()
Project: AutomatorX    Author: xiaoyaojjian
def test_kmeans(img):
    ## K-means clustering
    z = img.reshape((-1, 3))
    z = np.float32(z)
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, label, center = cv2.kmeans(z, 20, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))
    cv2.imshow('preview', res2)
    cv2.waitKey()
Project: car-detection    Author: mmetcalfe
def find_label_clusters(kitti_base, kittiLabels, shape, num_clusters, descriptors=None):
    if descriptors is None:
        progressbar = ProgressBar('Computing descriptors', max=len(kittiLabels))
        descriptors = []
        for label in kittiLabels:
            progressbar.next()
            img = getCroppedSampleFromLabel(kitti_base, label)
            # img = cv2.resize(img, (shape[1], shape[0]), interpolation=cv2.INTER_AREA)
            img = resizeSample(img, shape, label)
            hist = get_hog(img)
            descriptors.append(hist)
        progressbar.finish()
    else:
        print 'find_label_clusters,', 'Using supplied descriptors.'
        print len(kittiLabels), len(descriptors)
        assert(len(kittiLabels) == len(descriptors))

    # X = np.random.randint(25,50,(25,2))
    # Y = np.random.randint(60,85,(25,2))
    # Z = np.vstack((X,Y))

    # convert to np.float32
    Z = np.float32(descriptors)

    # define criteria and apply kmeans()
    K = num_clusters
    print 'find_label_clusters,', 'kmeans:', K
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    attempts = 10
    ret,label,center=cv2.kmeans(Z,K,None,criteria,attempts,cv2.KMEANS_RANDOM_CENTERS)
    # ret,label,center=cv2.kmeans(Z,2,criteria,attempts,cv2.KMEANS_PP_CENTERS)

    print 'ret:', ret
    # print 'label:', label
    # print 'center:', center

    # # Now separate the data, Note the flatten()
    # A = Z[label.ravel()==0]
    # B = Z[label.ravel()==1]

    clusters = partition(kittiLabels, label)
    return clusters
    # # Plot the data
    # from matplotlib import pyplot as plt
    # plt.scatter(A[:,0],A[:,1])
    # plt.scatter(B[:,0],B[:,1],c = 'r')
    # plt.scatter(center[:,0],center[:,1],s = 80,c = 'y', marker = 's')
    # plt.xlabel('Height'),plt.ylabel('Weight')
    # plt.show()
Project: car-detection    Author: mmetcalfe
def find_sample_clusters(pos_reg_generator, window_dims, hog, num_clusters):
    regions = list(pos_reg_generator)
    descriptors = trainhog.compute_hog_descriptors(hog, regions, window_dims, 1)

    # convert to np.float32
    descriptors = [rd.descriptor for rd in descriptors]
    Z = np.float32(descriptors)

    # define criteria and apply kmeans()
    K = num_clusters
    print 'find_sample_clusters,', 'kmeans:', K
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    attempts = 10
    ret,label,center=cv2.kmeans(Z,K,None,criteria,attempts,cv2.KMEANS_RANDOM_CENTERS)
    # ret,label,center=cv2.kmeans(Z,2,criteria,attempts,cv2.KMEANS_PP_CENTERS)

    print 'ret:', ret
    # print 'label:', label
    # print 'center:', center

    # # Now separate the data, Note the flatten()
    # A = Z[label.ravel()==0]
    # B = Z[label.ravel()==1]

    clusters = partition(regions, label)
    return clusters
Project: PaintingToArtists    Author: achintyagopal
def createTrainingInstances(self, images):
        instances = []
        img_descriptors = []
        master_descriptors = []
        cv2.ocl.setUseOpenCL(False)
        orb = cv2.ORB_create()
        for img, label in images:
            print img
            img = read_color_image(img)
            keypoints = orb.detect(img, None)
            keypoints, descriptors = orb.compute(img, keypoints)
            if descriptors is None:
                descriptors = []

            img_descriptors.append(descriptors)
            for i in descriptors:
                master_descriptors.append(i)


        master_descriptors = np.float32(master_descriptors)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

        ret, labels, centers = cv2.kmeans(master_descriptors, self.center_num, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        labels = labels.ravel()

        count = 0
        img_num = 0
        for img, label in images:
            histogram = np.zeros(self.center_num)
            feature_vector = img_descriptors[img_num]
            for f in xrange(len(feature_vector)):
                index = count + f
                histogram.itemset(labels[index], 1 + histogram.item(labels[index]))
            count += len(feature_vector)
            pairing = Instance(histogram, label)
            instances.append(pairing)

        self.training_instances = instances
        self.centers = centers
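
At prediction time, the stored self.centers are typically reused to build the same kind of histogram for an unseen image. The sketch below shows one illustrative way to do that, assuming the same ORB setup and the centers array produced above; bow_histogram itself is not a function from the project:

import cv2
import numpy as np

def bow_histogram(img, centers):
    """Bag-of-words histogram of one BGR image against trained k-means centers."""
    orb = cv2.ORB_create()
    keypoints = orb.detect(img, None)
    keypoints, descriptors = orb.compute(img, keypoints)
    histogram = np.zeros(len(centers))
    if descriptors is None:
        return histogram
    # Assign each ORB descriptor to its nearest center and count the assignments.
    d2 = ((np.float32(descriptors)[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    for idx in d2.argmin(axis=1):
        histogram[idx] += 1
    return histogram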
Project: PaintingToArtists    Author: achintyagopal
def createTrainingInstances(self, images):
        instances = []
        img_descriptors = []
        master_descriptors = []
        cv2.ocl.setUseOpenCL(False)
        orb = cv2.ORB_create()
        for img, label in images:
            print img
            img = read_color_image(img)
            keypoints = orb.detect(img, None)
            keypoints, descriptors = orb.compute(img, keypoints)
            if descriptors is None:
                descriptors = []

            img_descriptors.append(descriptors)
            for i in descriptors:
                master_descriptors.append(i)


        master_descriptors = np.float32(master_descriptors)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

        ret, labels, centers = cv2.kmeans(master_descriptors, self.center_num, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        labels = labels.ravel()

        count = 0
        img_num = 0
        for img, label in images:
            histogram = np.zeros(self.center_num)
            feature_vector = img_descriptors[img_num]
            for f in xrange(len(feature_vector)):
                index = count + f
                histogram.itemset(labels[index], 1 + histogram.item(labels[index]))
            count += len(feature_vector)
            pairing = Instance(histogram, label)
            instances.append(pairing)

        self.training_instances = instances
        self.centers = centers
Project: PaintingToArtists    Author: achintyagopal
def local_bow_train(self, images):
        instances = []
        img_descriptors = []
        master_descriptors = []
        cv2.ocl.setUseOpenCL(False)
        orb = cv2.ORB_create()
        for img, label in images:
            print img
            img = read_color_image(img)
            keypoints = orb.detect(img, None)
            keypoints, descriptors = orb.compute(img, keypoints)
            if descriptors is None:
                descriptors = []

            img_descriptors.append(descriptors)
            for i in descriptors:
                master_descriptors.append(i)


        master_descriptors = np.float32(master_descriptors)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

        ret, labels, centers = cv2.kmeans(master_descriptors, self.center_num, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        labels = labels.ravel()

        count = 0
        img_num = 0
        for img, label in images:
            histogram = np.zeros(self.center_num)
            feature_vector = img_descriptors[img_num]
            for f in xrange(len(feature_vector)):
                index = count + f
                histogram.itemset(labels[index], 1 + histogram.item(labels[index]))
            count += len(feature_vector)
            pairing = Instance(histogram, label)
            instances.append(pairing)

        self.training_instances = instances
        self.centers = centers