Python numpy module: short() code examples

We have extracted the following 17 code examples from open-source Python projects to illustrate how to use numpy.short().
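Before the project snippets, here is a minimal standalone sketch (not taken from any of the projects below) of what numpy.short is: an alias for the platform's 16-bit C short integer, which is why it is the usual dtype for decoding 16-bit PCM audio bytes.

import numpy as np

# np.short is NumPy's alias for the C "short" type, i.e. np.int16 on common
# platforms, so its values range from -32768 to 32767.
samples = np.array([0, 1, -1, 32767, -32768], dtype=np.short)
print(samples.dtype)           # int16
print(np.short is np.int16)    # True on typical platforms

# Typical use: decode raw 16-bit PCM bytes into an integer array.
raw_bytes = samples.tobytes()
decoded = np.frombuffer(raw_bytes, dtype=np.short)
print(np.array_equal(samples, decoded))  # True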

Project: ASRT_SpeechRecognition    Author: nl8590687    | project source | file source
def read_wav_data(filename):
    '''
    Read a WAV file and return the audio sample matrix together with the sample rate
    '''
    wav = wave.open(filename,"rb") # open the WAV file stream in read-only binary mode
    num_frame = wav.getnframes() # total number of frames
    num_channel=wav.getnchannels() # number of channels
    framerate=wav.getframerate() # sample rate (frames per second)
    num_sample_width=wav.getsampwidth() # sample width in bytes (bytes per sample)
    str_data = wav.readframes(num_frame) # read all frames as a byte string
    wav.close() # close the stream
    wave_data = np.fromstring(str_data, dtype = np.short) # convert the byte string into a 16-bit integer array
    wave_data.shape = -1, num_channel # reshape by channel count: one column for mono, two for stereo
    wave_data = wave_data.T # transpose so that each row holds one channel
    #wave_data = wave_data 
    return wave_data, framerate
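A brief usage sketch (the file name below is hypothetical, not taken from the project): the function returns the channel-major sample matrix, whose dtype is np.short, together with the sample rate.

wave_data, framerate = read_wav_data('speech.wav')  # hypothetical 16-bit PCM WAV file
print(wave_data.dtype)   # int16 (np.short)
print(wave_data.shape)   # (num_channel, num_frame) after the transpose
print(framerate)         # e.g. 16000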
Project: bird-species-classification    Author: johnmartinsson    | project source | file source
def read_gzip_wave_file(filename):
    if (not os.path.isfile(filename)):
        raise ValueError("File does not exist")

    with gzip.open(filename, 'rb') as wav_file:
        with wave.open(wav_file, 'rb') as s:
            if (s.getnchannels() != 1):
                raise ValueError("Wave file should be mono")
            #if (s.getframerate() != 22050):
                #raise ValueError("Sampling rate of wave file should be 16000")

            strsig = s.readframes(s.getnframes())
            x = np.fromstring(strsig, np.short)
            fs = s.getframerate()
            s.close()

            return fs, x
Project: bird-species-classification    Author: johnmartinsson    | project source | file source
def read_wave_file(filename):
    """ Read a wave file from disk
    # Arguments
        filename : the name of the wave file
    # Returns
        (fs, x)  : (sampling frequency, signal)
    """
    if (not os.path.isfile(filename)):
        raise ValueError("File does not exist")

    s = wave.open(filename, 'rb')

    if (s.getnchannels() != 1):
        raise ValueError("Wave file should be mono")
    # if (s.getframerate() != 22050):
        # raise ValueError("Sampling rate of wave file should be 16000")

    strsig = s.readframes(s.getnframes())
    x = np.fromstring(strsig, np.short)
    fs = s.getframerate()
    s.close()

    x = x/32768.0

    return fs, x
Project: bird-species-classification    Author: johnmartinsson    | project source | file source
def read_wave_file_not_normalized(filename):
    """ Read a wave file from disk
    # Arguments
        filename : the name of the wave file
    # Returns
        (fs, x)  : (sampling frequency, signal)
    """
    if (not os.path.isfile(filename)):
        raise ValueError("File does not exist")

    s = wave.open(filename, 'rb')

    if (s.getnchannels() != 1):
        raise ValueError("Wave file should be mono")
    # if (s.getframerate() != 22050):
        # raise ValueError("Sampling rate of wave file should be 16000")

    strsig = s.readframes(s.getnframes())
    x = np.fromstring(strsig, np.short)
    fs = s.getframerate()
    s.close()

    return fs, x
Project: pcbre    Author: pcbre    | project source | file source
def initializeGL(self):
        self.sdf_shader = self.gls.shader_cache.get("image_vert", "tex_frag")

        self.buffer_dtype = numpy.dtype([
            ("vertex", numpy.float32, 2),
            ("texpos", numpy.float32, 2)
        ])

        self.b1 = vbobind(self.sdf_shader, self.buffer_dtype, "vertex")
        self.b2 = vbobind(self.sdf_shader, self.buffer_dtype, "texpos")


        self.tex = Texture()


        # TODO: implement short-int caching
        # build a de Bruijn sequence (the shortest string containing every possible substring of a given length)
        # self.int_seq = de_bruijn(4)
        # self.int_seq += self.int_seq[:3]
Project: Poccala    Author: Byshx    | project source | file source
def init_audio(self, wav=None, path=None, show_pic=False):
            """
            Initialize the audio data
            :param wav: an already opened wave object
            :param path: path to the audio file
            :param show_pic: whether to plot the waveform
            :return: None
            """
            if wav is None:
                if path is None:
                    print('Error: no audio file specified')
                self.__wav = wave.open(path, 'rb')
            else:
                self.__wav = wav
            nframes = self.__wav.getnframes()
            '''Read the frames as a raw byte string'''
            str_data = self.__wav.readframes(nframes)
            '''Convert the byte string into a 16-bit integer array'''
            self.__wdata = np.fromstring(str_data, dtype=np.short)
            '''Check the number of channels'''
            if self.__wav.getnchannels() == 2:
                '''Two channels: reshape the data into one row per channel'''
                self.__wdata.shape = -1, 2
                self.__wdata = self.__wdata.T
                '''Keep the element-wise maximum of the two channels in wdata[0]'''
                for _ in range(len(self.__wdata[0])):
                    if self.__wdata[0][_] < self.__wdata[1][_]:
                        self.__wdata[0][_] = self.__wdata[1][_]
                self.__wdata = self.__wdata[0]
            self.__wdata = np.delete(self.__wdata, np.where(self.__wdata == 0))

            if show_pic:
                x = [_ for _ in range(len(self.__wdata))]
                pylab.plot(x, self.__wdata, 'b')
                pylab.show()
Project: jdtext_classify    Author: zhongnanxiaoqin    | project source | file source
def recode(self):
        pa=PyAudio()
        stream=pa.open(format=paInt16,channels=1,rate=self.SAMPLES_RATE,input=True,frames_per_buffer=self.NUM_SAMPLES)
        save_count=0
        save_buffer=[]
        time_count=self.TIME_COUNT
        print '\n\n\nStart recording, please speak into the microphone...'
        while True:
            time_count-=1
            string_audio_data=stream.read(self.NUM_SAMPLES)
            audio_data=numpy.fromstring(string_audio_data,dtype=numpy.short)
            large_sample_count=numpy.sum(audio_data>self.LEVEL)
            print(numpy.max(audio_data))
            if large_sample_count>self.COUNT_NUM:
                save_count=self.SAVE_LENGTH
            else:
                save_count-=1
            if save_count<0:
                save_count=0
            if save_count>0:
                save_buffer.append(string_audio_data)
            else:
                if len(save_buffer):
                    self.Voice_String=save_buffer
                    save_buffer=[]
                    print 'Recording finished'
                    return True
            if not time_count:
                if len(save_buffer):
                    self.Voice_String=save_buffer
                    save_buffer=[]
                    print 'Recording finished'
                    return True
                else:
                    return False
Project: BigBrotherBot-For-UrT43    Author: ptitbigorneau    | project source | file source
def amean (inarray,dimension=None,keepdims=0):
    """
    Calculates the arithmetic mean of the values in the passed array.
    That is:  1/n * (x1 + x2 + ... + xn).  Defaults to ALL values in the
    passed array.  Use dimension=None to flatten array first.  REMEMBER: if
    dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
    if dimension is a sequence, it collapses over all specified dimensions.  If
    keepdims is set to 1, the resulting array will have as many dimensions as
    inarray, with only 1 'level' per dim that was collapsed over.

    Usage:   amean(inarray,dimension=None,keepdims=0)
    Returns: arithmetic mean calculated over dim(s) in dimension
    """
    if inarray.dtype in [N.int_, N.short,N.ubyte]:
        inarray = inarray.astype(N.float_)
    if dimension == None:
        inarray = N.ravel(inarray)
        sum = N.add.reduce(inarray)
        denom = float(len(inarray))
    elif type(dimension) in [IntType,FloatType]:
        sum = asum(inarray,dimension)
        denom = float(inarray.shape[dimension])
        if keepdims == 1:
            shp = list(inarray.shape)
            shp[dimension] = 1
            sum = N.reshape(sum,shp)
    else: # must be a TUPLE of dims to average over
        dims = list(dimension)
        dims.sort()
        dims.reverse()
        sum = inarray *1.0
        for dim in dims:
            sum = N.add.reduce(sum,dim)
        denom = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)
        if keepdims == 1:
            shp = list(inarray.shape)
            for dim in dims:
                shp[dim] = 1
            sum = N.reshape(sum,shp)
    return sum/denom
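A hypothetical usage sketch (it assumes this module's Python 2 era, its numpy-imported-as-N convention, and the asum helper shown further below); note how np.short input is promoted to float before averaging.

scores = N.array([[1, 2, 3], [4, 5, 7]], dtype=N.short)
print(amean(scores))                            # 3.666... (short input promoted to float)
print(amean(scores, dimension=0))               # [ 2.5  3.5  5. ]
print(amean(scores, dimension=0, keepdims=1))   # [[ 2.5  3.5  5. ]]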
Project: BigBrotherBot-For-UrT43    Author: ptitbigorneau    | project source | file source
def atmean(a,limits=None,inclusive=(1,1)):
    """
   Returns the arithmetic mean of all values in an array, ignoring values
   strictly outside the sequence passed to 'limits'.   Note: either limit
   in the sequence, or the value of limits itself, can be set to None.  The
   inclusive list/tuple determines whether the lower and upper limiting bounds
   (respectively) are open/exclusive (0) or closed/inclusive (1).

   Usage:   atmean(a,limits=None,inclusive=(1,1))
   """
    if a.dtype in [N.int_, N.short,N.ubyte]:
        a = a.astype(N.float_)
    if limits == None:
        return mean(a)
    assert type(limits) in [ListType,TupleType,N.ndarray], "Wrong type for limits in atmean"
    if inclusive[0]:         lowerfcn = N.greater_equal
    else:               lowerfcn = N.greater
    if inclusive[1]:         upperfcn = N.less_equal
    else:               upperfcn = N.less
    if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
        raise ValueError, "No array values within given limits (atmean)."
    elif limits[0]==None and limits[1]<>None:
        mask = upperfcn(a,limits[1])
    elif limits[0]<>None and limits[1]==None:
        mask = lowerfcn(a,limits[0])
    elif limits[0]<>None and limits[1]<>None:
        mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
    s = float(N.add.reduce(N.ravel(a*mask)))
    n = float(N.add.reduce(N.ravel(mask)))
    return s/n
Project: BigBrotherBot-For-UrT43    Author: ptitbigorneau    | project source | file source
def asum (a, dimension=None,keepdims=0):
    """
   An alternative to the Numeric.add.reduce function, which allows one to
   (1) collapse over multiple dimensions at once, and/or (2) to retain
   all dimensions in the original array (squashing each collapsed dimension down to size 1).
   Dimension can equal None (ravel array first), an integer (the
   dimension over which to operate), or a sequence (operate over multiple
   dimensions).  If keepdims=1, the resulting array will have as many
   dimensions as the input array.

   Usage:   asum(a, dimension=None, keepdims=0)
   Returns: array summed along 'dimension'(s), same _number_ of dims if keepdims=1
   """
    if type(a) == N.ndarray and a.dtype in [N.int_, N.short, N.ubyte]:
        a = a.astype(N.float_)
    if dimension == None:
        s = N.sum(N.ravel(a))
    elif type(dimension) in [IntType,FloatType]:
        s = N.add.reduce(a, dimension)
        if keepdims == 1:
            shp = list(a.shape)
            shp[dimension] = 1
            s = N.reshape(s,shp)
    else: # must be a SEQUENCE of dims to sum over
        dims = list(dimension)
        dims.sort()
        dims.reverse()
        s = a *1.0
        for dim in dims:
            s = N.add.reduce(s,dim)
        if keepdims == 1:
            shp = list(a.shape)
            for dim in dims:
                shp[dim] = 1
            s = N.reshape(s,shp)
    return s
Project: dsrc    Author: sverrets    | project source | file source
def __init__(self, v_min, v_max, phase, pulse_shaper_interpolation):
        gr.interp_block.__init__(self,
            name="pulse_shaper_bs",
            in_sig=[numpy.int8],
            out_sig=[numpy.short], interp=pulse_shaper_interpolation)
        self.min = v_min
        self.max = v_max
        self.phase = phase
        self.ps_interpolation = pulse_shaper_interpolation
Project: BeltaGo    Author: 54BayMax    | project source | file source
def my_record(self):
        while self.start_flag == 1:
            pa = PyAudio()
            stream = pa.open(format=paInt16, channels=1, rate=self.framerate, input=True,
                             frames_per_buffer=self.NUM_SAMPLES)
            save_buffer = []
            count = 0
            while count < self.TIME * 20:
                string_audio_data = stream.read(self.NUM_SAMPLES)
                audio_data = np.fromstring(string_audio_data, dtype=np.short)
                large_sample_count = np.sum(audio_data > self.LEVEL)
                print large_sample_count
                if large_sample_count < self.mute_count_limit:
                    self.mute_begin = 1
                else:
                    save_buffer.append(string_audio_data)
                    self.mute_begin = 0
                    self.mute_end = 1
                count += 1
                if (self.mute_end - self.mute_begin) > 9:
                    self.mute_begin = 0
                    self.mute_end = 1
                    break
                if self.mute_begin:
                    self.mute_end += 1
                print '.'
            save_buffer = save_buffer[:]
            # my_buf.append(string_audio_data)
            # count+=1
            # print '.'
            if save_buffer:
                if self.file_name_index < 11:
                    pass
                else:
                    self.file_name_index = 1
                filename = str(self.file_name_index) + '.wav'
                self.save_wave_file(filename=filename, data=save_buffer)
                self.writeQ(queue=self.wav_queue, data=filename)
                self.file_name_index += 1
                print filename, 'saved'
            else:
                print 'file not saved!'
            # self.save_wave_file('01.wav',my_buf)
            save_buffer = []
            stream.close()
Project: Opencv_learning    Author: wjb711    | project source | file source
def recoder(self):
        pa = PyAudio() 
        stream = pa.open(format=paInt16, channels=1, rate=self.SAMPLING_RATE, input=True, 
            frames_per_buffer=self.NUM_SAMPLES) 
        save_count = 0 
        save_buffer = [] 
        time_count = self.TIME_COUNT

        while True:
            time_count -= 1
            # print time_count
            # read NUM_SAMPLES samples
            string_audio_data = stream.read(self.NUM_SAMPLES) 
            # convert the byte string into a 16-bit integer array
            audio_data = np.fromstring(string_audio_data, dtype=np.short)
            # count how many samples exceed LEVEL
            large_sample_count = np.sum( audio_data > self.LEVEL )
            print(np.max(audio_data))
            # if that count exceeds COUNT_NUM, keep saving for at least SAVE_LENGTH more blocks
            if large_sample_count > self.COUNT_NUM:
                save_count = self.SAVE_LENGTH 
            else: 
                save_count -= 1

            if save_count < 0:
                save_count = 0 

            if save_count > 0 : 
            # while in the save window, append the raw data to save_buffer
                #print  save_count > 0 and time_count >0
                save_buffer.append( string_audio_data ) 
            else: 
            #print save_buffer
            # once nothing is left to save, a non-empty save_buffer means a complete utterance was captured
                #print "debug"
                if len(save_buffer) > 0 : 
                    self.Voice_String = save_buffer
                    save_buffer = [] 
                    print("Recorded a piece of voice successfully!")
                    return True
            if time_count==0: 
                if len(save_buffer)>0:
                    self.Voice_String = save_buffer
                    save_buffer = [] 
                    print("Recorded a piece of voice successfully!")
                    return True
                else:
                    return False
Project: speech_rec_py    Author: YichiHuang    | project source | file source
def record_wave(self,temp):
        while self.start_flag==1:
            pa=PyAudio()
            stream=pa.open(format=paInt16,channels=1,
                       rate=framerate,input=True,
                       frames_per_buffer=self.NUM_SAMPLES)
            my_buf=[]
            count=0
            print "* start recording *"
            while count<self.TIME*20:
                string_audio_data=stream.read(self.NUM_SAMPLES)
                audio_data=np.fromstring(string_audio_data,dtype=np.short)
                large_sample_count=np.sum(audio_data>self.LEVEL)
                print large_sample_count
                if large_sample_count<self.mute_count_limit:
                    self.mute_begin=1
                else:
                    my_buf.append(string_audio_data)
                    self.mute_begin=0
                    self.mute_end=1
                count+=1
                if(self.mute_end-self.mute_begin)>9:
                    self.mute_begin=0
                    self.mute_end=1
                    break
                if self.mute_begin:
                    self.mute_end+=1
                print '.'

            my_buf=my_buf[:]
            if my_buf:
                if self.file_name_index<11:
                    pass
                else:
                    self.file_name_index=1
                filename=str(self.file_name_index)+'.wav'
                self.save_wave_file(filename=filename,data=my_buf)
                self.writeQ(queue=self.wav_queue,data=filename)
                self.file_name_index+=1
                print filename,"saved"
            else:
                print '* Error: file not saved! *'
            #self.save_wave_file(filename, my_buf)
            my_buf=[]
            stream.close()
Project: pyMABED    Author: AdrienGuille    | project source | file source
def discretize(self, time_slice_length):
        self.time_slice_length = time_slice_length

        # clean the data directory
        if os.path.exists('corpus'):
            shutil.rmtree('corpus')
        os.makedirs('corpus')

        # compute the total number of time-slices
        time_delta = (self.end_date - self.start_date)
        time_delta = time_delta.total_seconds()/60
        self.time_slice_count = int(time_delta // self.time_slice_length) + 1
        self.tweet_count = np.zeros(self.time_slice_count)
        print('   Number of time-slices: %d' % self.time_slice_count)

        # create empty files
        for time_slice in range(self.time_slice_count):
            dummy_file = open('corpus/' + str(time_slice), 'w')
            dummy_file.write('')

        # compute word frequency
        self.global_freq = dok_matrix((len(self.vocabulary), self.time_slice_count), dtype=np.short)
        self.mention_freq = dok_matrix((len(self.vocabulary), self.time_slice_count), dtype=np.short)
        with open(self.source_file_path, 'r') as input_file:
            csv_reader = csv.reader(input_file, delimiter=self.separator)
            header = next(csv_reader)
            text_column_index = header.index('text')
            date_column_index = header.index('date')
            for line in csv_reader:
                tweet_date = datetime.strptime(line[date_column_index], "%Y-%m-%d %H:%M:%S")
                time_delta = (tweet_date - self.start_date)
                time_delta = time_delta.total_seconds() / 60
                time_slice = int(time_delta / self.time_slice_length)
                self.tweet_count[time_slice] += 1
                # tokenize the tweet and update word frequency
                tweet_text = line[text_column_index]
                words = self.tokenize(tweet_text)
                mention = '@' in tweet_text
                for word in set(words):
                    word_id = self.vocabulary.get(word)
                    if word_id is not None:
                        self.global_freq[word_id, time_slice] += 1
                        if mention:
                            self.mention_freq[word_id, time_slice] += 1
                with open('corpus/' + str(time_slice), 'a') as time_slice_file:
                    time_slice_file.write(tweet_text+'\n')
        self.global_freq = self.global_freq.tocsr()
        self.mention_freq = self.mention_freq.tocsr()
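For reference, a minimal standalone sketch (not part of pyMABED) of the pattern used above: accumulate counts in a scipy.sparse dok_matrix with dtype=np.short, then convert it to CSR for fast row access.

import numpy as np
from scipy.sparse import dok_matrix

# Hypothetical vocabulary of 5 words across 3 time slices.
freq = dok_matrix((5, 3), dtype=np.short)
freq[2, 0] += 1   # word 2 observed in time slice 0
freq[2, 0] += 1
freq[4, 1] += 1
freq = freq.tocsr()   # CSR is efficient for per-word (row) lookups
print(freq.dtype)     # int16
print(freq[2, 0])     # 2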
Project: assistant    Author: yjoe0    | project source | file source
def startAccord(self, Times=100):
    # open the audio input stream
    pa = PyAudio() 
    if not self.SAMPLING_RATE:
        setAudio(self)
    stream = pa.open(format=paInt16, channels=1, rate=self.SAMPLING_RATE, input=True, 
                    frames_per_buffer=self.NUM_SAMPLES) 

    save_count = 0 
    save_buffer = [] 

    while True: 
        # count down
        if Times != 100:
            Times -=1
        # read NUM_SAMPLES samples
        string_audio_data = stream.read(self.NUM_SAMPLES) 
        # convert the byte string into a 16-bit integer array
        audio_data = np.fromstring(string_audio_data, dtype=np.short) 
        # count how many samples exceed LEVEL
        large_sample_count = np.sum( audio_data > self.LEVEL ) 
        if self.debug:
            print np.max(audio_data) 
        # if that count exceeds COUNT_NUM, keep saving for at least SAVE_LENGTH more blocks
        if large_sample_count > self.COUNT_NUM: 
            save_count = self.SAVE_LENGTH 
        else: 
            save_count -= 1 

        if save_count < 0: 
            save_count = 0 

        if save_count > 0: 
            # while in the save window, append the raw data to save_buffer
            save_buffer.append( string_audio_data ) 
        else: 
            # once save_buffer has data, write it to a WAV file and return the cached file
            if len(save_buffer) > 0: 
                filename = "tmp.wav" 
                cacheFile = save_wave_file(self, filename, save_buffer) 
                save_buffer = [] 
                stream.close()
                if self.debug:
                    print "saved audio stream" 
                return cacheFile
                break
        if Times < 0:
            stream.close()
            return False
            break
Project: baidu_speech    Author: DinnerHowe    | project source | file source
def recode(self):
  pa = PyAudio() 
  stream = pa.open(format=paInt16, channels=self.nchannel, rate=self.SAMPLING_RATE, input=True, frames_per_buffer=self.NUM_SAMPLES) 
  save_count = 0 
  save_buffer = [] 
  time_out = self.TIME_OUT
  NO_WORDS=self.NO_WORDS

  while True and NO_WORDS:
   time_out -= 1
   print 'time_out in', time_out
   # read NUM_SAMPLES samples
   string_audio_data = stream.read(self.NUM_SAMPLES)
   # convert the byte string into a 16-bit integer array
   audio_data = np.fromstring(string_audio_data, dtype=np.short)

   # reset the silence countdown whenever loud audio is detected
   NO_WORDS -= 1
   if np.max(audio_data) > self.UPPER_LEVEL:
    NO_WORDS=self.NO_WORDS
   print 'self.NO_WORDS ', NO_WORDS
   print 'np.max(audio_data) ', np.max(audio_data)

   # count how many samples exceed LOWER_LEVEL
   large_sample_count = np.sum( audio_data > self.LOWER_LEVEL )

   # if that count exceeds COUNT_NUM, keep saving for at least SAVE_LENGTH more blocks
   if large_sample_count > self.COUNT_NUM:
    save_count = self.SAVE_LENGTH 
   else: 
    save_count -= 1
   #print 'save_count',save_count

   # while in the save window, append the raw data to save_buffer
   if save_count < 0:
    save_count = 0 
   elif save_count > 0 : 
    save_buffer.append( string_audio_data ) 
   else:
    pass

   # once save_buffer has data and a stop condition is met, hand it off as the recorded voice
   if len(save_buffer) > 0 and NO_WORDS==0: 
    self.Voice_String = save_buffer
    save_buffer = [] 
    rospy.loginfo( "Recorded a piece of voice successfully!")
    #return self.Voice_String

   elif len(save_buffer) > 0 and time_out==0: 
    self.Voice_String = save_buffer
    save_buffer = [] 
    rospy.loginfo( "Recorded a piece of voice successfully!")
    #return self.Voice_String
   else: 
    pass
   #rospy.loginfo( '\n\n')