Python pyaudio module: paFloat32() example source code

The following 12 code examples, extracted from open-source Python projects, illustrate how to use pyaudio.paFloat32().
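
Before the project excerpts, here is a minimal self-contained sketch (not taken from any of the projects below) that opens a paFloat32 output stream and plays one second of a 440 Hz sine tone; with paFloat32, samples are 32-bit floats in the range [-1.0, 1.0]:

import numpy as np
import pyaudio

p = pyaudio.PyAudio()
fs = 44100  # sampling rate, Hz
stream = p.open(format=pyaudio.paFloat32, channels=1, rate=fs, output=True)

# One second of a 440 Hz sine tone at half amplitude; paFloat32 expects float32 samples in [-1.0, 1.0]
t = np.arange(fs) / fs
samples = (0.5 * np.sin(2 * np.pi * 440 * t)).astype(np.float32)
stream.write(samples.tobytes())

stream.stop_stream()
stream.close()
p.terminate()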

Project: untwist    Author: IoSR-Surrey    | Project source | File source
def play(self, signal, range=(), sr=44100, stop_func=None):
        self.current_stream_id += 1
        stream = PyAudioDriver.PlaybackStream(
            signal, range,
            self.wrap_stop_func(self.current_stream_id, stop_func)
        )

        pa_stream = self.pyaudio.open(
            format=pa.paFloat32,
            channels=signal.shape[1],
            rate=sr,
            output=True,
            stream_callback=stream.callback)

        self.streams[self.current_stream_id] = pa_stream
        return self.current_stream_id
Project: untwist    Author: IoSR-Surrey    | Project source | File source
def record(self, max_seconds, num_channels, sr=44100, stop_func=None):
        self.current_stream_id += 1
        self.recordings[self.current_stream_id] = np.ndarray((0, num_channels))

        stream = PyAudioDriver.RecordStream(
            self.recordings[self.current_stream_id],
            max_seconds, num_channels, sr,
            self.wrap_stop_func(self.current_stream_id, stop_func)
        )

        pa_stream = self.pyaudio.open(
            format=pa.paFloat32,
            channels=num_channels,
            rate=sr,
            input=True,
            stream_callback=stream.callback)

        self.streams[self.current_stream_id] = pa_stream
        return self.current_stream_id
Project: APEX    Author: ymollard    | Project source | File source
def __init__(self):
        self.rospack = RosPack()
        with open(join(self.rospack.get_path('apex_playground'), 'config', 'environment.json')) as f:
            self.params = json.load(f)

        with open(join(self.rospack.get_path('apex_playground'), 'config', 'bounds.json')) as f:
            self.bounds = json.load(f)["sensory"]["sound"][0]

        self.p = pyaudio.PyAudio()
        self.fs = 44100       # sampling rate, Hz, must be integer
        self.duration = 1./self.params['rate']

        # For paFloat32, sample values must be in the range [-1.0, 1.0]
        self.stream = self.p.open(format=pyaudio.paFloat32,
                                  channels=1,
                                  rate=self.fs,
                                  output=True)
Project: zignal    Author: ronnyandersson    | Project source | File source
def _data_format(self, x):
        """The data types in numpy needs to be mapped to the equivalent type in
        portaudio. This is an issue for 24 bit audio files since there isn't a
        24 bit data type in numpy. This is currently not implemented. There are
        some options on how to do this. We could for example use a 32 bit int and
        store the 24 bits either so that bits 1 to 8 is set to zeroes or so that
        bits 25 to 32 is set to zeros.
        """
        retval = None

        if x.samples.dtype == np.dtype(np.float32):
            self._logger.debug("pyaudio.paFloat32")
            retval = pyaudio.paFloat32
        elif x.samples.dtype == np.dtype(np.int16):
            self._logger.debug("pyaudio.paInt16")
            retval = pyaudio.paInt16
        elif x.samples.dtype == np.dtype(np.int32):
            self._logger.debug("pyaudio.paInt32")
            retval = pyaudio.paInt32
        else:
            raise NotImplementedError("Data type not understood: %s" %x.samples.dtype)

        return retval
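
The docstring above mentions two possible layouts for carrying 24-bit samples inside a 32-bit integer. A rough sketch of the first layout (the 24 audio bits placed in bits 8 to 31, with the low byte zeroed) could look like the following; this is only an illustration of the idea, assuming little-endian packed 3-byte samples, and is not code from the zignal project:

import numpy as np

def int24_bytes_to_int32(raw_bytes):
    """Pad each little-endian 3-byte sample with a zero low byte and view the
    result as int32, so the 24 audio bits occupy bits 8 to 31 (sign preserved)."""
    triplets = np.frombuffer(raw_bytes, dtype=np.uint8).reshape(-1, 3)
    padded = np.zeros((len(triplets), 4), dtype=np.uint8)
    padded[:, 1:] = triplets           # low byte stays zero
    return padded.view('<i4').ravel()  # little-endian int32 view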
Project: challenges    Author: py-study-group    | Project source | File source
def play(self, channels=1):
        '''Plays notes'''
        for note in self.notes:
            stream = self.audio_adapter.open(
                format=pyaudio.paFloat32,
                channels=channels,
                rate=note.rate,
                output=True
            )
            stream.write(self.volume * note.sample)
Project: pyBinSim    Author: pyBinSim    | Project source | File source
def stream_start(self):
        self.log.info("BinSim: stream_start")
        self.stream = self.p.open(format=pyaudio.paFloat32, channels=2,
                                  rate=self.sampleRate, output=True,
                                  frames_per_buffer=self.blockSize,
                                  stream_callback=audio_callback(self))
        self.stream.start_stream()

        while self.stream.is_active():
            time.sleep(1)
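
The audio_callback used above is defined elsewhere in pyBinSim and is not shown here. In general, a PyAudio stream_callback receives (in_data, frame_count, time_info, status) and returns a tuple of (audio_data, flag); a minimal illustrative callback factory for a two-channel paFloat32 output stream (not the pyBinSim implementation) looks like this:

import numpy as np
import pyaudio

def make_silence_callback(channels=2):
    """Build a stream_callback that fills every requested block with float32 silence."""
    def callback(in_data, frame_count, time_info, status):
        block = np.zeros(frame_count * channels, dtype=np.float32)
        return block.tobytes(), pyaudio.paContinue
    return callback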
Project: sim_game    Author: olehermanse    | Project source | File source
def play_notes(self, notes=None):
        if not notes:
            notes = self.notes
        player = pyaudio.PyAudio()
        lib = sound_lib.SoundLib()
        fs = 44100  # sampling rate, Hz, must be integer
        stream = player.open(format=pyaudio.paFloat32,
                             channels=1,
                             rate=fs,
                             output=True)
        old = 0
        final = np.array([], dtype=np.float32)
        for note in notes:
            time.sleep(note[4] - old)
            duration = note[5]  # in seconds, may be float
            f = lib.midi_to_freq(note[3])  # sine frequency, Hz, may be float
            samples = (np.sin(2 * np.pi * np.arange(fs * duration) * f / fs)).astype(np.float32)
            n = len(samples)
            level = note[6] / 100
            volume = [level] * n  # range [0.0, 1.0]
            volume[0] = 0
            for index in range(1, n):
                volume[index] = (-(2 * ((index / n) - 0.5))**6 + 1) * volume[index]
                samples[index] = samples[index] * volume[index]
            old = note[4] + duration
            final = np.append(final, samples)
        stream.write(final, num_frames=len(final))
        stream.stop_stream()
        stream.close()
        player.terminate()


Project: speechT    Author: timediv    | Project source | File source
def __init__(self, rate=16000, threshold=0.03, chunk_size=1024):
    self.rate = rate
    self.threshold = threshold
    self.chunk_size = chunk_size
    self.format = pyaudio.paFloat32
    self._pyaudio = pyaudio.PyAudio()
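
speechT only stores the paFloat32 format and a level threshold in this constructor; the recording loop lives elsewhere in the project. A hedged sketch of how such a configuration can be used for a blocking read, comparing the RMS of one float32 chunk against the threshold (variable names here are illustrative, not speechT's), might be:

import numpy as np
import pyaudio

rate, chunk_size, threshold = 16000, 1024, 0.03
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32, channels=1, rate=rate,
                input=True, frames_per_buffer=chunk_size)

# Read one chunk of float32 samples and compare its RMS level to the threshold.
chunk = np.frombuffer(stream.read(chunk_size), dtype=np.float32)
is_speech = np.sqrt(np.mean(chunk ** 2)) > threshold

stream.stop_stream()
stream.close()
p.terminate()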
Project: PyPeVoc    Author: goiosunsw    | Project source | File source
def PlaySound(w, sr=44100):
    import numpy as np
    import pyaudio

    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paFloat32,
                    channels=1, rate=sr, output=True)

    stream.write(w.astype(np.float32).tobytes())

    stream.close()
    p.terminate()
Project: PyMorsetrainer    Author: yayachiken    | Project source | File source
def run(self):
        samples = self.morse.morse_tone(self.wpm, self.effective_wpm, self.frequency)
        stream = self.p.open(format=pyaudio.paFloat32,
                    channels=1, rate=SAMPLE_RATE, output=True)
        chunknum = len(samples) // 1024
        samples = numpy.array_split(samples, chunknum)
        for s in samples:
            if self.shutdownFlag:
                stream.close()
                break
            stream.write(s.tobytes())
Project: speechless    Author: JuliusKunze    | Project source | File source
def record(self):
        """Records from the microphone and returns the data as an array of signed shorts."""

        print("Wait in silence to begin recording; wait in silence to terminate")

        import pyaudio

        p = pyaudio.PyAudio()
        stream = p.open(format=pyaudio.paFloat32, channels=1, rate=self.sample_rate, input=True, output=True,
                        frames_per_buffer=self.chunk_size)

        silent_chunk_count = 0
        has_recording_started = False
        is_first_chunk = False
        chunks = []

        while True:
            chunk_as_array = array.array('f', stream.read(self.chunk_size))

            # drop first, as it is often loud noise
            if not is_first_chunk:
                is_first_chunk = True
                continue

            if byteorder == 'big':
                chunk_as_array.byteswap()

            chunk = numpy.array(chunk_as_array)

            chunks.append(chunk)

            silent = self._is_silent(chunk)
            print("Silent: " + str(silent))

            if has_recording_started:
                if silent:
                    silent_chunk_count += 1
                    if silent_chunk_count * self.chunk_size > self.silence_until_terminate_in_s * self.sample_rate:
                        break
                else:
                    silent_chunk_count = 0
            elif not silent:
                has_recording_started = True

        stream.stop_stream()
        stream.close()
        print("Stopped recording.")

        p.terminate()

        return self._normalize(self._trim_silence(concatenate(chunks)))
Project: jsrlive-cli    Author: pqlime    | Project source | File source
def play_song(name, url):
    """
    Function that plays a song in a new thread

    Args:
        name (str): Name to display
        url (str): URL to fetch the mp3 from
    """
    global current_song
    global playback_progress

    current_song = 'Loading...'  # Set the song name to 'Loading...' to notify the user
    playback_progress = 0

    wav = download_mp3_to_wav(url)  # Download the mp3 file as a wav from jetsetradio.live
    if not wav:  # If there's no wav returned, don't play it
        return

    current_song = name  # Set the song name to the new song

    pa = pyaudio.PyAudio()  # Main class of pyAudio; contains the open() function we need for an audio stream

    # Open an audio stream on the default output device.
    # We open it at half the wav framerate because we convert from Int16 to Float32: each frame then
    # takes twice as many bytes, so every read pulls twice as much data from the wav.
    # The conversion to Float32 prevents integer overflow, which would result in garbled static.
    audio_stream = pa.open(wav.getframerate() // 2, wav.getnchannels(), pyaudio.paFloat32, output=True)
    audio_stream.start_stream()

    buffer_size = audio_stream._frames_per_buffer  # The amount of int16's to read per frame

    while True:
        data = wav.readframes(buffer_size * 2)  # Read data from wav
        if isinstance(data, str):  # Check typing to prevent errors
            data = data.encode('utf-8')

        # Unpack each 16-bit value, divide it by 65535 to get a float, then scale it by the volume setting
        data = struct.pack('f' * (len(data) // 2), *list(map(lambda b: b / 65535 * (volume / 9),
                           struct.unpack('H' * (len(data) // 2), data))))
        playback_progress = wav.tell() / wav.getnframes()  # Set percent of song played
        audio_stream.write(data)  # Write raw data to speakers

        if len(data) // 2 < buffer_size:  # If we're out of data, exit the loop
            break
        if current_song != name:  # If the song changed halfway through, stop the stream
            break

    audio_stream.stop_stream()

    del audio_stream  # Cleanup unused variables
    del pa
    del wav
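
The struct-based loop above unpacks the wav data as unsigned 16-bit values and divides by 65535. An alternative sketch of the Int16-to-Float32 conversion using numpy (not part of jsrlive-cli) interprets the data as signed samples and scales them into the [-1.0, 1.0] range expected by paFloat32:

import numpy as np

def int16_bytes_to_float32(data, volume=1.0):
    """Convert raw signed 16-bit PCM bytes to scaled float32 bytes in [-1.0, 1.0]."""
    samples = np.frombuffer(data, dtype=np.int16).astype(np.float32) / 32768.0
    return (samples * volume).astype(np.float32).tobytes()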

