
Answers

25

Here is code for generating the sound in a function. It uses AudioQueue Services and assumes that you know how to set up an AudioSession and how to start and stop an audio output queue correctly. First, the output queue callback, the method that processes each buffer, and the tone generator:

// AudioQueue output queue callback. 
void AudioEngineOutputBufferCallback (void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer) { 
    AudioEngine *engine = (AudioEngine*) inUserData; 
    [engine processOutputBuffer:inBuffer queue:inAQ]; 
} 

- (void) processOutputBuffer: (AudioQueueBufferRef) buffer queue:(AudioQueueRef) queue { 
    OSStatus err; 
    if (isPlaying == YES) { 
     [outputLock lock]; 
     if (outputBuffersToRewrite > 0) { 
      outputBuffersToRewrite--; 
      [self generateTone:buffer]; 
     } 
     err = AudioQueueEnqueueBuffer(queue, buffer, 0, NULL); 
     if (err == 560030580) { // '!act' = kAudioSessionNotActiveError: the queue is not active, e.g. because the iPod/Music app has taken over the audio session 
      isPlaying = NO; 
     } else if (err != noErr) { 
      NSLog(@"AudioQueueEnqueueBuffer() error %d", err); 
     } 
     [outputLock unlock]; 
    } else { 
     err = AudioQueueStop (queue, NO); 
     if (err != noErr) NSLog(@"AudioQueueStop() error: %d", err); 
    } 
} 

-(void) generateTone: (AudioQueueBufferRef) buffer { 
    if (outputFrequency == 0.0) { 
     memset(buffer->mAudioData, 0, buffer->mAudioDataBytesCapacity); 
     buffer->mAudioDataByteSize = buffer->mAudioDataBytesCapacity; 
    } else { 
     // Make the buffer length a multiple of the wavelength for the output frequency. 
     int sampleCount = buffer->mAudioDataBytesCapacity/sizeof (SInt16); 
     double bufferLength = sampleCount; 
     double wavelength = sampleRate/outputFrequency; 
     double repetitions = floor (bufferLength/wavelength); 
     if (repetitions > 0.0) { 
      sampleCount = round (wavelength * repetitions); 
     } 

     double  x, y; 
     double  sd = 1.0/sampleRate; 
     double  amp = 0.9; 
     double  max16bit = SHRT_MAX; 
     int i; 
     SInt16 *p = buffer->mAudioData; 

     for (i = 0; i < sampleCount; i++) { 
      x = i * sd * outputFrequency; 
      switch (outputWaveform) { 
       case kSine: 
        y = sin (x * 2.0 * M_PI); 
        break; 
       case kTriangle: 
        x = fmod (x, 1.0); 
        if (x < 0.25) 
         y = x * 4.0; // up 0.0 to 1.0 
        else if (x < 0.75) 
         y = (1.0 - x) * 4.0 - 2.0; // down 1.0 to -1.0 
        else 
         y = (x - 1.0) * 4.0; // up -1.0 to 0.0 
        break; 
       case kSawtooth: 
        y = 0.8 - fmod (x, 1.0) * 1.8; 
        break; 
       case kSquare: 
        y = (fmod(x, 1.0) < 0.5)? 0.7: -0.7; 
        break; 
       default: y = 0; break; 
      } 
      p[i] = y * max16bit * amp; 
     } 

     buffer->mAudioDataByteSize = sampleCount * sizeof (SInt16); 
    } 
} 
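To see why the buffer length is rounded to a whole number of wavelengths: with the 2,176-byte buffers allocated below (1,088 16-bit samples) at a 44,100 Hz sample rate and a 440 Hz tone, the wavelength is 44100/440 ≈ 100.2 samples, repetitions = floor(1088/100.2) = 10, and sampleCount rounds to 1,002 samples. Each buffer then holds a whole number of cycles starting at phase zero, so consecutive buffers join without an audible click.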

And here is a code fragment that sets up and starts the output AudioQueue:

// Get the preferred sample rate (8,000 Hz on iPhone, 44,100 Hz on iPod touch) 
UInt32 size = sizeof(sampleRate); 
err = AudioSessionGetProperty (kAudioSessionProperty_CurrentHardwareSampleRate, &size, &sampleRate); 
if (err != noErr) NSLog(@"AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate) error: %d", err); 
//NSLog (@"Current hardware sample rate: %1.0f", sampleRate); 

BOOL isHighSampleRate = (sampleRate > 16000); 
int bufferByteSize; 
AudioQueueBufferRef buffer; 

// Set up stream format fields 
AudioStreamBasicDescription streamFormat; 
streamFormat.mSampleRate = sampleRate; 
streamFormat.mFormatID = kAudioFormatLinearPCM; 
streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; 
streamFormat.mBitsPerChannel = 16; 
streamFormat.mChannelsPerFrame = 1; 
streamFormat.mBytesPerPacket = 2 * streamFormat.mChannelsPerFrame; 
streamFormat.mBytesPerFrame = 2 * streamFormat.mChannelsPerFrame; 
streamFormat.mFramesPerPacket = 1; 
streamFormat.mReserved = 0; 

// New output queue ---- PLAYBACK ---- 
if (isPlaying == NO) { 
    err = AudioQueueNewOutput (&streamFormat, AudioEngineOutputBufferCallback, self, nil, nil, 0, &outputQueue); 
    if (err != noErr) NSLog(@"AudioQueueNewOutput() error: %d", err); 

    // Enqueue buffers 
    //outputFrequency = 0.0; 
    outputBuffersToRewrite = 3; 
    bufferByteSize = (sampleRate > 16000)? 2176 : 512; // 40.5 Hz : 31.25 Hz 
    for (int i = 0; i < 3; i++) { 
     err = AudioQueueAllocateBuffer (outputQueue, bufferByteSize, &buffer); 
     if (err == noErr) { 
      [self generateTone: buffer]; 
      err = AudioQueueEnqueueBuffer (outputQueue, buffer, 0, nil); 
      if (err != noErr) NSLog(@"AudioQueueEnqueueBuffer() error: %d", err); 
     } else { 
      NSLog(@"AudioQueueAllocateBuffer() error: %d", err); 
      return; 
     } 
    } 

    // Start playback 
    isPlaying = YES; 
    err = AudioQueueStart(outputQueue, nil); 
    if (err != noErr) { NSLog(@"AudioQueueStart() error: %d", err); isPlaying= NO; return; } 
} else { 
    NSLog (@"Error: audio is already playing back."); 
} 
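
The fragment above assumes that the AudioSession has already been initialized and made active before kAudioSessionProperty_CurrentHardwareSampleRate is queried. As a rough sketch (my assumption, using the same old C AudioSession API as the rest of the code), that setup might look like this:

// Rough sketch of the AudioSession setup the sample assumes has already run.
// Requires: #import <AudioToolbox/AudioToolbox.h>
- (void) setUpAudioSession {
    OSStatus err = AudioSessionInitialize (NULL, NULL, NULL, NULL);
    if (err != noErr) NSLog(@"AudioSessionInitialize() error: %d", (int)err);

    UInt32 category = kAudioSessionCategory_MediaPlayback;
    err = AudioSessionSetProperty (kAudioSessionProperty_AudioCategory, sizeof(category), &category);
    if (err != noErr) NSLog(@"AudioSessionSetProperty() error: %d", (int)err);

    err = AudioSessionSetActive (true);
    if (err != noErr) NSLog(@"AudioSessionSetActive() error: %d", (int)err);
}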

Beware that the callback is called on a non-main thread, so you have to practice thread safety with locks, mutexes, or other techniques.
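
For example, a method that changes the tone while the queue is running should take the same lock before touching the variables the callback reads. A minimal sketch, assuming the outputLock, outputFrequency, outputWaveform, and outputBuffersToRewrite instance variables from the code above:

- (void) setFrequency: (double) frequency waveform: (int) waveform {
    [outputLock lock];
    outputFrequency = frequency;
    outputWaveform = waveform;
    outputBuffersToRewrite = 3; // have the callback refill all three queued buffers
    [outputLock unlock];
}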

+0

Can you share the code for the synthesizer? –

7

High level: use AVAudioPlayer.

Mid level: Audio Queues. trailsinthesand.com/exploring-iphone-audio-part-1/ gets you going well, and https://github.com/hollance/AVBufferPlayer is a good example to work from. Note: I removed the http from the first link because the site has apparently changed and the old URL now leads straight to the wrong site.

Low level: alternatively, you can drop down a level and do this with Audio Units: http://cocoawithlove.com/2010/10/ios-tone-generator-introduction-to.html
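
For the high-level route, here is a minimal sketch of playing a pre-rendered sound file with AVAudioPlayer (the file name is just a placeholder):

// Sketch: play a bundled sound file with AVAudioPlayer.
// Requires: #import <AVFoundation/AVFoundation.h>
NSError *error = nil;
NSURL *url = [[NSBundle mainBundle] URLForResource:@"tone" withExtension:@"caf"]; // placeholder resource
AVAudioPlayer *player = [[AVAudioPlayer alloc] initWithContentsOfURL:url error:&error];
if (player != nil) {
    [player prepareToPlay];
    [player play];
} else {
    NSLog(@"AVAudioPlayer error: %@", error);
}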

+2

Audio Queues: https://github.com/hollance/AudioBufferPlayer –

+0

Your example – Amitg2k12

+0

trailsinthesand.com is now an adult website – blwinters

9

unsafe void SetupAudio () 
    { 
     AudioSession.Initialize(); 
     AudioSession.Category = AudioSessionCategory.MediaPlayback; 

     sampleRate = AudioSession.CurrentHardwareSampleRate; 
     var format = new AudioStreamBasicDescription() { 
      SampleRate = sampleRate, 
      Format = AudioFormatType.LinearPCM, 
      FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked, 
      BitsPerChannel = 16, 
      ChannelsPerFrame = 1, 
      BytesPerFrame = 2, 
      BytesPerPacket = 2, 
      FramesPerPacket = 1, 
     }; 

     var queue = new OutputAudioQueue (format); 
     var bufferByteSize = (sampleRate > 16000)? 2176 : 512; // 40.5 Hz : 31.25 Hz 
     var buffers = new AudioQueueBuffer* [numBuffers]; 
     for (int i = 0; i < numBuffers; i++){ 
      queue.AllocateBuffer (bufferByteSize, out buffers [i]); 
      GenerateTone (buffers [i]); 
      queue.EnqueueBuffer (buffers [i], null); 
     } 
     queue.OutputCompleted += (object sender, OutputCompletedEventArgs e) => { 
      queue.EnqueueBuffer (e.UnsafeBuffer, null); 
     }; 

     queue.Start(); 
    } 

The above is a C# (MonoTouch) version of the same sample from @lucius; SetupAudio creates and starts the output queue. And here is the tone generator:

unsafe void GenerateTone (AudioQueueBuffer *buffer) 
    { 
     // Make the buffer length a multiple of the wavelength for the output frequency. 
     uint sampleCount = buffer->AudioDataBytesCapacity/2; 
     double bufferLength = sampleCount; 
     double wavelength = sampleRate/outputFrequency; 
     double repetitions = Math.Floor (bufferLength/wavelength); 
     if (repetitions > 0) 
      sampleCount = (uint)Math.Round (wavelength * repetitions); 

     double  x, y; 
     double  sd = 1.0/sampleRate; 
     double  amp = 0.9; 
     double  max16bit = Int16.MaxValue; 
     int i; 
     short *p = (short *) buffer->AudioData; 

     for (i = 0; i < sampleCount; i++) { 
      x = i * sd * outputFrequency; 
      switch (outputWaveForm) { 
       case WaveForm.Sine: 
        y = Math.Sin (x * 2.0 * Math.PI); 
        break; 
       case WaveForm.Triangle: 
        x = x % 1.0; 
        if (x < 0.25) 
         y = x * 4.0; // up 0.0 to 1.0 
        else if (x < 0.75) 
         y = (1.0 - x) * 4.0 - 2.0; // down 1.0 to -1.0 
        else 
         y = (x - 1.0) * 4.0; // up -1.0 to 0.0 
        break; 
       case WaveForm.Sawtooth: 
        y = 0.8 - (x % 1.0) * 1.8; 
        break; 
       case WaveForm.Square: 
        y = ((x % 1.0) < 0.5)? 0.7: -0.7; 
        break; 
       default: y = 0; break; 
      } 
      p[i] = (short)(y * max16bit * amp); 
     } 
     buffer->AudioDataByteSize = sampleCount * 2; 
    } 

You will also want these definitions: a WaveForm enum with Sine, Triangle, Sawtooth, and Square members, plus the sampleRate, outputFrequency, outputWaveForm, and numBuffers fields used above.