iOS 음성을 텍스트 API에, 텍스트를 음성 API에 함께 사용하려고합니다. 기본적으로 사용자는 말을하고 텍스트는 텍스트로 기록됩니다. 그런 다음 사용자는 텍스트보기의 텍스트에서 음성으로 텍스트를 제공하는 버튼을 누릅니다. 내 오류는 사용자가 speech-to-text 단추를 누르면 text to speech 단추가 작동을 멈추는 것입니다. 그러나 텍스트 단추로 연설을 전혀 누르지 않고 키보드를 통해 텍스트보기에 일부 텍스트를 입력하지 않으면 텍스트가 음성 단추로 작동합니다. 텍스트 버튼을 연설하는 데 문제가 있다고 생각합니다. 내 코드에서 어떤 오류도 발생하지 않고 있으며 무슨 일이 일어나고 있는지 혼란 스럽다. 누를 때텍스트 음성 변환 오류, 텍스트 음성 변환 버튼과 충돌
import UIKit
import Speech
import AVFoundation
class SpeechRecognitionViewController: UIViewController, SFSpeechRecognizerDelegate, UIPickerViewDataSource, UIPickerViewDelegate {
private var speechRecognizer: SFSpeechRecognizer!
private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest!
private var recognitionTask: SFSpeechRecognitionTask!
private let audioEngine = AVAudioEngine()
private var locales: [Locale]!
private let defaultLocale = Locale(identifier: "en-US")
@IBOutlet weak var recordBtn: UIButton!
@IBOutlet weak var speaker: UIButton!
@IBOutlet weak var textView: UITextField!
//@IBOutlet weak var textView: UITextView!
//@IBOutlet private weak var recordBtn : UIButton!
//@IBOutlet private weak var picker: UIPickerView!
@IBOutlet weak var picker: UIPickerView!
override func viewDidLoad() {
super.viewDidLoad()
recordBtn.isEnabled = false
locales = SFSpeechRecognizer.supportedLocales().map({$0})
let index = NSArray(array: locales).index(of: defaultLocale)
picker.selectRow(index, inComponent: 0, animated: false)
prepareRecognizer(locale: defaultLocale)
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
SFSpeechRecognizer.requestAuthorization { authStatus in
/*
The callback may not be called on the main thread. Add an
operation to the main queue to update the record button's state.
*/
OperationQueue.main.addOperation {
switch authStatus {
case .authorized:
self.recordBtn.isEnabled = true
case .denied:
self.recordBtn.isEnabled = false
self.recordBtn.setTitle("User denied access to speech recognition", for: .disabled)
case .restricted:
self.recordBtn.isEnabled = false
self.recordBtn.setTitle("Speech recognition restricted on this device", for: .disabled)
case .notDetermined:
self.recordBtn.isEnabled = false
self.recordBtn.setTitle("Speech recognition not yet authorized", for: .disabled)
}
}
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
private func prepareRecognizer(locale: Locale) {
speechRecognizer = SFSpeechRecognizer(locale: locale)!
speechRecognizer.delegate = self
}
private func startRecording() throws {
// Cancel the previous task if it's running.
if let recognitionTask = recognitionTask {
recognitionTask.cancel()
self.recognitionTask = nil
}
let audioSession = AVAudioSession.sharedInstance()
try audioSession.setCategory(AVAudioSessionCategoryRecord)
try audioSession.setMode(AVAudioSessionModeMeasurement)
try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
guard let inputNode = audioEngine.inputNode else { fatalError("Audio engine has no input node") }
guard let recognitionRequest = recognitionRequest else { fatalError("Unable to created a SFSpeechAudioBufferRecognitionRequest object") }
// Configure request so that results are returned before audio recording is finished
recognitionRequest.shouldReportPartialResults = true
// A recognition task represents a speech recognition session.
// We keep a reference to the task so that it can be cancelled.
recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in
var isFinal = false
if let result = result {
self.textView.text = result.bestTranscription.formattedString
isFinal = result.isFinal
}
if error != nil || isFinal {
self.audioEngine.stop()
inputNode.removeTap(onBus: 0)
self.recognitionRequest = nil
self.recognitionTask = nil
self.recordBtn.isEnabled = true
self.recordBtn.setTitle("Start Recording", for: [])
}
}
let recordingFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
self.recognitionRequest?.append(buffer)
}
audioEngine.prepare()
try audioEngine.start()
textView.text = "(listening...)"
}
// =========================================================================
// MARK: - UIPickerViewDataSource
func numberOfComponents(in pickerView: UIPickerView) -> Int {
return 1
}
func pickerView(_ pickerView: UIPickerView, numberOfRowsInComponent component: Int) -> Int {
return locales.count
}
// =========================================================================
// MARK: - UIPickerViewDelegate
func pickerView(_ pickerView: UIPickerView, titleForRow row: Int, forComponent component: Int) -> String? {
return locales[row].identifier
}
func pickerView(_ pickerView: UIPickerView, didSelectRow row: Int, inComponent component: Int) {
let locale = locales[row]
prepareRecognizer(locale: locale)
}
// =========================================================================
// MARK: - SFSpeechRecognizerDelegate
public func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
if available {
recordBtn.isEnabled = true
recordBtn.setTitle("Start Recording", for: [])
} else {
recordBtn.isEnabled = false
recordBtn.setTitle("Recognition not available", for: .disabled)
}
}
// =========================================================================
// MARK: - Actions
@IBAction func recordbuttontapped(_ sender: Any) {
if audioEngine.isRunning {
audioEngine.stop()
recognitionRequest?.endAudio()
recordBtn.isEnabled = false
recordBtn.setTitle("Stopping", for: .disabled)
} else {
try! startRecording()
recordBtn.setTitle("Stop recording", for: [])
}
}
@IBAction func speaktome(_ sender: Any) {
let something = textView.text!
let utterance = AVSpeechUtterance(string: something)
utterance.voice = AVSpeechSynthesisVoice(language: "en-US")
let synthesizer = AVSpeechSynthesizer()
synthesizer.speak(utterance)
}
}
안녕하십니까. 답장을 보내 주셔서 감사합니다. 제공되는 코드로이 작업을 수행하는 방법을 보여 주시겠습니까? 감사하겠습니다. 감사 – aneey123