# script.py — Whisper speech-to-text gradio extension
# (pasted file-listing header and line-number gutter removed)
  1. import gradio as gr
  2. import speech_recognition as sr
# Shared "input hijack" channel: when 'state' is True, 'value' holds the
# transcription twice ([visible_text, internal_text]) for the host app to
# consume as the next chat input.
# NOTE(review): the consumer is not visible in this file — presumably the
# hosting UI reads and resets this dict; confirm against the caller.
input_hijack = {
    'state': False,
    'value': ["", ""]
}
  7. def do_stt(audio, text_state=""):
  8. transcription = ""
  9. r = sr.Recognizer()
  10. # Convert to AudioData
  11. audio_data = sr.AudioData(sample_rate=audio[0], frame_data=audio[1], sample_width=4)
  12. try:
  13. transcription = r.recognize_whisper(audio_data, language="english", model="base.en")
  14. except sr.UnknownValueError:
  15. print("Whisper could not understand audio")
  16. except sr.RequestError as e:
  17. print("Could not request results from Whisper", e)
  18. input_hijack.update({"state": True, "value": [transcription, transcription]})
  19. text_state += transcription + " "
  20. return text_state, text_state
  21. def update_hijack(val):
  22. input_hijack.update({"state": True, "value": [val, val]})
  23. return val
  24. def auto_transcribe(audio, audio_auto, text_state=""):
  25. if audio is None:
  26. return "", ""
  27. if audio_auto:
  28. return do_stt(audio, text_state)
  29. return "", ""
def ui():
    """Build the speech-to-text gradio components and wire their events.

    Component creation order defines the on-screen layout, so statements
    must not be reordered.
    NOTE(review): indentation was lost in this paste — the ``gr.Row`` is
    assumed to contain the microphone widget and the Transcribe button;
    confirm against the original file.
    """
    # Hidden state that accumulates the transcription across events.
    tr_state = gr.State(value="")
    # Editable preview of the transcribed text.
    output_transcription = gr.Textbox(label="STT-Input",
                                      placeholder="Speech Preview. Click \"Generate\" to send",
                                      interactive=True)
    # Manual edits to the preview are mirrored into the hijack dict / state.
    output_transcription.change(fn=update_hijack, inputs=[output_transcription], outputs=[tr_state])
    audio_auto = gr.Checkbox(label="Auto-Transcribe", value=True)
    with gr.Row():
        audio = gr.Audio(source="microphone")
        # Fires when a recording finishes; transcribes only if auto is enabled.
        audio.change(fn=auto_transcribe, inputs=[audio, audio_auto, tr_state], outputs=[output_transcription, tr_state])
        transcribe_button = gr.Button(value="Transcribe")
        # Manual trigger: always transcribes the current clip.
        transcribe_button.click(do_stt, inputs=[audio, tr_state], outputs=[output_transcription, tr_state])