import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from diffusers import DiffusionPipeline
import torch
# Pick the device up front so the app also starts on CPU-only hardware
# (float16 is only reliable on GPU)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32

# Load the models and tokenizers
translation_model_name = "google/madlad400-3b-mt"
translation_model = AutoModelForSeq2SeqLM.from_pretrained(translation_model_name, torch_dtype=torch_dtype, device_map="auto")
translation_tokenizer = AutoTokenizer.from_pretrained(translation_model_name)
transcription_model = "chrisjay/fonxlsr"
diffusion_model_name = "stabilityai/stable-diffusion-xl-base-1.0"
diffusion_pipeline = DiffusionPipeline.from_pretrained(diffusion_model_name, torch_dtype=torch_dtype)
diffusion_pipeline = diffusion_pipeline.to(device)
# Define the translation and transcription pipelines
# (the translation model is already placed on devices above, so it is passed in directly)
translation_pipeline = pipeline("translation", model=translation_model, tokenizer=translation_tokenizer)
transcription_pipeline = pipeline("automatic-speech-recognition", model=transcription_model, device_map="auto")
# Define the function for transcribing and translating audio in Fon
def transcribe_and_translate_audio_fon(audio_path, num_images=1):
    # Transcribe the audio to Fon text using the ASR pipeline
    transcription_fon = transcription_pipeline(audio_path)["text"]
    # Translate the Fon transcription to French; MADLAD-400 expects the target
    # language as a "<2xx>" prefix token on the input text
    translation_result = translation_pipeline(f"<2fr> {transcription_fon}")
    translation_fr = translation_result[0]["translation_text"]
    # Generate images from the French translation with the diffusion model
    images = diffusion_pipeline(translation_fr, num_images_per_prompt=int(num_images)).images
    return images
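# A minimal local smoke test (a sketch; "sample_fon.wav" is a hypothetical path
# to a short Fon audio clip, not a file shipped with this Space):
#
#   imgs = transcribe_and_translate_audio_fon("sample_fon.wav", num_images=2)
#   imgs[0].save("preview.png")  # the diffusion pipeline returns PIL images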
# Gradio callback: wrap the core function so the interface inputs map to its arguments
def process_audio(audio, num_images):
    images = transcribe_and_translate_audio_fon(audio, num_images)
    return images
# Define the Gradio interface components (Gradio 4.x API: `sources=` and `columns=`)
audio_input = gr.Audio(sources=["upload"], type="filepath", label="Upload an audio file")
image_output = gr.Gallery(label="Generated Images", columns=2)
num_images_input = gr.Slider(minimum=1, maximum=5, step=1, value=1, label="Number of Images")
# Build and launch the Gradio interface
interface = gr.Interface(
    fn=process_audio,
    inputs=[audio_input, num_images_input],
    outputs=image_output,
    title="Fon Audio to Image Translation",
    description="Upload an audio file in Fon; the app transcribes it, translates the transcription to French, and generates images from the translation.",
)
interface.launch()
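# Note on dependencies (a sketch, not this Space's actual requirements.txt,
# which is not shown here): the code above assumes roughly
#
#   gradio>=4.0
#   torch
#   transformers
#   diffusers
#   accelerate      # needed for device_map="auto"
#   sentencepiece   # needed by the MADLAD-400 (T5-based) tokenizer
#
# plus ffmpeg available on the host for decoding uploaded audio files.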