# demorrha/pages/main.py
# Standard libraries
import base64
import io
import json
import os
import re
import tempfile
import time
from typing import Any, Dict, IO, List, Optional, Tuple, Union
from io import BytesIO
from copy import deepcopy
# Third-party libraries
import requests
import streamlit as st
#from audiorecorder import audiorecorder
from openai import OpenAI
from pydub import AudioSegment
import warnings
# Ignore DeprecationWarning
warnings.filterwarnings("ignore", category=DeprecationWarning)
from var_app import __version__
from core.files import load_ui_language
from core.files import read_file
from core.text_to_speech import openai_tts
from core.DetectLanguage import detect_language
# At the top of the file, right after the imports: Streamlit page configuration
st.set_page_config(
page_title=f"DEMORRHA - (v{__version__})",
page_icon="👹",
layout="wide",
initial_sidebar_state="collapsed"
)
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
def get_translation(key: str) -> str:
"""
Obtient la traduction pour une clé donnée basée sur la langue d'interface sélectionnée.
"""
lang = st.session_state.get('interface_language', 'English')
return translations.get(lang, {}).get(key, key)
# Dictionary to store translations
translations = load_ui_language()
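# Illustrative sketch (not part of the app flow, never called): how the
# translation lookup behaves. 'sidebar_titre' is a key used further down in
# this file; an unknown key simply falls back to itself. Assumes an active
# Streamlit session so that st.session_state is writable.
def _example_get_translation() -> None:
    st.session_state.interface_language = "English"
    print(get_translation("sidebar_titre"))     # translated label, if defined
    print(get_translation("no_such_key_demo"))  # falls back to "no_such_key_demo"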
def process_tts_message(text_response: str) -> Tuple[Optional[bytes], Optional[float]]:
    """
    Convert a text response to speech using the voice selected in session state.

    Returns:
        Tuple[Optional[bytes], Optional[float]]: The audio bytes and duration
        reported by openai_tts, or (None, None) if the synthesis fails.
    """
    try:
tts_output_ = openai_tts(
tts_voice=st.session_state.tts_voice,
tts_model="tts-1",
response_format="mp3",
speed=1.0
).text_to_speech(text_response)
return tts_output_["data_bytes"], tts_output_["audio_duration"]
except Exception as e:
st.error(f"Une erreur s'est produite lors de la conversion texte-parole : {e}")
return None, None
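# Illustrative sketch (not part of the app flow, never called): synthesize one
# sentence with the voice stored in session state and keep the MP3 bytes.
# Assumes an active Streamlit session and a valid OPENAI_API_KEY; the output
# file name is a placeholder.
def _example_process_tts_message() -> None:
    st.session_state.tts_voice = "onyx"
    audio_bytes, duration = process_tts_message("Hello from DEMORRHA.")
    if audio_bytes is not None:
        with open("tts_demo.mp3", "wb") as out:
            out.write(audio_bytes)
        print(f"Synthesized audio duration: {duration}")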
# Helper: split an audio file into segments below the transcription upload limit
def split_audio(audio_file, max_size_mb: int = 25) -> List[bytes]:
    """
    Split an audio file into segments no larger than the given size.

    Args:
        audio_file: Audio file opened in binary mode.
        max_size_mb (int): Maximum size of each segment, in MB.

    Returns:
        List[bytes]: The audio segments, exported as MP3 bytes.
    """
try:
audio_file.seek(0)
audio = AudioSegment.from_file(audio_file)
duration_ms = len(audio)
        # Milliseconds of uncompressed audio that fit in max_size_mb
        # (frame_rate * sample_width * channels = bytes of raw audio per second)
        segment_duration_ms = int(
            (max_size_mb * 1024 * 1024 * 1000) /
            (audio.frame_rate * audio.sample_width * audio.channels)
        )
segments = []
for start in range(0, duration_ms, segment_duration_ms):
end = min(start + segment_duration_ms, duration_ms)
segment = audio[start:end]
with io.BytesIO() as buffer:
segment.export(buffer, format="mp3")
segments.append(buffer.getvalue())
return segments
except Exception as e:
print(f"Une erreur s'est produite lors de la division de l'audio : {e}")
return []
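# Illustrative sketch (not part of the app flow, never called): split a local
# MP3 and report the size of each returned chunk. "example.mp3" is a
# placeholder path.
def _example_split_audio() -> None:
    with open("example.mp3", "rb") as audio_file:
        chunks = split_audio(audio_file, max_size_mb=25)
    for idx, chunk in enumerate(chunks):
        print(f"Segment {idx}: {len(chunk) / (1024 * 1024):.2f} MB")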
def transcribe_audio(filepath: Union[str, IO], language: Optional[str] = None) -> str:
    """
    Transcribe an audio file to text with the Whisper API.

    Args:
        filepath (Union[str, IO]): Path to the audio file, or a file object
            exposing a `name` attribute (e.g. a NamedTemporaryFile).
        language (Optional[str]): ISO 639-1 code of the audio language.
            Defaults to None (automatic detection).

    Returns:
        str: The transcribed text.
    """
max_size_mb = 25
try:
transcriptions = []
        with open(filepath if isinstance(filepath, str) else filepath.name, "rb") as f:
            # filepath can be a file path or a file-like object; either way the
            # audio is re-read in binary mode and cut into chunks small enough
            # for the transcription upload limit (max_size_mb).
            f.seek(0)
            audio = AudioSegment.from_file(f)
            duration_ms = len(audio)
            # Milliseconds of uncompressed audio that fit in max_size_mb
            # (frame_rate * sample_width * channels = bytes of raw audio per second)
            segment_duration_ms = int(
                (max_size_mb * 1024 * 1024 * 1000) /
                (audio.frame_rate * audio.sample_width * audio.channels)
            )
for start in range(0, duration_ms, segment_duration_ms):
end = min(start + segment_duration_ms, duration_ms)
segment = audio[start:end]
buffer = BytesIO()
segment.export(buffer, format="mp3")
buffer.seek(0)
                # Pass the language hint only when one is available
                transcription_kwargs = {"language": language} if language else {}
                response = client.audio.transcriptions.create(
                    model="whisper-1",
                    file=("audio.mp3", buffer),
                    response_format="text",
                    **transcription_kwargs
                )
transcriptions.append(response)
return " ".join(transcriptions)
except Exception as e:
print(f"Erreur lors de la transcription de l'audio : {e}")
return ""
def concatenate_audio_files(audio_list: List[Tuple[Union[bytes, str], float]]) -> Optional[bytes]:
"""
Concatène plusieurs fichiers audio avec des effets sonores.
Args:
audio_list (List[Tuple[Union[bytes, str], float]]): Une liste de tuples, chacun contenant
des octets audio (ou une chaîne base64) et la durée.
Returns:
Optional[bytes]: L'audio concaténé sous forme d'octets, ou None en cas d'erreur.
"""
    # Start from an empty audio segment
final_audio = AudioSegment.empty()
try:
        # Load the intro and outro sound effects
begin_sound = AudioSegment.from_mp3(
"sound-effects/voice-message-play-begin/voice-message-play-begin-1.mp3"
)
end_sound = AudioSegment.from_mp3(
"sound-effects/voice-message-play-ending/voice-message-play-ending-1.mp3"
)
        # 1.5 seconds of silence between messages
        silence = AudioSegment.silent(duration=1500)  # 1500 ms = 1.5 s
        for audio_data, _ in audio_list:
            # Accept either raw bytes or a base64-encoded string
            if isinstance(audio_data, str):
                audio_bytes = base64.b64decode(audio_data)
            else:
                audio_bytes = audio_data
            # Decode the bytes into an audio segment
            segment = AudioSegment.from_mp3(io.BytesIO(audio_bytes))
            # Chain: intro sound, TTS segment, outro sound, then silence
            final_audio += begin_sound + segment + end_sound + silence
        # Convert the final audio segment to bytes
buffer = io.BytesIO()
final_audio.export(buffer, format="mp3")
return buffer.getvalue()
except IOError as e:
print(f"Erreur lors de la lecture ou de l'écriture des fichiers audio : {e}")
return None
except Exception as e:
print(f"Une erreur inattendue s'est produite : {e}")
return None
def process_message(
message: str,
operation_prompt_: Optional[str] = "",
system_prompt_: Optional[str] = ""
):
"""
Traite les messages des utilisateurs et génère une réponse.
Args:
message (str): Le message d'entrée de l'utilisateur.
operation_prompt (str, optional): Prompt supplémentaire pour l'opération. Par défaut "".
tts_enabled (bool, optional): Si la synthèse vocale est activée. Par défaut False.
"""
payload_content = f'{operation_prompt_} :\n"""\n{message}\n"""'
st.session_state.messages = [
{"role": "system", "content": system_prompt_},
{"role": "user", "content": payload_content}
]
st.session_state["full_response"] = ""
try:
for response in client.chat.completions.create(
model="gpt-4o-mini",
messages=st.session_state.messages,
stream=True,
temperature=0.1):
st.session_state.full_response += (response.choices[0].delta.content or "")
yield st.session_state.full_response + "▌"
        # Strip any leading/trailing triple double-quotes from the response
st.session_state.full_response = re.sub(r'^"{3}|"{3}$', '', st.session_state.full_response.strip())
st.session_state.messages = []
return st.session_state.full_response
except Exception as e:
st.error(f"Une erreur s'est produite lors de la génération de la réponse : {e}")
return ""
class GlobalSystemPrompts:
"""Class to store global system prompts."""
@staticmethod
def linguascribe():
"""
Retrieve the system prompt for the Linguascribe feature.
Returns:
str: The system prompt for Linguascribe.
"""
try:
system_prompt = read_file('linguascribe.prompt')
return system_prompt
except FileNotFoundError:
print("Le fichier 'linguascribe.prompt' n'a pas été trouvé.")
return ""
except IOError as e:
print(f"Erreur lors de la lecture du fichier 'linguascribe.prompt': {e}")
return ""
# Function to configure the translation mode
def set_translation_mode(from_lang: str, dest_lang: str) -> Tuple[str, str]:
"""
Configure les prompts globaux pour le mode de traduction.
Args:
from_lang (str): La langue source.
dest_lang (str): La langue de destination.
Returns:
Tuple[str, str]: Un tuple contenant le prompt système et le prompt d'opération.
"""
system_prompt_ = GlobalSystemPrompts.linguascribe()
operation_prompt_ = f"Translate({from_lang} to {dest_lang})"
return system_prompt_, operation_prompt_
# List of languages supported by the application
SUPPORTED_LANGUAGES = [
"Afrikaans", "Arabic", "Armenian", "Azerbaijani", "Belarusian", "Bosnian",
"Bulgarian", "Catalan", "Chinese", "Croatian", "Czech", "Danish", "Dutch",
"English", "Estonian", "Finnish", "French", "Galician", "German", "Greek",
"Hebrew", "Hindi", "Hungarian", "Icelandic", "Indonesian", "Italian",
"Japanese", "Kannada", "Kazakh", "Korean", "Latvian", "Lithuanian",
"Macedonian", "Malay", "Marathi", "Maori", "Nepali", "Norwegian", "Persian",
"Polish", "Portuguese", "Romanian", "Russian", "Serbian", "Slovak",
"Slovenian", "Spanish", "Swahili", "Swedish", "Tagalog", "Tamil", "Thai",
"Turkish", "Ukrainian", "Urdu", "Vietnamese", "Welsh"
]
LANGUAGES_EMOJI = {
"Afrikaans": "🇿🇦", "Arabic": "🇸🇦", "Armenian": "🇦🇲", "Azerbaijani": "🇦🇿", "Belarusian": "🇧🇾",
"Bosnian": "🇧🇦", "Bulgarian": "🇧🇬", "Catalan": "🇪🇸", "Chinese": "🇨🇳", "Croatian": "🇭🇷",
"Czech": "🇨🇿", "Danish": "🇩🇰", "Dutch": "🇳🇱", "English": "🇬🇧", "Estonian": "🇪🇪",
"Finnish": "🇫🇮", "French": "🇫🇷", "Galician": "🇪🇸", "German": "🇩🇪", "Greek": "🇬🇷",
"Hebrew": "🇮🇱", "Hindi": "🇮🇳", "Hungarian": "🇭🇺", "Icelandic": "🇮🇸", "Indonesian": "🇮🇩",
"Italian": "🇮🇹", "Japanese": "🇯🇵", "Kannada": "🇮🇳", "Kazakh": "🇰🇿", "Korean": "🇰🇷",
"Latvian": "🇱🇻", "Lithuanian": "🇱🇹", "Macedonian": "🇲🇰", "Malay": "🇲🇾", "Marathi": "🇮🇳",
"Maori": "🇳🇿", "Nepali": "🇳🇵", "Norwegian": "🇳🇴", "Persian": "🇮🇷", "Polish": "🇵🇱",
"Portuguese": "🇵🇹", "Romanian": "🇷🇴", "Russian": "🇷🇺", "Serbian": "🇷🇸", "Slovak": "🇸🇰",
"Slovenian": "🇸🇮", "Spanish": "🇪🇸", "Swahili": "🇰🇪", "Swedish": "🇸🇪", "Tagalog": "🇵🇭",
"Tamil": "🇮🇳", "Thai": "🇹🇭", "Turkish": "🇹🇷", "Ukrainian": "🇺🇦", "Urdu": "🇵🇰",
"Vietnamese": "🇻🇳", "Welsh": "🏴󠁧󠁢󠁷󠁬󠁳󠁿"
}
def convert_iso6391_to_language_name(language_code: str,
                                     filter_mode=True) -> str:
    """
    Convert an ISO 639-1 code to a language name.

    Args:
        language_code (str): The ISO 639-1 code of the language.
        filter_mode (bool): Currently unused; kept for backward compatibility.

    Returns:
        str: The language name for the given code, or 'English' if not found.
    """
    # Mapping from ISO 639-1 codes to language names
iso_to_language: Dict[str, str] = {
"af": "Afrikaans", "ar": "Arabic", "hy": "Armenian", "az": "Azerbaijani",
"be": "Belarusian", "bs": "Bosnian", "bg": "Bulgarian", "ca": "Catalan",
"zh": "Chinese", "hr": "Croatian", "cs": "Czech", "da": "Danish",
"nl": "Dutch", "en": "English", "et": "Estonian", "fi": "Finnish",
"fr": "French", "gl": "Galician", "de": "German", "el": "Greek",
"he": "Hebrew", "hi": "Hindi", "hu": "Hungarian", "is": "Icelandic",
"id": "Indonesian", "it": "Italian", "ja": "Japanese", "kn": "Kannada",
"kk": "Kazakh", "ko": "Korean", "lv": "Latvian", "lt": "Lithuanian",
"mk": "Macedonian", "ms": "Malay", "mr": "Marathi", "mi": "Maori",
"ne": "Nepali", "no": "Norwegian", "fa": "Persian", "pl": "Polish",
"pt": "Portuguese", "ro": "Romanian", "ru": "Russian", "sr": "Serbian",
"sk": "Slovak", "sl": "Slovenian", "es": "Spanish", "sw": "Swahili",
"sv": "Swedish", "tl": "Tagalog", "ta": "Tamil", "th": "Thai",
"tr": "Turkish", "uk": "Ukrainian", "ur": "Urdu", "vi": "Vietnamese",
"cy": "Welsh"
}
default_ = "English"
    try:
        # Return the language name matching the ISO 639-1 code
        return iso_to_language[language_code]
    except KeyError:
        if f"{language_code}" in iso_to_language.values():
            # The caller already passed a language name; return it unchanged
            return language_code
        else:
            print(f"Code de langue non trouvé : {language_code}")
            return default_
def convert_language_name_to_iso6391(language_data: Union[str, Dict[str, str]]) -> str:
"""
Convertit un nom de langue en son code ISO 639-1.
Args:
language_data (Union[str, Dict[str, str]]): Le nom de la langue ou un dictionnaire
contenant le nom de la langue.
Returns:
str: Le code ISO 639-1 pour la langue donnée, ou 'en' si non trouvé.
"""
# Dictionnaire associant les noms de langues aux codes ISO 639-1
language_to_iso: Dict[str, str] = {
"Afrikaans": "af", "Arabic": "ar", "Armenian": "hy", "Azerbaijani": "az",
"Belarusian": "be", "Bosnian": "bs", "Bulgarian": "bg", "Catalan": "ca",
"Chinese": "zh", "Croatian": "hr", "Czech": "cs", "Danish": "da",
"Dutch": "nl", "English": "en", "Estonian": "et", "Finnish": "fi",
"French": "fr", "Galician": "gl", "German": "de", "Greek": "el",
"Hebrew": "he", "Hindi": "hi", "Hungarian": "hu", "Icelandic": "is",
"Indonesian": "id", "Italian": "it", "Japanese": "ja", "Kannada": "kn",
"Kazakh": "kk", "Korean": "ko", "Latvian": "lv", "Lithuanian": "lt",
"Macedonian": "mk", "Malay": "ms", "Marathi": "mr", "Maori": "mi",
"Nepali": "ne", "Norwegian": "no", "Persian": "fa", "Polish": "pl",
"Portuguese": "pt", "Romanian": "ro", "Russian": "ru", "Serbian": "sr",
"Slovak": "sk", "Slovenian": "sl", "Spanish": "es", "Swahili": "sw",
"Swedish": "sv", "Tagalog": "tl", "Tamil": "ta", "Thai": "th",
"Turkish": "tr", "Ukrainian": "uk", "Urdu": "ur", "Vietnamese": "vi",
"Welsh": "cy"
}
default_ = "en"
    # language_data may be a plain name or a dictionary
    if isinstance(language_data, dict):
        language_name = language_data.get('language', '')
    else:
        language_name = language_data
    try:
        # Return the ISO 639-1 code matching the language name
        return language_to_iso[language_name]
    except KeyError:
        if f"{language_name}" in language_to_iso.values():
            # The caller already passed an ISO 639-1 code; return it unchanged
            return language_name
        else:
            print(f"Langue non trouvée : {language_name}")
            return default_
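# Illustrative sketch (not part of the app flow, never called): round-trip
# between language names and ISO 639-1 codes, including the documented
# fallbacks for unknown values.
def _example_language_code_conversion() -> None:
    print(convert_language_name_to_iso6391("French"))                  # "fr"
    print(convert_language_name_to_iso6391({"language": "Japanese"}))  # "ja"
    print(convert_iso6391_to_language_name("fr"))                      # "French"
    print(convert_iso6391_to_language_name("xx"))                      # falls back to "English"
    print(convert_language_name_to_iso6391("Klingon"))                 # falls back to "en"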
def init_process_mode(
from_lang: str,
to_lang: str,
process_mode: Optional[str|list] = "translator"
) -> Tuple[str, str]:
"""
Initialise le mode de traitement pour la traduction si nécessaire.
Returns:
Tuple[str, str]: Un tuple contenant le prompt système et le prompt d'opération.
"""
# from (e.g.: st.session_state.language_detected)
# dest (e.g.: st.session_state.target_language)
if "translator" in process_mode:
system_prompt, operation_prompt = set_translation_mode(
from_lang=f"{from_lang}",
dest_lang=f"{to_lang}"
)
return system_prompt, operation_prompt
return "", ""
@st.dialog("Settings")
def tts_settings(name__tts_voice,
state__tts_with_text,
state__tts_with_audio,
state__autoplay_tts):
with st.expander(f"{get_translation('parametres_tts')}",
expanded=True,
icon="🔊"):
        voices = ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
        set__tts_voice = st.selectbox(
            get_translation("choix_voix_tts"),
            options=voices,
            index=voices.index(name__tts_voice)
        )
set__tts_with_text = st.checkbox(
get_translation("activer_tts_texte"),
value=state__tts_with_text
)
set__tts_with_audio = st.checkbox(
get_translation("activer_tts_audio"),
value=state__tts_with_audio
)
set__autoplay_tts = st.checkbox(
get_translation("lecture_auto_tts"),
value=state__autoplay_tts
)
if st.button("Submit"):
st.session_state.autoplay_tts = set__autoplay_tts
st.session_state.enable_tts_for_input_from_audio_record = set__tts_with_audio
st.session_state.enable_tts_for_input_from_text_field = set__tts_with_text
st.session_state.tts_voice = set__tts_voice
st.rerun()
@st.fragment
def recorder_released():
    """Handle a completed voice recording: detect the language, transcribe it,
    then translate (and optionally synthesize) the result."""
    audio_recorded = bool(st.session_state.get("rec_widget"))
    if audio_recorded:
audio = AudioSegment.from_wav(io.BytesIO(st.session_state.rec_widget.getvalue()))
st.write(f"Frame rate: {audio.frame_rate}, Frame width: {audio.frame_width}, Duration: {audio.duration_seconds} seconds")
        if not st.session_state.language_detected:
            # 1. Check whether the recording is longer than 600 seconds (10 minutes)
if audio.duration_seconds > 600:
# PyDub handles time in milliseconds
ten_minutes = 10 * 60 * 1000
first_ten_minutes_audio = audio[:ten_minutes]
            else:
                # Less than ten minutes: keep the whole recording (the variable
                # name is kept for symmetry with the branch above)
                first_ten_minutes_audio = deepcopy(audio)
with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp_extract:
first_ten_minutes_audio.export(tmp_extract, format="mp3")
tmp_extract.close()
                # Transcribe without passing the language argument, then run
                # detect_language on the transcript to identify the spoken language.
                # Only the first ten minutes are transcribed for this detection step.
st.session_state.language_detected = detect_language(
input_text = transcribe_audio(tmp_extract),
temperature = 0.01
)
first_ten_minutes_audio = AudioSegment.empty()
st.markdown(
f"- {get_translation('langue_detectee')}".format(
f"{convert_iso6391_to_language_name(st.session_state.language_detected)}"
)
)
# ##############################################################
try:
with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp_file:
audio.export(tmp_file, format="mp3")
tmp_file.close()
                # Transcribe the audio to text
st.session_state.transcription = transcribe_audio(
tmp_file,
language=st.session_state.language_detected
)
audio = AudioSegment.empty()
st.markdown(
f"🎤 {get_translation('transcription_audio')}".format(
f"{st.session_state.transcription}"
)
)
st.session_state.audio_list = []
for cursor_selected_lang in st.session_state.selected_languages:
st.session_state.target_language = cursor_selected_lang["iso-639-1"]
st.session_state.full_response = ""
                    # Initialise the processing mode for the current target language
st.session_state.system_prompt, st.session_state.operation_prompt = init_process_mode(from_lang=
(
                            st.session_state.language_detected
                            if st.session_state.language_detected
                            else convert_language_name_to_iso6391(
                                st.session_state.get("interface_language", "English")
                            )
),
to_lang=st.session_state.target_language
)
with st.chat_message("assistant", avatar="👻"):
message_placeholder = st.empty()
st.session_state.response_generator = process_message(
st.session_state.transcription,
st.session_state.operation_prompt,
st.session_state.system_prompt
)
for response_chunk in st.session_state.response_generator:
message_placeholder.markdown(response_chunk)
st.session_state.end_response = st.session_state.response_generator.close()
if st.session_state.full_response != "":
message_placeholder.markdown(st.session_state.full_response)
if st.session_state.enable_tts_for_input_from_audio_record:
st.session_state.tts_audio, st.session_state.tts_duration = process_tts_message(st.session_state.full_response)
if st.session_state.tts_audio:
st.session_state.audio_list.append(
( st.session_state.tts_audio,
st.session_state.tts_duration )
)
else:
pass
if st.session_state.audio_list:
st.session_state.final_audio = concatenate_audio_files(st.session_state.audio_list)
with st.container(border=True):
                        # Generate a unique file name
st.session_state.timestamp = time.strftime("%Y%m%d-%H%M%S")
st.session_state.langues = "_".join([lang["iso-639-1"] for lang in st.session_state.selected_languages])
st.session_state.nom_fichier = f"reponse_audio_{st.session_state.langues}_{st.session_state.timestamp}.mp3"
st.audio(st.session_state.final_audio,
format="audio/mp3",
autoplay=st.session_state.autoplay_tts)
st.download_button(
label=f"📥 {get_translation('telecharger_audio')}",
data=st.session_state.final_audio,
file_name=st.session_state.nom_fichier,
mime="audio/mp3",
use_container_width=True,
type="primary",
key=f"download_button_{st.session_state.langues}_{st.session_state.timestamp}",
)
except Exception as e:
st.error(f"[AUDIO] - Erreur lors de l'exportation de l'audio : {str(e)}")
#finally:
# clear_inputs_garbages()
# if 'tmp_file' in locals():
# os.unlink(tmp_file.name)
##st.session_state.rec_widget.close()
#if "rec_widget" in st.session_state:
# list_dir_obj = dir(st.session_state.rec_widget)
# for itl in list_dir_obj:
# if "__" not in itl:
# st.write(f"***{itl}*** :\n{getattr(st.session_state.rec_widget, itl).__doc__}")
def main_page():
"""Page principale de l'application."""
# Initialisation des variables d'état de session
if "ui_loaded" not in st.session_state:
st.session_state["ui_loaded"] = False
if "language_detected" not in st.session_state:
st.session_state["language_detected"] = None
if "process_mode" not in st.session_state:
st.session_state["process_mode"] = "translation"
if "target_language" not in st.session_state:
st.session_state.target_language = "en"
if "selected_languages" not in st.session_state:
st.session_state.selected_languages = [
{"language": "English", "iso-639-1": "en"}
]
if "interface_language_select" not in st.session_state:
        st.session_state.interface_language_select = "English"  # Default language
if "enable_tts_for_input_from_audio_record" not in st.session_state:
st.session_state["enable_tts_for_input_from_audio_record"] = False
if "autoplay_tts" not in st.session_state:
st.session_state["autoplay_tts"] = False
if "enable_tts_for_input_from_text_field" not in st.session_state:
st.session_state["enable_tts_for_input_from_text_field"] = False
if "tts_voice" not in st.session_state:
st.session_state["tts_voice"] = "onyx"
    # Initialise the message history
if "messages" not in st.session_state:
st.session_state.messages = []
    def on_languages_change() -> None:
        """Callback for a change of the destination language selection."""
        # clear_inputs_garbages()
selected_language_names: List[str] = st.session_state.language_selector
st.session_state.selected_languages = [
{"language": lang, "iso-639-1": convert_language_name_to_iso6391(lang)}
for lang in selected_language_names
]
    # Sidebar configuration
with st.sidebar:
st.logo("img/logo_2.png", icon_image="img/logo_2.png")
st.header(get_translation("sidebar_titre"))
st.write(f"#### Settings")
if st.button(f"Text-To-Speech"):
tts_settings(
name__tts_voice = st.session_state.tts_voice,
state__tts_with_text = st.session_state.enable_tts_for_input_from_text_field,
state__tts_with_audio = st.session_state.enable_tts_for_input_from_audio_record,
state__autoplay_tts = st.session_state.autoplay_tts
)
with st.expander(f"{get_translation('a_propos')}",
expanded=False,
icon="ℹ️"):
st.subheader(f"version: {__version__}")
st.info(get_translation("info_app"))
with st.expander(f"{get_translation('selection_langue')}",
expanded=True,
icon="🌐"):
            # Container for the language selection
            # Multiple selection of destination languages
st.multiselect(
label=get_translation("langues_destination"),
placeholder=get_translation("placeholder_langues"),
options=SUPPORTED_LANGUAGES,
default=["English"],
key="language_selector",
max_selections=4,
on_change=on_languages_change,
format_func=lambda lang: f"{LANGUAGES_EMOJI.get(lang, '')} {lang}"
)
with st.container(border=True):
        # Text chat input
st.session_state.user_input = st.chat_input(
get_translation("entrez_message")
)
        st.experimental_audio_input("Record a voice message", on_change=recorder_released, key="rec_widget")
#audiorecorder(
# start_prompt=get_translation("cliquez_enregistrer"),
# stop_prompt=get_translation("cliquez_arreter"),
# pause_prompt=get_translation("cliquez_pause"),
# show_visualizer=True,
# key="vocal_chat_input"
#)
if st.session_state.user_input:
        # Reset the previous state
st.session_state.full_response = ""
with st.chat_message("user", avatar="👤"):
st.markdown(st.session_state.user_input)
        # Process the user's text message
if st.session_state.language_detected is None:
st.session_state.language_detected = detect_language(
input_text=st.session_state.user_input, temperature=0.01
)
st.session_state.audio_list = []
for cursor_selected_lang in st.session_state.selected_languages:
st.session_state.target_language = cursor_selected_lang["iso-639-1"]
target_language_name = cursor_selected_lang["language"]
            # Reset the messages before processing a new input
st.session_state.messages = []
st.session_state.full_response = ""
            # Initialise the processing mode for the current target language
st.session_state.system_prompt, st.session_state.operation_prompt = init_process_mode(from_lang=
(
                    st.session_state.language_detected
                    if st.session_state.language_detected
                    else convert_language_name_to_iso6391(
                        st.session_state.get("interface_language", "English")
                    )
),
to_lang=st.session_state.target_language
)
            # If either prompt is empty, show an error and abort this run
if (not st.session_state.system_prompt) or (not st.session_state.operation_prompt):
st.error("Erreur : Les prompts système ou d'opération sont vides.")
raise ValueError("Les prompts système ou d'opération ne peuvent pas être vides.")
with st.status(f'({target_language_name}) - {get_translation("traduction_en_cours")}', expanded=True) as response_status:
with st.chat_message("assistant", avatar="👻"):
message_placeholder = st.empty()
st.session_state.response_generator = process_message(
st.session_state.user_input,
st.session_state.operation_prompt,
st.session_state.system_prompt
)
response_status.update(label=f'({target_language_name}) - {get_translation("traduction_en_cours")}', state="running", expanded=True)
for response_chunk in st.session_state.response_generator:
message_placeholder.markdown(response_chunk)
                    st.session_state.end_response = st.session_state.response_generator.close()  # Close the stream; the full text stays in st.session_state.full_response
if st.session_state.full_response != "":
message_placeholder.markdown(st.session_state.full_response)
if st.session_state.enable_tts_for_input_from_text_field:
response_status.update(label=f'({target_language_name}) - {get_translation("traduction_terminee")} ; {get_translation("synthese_vocale_en_cours")}', state="running", expanded=False)
st.session_state.tts_audio, st.session_state.tts_duration = process_tts_message(st.session_state.full_response)
del st.session_state.full_response
                            if st.session_state.tts_audio:
                                # tts_audio may be raw MP3 bytes or a base64 string,
                                # depending on what openai_tts returns
                                tts_bytes = (
                                    base64.b64decode(st.session_state.tts_audio)
                                    if isinstance(st.session_state.tts_audio, str)
                                    else st.session_state.tts_audio
                                )
                                st.audio(tts_bytes, format="audio/mp3", autoplay=False)
                                st.session_state.audio_list.append((tts_bytes, st.session_state.tts_duration))
response_status.update(label=f'({target_language_name}) - {get_translation("traduction_terminee")} ; {get_translation("synthese_vocale_terminee")}', state="complete", expanded=False)
else:
response_status.update(label=f'({target_language_name}) - {get_translation("erreur_synthese_vocale")}', state="error", expanded=False)
else:
response_status.update(label=f'({target_language_name}) - {get_translation("traduction_terminee")}', state="complete", expanded=False)
else:
response_status.update(label=f'({target_language_name}) - {get_translation("erreur_traduction")}', state="error", expanded=False)
if st.session_state.audio_list:
with st.status(f"{get_translation('concatenation_audio_en_cours')}", expanded=False) as audio_status:
audio_status.update(label=f"{get_translation('concatenation_audio_en_cours')}", state="running", expanded=False)
try:
st.session_state.final_audio = concatenate_audio_files(st.session_state.audio_list)
with st.container(border=True):
                        # Generate a unique file name
st.session_state.timestamp = time.strftime("%Y%m%d-%H%M%S")
st.session_state.langues = "_".join([lang["iso-639-1"] for lang in st.session_state.selected_languages])
st.session_state.nom_fichier = f"reponse_audio_{st.session_state.langues}_{st.session_state.timestamp}.mp3"
st.audio(st.session_state.final_audio, format="audio/mp3", autoplay=st.session_state.autoplay_tts)
st.download_button(
label=f"📥 {get_translation('telecharger_audio')}",
data=st.session_state.final_audio,
file_name=st.session_state.nom_fichier,
mime="audio/mp3",
use_container_width=True,
type="primary",
key=f"download_button_{st.session_state.langues}_{st.session_state.timestamp}",
)
# ##
audio_status.update(label=f"{get_translation('concatenation_audio_terminee')}", state="complete", expanded=True)
except Exception as e:
st.error(f"{get_translation('erreur_concatenation_audio')} : {str(e)}")
# ##
audio_status.update(label=f"{get_translation('erreur_concatenation_audio')} : {str(e)}", state="error", expanded=True)
#clear_inputs_garbages()
    # User interface for the audio recording
# st.write(f"🗣️ {get_translation('enregistrez_message')}")
def clear_inputs_garbages(sessions_state_list: Optional[List[str]] = None) -> None:
    """Remove transient keys from the Streamlit session state (missing keys are ignored)."""
    if sessions_state_list is None:
        sessions_state_list = [
            'transcription', 'operation_prompt', 'system_prompt',
            'audio_list', 'full_response', 'tts_audio',
            'tts_duration', 'timestamp', 'langues',
            'nom_fichier', 'final_audio', 'response_generator',
            'end_response', 'messages', 'audio', 'user_input'
        ]
    def delete_session_state_var(var_name: str):
        if var_name in st.session_state:
            del st.session_state[var_name]
    for it_var_name in sessions_state_list:
        delete_session_state_var(it_var_name)
#clear_inputs_garbages()
main_page()