import os
import re
import tempfile
import time
from io import BytesIO

import requests
import streamlit as st
from dotenv import load_dotenv
from elevenlabs import play
from elevenlabs.client import ElevenLabs
from groq import Groq
from gtts import gTTS
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI
from pydub import AudioSegment

# Load environment variables from the .env file
load_dotenv()
2. Load the API keys and initialize the models
# Initialize the Groq client
client = Groq(api_key=os.getenv('GROQ_API_KEY'))

# Initialize the chat model used for LLM responses (an OpenAI-compatible endpoint served via CometAPI)
llm = ChatOpenAI(
    model_name="gpt-4.1-mini",
    openai_api_key=os.getenv("COMET_API_KEY"),
    openai_api_base="https://api.cometapi.com/v1"
)

# Set the path to the ffmpeg executable used by pydub
AudioSegment.converter = "/bin/ffmpeg"
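The text-to-speech helper further down uses an ElevenLabs client named tts_client, which is not initialized anywhere in this excerpt. Below is a minimal sketch of that initialization, together with a quick check that the expected keys were actually loaded from .env; the ELEVENLABS_API_KEY variable name is an assumption, so adjust it to your own setup.

# Hypothetical initialization of the ElevenLabs client used by create_welcome_message();
# the environment-variable name is assumed and may differ in your .env
tts_client = ElevenLabs(api_key=os.getenv("ELEVENLABS_API_KEY"))

# Optional sanity check: confirm the expected keys are present
for key in ("GROQ_API_KEY", "COMET_API_KEY", "ELEVENLABS_API_KEY"):
    print(key, "is set" if os.getenv(key) else "is MISSING")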
Next, we define the function that builds the emergency-operator prompt, streams a response from the LLM, and lightly cleans the output before returning it:
def get_llm_response(query, chat_history):
    try:
        template = """
        You are an experienced Emergency Response Phone Operator trained to handle critical situations in India.
        Your role is to guide users calmly and clearly during emergencies involving:
        - Medical crises (injuries, heart attacks, etc.)
        - Fire incidents
        - Police/law enforcement assistance
        - Suicide prevention or mental health crises

        You must:
        1. **Remain calm and assertive**, as if speaking on a phone call.
        2. **Ask for and confirm key details** like location, condition of the person, number of people involved, etc.
        3. **Provide immediate and practical steps** the user can take before help arrives.
        4. **Share accurate, India-based emergency helpline numbers** (e.g., 112, 102, 108, 1091, 1098, 9152987821, etc.).
        5. **Prioritize user safety**, and clearly instruct them what *not* to do as well.
        6. If the situation involves **suicidal thoughts or mental distress**, respond with compassion and direct them to appropriate mental health helplines and safety actions.

        If the user's query is not related to an emergency, respond with:
        "I can only assist with urgent emergency-related issues. Please contact a general support line for non-emergency questions."

        Use an authoritative, supportive tone, short and direct sentences, and tailor your guidance to **urban and rural Indian contexts**.

        **Chat History:** {chat_history}
        **User:** {user_query}
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | llm | StrOutputParser()

        # Stream the response and join the chunks into a single string
        response_gen = chain.stream({
            "chat_history": chat_history,
            "user_query": query
        })
        response_text = ''.join(list(response_gen))
        response_text = remove_punctuation(response_text)

        # Remove repeated lines while preserving order
        response_lines = response_text.split('\n')
        unique_lines = list(dict.fromkeys(response_lines))
        cleaned_response = '\n'.join(unique_lines)
        return cleaned_response
    except Exception as e:
        st.error(f"Error during LLM response generation: {e}")
        return "Error"
We also define a helper that synthesizes the operator's spoken greeting with ElevenLabs and writes the streamed audio to a temporary MP3 file:
def create_welcome_message():
    welcome_text = (
        "Hello, you’ve reached the Emergency Help Desk. "
        "Please let me know if it's a medical, fire, police, or mental health emergency—"
        "I'm here to guide you right away."
    )
    try:
        # Request speech synthesis (streaming generator)
        response_stream = tts_client.text_to_speech.convert(
            text=welcome_text,
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            model_id="eleven_multilingual_v2",
            output_format="mp3_44100_128",
        )
        # Save streamed bytes to a temp file
        with tempfile.NamedTemporaryFile(delete=False, suffix='.mp3') as f:
            for chunk in response_stream:
                f.write(chunk)
        return f.name
    except requests.ConnectionError:
        st.error("Failed to generate welcome message due to connection error.")
    except Exception as e:
        st.error(f"Error creating welcome message: {e}")
    return None
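The Groq client initialized earlier is never used in the code shown here; in projects like this it typically handles speech-to-text for the caller's recordings. The original helper is not included in this excerpt, but a rough sketch using Groq's Whisper transcription endpoint might look like the following; the function name and model choice are assumptions, not the article's own code.

def transcribe_audio(audio_file_path):
    """Hypothetical helper: transcribe a recorded audio file with Groq Whisper."""
    try:
        with open(audio_file_path, "rb") as audio_file:
            transcription = client.audio.transcriptions.create(
                file=(audio_file_path, audio_file.read()),
                model="whisper-large-v3",
            )
        return transcription.text
    except Exception as e:
        st.error(f"Error during transcription: {e}")
        return ""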
With these utilities in place, we move to the main Streamlit application file, which imports the recorder widget and pulls in the helpers defined above via "from utils import *":

import re  # can be removed if unused
import tempfile
from io import BytesIO

import streamlit as st
from audio_recorder_streamlit import audio_recorder
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from pydub import AudioSegment

from utils import *
st.title(":blue[Emergency Help Bot] 🚨🚑🆘")
st.sidebar.image('./emergency.jpg', use_column_width=True)
Next, we set up the session state to track the chat history and the audio that has been played:
if"chat_history"notin st.session_state:
st.session_state.chat_history = []
if"chat_histories"notin st.session_state:
st.session_state.chat_histories = []
if"played_audios"notin st.session_state:
st.session_state.played_audios = {}
if "chat_history" not in st.session_state:
st.session_state.chat_history = []
if "chat_histories" not in st.session_state:
st.session_state.chat_histories = []
if "played_audios" not in st.session_state:
st.session_state.played_audios = {}
if "chat_history" not in st.session_state:
st.session_state.chat_history = []
if "chat_histories" not in st.session_state:
st.session_state.chat_histories = []
if "played_audios" not in st.session_state:
st.session_state.played_audios = {}
Calling the utility functions
We create the welcome-message introduction from the responder's side; this is where the conversation begins.
if len(st.session_state.chat_history) == 0:
    welcome_audio_path = create_welcome_message()
    st.session_state.chat_history = [AIMessage(content="Hello, you’ve reached the Emergency Help Desk. Please let me know if it's a medical, fire, police, or mental health emergency—I'm here to guide you right away.", audio_file=welcome_audio_path)]
    st.session_state.played_audios[welcome_audio_path] = False
if st.button("Start New Chat"):
st.session_state.chat_histories.append(st.session_state.chat_history)
welcome_audio_path = create_welcome_message()
st.session_state.chat_history = [AIMessage(content="Hello, you’ve reached the Emergency Help Desk. Please let me know if it's a medical, fire, police, or mental health emergency—I'm here to guide you right away.", audio_file=welcome_audio_path)]
if st.button("Start New Chat"):
st.session_state.chat_histories.append(st.session_state.chat_history)
welcome_audio_path = create_welcome_message()
st.session_state.chat_history = [AIMessage(content="Hello, you’ve reached the Emergency Help Desk. Please let me know if it's a medical, fire, police, or mental health emergency—I'm here to guide you right away.", audio_file=welcome_audio_path)]
On the app's main page, we display the chat history as click-to-play audio clips:
for msg in st.session_state.chat_history:
    if isinstance(msg, AIMessage):
        with st.chat_message("AI"):
            st.audio(msg.audio_file, format="audio/mp3")
    else:  # HumanMessage
        with st.chat_message("user"):
            st.audio(msg.audio_file, format="audio/wav")
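The excerpt ends here and does not show the turn-handling logic that records the caller, transcribes the audio, and speaks the reply. As a rough sketch of how those pieces could fit together, assuming the transcription helper sketched earlier and a hypothetical text_to_speech helper (neither taken from the original article):

audio_bytes = audio_recorder()  # audio-recorder-streamlit returns WAV bytes once recording stops
if audio_bytes:
    # Persist the caller's recording so it can be replayed from the history
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
        f.write(audio_bytes)
        user_audio_path = f.name

    user_text = transcribe_audio(user_audio_path)      # hypothetical speech-to-text helper
    st.session_state.chat_history.append(
        HumanMessage(content=user_text, audio_file=user_audio_path)
    )

    reply_text = get_llm_response(user_text, st.session_state.chat_history)
    reply_audio_path = text_to_speech(reply_text)      # hypothetical TTS helper returning an MP3 path
    st.session_state.chat_history.append(
        AIMessage(content=reply_text, audio_file=reply_audio_path)
    )
    st.rerun()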