# Spaces:
# Sleeping
# Sleeping
import streamlit as st
from langchain.memory import ConversationBufferMemory
from models.openai.finetuned_models import finetuned_models, get_finetuned_chain
from models.openai.role_models import get_role_chain, role_templates
def add_initial_message(model_name, memory):
    """Seed a conversation memory with the texter's opening line.

    The opener's language is picked by inspecting *model_name*: any name
    containing the substring "Spanish" gets the Spanish greeting, every
    other name gets the English one.

    Args:
        model_name: Model identifier string used only for the language check.
        memory: Conversation memory exposing ``chat_memory.add_ai_message``
            (e.g. a LangChain ``ConversationBufferMemory``).
    """
    opener = "Hola necesito ayuda" if "Spanish" in model_name else "Hi I need help"
    memory.chat_memory.add_ai_message(opener)
def clear_memory(memories):
    """Reset each named conversation memory held in Streamlit session state.

    For every key in *memories*, a ``ConversationBufferMemory`` is created
    under that key if one does not exist yet, and the memory (new or
    pre-existing) is then cleared of all messages.

    Args:
        memories: Iterable of session-state keys identifying the memories.
    """
    for key in memories:
        if key not in st.session_state:
            st.session_state[key] = ConversationBufferMemory(
                ai_prefix='texter', human_prefix='helper'
            )
        st.session_state[key].clear()
def create_memory_add_initial_message(memories, language):
    """Ensure each named conversation memory exists and starts with an opener.

    For every key in *memories*: create a ``ConversationBufferMemory`` in
    Streamlit session state if missing, then seed it with the initial AI
    message (via ``add_initial_message``) whenever its buffer is empty.

    Fix over the original: the original called ``add_initial_message`` twice
    on a freshly created memory — once inside the creation branch and again
    via the empty-buffer check, which would have fired anyway. The duplicate
    call is removed; observable behavior is unchanged.

    Args:
        memories: Iterable of session-state keys identifying the memories.
        language: Passed to ``add_initial_message`` as the model/language
            selector (a "Spanish" substring selects the Spanish opener).
    """
    for key in memories:
        if key not in st.session_state:
            st.session_state[key] = ConversationBufferMemory(
                ai_prefix='texter', human_prefix='helper'
            )
        # A brand-new memory has an empty buffer, so this single guard
        # covers both the just-created and the previously-emptied cases.
        if len(st.session_state[key].buffer_as_messages) < 1:
            add_initial_message(language, st.session_state[key])
def get_chain(issue, language, source, memory, temperature):
    """Build the conversation chain for the selected model source.

    Args:
        issue: Issue/topic key, combined with *language* to look up the
            model or prompt template (``f"{issue}-{language}"``).
        language: Language key for the lookup.
        source: Model backend selector — "Finetuned OpenAI" or
            "OpenAI GPT3.5".
        memory: Conversation memory passed through to the chain factory.
        temperature: Sampling temperature passed through to the chain factory.

    Returns:
        The chain from ``get_finetuned_chain`` or ``get_role_chain``, or
        ``None`` when *source* matches neither backend.

    Bug fix: the original used ``source in ("Finetuned OpenAI")`` — the
    parentheses do NOT make a tuple, so this was a substring test against
    the string (e.g. ``source == "OpenAI"`` or ``source == ""`` matched the
    first branch). Exact equality is what was intended.
    """
    if source == "Finetuned OpenAI":
        OA_engine = finetuned_models[f"{issue}-{language}"]
        return get_finetuned_chain(OA_engine, memory, temperature)
    if source == "OpenAI GPT3.5":
        template = role_templates[f"{issue}-{language}"]
        return get_role_chain(template, memory, temperature)
    # Unknown source: fall through and return None, as the original did.
    return None