import os
from dotenv import load_dotenv
load_dotenv()
import warnings
warnings.filterwarnings('ignore')
22 LangChain: Memory
22.1 Outline
- ConversationBufferMemory
- ConversationBufferWindowMemory
- ConversationTokenBufferMemory
- ConversationSummaryMemory
22.2 ConversationBufferMemory
Note: LLMs do not always produce the same results. When executing the code in your notebook, you may get slightly different answers than those in the video.
# account for deprecation of LLM model
import datetime
# Get the current date
current_date = datetime.datetime.now().date()
# Define the date after which the model should be set to "gpt-3.5-turbo"
target_date = datetime.date(2024, 6, 12)
# Set the model variable based on the current date
if current_date > target_date:
    llm_model = "gpt-3.5-turbo"
else:
    llm_model = "gpt-3.5-turbo-0301"
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
llm = ChatOpenAI(temperature=0.0, model=llm_model)
memory = ConversationBufferMemory()
conversation = ConversationChain(
llm=llm,
memory = memory,
verbose=True
)conversation.predict(input="Hi, my name is Andrew")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, my name is Andrew
AI:
> Finished chain.
"Hello Andrew! It's nice to meet you. How can I assist you today?"
conversation.predict(input="What is 1+1?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, my name is Andrew
AI: Hello Andrew! It's nice to meet you. How can I assist you today?
Human: What is 1+1?
AI:
> Finished chain.
'1+1 equals 2. Is there anything else you would like to know?'
conversation.predict(input="What is my name?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, my name is Andrew
AI: Hello Andrew! It's nice to meet you. How can I assist you today?
Human: What is 1+1?
AI: 1+1 equals 2. Is there anything else you would like to know?
Human: What is my name?
AI:
> Finished chain.
'Your name is Andrew.'
print(memory.buffer)
Human: Hi, my name is Andrew
AI: Hello Andrew! It's nice to meet you. How can I assist you today?
Human: What is 1+1?
AI: 1+1 equals 2. Is there anything else you would like to know?
Human: What is my name?
AI: Your name is Andrew.
memory.load_memory_variables({})
{'history': "Human: Hi, my name is Andrew\nAI: Hello Andrew! It's nice to meet you. How can I assist you today?\nHuman: What is 1+1?\nAI: 1+1 equals 2. Is there anything else you would like to know?\nHuman: What is my name?\nAI: Your name is Andrew."}
memory = ConversationBufferMemory()
memory.save_context({"input": "Hi"},
{"output": "What's up"})
print(memory.buffer)
memory.load_memory_variables({})
memory.save_context({"input": "Not much, just hanging"},
{"output": "Cool"})memory.load_memory_variables({})22.3 ConversationBufferWindowMemory
22.3 ConversationBufferWindowMemory
from langchain.memory import ConversationBufferWindowMemory
memory = ConversationBufferWindowMemory(k=1)
memory.save_context({"input": "Hi"},
{"output": "What's up"})
memory.save_context({"input": "Not much, just hanging"},
{"output": "Cool"})memory.load_memory_variables({})llm = ChatOpenAI(temperature=0.0, model=llm_model)
memory = ConversationBufferWindowMemory(k=1)
conversation = ConversationChain(
llm=llm,
memory = memory,
verbose=False
)conversation.predict(input="Hi, my name is Andrew")conversation.predict(input="What is 1+1?")conversation.predict(input="What is my name?")22.4 ConversationTokenBufferMemory
22.4 ConversationTokenBufferMemory
#!pip install tiktoken
from langchain.memory import ConversationTokenBufferMemory
from langchain.llms import OpenAI
llm = ChatOpenAI(temperature=0.0, model=llm_model)
memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=50)
memory.save_context({"input": "AI is what?!"},
{"output": "Amazing!"})
memory.save_context({"input": "Backpropagation is what?"},
{"output": "Beautiful!"})
memory.save_context({"input": "Chatbots are what?"},
{"output": "Charming!"})memory.load_memory_variables({})22.5 ConversationSummaryMemory
22.5 ConversationSummaryMemory
from langchain.memory import ConversationSummaryBufferMemory
# create a long string
schedule = "There is a meeting at 8am with your product team. \
You will need your powerpoint presentation prepared. \
9am-12pm have time to work on your LangChain \
project which will go quickly because LangChain is such a powerful tool. \
At noon, lunch at the Italian restaurant with a customer who is driving \
from over an hour away to meet you to understand the latest in AI. \
Be sure to bring your laptop to show the latest LLM demo."
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=100)
memory.save_context({"input": "Hello"}, {"output": "What's up"})
memory.save_context({"input": "Not much, just hanging"},
{"output": "Cool"})
memory.save_context({"input": "What is on the schedule today?"},
{"output": f"{schedule}"})memory.load_memory_variables({})conversation = ConversationChain(
llm=llm,
memory = memory,
verbose=True
)conversation.predict(input="What would be a good demo to show?")memory.load_memory_variables({})Reminder: Download your notebook to you local computer to save your work.