import os
from dotenv import load_dotenv
load_dotenv()
import warnings
warnings.filterwarnings('ignore')
18 LangChain: Memory
18.1 Outline
- ConversationBufferMemory
- ConversationBufferWindowMemory
- ConversationTokenBufferMemory
- ConversationSummaryBufferMemory
18.2 ConversationBufferMemory
Note: LLMs do not always produce the same results. When executing the code in your notebook, you may get slightly different answers than those in the video.
# account for deprecation of LLM model
import datetime

# Get the current date
current_date = datetime.datetime.now().date()

# Define the date after which the model should be set to "gpt-3.5-turbo"
target_date = datetime.date(2024, 6, 12)

# Set the model variable based on the current date
if current_date > target_date:
    llm_model = "gpt-3.5-turbo"
else:
    llm_model = "gpt-3.5-turbo-0301"
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
llm = ChatOpenAI(temperature=0.0, model=llm_model)
memory = ConversationBufferMemory()
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True
)
conversation.predict(input="Hi, my name is Andrew")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, my name is Andrew
AI:
> Finished chain.
"Hello Andrew! It's nice to meet you. How can I assist you today?"
input="What is 1+1?") conversation.predict(
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, my name is Andrew
AI: Hello Andrew! It's nice to meet you. How can I assist you today?
Human: What is 1+1?
AI:
> Finished chain.
'1+1 equals 2. Is there anything else you would like to know?'
input="What is my name?") conversation.predict(
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, my name is Andrew
AI: Hello Andrew! It's nice to meet you. How can I assist you today?
Human: What is 1+1?
AI: 1+1 equals 2. Is there anything else you would like to know?
Human: What is my name?
AI:
> Finished chain.
'Your name is Andrew.'
print(memory.buffer)
Human: Hi, my name is Andrew
AI: Hello Andrew! It's nice to meet you. How can I assist you today?
Human: What is 1+1?
AI: 1+1 equals 2. Is there anything else you would like to know?
Human: What is my name?
AI: Your name is Andrew.
memory.load_memory_variables({})
{'history': "Human: Hi, my name is Andrew\nAI: Hello Andrew! It's nice to meet you. How can I assist you today?\nHuman: What is 1+1?\nAI: 1+1 equals 2. Is there anything else you would like to know?\nHuman: What is my name?\nAI: Your name is Andrew."}
memory = ConversationBufferMemory()
memory.save_context({"input": "Hi"},
                    {"output": "What's up"})
print(memory.buffer)
memory.load_memory_variables({})
memory.save_context({"input": "Not much, just hanging"},
                    {"output": "Cool"})
memory.load_memory_variables({})
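To start a fresh conversation, the memory object can be reset in place; a small sketch:
memory.clear()         # drops everything stored so far
print(memory.buffer)   # now an empty string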
18.3 ConversationBufferWindowMemory
from langchain.memory import ConversationBufferWindowMemory
memory = ConversationBufferWindowMemory(k=1)
memory.save_context({"input": "Hi"},
                    {"output": "What's up"})
memory.save_context({"input": "Not much, just hanging"},
                    {"output": "Cool"})
memory.load_memory_variables({})
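The k parameter controls how many of the most recent exchanges the window retains. A quick comparison sketch on the same two exchanges (the variable name window is illustrative):
for k in (1, 2):
    window = ConversationBufferWindowMemory(k=k)
    window.save_context({"input": "Hi"}, {"output": "What's up"})
    window.save_context({"input": "Not much, just hanging"},
                        {"output": "Cool"})
    print(k, window.load_memory_variables({}))
# k=1 returns only the last exchange; k=2 returns both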
llm = ChatOpenAI(temperature=0.0, model=llm_model)
memory = ConversationBufferWindowMemory(k=1)
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=False
)
conversation.predict(input="Hi, my name is Andrew")
conversation.predict(input="What is 1+1?")
conversation.predict(input="What is my name?")
18.4 ConversationTokenBufferMemory
#!pip install tiktoken
from langchain.memory import ConversationTokenBufferMemory
from langchain.llms import OpenAI
llm = ChatOpenAI(temperature=0.0, model=llm_model)
memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=50)
memory.save_context({"input": "AI is what?!"},
                    {"output": "Amazing!"})
memory.save_context({"input": "Backpropagation is what?"},
                    {"output": "Beautiful!"})
memory.save_context({"input": "Chatbots are what?"},
                    {"output": "Charming!"})
memory.load_memory_variables({})
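The llm argument is what the memory uses to count tokens (hence the tiktoken install above), and max_token_limit caps how much of the tail of the conversation is kept. A hedged sketch reusing the same three exchanges with a larger limit (the name memory_big is illustrative):
# with a higher limit, more of the recent exchanges fit under the cap;
# at 200 tokens all three should plausibly survive
memory_big = ConversationTokenBufferMemory(llm=llm, max_token_limit=200)
for q, a in [("AI is what?!", "Amazing!"),
             ("Backpropagation is what?", "Beautiful!"),
             ("Chatbots are what?", "Charming!")]:
    memory_big.save_context({"input": q}, {"output": a})
memory_big.load_memory_variables({})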
18.5 ConversationSummaryBufferMemory
from langchain.memory import ConversationSummaryBufferMemory
# create a long string
= "There is a meeting at 8am with your product team. \
schedule You will need your powerpoint presentation prepared. \
9am-12pm have time to work on your LangChain \
project which will go quickly because Langchain is such a powerful tool. \
At Noon, lunch at the italian resturant with a customer who is driving \
from over an hour away to meet you to understand the latest in AI. \
Be sure to bring your laptop to show the latest LLM demo."
= ConversationSummaryBufferMemory(llm=llm, max_token_limit=100)
memory "input": "Hello"}, {"output": "What's up"})
memory.save_context({"input": "Not much, just hanging"},
memory.save_context({"output": "Cool"})
{"input": "What is on the schedule today?"},
memory.save_context({"output": f"{schedule}"}) {
memory.load_memory_variables({})
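Once the stored history exceeds max_token_limit, older turns are condensed into a running summary while the most recent turns that still fit are kept verbatim. Inspecting the memory's internals shows the split; a sketch, assuming the legacy LangChain attribute names:
print(memory.moving_summary_buffer)   # the condensed summary of older turns
print(memory.chat_memory.messages)    # any recent turns still kept verbatim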
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True
)
conversation.predict(input="What would be a good demo to show?")
memory.load_memory_variables({})
Reminder: Download your notebook to your local computer to save your work.