# Environment and LangChain setup.
# NOTE(review): the original export duplicated this entire import block and
# captured the interactive output of load_dotenv() ("True") as stray lines;
# the duplicates are collapsed into a single import section here.
from dotenv import load_dotenv
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Load API credentials (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()  # returned True in the recorded session (a .env file was found)
# --- Basic LCEL chain: prompt -> model -> output parser ------------------------
# NOTE(review): the export shifted each assignment target onto the next line;
# statements are reconstructed from the shifted-name pattern.
prompt = ChatPromptTemplate.from_template("tell me a short joke about {topic}")
model = ChatOpenAI()
output_parser = StrOutputParser()

# The | operator composes Runnables into a sequence (LCEL).
chain = prompt | model | output_parser
chain.invoke({"topic": "ice cream"})
# recorded output:
# 'Why did the ice cream truck break down? Because it had too many "scoops" on board!'

# Invoking the prompt alone shows the message list it produces.
print(prompt.invoke({"topic": "ice cream"}))
# recorded output: messages=[HumanMessage(content='tell me a short joke about ice cream')]

from langchain_core.messages.human import HumanMessage

# The model can also be invoked directly with a list of messages.
messages = [HumanMessage(content='tell me a short joke about ice cream')]
model.invoke(messages)
# recorded output: AIMessage(content='Why did the ice cream go to therapy? ...',
#                            response_metadata={...}, id='run-...')
# --- Aside: on plain integers, | is bitwise OR (not chain composition) ---------
x = 5  # Binary: 0101
y = 3  # Binary: 0011

result = x | y  # Bitwise OR
print(result)  # Output: 7 (Binary: 0111)
from abc import ABC, abstractmethod


class CRunnable(ABC):
    """Minimal re-implementation of LangChain's Runnable protocol.

    Subclasses implement process(); invoke() runs process() on the input and
    forwards the result to the next runnable in the chain, if one is set.
    The | operator links two runnables into a CRunnableSequence.
    """

    def __init__(self):
        # Next runnable in the chain; stays None unless explicitly linked.
        self.next = None

    @abstractmethod
    def process(self, data):
        """
        This method must be implemented by subclasses to define
        data processing behavior.
        """
        pass

    def invoke(self, data):
        """Run process() on *data*, then delegate to the next runnable if any.

        BUG FIX: the original referenced `processed_data` without ever calling
        self.process(data), raising NameError (see the recorded traceback).
        """
        processed_data = self.process(data)
        if self.next is not None:
            return self.next.invoke(processed_data)
        return processed_data

    def __or__(self, other):
        # a | b builds a sequence that runs a, then feeds its result to b.
        return CRunnableSequence(self, other)
class CRunnableSequence(CRunnable):
    """Composition of two runnables: second runs on the output of first."""

    def __init__(self, first, second):
        super().__init__()
        self.first = first
        self.second = second

    def process(self, data):
        # A sequence does no processing of its own; invoke() delegates instead.
        return data

    def invoke(self, data):
        # Reconstructed from the garbled export: run first, pipe into second.
        first_result = self.first.invoke(data)
        return self.second.invoke(first_result)
class AddTen(CRunnable):
    """Chain step that adds 10 to a numeric input."""

    def process(self, data):
        print("AddTen: ", data)
        incremented = data + 10
        return incremented
class MultiplyByTwo(CRunnable):
    """Chain step that doubles its input."""

    def process(self, data):
        print("Multiply by 2: ", data)
        doubled = data * 2
        return doubled
class ConvertToString(CRunnable):
    """Chain step that renders its input as a result string."""

    def process(self, data):
        print("Convert to string: ", data)
        return "Result: {}".format(data)
# --- Compose and run the custom chain ------------------------------------------
a = AddTen()
b = MultiplyByTwo()
c = ConvertToString()

# __or__ builds CRunnableSequence objects: ((a | b) | c).
chain = a | b | c

result = chain.invoke(10)
print(result)
# NOTE(review): the recorded session crashed here with
#   NameError: name 'processed_data' is not defined
# raised inside CRunnable.invoke, because that method never called
# self.process(data). With invoke corrected, the expected output is
# "Result: 40" (10 -> +10 -> *2 -> formatted).
from langchain_core.runnables import RunnablePassthrough, RunnableLambda, RunnableParallel

# --- RunnablePassthrough: forwards its input unchanged -------------------------
chain = RunnablePassthrough() | RunnablePassthrough() | RunnablePassthrough()
chain.invoke("hello")
# recorded output: 'hello'

def input_to_upper(input: str):
    output = input.upper()
    return output

# RunnableLambda wraps an ordinary function as a runnable chain step.
chain = RunnablePassthrough() | RunnableLambda(input_to_upper) | RunnablePassthrough()
chain.invoke("hello")
# recorded output: 'HELLO'

# --- RunnableParallel: runs every branch on the same input, returns a dict -----
chain = RunnableParallel({"x": RunnablePassthrough(), "y": RunnablePassthrough()})
chain.invoke("hello")
# recorded output: {'x': 'hello', 'y': 'hello'}
chain.invoke({"input": "hello", "input2": "goodbye"})
# recorded output: {'x': {'input': 'hello', 'input2': 'goodbye'},
#                   'y': {'input': 'hello', 'input2': 'goodbye'}}

# Plain lambdas are coerced to runnables inside RunnableParallel.
chain = RunnableParallel({"x": RunnablePassthrough(), "y": lambda z: z["input2"]})
chain.invoke({"input": "hello", "input2": "goodbye"})
# recorded output: {'x': {'input': 'hello', 'input2': 'goodbye'}, 'y': 'goodbye'}
def find_keys_to_uppercase(input: dict):
    """Uppercase the 'input' key of a dict, or 'not found' if it is missing."""
    output = input.get("input", "not found").upper()
    return output

# Branches of a RunnableParallel can themselves be chains.
chain = RunnableParallel({"x": RunnablePassthrough() | RunnableLambda(find_keys_to_uppercase), "y": lambda z: z["input2"]})
chain.invoke({"input": "hello", "input2": "goodbye"})
# recorded output: {'x': 'HELLO', 'y': 'goodbye'}

chain = RunnableParallel({"x": RunnablePassthrough()})

def assign_func(input):
    # Ignores its input and always yields 100 (used with .assign below).
    return 100

def multiply(input):
    # NOTE(review): defined in the original session but never used afterwards.
    return input * 10

chain.invoke({"input": "hello", "input2": "goodbye"})
# recorded output: {'x': {'input': 'hello', 'input2': 'goodbye'}}

# .assign() adds a key to the output dict, computed by the given runnable.
chain = RunnableParallel({"x": RunnablePassthrough()}).assign(extra=RunnableLambda(assign_func))
result = chain.invoke({"input": "hello", "input2": "goodbye"})
print(result)
# recorded output: {'x': {'input': 'hello', 'input2': 'goodbye'}, 'extra': 100}
def extractor(input: dict):
    """Pull the 'extra' key out of a dict, defaulting to 'Key not found'."""
    return input.get("extra", "Key not found")

def cupper(upper: str):
    """Uppercase the (stringified) input."""
    return str(upper).upper()

new_chain = RunnableLambda(extractor) | RunnableLambda(cupper)
new_chain.invoke({"extra": "test"})
# recorded output: 'TEST'

# Chains compose with chains: the previous chain's dict output feeds new_chain.
final_chain = chain | new_chain
final_chain.invoke({"input": "hello", "input2": "goodbye"})
# recorded output: '100'  (extra=100 extracted, stringified, uppercased)
# --- Minimal RAG chain: FAISS retriever + prompt + model + parser --------------
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# NOTE(review): "thuna" is a typo in the demo document text; kept byte-for-byte
# because the recorded retriever output matches it.
vectorstore = FAISS.from_texts(
    ["Cats love thuna"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template=template)

def format_docs(docs):
    """Join retrieved documents into one blank-line-separated context string."""
    return "\n\n".join(doc.page_content for doc in docs)

rag_chain = (
    RunnableParallel({"context": retriever | format_docs, "question": RunnablePassthrough()})
    | prompt
    | ChatOpenAI()
    | StrOutputParser()
)

retriever.invoke("Eat")
# recorded output: [Document(page_content='Cats love thuna')]
rag_chain.invoke("What do cats like to eat?")
# recorded output: 'Tuna'