from operator import itemgetter
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI
5 LCEL - Compose
5.1 Constructor
def length_function(text):
    # Length of a single string.
    return len(text)

def _multiple_length_function(text1, text2):
    # Product of two string lengths.
    return len(text1) * len(text2)

def multiple_length_function(_dict):
    # Dict-taking wrapper, since a RunnableLambda receives a single input.
    return _multiple_length_function(_dict["text1"], _dict["text2"])
model = ChatOpenAI()
prompt = ChatPromptTemplate.from_template("what is {a} + {b}")

chain1 = prompt | model

chain = (
    {
        "a": itemgetter("foo") | RunnableLambda(length_function),
        "b": {"text1": itemgetter("foo"), "text2": itemgetter("bar")}
        | RunnableLambda(multiple_length_function),
    }
    | prompt
    | model
)

chain.invoke({"foo": "bar", "bar": "gah"})
AIMessage(content='3 + 9 = 12', response_metadata={'token_usage': {'completion_tokens': 7, 'prompt_tokens': 14, 'total_tokens': 21}, 'model_name': 'gpt-3.5-turbo', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5d89d47c-8eda-4f70-b70a-9a953b7e7973-0')
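With the inputs {"foo": "bar", "bar": "gah"}, branch "a" computes len("bar") = 3 and branch "b" computes len("bar") * len("gah") = 9, so the model is asked "what is 3 + 9". The dict literal at the head of the chain is coerced into a RunnableParallel; as a minimal sketch, the same chain can be written with that coercion spelled out (assuming the standard langchain_core import):

from langchain_core.runnables import RunnableParallel

# Same chain, with the dict-to-RunnableParallel coercion made explicit.
chain_explicit = (
    RunnableParallel(
        a=itemgetter("foo") | RunnableLambda(length_function),
        b={"text1": itemgetter("foo"), "text2": itemgetter("bar")}
        | RunnableLambda(multiple_length_function),
    )
    | prompt
    | model
)

chain_explicit.invoke({"foo": "bar", "bar": "gah"})  # same "what is 3 + 9" prompt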
5.2 @chain Decorator
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import chain
prompt1 = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
prompt2 = ChatPromptTemplate.from_template("What is the subject of this joke: {joke}")

# Generate a joke about the topic, then ask a second chain for its subject.
@chain
def custom_chain(text):
    prompt_val1 = prompt1.invoke({"topic": text})
    output1 = ChatOpenAI().invoke(prompt_val1)
    parsed_output1 = StrOutputParser().invoke(output1)
    chain2 = prompt2 | ChatOpenAI() | StrOutputParser()
    return chain2.invoke({"joke": parsed_output1})

custom_chain.invoke("bears")
'The subject of this joke is the bear.'
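The @chain decorator wraps the function in a RunnableLambda, so custom_chain supports the full runnable interface and composes like any other chain. A minimal sketch of that (the topics and the uppercasing step here are just illustrative):

# Batch over several inputs like any other runnable.
custom_chain.batch(["cats", "programmers"])

# Compose it further with the | operator.
shout = custom_chain | RunnableLambda(str.upper)
shout.invoke("penguins")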