RunnableLambda provides the ability to run custom functions inside a chain. By wrapping a function in a RunnableLambda, a developer can define their own logic and run it as part of an LCEL pipeline.
For example, you can define and run functions that perform tasks such as data preprocessing, calculations, or interaction with external APIs.
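As a minimal sketch of the idea (the function add_one is just an illustration), any single-argument function becomes a Runnable once wrapped:

from langchain_core.runnables import RunnableLambda

def add_one(x: int) -> int:  # any single-argument function can be wrapped
    return x + 1

runnable = RunnableLambda(add_one)
runnable.invoke(1)  # -> 2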
How to run custom functions
Caution
A custom function can be used by wrapping it in RunnableLambda, but there is one point to watch out for: a custom function can only receive a single argument.
If you want to implement logic that takes multiple arguments, write a wrapper that accepts a single input (such as a dictionary) and unpacks it into multiple arguments.
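As a tiny sketch of this pattern (f, x, and y are hypothetical names; the full example below uses a named wrapper instead):

RunnableLambda(lambda d: f(d["x"], d["y"]))  # one dict in, unpacked into two arguments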
# Configuration file for managing API keys as environment variables
from dotenv import load_dotenv
# Load API key information
load_dotenv()
True
# Set up LangSmith tracking. https://smith.langchain.com
# !pip install langchain-teddynote
from langchain_teddynote import logging
# Enter a project name.
logging.langsmith("LCEL-Advanced")
from operator import itemgetter
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
def length_function(text):  # Returns the length of the text
    return len(text)


def _multiple_length_function(text1, text2):  # Multiplies the lengths of two texts
    return len(text1) * len(text2)


# Wrapper that exposes the two-argument function through a single dictionary input.
def multiple_length_function(_dict):
    # Multiplies the lengths of "text1" and "text2" in the dictionary.
    return _multiple_length_function(_dict["text1"], _dict["text2"])
# Create a prompt template
prompt = ChatPromptTemplate.from_template("what is {a} + {b}?")
# Initializing the ChatOpenAI model
model = ChatOpenAI()
# Create a chain by connecting prompts and models
chain1 = prompt | model
# Chain configuration
chain = (
{
"a": itemgetter("input_1") | RunnableLambda(length_function),
"b": {"text1": itemgetter("input_1"), "text2": itemgetter("input_2")}
| RunnableLambda(multiple_length_function),
}
| prompt
| model
| StrOutputParser()
)
# Executes a chain with the given arguments.
chain.invoke({"input_1": "bar", "input_2": "gah"})
'3 + 9 = 12'
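Note that the named wrapper is not strictly required; as a sketch, the same unpacking can be written inline:

RunnableLambda(lambda d: _multiple_length_function(d["text1"], d["text2"]))  # inline equivalent of the wrapper

A custom function can also accept the chain's RunnableConfig as its second argument, which lets you propagate callbacks, tags, and other settings into any sub-chains the function invokes. The next example uses this to retry JSON parsing with a correction chain.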
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableConfig
import json
def parse_or_fix(text: str, config: RunnableConfig):
    # Build a correction chain that asks the model to fix the broken text.
fixing_chain = (
ChatPromptTemplate.from_template(
"Fix the following text:\n\ntext\n{input}\n\nError: {error}"
" Don't narrate, just respond with the fixed data."
)
| ChatOpenAI()
| StrOutputParser()
)
    # Try up to 3 times.
    for _ in range(3):
        try:
            # Attempt to parse the text as JSON.
            return json.loads(text)
        except Exception as e:
            # On a parsing error, invoke the correction chain to fix the text.
            text = fixing_chain.invoke({"input": text, "error": e}, config)
            print(f"config: {config}")
    # If every attempt fails, return the string "Failed to parse".
    return "Failed to parse"
from langchain.callbacks import get_openai_callback
with get_openai_callback() as cb:
# Call the parse_or_fix function using RunnableLambda.
output = RunnableLambda(parse_or_fix).invoke(
input="{foo:: bar}",
config={"tags": ["my-tag"], "callbacks": [cb]}, # Pass the config.
)
# Prints the modified results.
print(f"\n\nThe revised result:\n{output}")