# Set up LangSmith tracing: https://smith.langchain.com
# !pip install -qU langchain-teddynote
from langchain_teddynote import logging
# Enter a project name.
logging.langsmith("CH02-Prompt")
from langchain_openai import ChatOpenAI
llm = ChatOpenAI()
Method 1. Create a PromptTemplate object using the from_template() method
Define the template by wrapping each variable to be substituted in curly braces, like { variable }.
You can generate a sentence by putting a value into the country variable.
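As a minimal sketch of these two steps (the template wording simply mirrors the examples later in this section):

from langchain_core.prompts import PromptTemplate

# Wrap the variable to substitute in curly braces.
template = "What is the capital of {country}?"

# 1) Create the PromptTemplate object.
prompt = PromptTemplate.from_template(template)

# 2) Substitute a value with format().
prompt.format(country="South Korea")  # -> 'What is the capital of South Korea?'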
Method 2. Create the prompt while creating the PromptTemplate object
For additional validation, specify input_variables explicitly.
If these variables are inconsistent with the variables used in the template string, an exception is raised when the object is instantiated.
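A sketch of this validation behavior is shown below. Note that validate_template defaults to off in recent langchain-core releases, so it is enabled explicitly here; treat the exact behavior as an assumption about your installed version.

from langchain_core.prompts import PromptTemplate

try:
    PromptTemplate(
        template="What are the capitals of {country1} and {country2}?",
        input_variables=["country1"],  # {country2} is deliberately missing
        validate_template=True,
    )
except ValueError as e:
    print(e)  # mismatch between the template variables and input_variables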
partial_variables: Partially filling in variables
A common use of partial is to partially apply a function. This use case applies when there is a variable whose value you always want to obtain in the same, common way.
A typical example is a date or time.
Suppose you have a prompt into which you always want the current date inserted. You cannot hardcode the date into the prompt, and passing it in along with the other input variables every time is cumbersome. In this case, it is very convenient to be able to partially bind the prompt with a function that always returns the current date.
The following Python code gets today's date.
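A minimal sketch of this pattern uses the .partial() method, which accepts either a string or a zero-argument function (the prompt wording here is illustrative):

from datetime import datetime
from langchain_core.prompts import PromptTemplate

def get_today():
    # Return today's date as a string, e.g. 'June 20'.
    return datetime.now().strftime("%B %d")

prompt = PromptTemplate.from_template(
    "Today's date is {today}. Please list {n} celebrities whose birthday is today."
)

# Bind the function itself; it is called each time the prompt is formatted.
prompt_with_date = prompt.partial(today=get_today)
print(prompt_with_date.format(n=2))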
Reading a template from a file
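load_prompt reads a prompt definition from a YAML or JSON file. The file contents are not shown in this section, but a minimal prompts/fruit_color.yaml consistent with the example below might look like this (an assumption, not the actual file):

_type: "prompt"
template: "What is the color of {fruit}?"
input_variables: ["fruit"]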
If you are a Windows user and encounter an error, run the code below instead (it sets the encoding).
ChatPromptTemplate
ChatPromptTemplate can be used when you want to inject a list of chat messages as a prompt.
Messages are composed as (role, message) tuples and assembled into a list.
role
- "system": a system message, mainly used for global settings and instructions.
- "human": a user input message.
- "ai": the AI's response message.
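A minimal sketch of this structure (the message wording is illustrative):

from langchain_core.prompts import ChatPromptTemplate

chat_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant named {name}."),
        ("human", "{user_input}"),
    ]
)

# format_messages returns a list of message objects ready to be sent to the model.
chat_prompt.format_messages(name="Teddy", user_input="What is the capital of France?")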
You can get a response by passing the generated messages directly to the model.
This time, let's create a chain.
MessagesPlaceholder
LangChain also provides MessagesPlaceholder, which gives you full control over the messages to be rendered during formatting.
It may be useful if you are not sure which role to use for the message prompt template, or if you want to insert a list of messages during formatting.
You can use MessagesPlaceholder when you want to add the list of conversation messages later, here under the variable name conversation.
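A minimal sketch of the idea (the message wording is illustrative):

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        # Whatever message list is passed in as 'conversation' is inserted here.
        MessagesPlaceholder(variable_name="conversation"),
        ("human", "Summarize the conversation so far in {word_count} words."),
    ]
)

prompt.format_messages(
    word_count=5,
    conversation=[("human", "Hi, I'm Teddy."), ("ai", "Nice to meet you!")],
)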
# Method 1. Create a PromptTemplate object using the from_template() method.
from langchain_core.prompts import PromptTemplate

# Template definition: {country} is a placeholder for the value supplied later.
template = "What is the capital of {country}?"

# Create a PromptTemplate object using the from_template method.
prompt = PromptTemplate.from_template(template)

# Create the prompt: the format method substitutes a value into the variable.
prompt.format(country="South Korea")
'What is the capital of South Korea?'
# Define the template.
template = "What is the capital of {country}?"

# Create a PromptTemplate object using the from_template method.
prompt = PromptTemplate.from_template(template)

# Create a chain by piping the prompt into the model.
chain = prompt | llm

# The value passed to invoke is substituted into the country variable before the chain runs.
chain.invoke("Republic of Korea").content
'The capital of Korea is Seoul.'
# Method 2. Create the prompt while creating the PromptTemplate object.
# Template definition.
template = "What is the capital of {country}?"

# Create the PromptTemplate object directly, specifying input_variables.
prompt = PromptTemplate(
    template=template,
    input_variables=["country"],
)
prompt

# Create the prompt.
prompt.format(country="South Korea")
'What is the capital of South Korea?'
# partial_variables: pre-fill one of the variables.
# Template definition.
template = "What are the capitals of {country1} and {country2}, respectively?"

# Create the PromptTemplate object, passing partial_variables as a dictionary.
prompt = PromptTemplate(
    template=template,
    input_variables=["country1"],
    partial_variables={
        "country2": "USA"  # pre-filled via partial_variables
    },
)
prompt
PromptTemplate(input_variables=['country1'], partial_variables={'country2': 'USA'}, template='What are the capitals of {country1} and {country2}, respectively?')

prompt.format(country1="South Korea")

# Replace the pre-filled variable with the partial method.
prompt_partial = prompt.partial(country2="Canada")
prompt_partial
PromptTemplate(input_variables=['country1'], partial_variables={'country2': 'Canada'}, template='What are the capitals of {country1} and {country2}, respectively?')

prompt_partial.format(country1="South Korea")
'What are the capitals of South Korea and Canada, respectively?'

chain = prompt_partial | llm
chain.invoke("South Korea").content
'The capital of Korea is Seoul, and the capital of Canada is Ottawa.'

# The partial variable can still be overridden at invocation time.
chain.invoke({"country1": "South Korea", "country2": "Australia"}).content
'The capital of Korea is Seoul and the capital of Australia is Canberra.'
from datetime import datetime

# Return today's date as a string, e.g. 'June 20'.
def get_today():
    return datetime.now().strftime("%B %d")

get_today()
'June 20'
prompt = PromptTemplate(
    template="Today's date is {today}. Please list {n} celebrities whose birthday is today. Please include their dates of birth.",
    input_variables=["n"],
    partial_variables={
        "today": get_today  # pass the function itself; it is called each time the prompt is formatted
    },
)

# Generate the prompt.
prompt.format(n=3)
"Today's date is June 20. Please list 3 celebrities whose birthday is today. Please include their dates of birth."
# Create the chain.
chain = prompt | llm

# Run the chain and check the result.
print(chain.invoke(3).content)

# The today value can also be overridden at invocation time.
print(chain.invoke({"today": "Jan 02", "n": 3}).content)
1. Kate Bosworth - 2 January 1983
2. Tia Carrere - 2 January 1967
3. Christy Turlington - 2 January 1969
# Reading a template from a file.
from langchain_core.prompts import load_prompt

prompt = load_prompt("prompts/fruit_color.yaml")
prompt
PromptTemplate(input_variables=['fruit'], template='What is the color of {fruit}?')

# from langchain_teddynote.prompts import load_prompt
# # Windows users only: set the encoding to cp949.
# load_prompt("prompts/fruit_color.yaml", encoding="cp949")
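A longer template can be stored in a file and loaded the same way. As an illustration (the file name prompts/capital.yaml and its contents are assumptions, not shown above), formatting such a loaded template could print something like the text below:

prompt2 = load_prompt("prompts/capital.yaml")  # hypothetical file with a {country} variable
print(prompt2.format(country="South Korea"))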
Please tell me about the capital of South Korea.
Summarize the characteristics of the capital in the following format.
Please write about 300 characters.
Please write in Korean.
----
[Format]
1. Area
2. Population
3. Historical sites
4. Local specialties
#Answer:
# ChatPromptTemplate: create a chat prompt from a single template string.
from langchain_core.prompts import ChatPromptTemplate

chat_prompt = ChatPromptTemplate.from_template("What is the capital of {country}?")
chat_prompt
# Build a chat prompt from a list of (role, message) tuples.
from langchain_core.prompts import ChatPromptTemplate

chat_template = ChatPromptTemplate.from_messages(
    [
        # (role, message)
        ("system", "You are a friendly AI assistant. Your name is {name}."),
        ("human", "Nice to meet you!"),
        ("ai", "Hello! How can I help you?"),
        ("human", "{user_input}"),
    ]
)

# Create the chat messages.
messages = chat_template.format_messages(
    name="Teddy", user_input="What is your name?"
)
messages
[SystemMessage(content='You are a friendly AI assistant. Your name is Teddy.'),
 HumanMessage(content='Nice to meet you!'),
 AIMessage(content='Hello! How can I help you?'),
 HumanMessage(content='What is your name?')]
llm.invoke(messages).content
'My name is Teddy. Please tell me if you need help!'
chain = chat_template | llm
chain.invoke({"name": "Teddy", "user_input": "what is your name?"}).content
'My name is Teddy. How can I help you?'
# MessagesPlaceholder: reserve a spot in the prompt for a list of messages.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

chat_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a summarization expert AI assistant. Your task is to summarize the conversation using key words.",
        ),
        MessagesPlaceholder(variable_name="conversation"),
        ("human", "Summarize the conversation so far in {word_count} words."),
    ]
)
chat_prompt
ChatPromptTemplate(input_variables=['conversation', 'word_count'],
 messages=[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], template='You are a summarization expert AI assistant. Your task is to summarize the conversation using key words.')),
 MessagesPlaceholder(variable_name='conversation'),
 HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['word_count'], template='Summarize the conversation so far in {word_count} words.'))])
formatted_chat_prompt = chat_prompt.format(
    word_count=5,
    conversation=[
        ("human", "Hello! I'm Teddy, a new employee today. Nice to meet you."),
        ("ai", "Nice to meet you! I look forward to working with you in the future."),
    ],
)
print(formatted_chat_prompt)
System: You are a summarization expert AI assistant. Your task is to summarize the conversation using key words.
Human: Hello! I'm Teddy, a new employee today. Nice to meet you.
AI: Nice to meet you! I look forward to working with you in the future.
Human: Summarize the conversation so far in 5 words.
# Create the chain with an output parser, run it, and check the result.
chain = chat_prompt | llm | StrOutputParser()

chain.invoke(
    {
        "word_count": 5,
        "conversation": [
            (
                "human",
                "Hello! I'm Teddy, a new employee today. Nice to meet you.",
            ),
            ("ai", "Nice to meet you! I look forward to working with you in the future."),
        ],
    }
)