from langchain.prompts import ChatPromptTemplate
from langchain_anthropic import ChatAnthropic
from langchain_ollama import ChatOllama
from langchain_fireworks import ChatFireworks
from experiment_xml import (
pydantic_to_xml_instructions,
run_xml_experiment,
)
import streamlit as st

# API keys are read from Streamlit secrets; substitute your own keys if running elsewhere.
LANGSMITH_API_KEY = st.secrets["api_keys"]["LANGSMITH_API_KEY"]
ANTHROPIC_API_KEY = st.secrets["api_keys"]["ANTHROPIC_API_KEY"]
FIREWORKS_API_KEY = st.secrets["api_keys"]["FIREWORKS_API_KEY"]
experiment_date = "21-02-25"
n_iter = 1
Let's start by creating the LLM models that will run our structured-output queries. A low temperature generally improves structured-output generation (at the cost of "creativity").
# LLM parameters
temperature = 0.8
timeout = 30
num_ctx = 8192
num_predict = 4096
llm_models_test = {
"Ollama_llama32": ChatOllama(
model="llama3.2",
temperature=temperature,
num_ctx=num_ctx,
num_thread=1,
num_predict=num_predict,
),
"Ollama_phi3": ChatOllama(
model="phi3",
temperature=temperature,
num_ctx=num_ctx,
num_thread=1,
num_predict=num_predict,
),
"Ollama_deepseekr1": ChatOllama(
model="deepseek-r1",
temperature=temperature,
num_ctx=num_ctx,
num_thread=1,
num_predict=num_predict,
),
}
llm_models = {
"Ollama_llama32": ChatOllama(
model="llama3.2",
temperature=temperature,
num_ctx=num_ctx,
num_thread=1,
num_predict=num_predict,
),
"Ollama_nemotron": ChatOllama(
model="nemotron-mini",
temperature=temperature,
num_ctx=num_ctx,
num_thread=1,
num_predict=num_predict,
),
"Ollama_phi3": ChatOllama(
model="phi3",
temperature=temperature,
num_ctx=num_ctx,
num_thread=1,
num_predict=num_predict,
),
"Ollama_phi4": ChatOllama(
model="phi4",
temperature=temperature,
num_ctx=num_ctx,
num_thread=1,
num_predict=num_predict,
),
"Ollama_deepseekr1": ChatOllama(
model="deepseek-r1",
temperature=temperature,
num_ctx=num_ctx,
num_thread=1,
num_predict=num_predict,
),
"fireworks_llama31": ChatFireworks(
model_name="accounts/fireworks/models/llama-v3p1-70b-instruct",
api_key=FIREWORKS_API_KEY,
temperature=temperature,
timeout=timeout,
),
"fireworks_llama32": ChatFireworks(
model_name="accounts/fireworks/models/llama-v3p2-3b-instruct",
api_key=FIREWORKS_API_KEY,
temperature=temperature,
timeout=timeout,
),
"fireworks_llama33": ChatFireworks(
model_name="accounts/fireworks/models/llama-v3p3-70b-instruct",
api_key=FIREWORKS_API_KEY,
temperature=temperature,
timeout=timeout,
),
"fireworks_qwen25": ChatFireworks(
model_name="accounts/fireworks/models/qwen2p5-72b-instruct",
api_key=FIREWORKS_API_KEY,
temperature=temperature,
timeout=timeout,
),
}
llm_models_with_anthropic = {
**llm_models,
"Anthropic_Sonnet_35": ChatAnthropic(
model="claude-3-5-sonnet-20241022",
api_key=ANTHROPIC_API_KEY,
timeout=timeout,
),
"Anthropic_Haiku_35": ChatAnthropic(
model="claude-3-5-haiku-20241022",
api_key=ANTHROPIC_API_KEY,
timeout=timeout,
),
"Anthropic_Haiku_3": ChatAnthropic(
model="claude-3-haiku-20240307",
api_key=ANTHROPIC_API_KEY,
timeout=timeout,
),
}
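Before launching the full experiments, a quick smoke test can confirm that one local and one hosted model respond. This is optional and assumes the Ollama models have already been pulled locally and the Fireworks key is valid.
# Optional smoke test (illustration only): confirm a local and a hosted model respond.
print(llm_models["Ollama_llama32"].invoke("Reply with the single word: ready").content)
print(llm_models["fireworks_llama32"].invoke("Reply with the single word: ready").content)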
test_science_prompt_txt = """
You are a professional science writer tasked with responding to members of
the general public who write in asking questions about science.
Write an article responding to a writer's question for publication in a
science magazine intended for a general readership with a high-school education.
You should write clearly and compellingly, include all relevant context,
and provide motivating stories where applicable.
Your response must be less than 200 words.
The question given to you is the following:
{question}
"""
questions = [
"What is the oldest recorded fossil?",
"What is a black hole?",
"How far away is the sun?",
"Which other planet in the Solar System has a surface gravity closest to that of the Earth?",
"Eris, Haumea, Makemake and Ceres are all examples of what?",
"Why does earth have seasons? Do other planets exhibit seasons too?",
"What causes the aurora borealis?",
"Why is the sky blue?",
"How do bees communicate?",
"What is the smallest unit of life?",
"How do plants make their own food?",
"Why do we dream?",
"What is the theory of relativity?",
"How do volcanoes erupt?",
"What is the speed of light?",
"How do magnets work?",
"What is the purpose of DNA?",
"What are the different types of galaxies?",
"Why do some animals hibernate?",
"How do vaccines work?",
]
prompt_direct = ChatPromptTemplate.from_template(test_science_prompt_txt)
prompt_system_format = ChatPromptTemplate.from_messages(
[
(
"system",
"Return a publishable article in the requested format.\n{format_instructions}",
),
("human", test_science_prompt_txt),
]
)
prompt_system_plus_reminder_format = ChatPromptTemplate.from_messages(
[
(
"system",
"Return a publishable article in the requested format.\n{format_instructions}",
),
(
"human",
test_science_prompt_txt + "\nYour response must be in valid XML.",
),
]
)
prompt_user_format = ChatPromptTemplate.from_template(
test_science_prompt_txt + "\n{format_instructions}"
)
from pydantic_xml import BaseXmlModel, element, attr
class ArticleResponse1XML(BaseXmlModel, tag="article"):
"""Structured article for publication answering a reader's question."""
title: str = element(description="Title of the article")
answer: str = element(
description="Provide a detailed description of historical events to answer the question"
)
number: int = element(description="A number that is most relevant to the question.")
class ArticleResponse1nointXML(BaseXmlModel, tag="article"):
"""Structured article for publication answering a reader's question."""
title: str = element(description="Title of the article")
answer: str = element(
description="Provide a detailed description of historical events to answer the question"
)
number: str = element(description="A number that is most relevant to the question.")
# Lists of simple types
class ArticleResponse2XML(BaseXmlModel, tag="article"):
"""Structured article for publication answering a reader's question."""
title: str = element(description="Title of the article")
answer: str = element(description="Answer the writer's question")
further_questions: list[str] = element(
tag="further_question",
description="A list of related questions that may be of interest to the readers.",
)
# Nested types
class HistoricalEventXML(BaseXmlModel):
"""The year and explanation of a historical event."""
year: str = element(description="The year of the historical event")
event: str = element(
description="A clear and concise explanation of what happened in this event"
)
class ArticleResponse3XML(BaseXmlModel, tag="article"):
"""Structured article for publication answering a reader's question."""
title: str = element(description="[Title of the article]")
historical_event_1: HistoricalEventXML = element(
description="A first historical event relevant to the question"
)
historical_event_2: HistoricalEventXML = element(
description="A second historical event relevant to the question"
)
# Lists of custom types
class ArticleResponse4XML(BaseXmlModel, tag="article"):
"""Structured article for publication answering a reader's question."""
title: str = element(description="Title of the article")
historical_timeline: list[HistoricalEventXML] = element(
description="A list of historical events relevant to the question"
)
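As a quick sanity check (illustration only, not part of the experiments), we can parse a hand-written XML snippet into the simplest schema. This assumes pydantic_xml's default behaviour of using the field names as element tags when no tag is given.
# Sanity check: parse a hand-written XML response into ArticleResponse1XML.
# Assumes field names are used as element tags (pydantic_xml's default when no tag is set).
example_xml = (
    b"<article>"
    b"<title>How far away is the sun?</title>"
    b"<answer>On average, about 150 million kilometres, or one astronomical unit.</answer>"
    b"<number>150000000</number>"
    b"</article>"
)
parsed = ArticleResponse1XML.from_xml(example_xml)
print(parsed.title, parsed.number)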
structured_formats_xml = [
dict(pydantic=schema, format_instructions=pydantic_to_xml_instructions(schema))
for schema in [
ArticleResponse1XML,
ArticleResponse1nointXML,
ArticleResponse2XML,
ArticleResponse3XML,
ArticleResponse4XML,
]
]
from langchain.output_parsers import XMLOutputParser
xml_output_parsers = [
XMLOutputParser(name="article", tags=["article", "title", "answer", "number"]),
XMLOutputParser(
name="article", tags=["article", "title", "answer", "further_question"]
),
XMLOutputParser(
name="article",
tags=[
"article",
"title",
"historical_event_1",
"year",
"event",
"historical_event_2",
"year",
"event",
],
),
XMLOutputParser(
name="article", tags=["article", "title", "historical_event", "year", "event"]
),
]
structured_formats_output_parser_xml = [
dict(pydantic=schema, format_instructions=op.get_format_instructions())
for schema, op in zip(
[
ArticleResponse1nointXML,
ArticleResponse2XML,
ArticleResponse3XML,
ArticleResponse4XML,
],
xml_output_parsers,
)
]
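For reference, this is roughly how LangChain's XMLOutputParser turns a model response into Python data. The response string below is hand-written for illustration, not output from the experiments.
# Illustration only: parse a hand-written response with the first output parser.
sample_response = (
    "<article>"
    "<title>Why is the sky blue?</title>"
    "<answer>Sunlight is scattered by air molecules, and blue light scatters the most.</answer>"
    "<number>450</number>"
    "</article>"
)
print(xml_output_parsers[0].parse(sample_response))
# Expected shape (roughly): {'article': [{'title': ...}, {'answer': ...}, {'number': ...}]}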
Example prompt
print(pydantic_to_xml_instructions(ArticleResponse2XML))
You must respond only in XML using the following schema. Do not provide any explanation outside the XML.
<article>
<!--Structured article for publication answering a reader's question.-->
<title>
{Title of the article - must be type str}
</title>
<answer>
{Answer the writer's question - must be type str}
</answer>
<!-- First list element -->
<further_question>
{A list of related questions that may be of interest to the readers. - must be type str}
</further_question>
<!-- Next list element -->
<further_question>
{A list of related questions that may be of interest to the readers. - must be type str}
</further_question>
<!-- Etc -->
<further_question>
...
</further_question>
</article>
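To see exactly what a model receives, we can render one of the prompt templates with a question and a set of format instructions. This is for illustration only; the experiments presumably assemble the prompts the same way inside run_xml_experiment.
# Illustration: render the user-prompt template for the first question with the
# ArticleResponse2XML format instructions and print the resulting message.
messages = prompt_user_format.format_messages(
    question=questions[0],
    format_instructions=structured_formats_xml[2]["format_instructions"],
)
print(messages[0].content)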
System prompt
if "structure_support_by_model_sp" not in locals():
structure_support_by_model_sp = {}
_ = run_xml_experiment(
prompt_system_format,
questions,
llm_models,
structured_formats_xml,
n_iter=1,
results_out=structure_support_by_model_sp,
save_file_name=f"exp5_xml_output_sys_{experiment_date}.pkl",
)
Model: Ollama_llama32  Output: ArticleResponse1XML  Pos: 1
Error: ValidationError ..Error: ValidationError .Error: ValidationError .Error: ValidationError ..Error: ValidationError .Error: ValidationError .Error: ValidationError .Error: ValidationError .Error: ValidationError .Error: ValidationError .Error: ValidationError .Error: ValidationError .Error: ValidationError ..Error: ValidationError .Error: ValidationError .Error: ValidationError .Error: ValidationError .
Model: Ollama_llama32  Output: ArticleResponse1nointXML  Pos: 2
....................
Model: Ollama_llama32  Output: ArticleResponse2XML  Pos: 3
.........Error: XMLSyntaxError ....Error: XMLSyntaxError .......
Model: Ollama_llama32  Output: ArticleResponse3XML  Pos: 4
Error: ValidationError ..Error: ValidationError .
User prompt
if "structure_support_by_model_up" not in locals():
structure_support_by_model_up = {}
_ = run_xml_experiment(
prompt_user_format,
questions,
llm_models,
structured_formats_xml,
n_iter=1,
results_out=structure_support_by_model_up,
save_file_name=f"exp5_xml_output_user_{experiment_date}.pkl",
)
System prompt with "format reminder"
if "structure_support_by_model_sprem" not in locals():
structure_support_by_model_sprem = {}
_ = run_xml_experiment(
prompt_system_plus_reminder_format,
questions,
llm_models,
structured_formats_xml,
n_iter=1,
results_out=structure_support_by_model_sprem,
save_file_name=f"exp5_xml_output_sys_w_reminder_{experiment_date}.pkl",
)
Output parsers with system prompt
if "structure_support_by_model_parsers" not in locals():
structure_support_by_model_parsers = {}
_ = run_xml_experiment(
prompt_system_format,
questions,
llm_models_test,
structured_formats_output_parser_xml,
n_iter=1,
results_out=structure_support_by_model_parsers,
save_file_name=f"exp5_xml_output_parser_{experiment_date}.pkl",
)
Extract all error messages & count
import pandas as pd
def results_to_df(ss_results, key="valid", n_questions=len(questions)):
    """Tabulate experiment results as a DataFrame with models as rows and schemas as columns."""
    df = pd.DataFrame.from_dict(
        {
            mname: {
                tname: ss_results[mname][tname][key] * 100 / n_questions
                for tname in ss_results[mname].keys()
            }
            for mname in ss_results.keys()
        },
        orient="index",
    )
    return df
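For example, the success rates for the system-prompt run can be tabulated directly (assuming the experiment above has been run):
results_to_df(structure_support_by_model_sp)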
def analyse_errors_from_results(ss_results, method="code"):
error_counts = {}
for mname in ss_results.keys():
error_counts[mname] = {}
for tname in ss_results[mname].keys():
            # Count errors by the failure code recorded for each output
if method == "code":
error_types = pd.Series(
output["error_type"]
for output in ss_results[mname][tname]["outputs"]
)
error_codes = error_types.value_counts()
for e_name, e_count in error_codes.items():
error_counts[mname][(tname, e_name)] = e_count
elif method == "parse":
# Count errors by parsing error message
errors = (
output["error_message"]
for output in ss_results[mname][tname]["outputs"]
)
for error in errors:
if error is None:
continue
if error.lower().find("opening and ending tag mismatch") >= 0:
error_str = "XML tag mismatch"
elif error.lower().find("extracterror") >= 0:
error_str = "Missing main tags"
elif error.lower().find("input should be a valid integer") >= 0:
error_str = "Validation error (int)"
elif error.lower().find("premature end of data in tag") >= 0:
error_str = "Premature end"
elif error.lower().find("field required") >= 0:
error_str = "Missing field"
elif error.lower().find("expected '>'") >= 0:
error_str = "Tag malformed"
elif (
error.lower().find("extra content at the end of the document")
>= 0
):
error_str = "Tag malformed"
else:
error_str = error
error_counts[mname][(tname, error_str)] = (
error_counts[mname].get((tname, error_str), 0) + 1
)
else:
                raise ValueError(f"Method {method} not supported")
return pd.DataFrame.from_dict(error_counts, orient="index")
analyse_errors_from_results(structure_support_by_model_sp, method="parse")
# Flip to `if 1:` to inspect the raw responses behind failed parses for one model/schema
if 0:
for output in structure_support_by_model_sp["Ollama_deepseekr1"][
"ArticleResponse3XML"
]["outputs"]:
if output["error_type"] != "ok":
print(output["error_message"], "\n")
print(output["raw"].content)
results_list = {
"System Prompt": structure_support_by_model_sp,
"System Prompt (FM)": structure_support_by_model_sprem,
"User Prompt": structure_support_by_model_up,
"Output Parsers": structure_support_by_model_parsers,
}
df_results = {}
for name, ss_results in results_list.items():
df_results[name] = pd.DataFrame.from_dict(
{
mname: {
tname: ss_results[mname][tname]["valid"] * 100
for tname in ss_results[mname].keys()
}
for mname in ss_results.keys()
},
orient="index",
)
display(name)
df = pd.concat(df_results).swaplevel(axis=0).sort_index(axis=0)
df
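One possible way to compare the configurations at a glance is a grouped bar chart of the combined results. This is a sketch that assumes df holds percentage success rates indexed by (model, prompt strategy).
import matplotlib.pyplot as plt

# Sketch: one group of bars per (model, prompt strategy) pair, one bar per schema.
ax = df.plot(kind="bar", figsize=(14, 5))
ax.set_ylabel("% valid responses")
ax.set_xlabel("(model, prompt strategy)")
plt.tight_layout()
plt.show()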