How to customize attributes of traces
Oftentimes, you will want to customize various attributes of the traces you log to LangSmith.
Logging to a specific project
As mentioned in the Concepts section, LangSmith uses the concept of a Project to group traces. If left unspecified, the tracer project is set to default. You can set the LANGCHAIN_PROJECT environment variable to configure a custom project name for an entire application run. This should be done before executing your program.
export LANGCHAIN_PROJECT="My Project"
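If you prefer to set this from code rather than the shell, you can assign the variable through os.environ before any traced code runs. This is a minimal sketch of that approach (plain Python, not a LangSmith-specific API):
import os
# Must run before tracing starts, since the tracer reads the environment
# when it is initialized.
os.environ["LANGCHAIN_PROJECT"] = "My Project"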
Changing the destination project at runtime
When a global environment variable is too coarse, you can also set the project name at runtime. This is useful when you want to log traces to different projects within the same application.
- Python
- TypeScript
- LangChain (Python)
- LangChain (JS)
import openai
from langsmith import traceable
from langsmith.run_trees import RunTree
client = openai.Client()
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"}
]
# Use the @traceable decorator with the 'project_name' parameter to log traces to LangSmith
# Ensure that the LANGCHAIN_TRACING_V2 environment variable is set for @traceable to work
@traceable(
run_type="llm",
name="OpenAI Call Decorator",
)
def call_openai(
messages: list[dict], model: str = "gpt-3.5-turbo"
) -> str:
return client.chat.completions.create(
model=model,
messages=messages,
).choices[0].message.content
# You can specify the Project via the project_name parameter
call_openai(
messages,
langsmith_extra={"project_name": "My Project"},
)
# The wrapped OpenAI client accepts all the same langsmith_extra parameters
# as @traceable-decorated functions, and logs traces to LangSmith automatically
from langsmith import wrappers
wrapped_client = wrappers.wrap_openai(client)
wrapped_client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages,
langsmith_extra={"project_name": "My Project"},
)
# Alternatively, create a RunTree object
# You can set the project name using the project_name parameter
rt = RunTree(
run_type="llm",
name="OpenAI Call RunTree",
inputs={"messages": messages},
project_name="My Project"
)
chat_completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages,
)
# End and submit the run
rt.end(outputs=chat_completion)
rt.post()
import OpenAI from "openai";
import { RunTree } from "langsmith";
const client = new OpenAI()
const messages = [
{role: "system", content: "You are a helpful assistant."},
{role: "user", content: "Hello!"}
]
// Create a RunTree object
// You can set the project name using the project_name parameter
const rt = new RunTree({
run_type: "llm",
name: "OpenAI Call RunTree",
inputs: { messages },
project_name: "My Project"
})
await rt.postRun();
const chatCompletion = await client.chat.completions.create({
model: "gpt-3.5-turbo",
messages: messages,
});
// End and submit the run
await rt.end(chatCompletion);
await rt.patchRun();
# You can set the project name for a specific tracer instance:
from langchain.callbacks.tracers import LangChainTracer
tracer = LangChainTracer(project_name="My Project")
chain.invoke({"query": "How many people live in canada as of 2023?"}, config={"callbacks": [tracer]})
# LangChain python also supports a context manager for tracing a specific block of code.
# You can set the project name using the project_name parameter.
from langchain_core.tracers.context import tracing_v2_enabled
with tracing_v2_enabled(project_name="My Project"):
chain.invoke({"query": "How many people live in canada as of 2023?"})
// You can set the project name for a specific tracer instance:
import { LangChainTracer } from "langchain/callbacks";
const tracer = new LangChainTracer({ projectName: "My Project" });
await chain.invoke(
{
query: "How many people live in canada as of 2023?"
},
{ callbacks: [tracer] }
);
Adding metadata and tags to traces
LangSmith supports sending arbitrary metadata and tags along with traces. This is useful for associating additional information with a trace, such as the environment in which it was executed, or the user who initiated it. For more information on metadata and tags, see the Concepts page. For information on how to query traces and runs by metadata and tags, see the Querying Traces page.
- Python
- TypeScript
- LangChain (Python)
- LangChain (JS)
import openai
from langsmith import traceable
from langsmith.run_trees import RunTree
client = openai.Client()
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"}
]
# Use the @traceable decorator with tags and metadata
# Ensure that the LANGCHAIN_TRACING_V2 environment variable is set for @traceable to work
@traceable(
run_type="llm",
name="OpenAI Call Decorator",
tags=["my-tag"],
metadata={"my-key": "my-value"}
)
def call_openai(
messages: list[dict], model: str = "gpt-3.5-turbo"
) -> str:
return client.chat.completions.create(
model=model,
messages=messages,
).choices[0].message.content
call_openai(
messages,
# You can provide tags and metadata at invocation time
# via the langsmith_extra parameter
langsmith_extra={"tags": ["my-other-tag"], "metadata": {"my-other-key": "my-value"}}
)
# Alternatively, you can create a RunTree object with tags and metadata
rt = RunTree(
run_type="llm",
name="OpenAI Call RunTree",
inputs={"messages": messages},
tags=["my-tag"],
extra={"metadata": {"my-key": "my-value"}}
)
chat_completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages,
)
# End and submit the run
rt.end(outputs=chat_completion)
rt.post()
import OpenAI from "openai";
import { RunTree } from "langsmith";
const client = new OpenAI();
const messages = [
{role: "system", content: "You are a helpful assistant."},
{role: "user", content: "Hello!"}
]
// Create a RunTree object
const rt = new RunTree({
run_type: "llm",
name: "OpenAI Call RunTree",
inputs: { messages },
tags: ["my-tag"],
extra: {metadata: {"my-key": "my-value"}}
})
await rt.postRun();
const chatCompletion = await client.chat.completions.create({
model: "gpt-3.5-turbo",
messages: messages,
});
// End and submit the run
await rt.end(chatCompletion)
await rt.patchRun()
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
prompt = ChatPromptTemplate.from_messages([
("system", "You are a helpful AI."),
("user", "{input}")
])
chat_model = ChatOpenAI()
output_parser = StrOutputParser()
# Tags and metadata can be configured with RunnableConfig
chain = (prompt | chat_model | output_parser).with_config({"tags": ["top-level-tag"], "metadata": {"top-level-key": "top-level-value"}})
# Tags and metadata can also be passed at runtime
chain.invoke({"input": "What is the meaning of life?"}, {"tags": ["shared-tags"], "metadata": {"shared-key": "shared-value"}})
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful AI."],
["user", "{input}"]
])
const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo" });
const outputParser = new StringOutputParser();
// Tags and metadata can be configured with RunnableConfig
const chain = (prompt.pipe(model).pipe(outputParser)).withConfig({"tags": ["top-level-tag"], "metadata": {"top-level-key": "top-level-value"}});
// Tags and metadata can also be passed at runtime
await chain.invoke({input: "What is the meaning of life?"}, {tags: ["shared-tags"], metadata: {"shared-key": "shared-value"}})
Customizing the run name
When you create a run, you can specify a name for the run. This name is used to identify the run in LangSmith and can be used to filter and group runs. The name is also used as the title of the run in the LangSmith UI.
- Python
- TypeScript
- LangChain (Python)
- LangChain (JS)
import openai
from langsmith import traceable
from langsmith.run_trees import RunTree
client = openai.Client()
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"}
]
# Use the @traceable decorator with the name parameter
# Ensure that the LANGCHAIN_TRACING_V2 environment variable is set for @traceable to work
@traceable(
run_type="llm",
name="OpenAI Call Decorator",
)
def call_openai(
messages: list[dict], model: str = "gpt-3.5-turbo"
) -> str:
return client.chat.completions.create(
model=model,
messages=messages,
).choices[0].message.content
call_openai(messages)
# Alternatively, use the name parameter of the RunTree object
rt = RunTree(
run_type="llm",
name="OpenAI Call RunTree",
inputs={"messages": messages},
)
chat_completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages,
)
# End and submit the run
rt.end(outputs=chat_completion)
rt.post()
import OpenAI from "openai";
import { RunTree } from "langsmith";
const client = new OpenAI();
const messages = [
{role: "system", content: "You are a helpful assistant."},
{role: "user", content: "Hello!"}
]
// Create a RunTree object
const rt = new RunTree({
run_type: "llm",
name: "OpenAI Call RunTree",
inputs: { messages },
})
await rt.postRun()
const chatCompletion = await client.chat.completions.create({
model: "gpt-3.5-turbo",
messages: messages,
});
// End and submit the run
await rt.end(chatCompletion)
await rt.patchRun()
# When tracing within LangChain, run names default to the class name of the traced object (e.g., 'ChatOpenAI').
# (Note: this is not currently supported directly on LLM objects.)
...
configured_chain = chain.with_config({"run_name": "MyCustomChain"})
configured_chain.invoke({"query": "What is the meaning of life?"})
// When tracing within LangChain, run names default to the class name of the traced object (e.g., 'ChatOpenAI').
// (Note: this is not currently supported directly on LLM objects.)
...
const configuredChain = chain.withConfig({ runName: "MyCustomChain" });
await configuredChain.invoke({query: "What is the meaning of life?"});
For more examples with LangChain, check out the recipe on customizing run names.
Updating a run
The following fields can be updated when patching a run with the SDK or API.
- end_time: datetime.datetime
- error: str | None
- outputs: dict | None
- events: list[dict] | None
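With the Python SDK, these fields can be patched on an existing run via Client.update_run. A minimal sketch follows; run_id is hypothetical here and would come from the run you created (for example, the id attribute of a RunTree) or from a query:
import datetime
from langsmith import Client
client = Client()
# run_id is hypothetical: capture it from the run you created
# (e.g., rt.id on a RunTree) or by querying runs.
client.update_run(
    run_id,
    outputs={"answer": "Hello there!"},
    end_time=datetime.datetime.now(datetime.timezone.utc),
)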
Masking inputs and outputs
In some situations, you may need to hide the inputs and outputs of your traces for privacy or security reasons. LangSmith provides a way to filter the inputs and outputs of your traces before they are sent to the LangSmith backend, so our servers never see the original values.
If you want to completely hide the inputs and outputs of your traces, you can set the following environment variables when running your application:
LANGCHAIN_HIDE_INPUTS=true
LANGCHAIN_HIDE_OUTPUTS=true
This works for both the LangSmith SDK and LangChain.
You can also customize and override this behavior for a given Client instance by setting the hide_inputs and hide_outputs parameters on the Client object.
- Python
- TypeScript
- LangChain (Python)
- LangChain (JS)
import openai
from langsmith import Client
from langsmith.wrappers import wrap_openai
openai_client = wrap_openai(openai.Client())
langsmith_client = Client(
hide_inputs=lambda inputs: {}, hide_outputs=lambda outputs: {}
)
# The linked run will have its metadata present, but the inputs and outputs will be hidden
openai_client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
],
langsmith_extra={"client": langsmith_client},
)
# The linked run will not have hidden inputs and outputs
openai_client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
],
)
import { Client } from "langsmith";
import { wrapOpenAI } from "langsmith/wrappers";
import OpenAI from "openai";
const langsmithClient = new Client({
hideInputs: (inputs) => ({}),
hideOutputs: (outputs) => ({}),
});
(async () => {
// The linked run will have its metadata present, but the inputs and outputs will be hidden
const filteredOAIClient = wrapOpenAI(new OpenAI(), {
client: langsmithClient,
});
await filteredOAIClient.chat.completions.create({
model: "gpt-3.5-turbo",
messages: [
{ role: "system", content: "You are a helpful assistant." },
{ role: "user", content: "Hello!" },
],
});
const openaiClient = wrapOpenAI(new OpenAI());
// The linked run will not have hidden inputs and outputs
await openaiClient.chat.completions.create({
model: "gpt-3.5-turbo",
messages: [
{ role: "system", content: "You are a helpful assistant." },
{ role: "user", content: "Hello!" },
],
});
})();
from langchain_core.tracers.context import tracing_v2_enabled
from langchain_openai import ChatOpenAI
from langsmith import Client
def filter_inputs(inputs: dict):
# You can define custom filtering here
return {}
def filter_outputs(outputs: dict):
# You can define custom filtering here
return {}
llm = ChatOpenAI()
# You can configure tracing using the context manager below
# or by directly creating a LangChainTracer object
with tracing_v2_enabled(
"test-filtering",
client=Client(hide_inputs=filter_inputs, hide_outputs=filter_outputs),
) as cb:
llm.invoke("Say foo")
# The linked run will have its metadata present, but the inputs and outputs will be hidden
print(cb.get_run_url())
with tracing_v2_enabled("test-filtering", client=Client()) as cb:
llm.invoke("Say bar")
# The linked run will not have hidden inputs and outputs
print(cb.get_run_url())
import { ChatOpenAI } from "@langchain/openai";
import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain";
import { Client } from "langsmith";
(async () => {
const llm = new ChatOpenAI();
const client = new Client({
hideInputs: (inputs) => ({}),
hideOutputs: (outputs) => ({}),
})
const tracer = new LangChainTracer({ client });
// The traced run will have its metadata present, but the inputs and outputs will be hidden
await llm.invoke("Say foo", { callbacks: [tracer] });
const unfilteredTracer = new LangChainTracer();
// The traced run will not have hidden inputs and outputs
await llm.invoke("Say bar", { callbacks: [unfilteredTracer] });
})();
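The hide_inputs and hide_outputs callbacks receive the run's inputs or outputs as a dict and return whatever should be stored, so you can also redact selectively instead of dropping everything. The sketch below masks only message content; the "messages" key is an assumption matching the OpenAI-style payloads above, so adjust it to your own schema:
from langsmith import Client
def mask_message_content(payload: dict) -> dict:
    # Keep the payload's structure but replace message content, which is
    # where user-provided text lives in the examples above.
    masked = dict(payload)
    if isinstance(masked.get("messages"), list):
        masked["messages"] = [
            {**m, "content": "[REDACTED]"} if isinstance(m, dict) else m
            for m in masked["messages"]
        ]
    return masked
langsmith_client = Client(
    hide_inputs=mask_message_content, hide_outputs=mask_message_content
)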