[{"data":1,"prerenderedAt":2424},["ShallowReactive",2],{"\u002Fblog\u002Fllm-cost-monitoring-navigation":3,"\u002Fblog\u002Fllm-cost-monitoring":232,"$Vs9m8Q-E34":2326},[4,8,12,16,20,24,28,32,36,40,44,48,52,56,60,64,68,72,76,80,84,88,92,96,100,104,108,112,116,120,124,128,132,136,140,144,148,152,156,160,164,168,172,176,180,184,188,192,196,200,204,208,212,216,220,224,228],{"title":5,"path":6,"stem":7},"Blog","\u002Fblog","blog\u002Findex",{"title":9,"path":10,"stem":11},"AI Agent Observability Explained: Key Concepts and Standards","\u002Fblog\u002Fai-agent-observability","blog\u002Fai-agent-observability",{"title":13,"path":14,"stem":15},"From Traditional Monitoring to AI-Enhanced Observability","\u002Fblog\u002Fai-enhanced-observability","blog\u002Fai-enhanced-observability",{"title":17,"path":18,"stem":19},"Application Performance Monitoring (APM) Guide for DevOps Teams in 2025","\u002Fblog\u002Fapplication-performance-monitoring","blog\u002Fapplication-performance-monitoring",{"title":21,"path":22,"stem":23},"Integrating CI\u002FCD Pipelines with Observability Tools","\u002Fblog\u002Fcicd-observability-integration","blog\u002Fcicd-observability-integration",{"title":25,"path":26,"stem":27},"Cloud Microservices Monitoring on AWS and Azure with OpenTelemetry","\u002Fblog\u002Fcloud-microservices-monitoring","blog\u002Fcloud-microservices-monitoring",{"title":29,"path":30,"stem":31},"Guide to Crontab Logs - How to Find and Read Crontab Logs","\u002Fblog\u002Fcrontab-logs","blog\u002Fcrontab-logs",{"title":33,"path":34,"stem":35},"13 Best DataDog Competitors in 2025: Complete Comparison Guide","\u002Fblog\u002Fdatadog-competitors","blog\u002Fdatadog-competitors",{"title":37,"path":38,"stem":39},"Is Datadog Worth the Price? An In-Depth Cost Analysis","\u002Fblog\u002Fdatadog-pricing","blog\u002Fdatadog-pricing",{"title":41,"path":42,"stem":43},"Debugging Microservices in Production with Distributed Tracing","\u002Fblog\u002Fdebugging-microservices","blog\u002Fdebugging-microservices",{"title":45,"path":46,"stem":47},"Docker Logs Command Reference: tail, follow, since Options","\u002Fblog\u002Fdocker-logs-tail","blog\u002Fdocker-logs-tail",{"title":49,"path":50,"stem":51},"What Is Full Stack Observability and Why Is It Important?","\u002Fblog\u002Ffull-stack-observability","blog\u002Ffull-stack-observability",{"title":53,"path":54,"stem":55},"go-redis joins Redis org on GitHub","\u002Fblog\u002Fgo-redis-v9","blog\u002Fgo-redis-v9",{"title":57,"path":58,"stem":59},"Go Context timeouts can be harmful","\u002Fblog\u002Fgolang-context-timeout","blog\u002Fgolang-context-timeout",{"title":61,"path":62,"stem":63},"Golang Functional Options are named args on steroids","\u002Fblog\u002Fgolang-functional-options","blog\u002Fgolang-functional-options",{"title":65,"path":66,"stem":67},"Tips on writing JSON REST APIs in Go","\u002Fblog\u002Fgolang-json-rest-api","blog\u002Fgolang-json-rest-api",{"title":69,"path":70,"stem":71},"Golang Logging Libraries in 2025","\u002Fblog\u002Fgolang-logging","blog\u002Fgolang-logging",{"title":73,"path":74,"stem":75},"Golang memory arenas [101 guide]","\u002Fblog\u002Fgolang-memory-arena","blog\u002Fgolang-memory-arena",{"title":77,"path":78,"stem":79},"Golang Monitoring using OpenTelemetry","\u002Fblog\u002Fgolang-monitoring","blog\u002Fgolang-monitoring",{"title":81,"path":82,"stem":83},"How to Choose an APM Solution: 5 Critical Questions for 2025","\u002Fblog\u002Fhow-to-choose-an-apm","blog\u002Fhow-to-choose-an-apm",{"title":85,"path":86,"stem":87},"Kafka 
Event-Driven Microservices: Monitoring and Observability","\u002Fblog\u002Fkafka-event-driven-microservices","blog\u002Fkafka-event-driven-microservices",{"title":89,"path":90,"stem":91},"kubectl logs Command Reference and Documentation","\u002Fblog\u002Fkubectl-logs","blog\u002Fkubectl-logs",{"title":93,"path":94,"stem":95},"Kubernetes Microservices Monitoring and Observability with OpenTelemetry","\u002Fblog\u002Fkubernetes-microservices-monitoring","blog\u002Fkubernetes-microservices-monitoring",{"title":97,"path":98,"stem":99},"LangChain Observability: Monitoring Guide for Production Apps","\u002Fblog\u002Flangchain-observability","blog\u002Flangchain-observability",{"title":101,"path":102,"stem":103},"LLM Cost Monitoring with OpenTelemetry","\u002Fblog\u002Fllm-cost-monitoring","blog\u002Fllm-cost-monitoring",{"title":105,"path":106,"stem":107},"Microservices Architecture: Patterns, Design Principles, and Observability","\u002Fblog\u002Fmicroservices-architecture","blog\u002Fmicroservices-architecture",{"title":109,"path":110,"stem":111},"Monitoring Polyglot Microservices: Python, Node.js, and Go with OpenTelemetry","\u002Fblog\u002Fmonitoring-polyglot-microservices","blog\u002Fmonitoring-polyglot-microservices",{"title":113,"path":114,"stem":115},"What Does No Healthy Upstream Mean and How to Fix It","\u002Fblog\u002Fno-healthy-upstream","blog\u002Fno-healthy-upstream",{"title":117,"path":118,"stem":119},"Node.js Performance Monitoring Guide","\u002Fblog\u002Fnodejs-monitoring","blog\u002Fnodejs-monitoring",{"title":121,"path":122,"stem":123},"6 Free & Open-Source Log Management Tools in 2026","\u002Fblog\u002Fopen-source-log-management","blog\u002Fopen-source-log-management",{"title":125,"path":126,"stem":127},"OpenTelemetry for AI Systems: LLM and Agent Observability (2026)","\u002Fblog\u002Fopentelemetry-ai-systems","blog\u002Fopentelemetry-ai-systems",{"title":129,"path":130,"stem":131},"Top OpenTelemetry Backends for Storage & Visualization","\u002Fblog\u002Fopentelemetry-backend","blog\u002Fopentelemetry-backend",{"title":133,"path":134,"stem":135},"12 OpenTelemetry-Compatible Platforms You Should Know in 2025","\u002Fblog\u002Fopentelemetry-compatible-platforms","blog\u002Fopentelemetry-compatible-platforms",{"title":137,"path":138,"stem":139},"Monitoring cache stats using OpenTelemetry Go Metrics","\u002Fblog\u002Fopentelemetry-go-metrics-cache-stats","blog\u002Fopentelemetry-go-metrics-cache-stats",{"title":141,"path":142,"stem":143},"Instrument Go app with OpenTelemetry Tracing","\u002Fblog\u002Fopentelemetry-go-tracing","blog\u002Fopentelemetry-go-tracing",{"title":145,"path":146,"stem":147},"OpenTelemetry Java Agent for Spring Boot: Complete Setup Guide","\u002Fblog\u002Fopentelemetry-java-agent-spring-boot","blog\u002Fopentelemetry-java-agent-spring-boot",{"title":149,"path":150,"stem":151},"OpenTelemetry Manual Instrumentation in Java: Custom Spans & Business Metrics","\u002Fblog\u002Fopentelemetry-java-manual-instrumentation","blog\u002Fopentelemetry-java-manual-instrumentation",{"title":153,"path":154,"stem":155},"What is Performance Engineering and How it can Reduce costs?","\u002Fblog\u002Fperformance-engineering","blog\u002Fperformance-engineering",{"title":157,"path":158,"stem":159},"Uptrace Pricing Update — April 2026","\u002Fblog\u002Fpricing-update-april-2026","blog\u002Fpricing-update-april-2026",{"title":161,"path":162,"stem":163},"Prometheus Monitoring in 5 Minutes: Set Up Your First 
Alert","\u002Fblog\u002Fprometheus-monitoring","blog\u002Fprometheus-monitoring",{"title":165,"path":166,"stem":167},"How to Use FastAPI [Detailed Python Guide]","\u002Fblog\u002Fpython-fastapi","blog\u002Fpython-fastapi",{"title":169,"path":170,"stem":171},"Python Logging Config: dictConfig, QueueHandler & Thread Safety","\u002Fblog\u002Fpython-logging","blog\u002Fpython-logging",{"title":173,"path":174,"stem":175},"How to Monitor RabbitMQ","\u002Fblog\u002Frabbitmq-monitoring","blog\u002Frabbitmq-monitoring",{"title":177,"path":178,"stem":179},"Redis Monitoring Performance Metrics","\u002Fblog\u002Fredis-monitoring","blog\u002Fredis-monitoring",{"title":181,"path":182,"stem":183},"What is Single Pane of Glass? Solution to Unified IT Management","\u002Fblog\u002Fsingle-pane-of-glass","blog\u002Fsingle-pane-of-glass",{"title":185,"path":186,"stem":187},"Defining SLA\u002FSLO-Driven Monitoring Requirements in 2025","\u002Fblog\u002Fsla-slo-monitoring-requirements","blog\u002Fsla-slo-monitoring-requirements",{"title":189,"path":190,"stem":191},"Splunk Pricing & Costs: Free vs Enterprise","\u002Fblog\u002Fsplunk-pricing","blog\u002Fsplunk-pricing",{"title":193,"path":194,"stem":195},"Monitoring Spring Boot Microservices with Actuator, Micrometer, and OpenTelemetry","\u002Fblog\u002Fspring-boot-microservices-monitoring","blog\u002Fspring-boot-microservices-monitoring",{"title":197,"path":198,"stem":199},"How to Become an SRE Engineer","\u002Fblog\u002Fsre-engineer-career","blog\u002Fsre-engineer-career",{"title":201,"path":202,"stem":203},"Evaluating Synthetic Monitoring Platforms: What to Look for in 2025","\u002Fblog\u002Fsynthetic-monitoring","blog\u002Fsynthetic-monitoring",{"title":205,"path":206,"stem":207},"Syslog Implementation: Servers, Integration and Best Practices","\u002Fblog\u002Fsyslog-guide","blog\u002Fsyslog-guide",{"title":209,"path":210,"stem":211},"A Developer's Framework for Selecting the Right Tracing Vendor","\u002Fblog\u002Ftracing-vendor-framework","blog\u002Ftracing-vendor-framework",{"title":213,"path":214,"stem":215},"Install ZFS on Ubuntu [Debian]","\u002Fblog\u002Fubuntu-install-zfs","blog\u002Fubuntu-install-zfs",{"title":217,"path":218,"stem":219},"How to Fix \"Upstream Connect Error\" in 7 Different Contexts","\u002Fblog\u002Fupstream-connect-error","blog\u002Fupstream-connect-error",{"title":221,"path":222,"stem":223},"Uptrace v1.6 is available","\u002Fblog\u002Fuptrace-v16","blog\u002Fuptrace-v16",{"title":225,"path":226,"stem":227},"Uptrace v1.7 is released","\u002Fblog\u002Fuptrace-v17","blog\u002Fuptrace-v17",{"title":229,"path":230,"stem":231},"Uptrace v2.0: The Future of Observability is Here","\u002Fblog\u002Fuptrace-v20","blog\u002Fuptrace-v20",{"page":233,"surround":2321},{"id":234,"title":101,"author":235,"author_site":236,"body":237,"date":2308,"description":2309,"extension":2310,"image":2311,"meta":2312,"navigation":567,"path":102,"seo":2319,"stem":103,"__hash__":2320},"blog\u002Fblog\u002Fllm-cost-monitoring.md","abandurchin",null,{"type":238,"value":239,"toc":2297},"minimark",[240,244,247,259,264,273,276,287,293,299,303,306,454,463,466,470,482,652,655,663,679,683,686,1318,1324,1328,1331,1809,1812,1816,1824,2110,2116,2120,2130,2180,2187,2191,2200,2216,2231,2242,2250,2254,2257,2269,2278,2284,2293],[241,242,243],"p",{},"Teams running LLM applications in production face a cost problem that traditional APM tools were never designed to solve. 
CPU and memory costs are relatively predictable — a web service processing 1,000 requests per second costs roughly the same week over week. LLM API costs are not. A single user session can cost $0.01 or $5 depending on prompt length, model choice, conversation history, and how many retries happen inside your chain. Without instrumentation, cost anomalies are invisible until the monthly invoice.",[241,245,246],{},"The standard pattern: a team launches a feature using GPT-5, everything looks fine in staging, and then production traffic reveals that a small percentage of requests trigger long multi-turn conversations that cost 50× more than the average. By the time the bill arrives, the cost has already happened.",[241,248,249,250,254,255,258],{},"OpenTelemetry's GenAI semantic conventions solve this at the instrumentation layer. The ",[251,252,253],"code",{},"gen_ai.usage.input_tokens"," and ",[251,256,257],{},"gen_ai.usage.output_tokens"," attributes are captured automatically per API call, giving you token-level visibility that you can turn into dollar figures, per-request cost breakdowns, and budget alerts — using the same observability stack you already have.",[260,261,263],"h2",{"id":262},"why-standard-apm-misses-llm-costs","Why Standard APM Misses LLM Costs",[241,265,266,267,272],{},"Traditional ",[268,269,271],"a",{"href":270},"\u002Fopentelemetry\u002Fapm","APM"," tracks latency, error rates, and throughput. These metrics are meaningful for LLM applications too, but they say nothing about financial cost. A request that takes 3 seconds and costs $0.002 looks identical in APM to one that takes 3 seconds and costs $0.40. Both have the same latency. Only token counts tell you the difference.",[241,274,275],{},"Three things make LLM costs hard to track without dedicated instrumentation:",[241,277,278,282,283,286],{},[279,280,281],"strong",{},"Token consumption is buried inside SDK calls."," Unless you manually read ",[251,284,285],{},"response.usage"," after every API call and record it somewhere, the data never appears in your traces or metrics. Most applications don't do this consistently.",[241,288,289,292],{},[279,290,291],{},"Costs happen across chained calls."," A LangChain agent might make 8 OpenAI calls to answer a single user question. The cost of the full interaction is the sum of all 8, but standard tracing only shows individual requests — not their aggregate cost under a parent operation.",[241,294,295,298],{},[279,296,297],{},"Model prices vary widely and change."," GPT-5.4 costs 12× more per input token than GPT-5.4-nano ($2.50 vs $0.20 per 1M tokens). Reasoning models like o3 and o4-mini bill internal \"thinking\" tokens that never appear in the response but still cost money. 
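To make the first point concrete, this is roughly what the manual bookkeeping looks like when there is no instrumentation in place. A minimal sketch using the OpenAI SDK; the model name and prompt are placeholders:

```python
from openai import OpenAI

client = OpenAI()

response = client.chat.completions.create(
    model="gpt-5",
    messages=[{"role": "user", "content": "Summarize this support ticket..."}],
)

# The token counts are on the response object, but they only become
# observable if you explicitly record them after every single call.
usage = response.usage
print(f"input_tokens={usage.prompt_tokens} output_tokens={usage.completion_tokens}")
```

Multiply this boilerplate by every call site and it is easy to see why most codebases skip it; the automatic instrumentation shown later in this post captures the same data without touching call sites.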
## LLM Pricing Reference

Current pricing for the most common models (April 2026 — always verify against provider docs as prices change):

| Model | Input (per 1M tokens) | Output (per 1M tokens) | Notes |
|---|---|---|---|
| gpt-5.4 | $2.50 | $15.00 | OpenAI flagship (Mar 2026) |
| gpt-5 | $1.25 | $10.00 | Good balance of cost and capability |
| gpt-5.4-mini | $0.75 | $4.50 | Mid-tier, good for most tasks |
| gpt-5.4-nano | $0.20 | $1.25 | Lowest cost in GPT-5.4 family |
| o3 | $2.00 | $8.00 | Reasoning model — see note below |
| o4-mini | $1.10 | $4.40 | Compact reasoning model |
| claude-sonnet-4.6 | $3.00 | $15.00 | Anthropic recommended |
| claude-haiku-4.5 | $1.00 | $5.00 | Anthropic budget tier |
| gemini-2.5-pro | $1.25 | $10.00 | Contexts under 200K tokens |

**Reasoning models (o3, o4-mini) require special handling.** These models use internal "reasoning tokens" during inference that are billed as output tokens but not returned in the response. `gen_ai.usage.output_tokens` includes these hidden tokens, so actual cost can be significantly higher than visible completion length suggests. Set conservative alert thresholds for o-series models and treat output token counts as an upper bound on reasoning effort.

Output tokens are consistently more expensive than input tokens — 4–8× for most models. Applications generating long completions (code, detailed explanations) have very different cost profiles from those producing short factual answers.
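To make the table concrete: a request with 312 input tokens and 87 output tokens (the numbers from the span example later in this post) costs about $0.0013 on gpt-5 (312 × $1.25/1M + 87 × $10.00/1M), about $0.0021 on gpt-5.4, and under $0.0002 on gpt-5.4-nano. Individual requests are cheap; the bill comes from multiplying these fractions of a cent across millions of calls, which is why aggregate, per-model visibility matters.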
## Capturing Token Usage with OpenTelemetry

The `opentelemetry-instrumentation-openai-v2` package automatically records `gen_ai.usage.input_tokens` and `gen_ai.usage.output_tokens` on every span. No manual response parsing required:

```python
from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter

provider = TracerProvider()
provider.add_span_processor(
    BatchSpanProcessor(OTLPSpanExporter())
)
trace.set_tracer_provider(provider)

OpenAIInstrumentor().instrument()

# All subsequent OpenAI calls are automatically traced with token counts
from openai import OpenAI
client = OpenAI()
```

Each span now carries the token breakdown:

```text
Span: gen_ai.operation.name = "chat"
  gen_ai.system              = "openai"
  gen_ai.request.model       = "gpt-5"
  gen_ai.usage.input_tokens  = 312
  gen_ai.usage.output_tokens = 87
  gen_ai.response.finish_reason = "stop"
```

For Anthropic, the equivalent package is `opentelemetry-instrumentation-anthropic`. Both emit the same `gen_ai.*` attributes, so your queries and dashboards work across providers. For full setup instructions and available options, see the [OpenAI instrumentation guide](/guides/opentelemetry-openai).
## Calculating Cost Per Request

With token counts on spans, cost calculation is straightforward. Add it as a custom span attribute so it's queryable alongside everything else:

```python
from opentelemetry import trace

# Keep pricing in one place — update when providers change rates
MODEL_PRICING = {
    "gpt-5.4":              {"input": 2.50,  "output": 15.00},
    "gpt-5":                {"input": 1.25,  "output": 10.00},
    "gpt-5.4-mini":         {"input": 0.75,  "output": 4.50},
    "gpt-5.4-nano":         {"input": 0.20,  "output": 1.25},
    "o3":                   {"input": 2.00,  "output": 8.00},
    "o4-mini":              {"input": 1.10,  "output": 4.40},
    "claude-sonnet-4-6":    {"input": 3.00,  "output": 15.00},
    "claude-haiku-4-5":     {"input": 1.00,  "output": 5.00},
    "gemini-2.5-pro":       {"input": 1.25,  "output": 10.00},
}

def calculate_cost_usd(model: str, input_tokens: int, output_tokens: int) -> float:
    pricing = MODEL_PRICING.get(model, {"input": 0.0, "output": 0.0})
    return (input_tokens * pricing["input"] + output_tokens * pricing["output"]) / 1_000_000

tracer = trace.get_tracer(__name__)

def chat_with_cost(prompt: str, model: str = "gpt-5") -> str:
    with tracer.start_as_current_span("llm.chat") as span:
        span.set_attribute("gen_ai.request.model", model)

        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}]
        )

        input_tokens = response.usage.prompt_tokens
        output_tokens = response.usage.completion_tokens
        cost = calculate_cost_usd(model, input_tokens, output_tokens)

        span.set_attribute("gen_ai.usage.input_tokens", input_tokens)
        span.set_attribute("gen_ai.usage.output_tokens", output_tokens)
        span.set_attribute("llm.cost.usd", cost)

        return response.choices[0].message.content
```

The `llm.cost.usd` attribute is now queryable in your observability backend: filter by model, sum over time ranges, group by service or user.

## Tracking Total Cost per Agent Run

When a single user operation triggers multiple LLM calls — a LangChain agent, a multi-step chain, any orchestrated workflow — you want the total cost of the full interaction, not just individual calls. Wrap the operation in a parent span and aggregate:
```python
def run_research_agent(question: str, user_id: str) -> str:
    with tracer.start_as_current_span("agent.run") as parent_span:
        parent_span.set_attribute("app.user_id", user_id)
        parent_span.set_attribute("app.operation", "research")

        total_cost = 0.0
        total_input_tokens = 0
        total_output_tokens = 0

        # Step 1: decompose the question (cheap model)
        with tracer.start_as_current_span("agent.decompose") as span:
            response = client.chat.completions.create(
                model="gpt-5.4-nano",
                messages=[{"role": "user", "content": f"Break this into sub-questions: {question}"}]
            )
            step_cost = calculate_cost_usd(
                "gpt-5.4-nano",
                response.usage.prompt_tokens,
                response.usage.completion_tokens
            )
            span.set_attribute("llm.cost.usd", step_cost)
            total_cost += step_cost
            total_input_tokens += response.usage.prompt_tokens
            total_output_tokens += response.usage.completion_tokens
            sub_questions = response.choices[0].message.content

        # Step 2: answer each sub-question (full model)
        with tracer.start_as_current_span("agent.answer") as span:
            response = client.chat.completions.create(
                model="gpt-5",
                messages=[{"role": "user", "content": sub_questions}]
            )
            step_cost = calculate_cost_usd(
                "gpt-5",
                response.usage.prompt_tokens,
                response.usage.completion_tokens
            )
            span.set_attribute("llm.cost.usd", step_cost)
            total_cost += step_cost
            total_input_tokens += response.usage.prompt_tokens
            total_output_tokens += response.usage.completion_tokens
            answer = response.choices[0].message.content

        # Record totals on the parent span
        parent_span.set_attribute("llm.cost.usd", total_cost)
        parent_span.set_attribute("llm.total_input_tokens", total_input_tokens)
        parent_span.set_attribute("llm.total_output_tokens", total_output_tokens)

        return answer
```
With this structure you can query both individual step costs and total operation cost from the same trace.

## Recording Cost as an OpenTelemetry Metric

Spans are good for per-request cost. For aggregate spend over time — daily cost, cost by model, cost rate anomalies — [OpenTelemetry metrics](/opentelemetry/metrics) are the right tool. A counter accumulates continuously and can be queried for any time window:

```python
from opentelemetry import metrics

meter = metrics.get_meter(__name__)

# Counter for total cost in USD
cost_counter = meter.create_counter(
    name="llm.cost.usd",
    description="Cumulative LLM API cost in USD",
    unit="USD",
)

# Histogram for per-request cost distribution
cost_histogram = meter.create_histogram(
    name="llm.cost.per_request.usd",
    description="Cost distribution per LLM request",
    unit="USD",
)

def tracked_completion(model: str, messages: list) -> str:
    response = client.chat.completions.create(model=model, messages=messages)

    cost = calculate_cost_usd(
        model,
        response.usage.prompt_tokens,
        response.usage.completion_tokens
    )

    labels = {"gen_ai.request.model": model, "service.name": "my-service"}
    cost_counter.add(cost, labels)
    cost_histogram.record(cost, labels)

    return response.choices[0].message.content
```

The counter gives you cumulative spend that you can diff over any window. The histogram shows your cost distribution — whether you have occasional expensive outlier requests or a uniformly expensive workload. For broader AI metrics patterns — GPU utilization, inference latency histograms, sampling strategies for high-volume workloads — see [OpenTelemetry for AI Systems](/blog/opentelemetry-ai-systems).

## Cost Visibility in LangChain Applications

For LangChain chains and agents, `LangChainInstrumentor` captures spans for each chain step. Combine it with per-call cost attribution using the pattern above. For a deeper walkthrough of LangChain-specific monitoring patterns including silent failure detection, see the [LangChain observability guide](/blog/langchain-observability).

```python
from opentelemetry.instrumentation.langchain import LangChainInstrumentor
from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor

# Both instrumentors together: LangChain provides chain structure,
# OpenAI instrumentation provides token counts on each LLM call
LangChainInstrumentor().instrument()
OpenAIInstrumentor().instrument()
```

With both active, your trace shows the chain as the parent span and individual LLM calls — with `gen_ai.usage.*` attributes — as children. You can sum token counts across children to derive chain-level cost.
## Cost Dashboards and Alerts in Uptrace

Uptrace stores `gen_ai.usage.*` and custom `llm.cost.usd` attributes as queryable numeric fields in ClickHouse. Once traces and metrics are flowing, useful queries include:

**Daily cost by model:**
Group the `llm.cost.usd` metric by `gen_ai.request.model`, sum over 24h. This shows which model drives the most spend and whether usage shifted after a deployment.

**P99 cost per agent run:**
Filter parent spans with `app.operation = "research"`, take the 99th percentile of `llm.cost.usd`. High P99 means a small percentage of runs is generating disproportionate cost.

**Cost rate alert:**
Alert when the rate of the `llm.cost.usd` counter exceeds your threshold — for example, if hourly spend exceeds $50 when normal is under $10. This catches runaway loops or unexpected traffic spikes before they compound.

Configure your Uptrace DSN and OTLP endpoint via the [getting started guide](/get) to begin streaming telemetry.
Seeing ",[251,2291,253],{}," increase monotonically across a session identifies this pattern.",[2294,2295,2296],"style",{},"html pre.shiki code .sD7c4, html code.shiki .sD7c4{--shiki-default:#D73A49}html pre.shiki code .sgsFI, html code.shiki .sgsFI{--shiki-default:#24292E}html pre.shiki code .sAwPA, html code.shiki .sAwPA{--shiki-default:#6A737D}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html pre.shiki code .sYu0t, html code.shiki .sYu0t{--shiki-default:#005CC5}html pre.shiki code .sYBdl, html code.shiki .sYBdl{--shiki-default:#032F62}html pre.shiki code .s7eDp, html code.shiki .s7eDp{--shiki-default:#6F42C1}html pre.shiki code .sqxcx, html code.shiki .sqxcx{--shiki-default:#E36209}",{"title":488,"searchDepth":538,"depth":512,"links":2298},[2299,2300,2301,2302,2303,2304,2305,2306,2307],{"id":262,"depth":512,"text":263},{"id":301,"depth":512,"text":302},{"id":468,"depth":512,"text":469},{"id":681,"depth":512,"text":682},{"id":1326,"depth":512,"text":1327},{"id":1814,"depth":512,"text":1815},{"id":2118,"depth":512,"text":2119},{"id":2189,"depth":512,"text":2190},{"id":2252,"depth":512,"text":2253},"2026-04-07T00:00:00.000Z","Track and control OpenAI, Anthropic, and other LLM API costs with OpenTelemetry. Monitor token usage per request, calculate real-time spend, and set budget alerts.","md","\u002Fblog\u002Fllm-cost-monitoring\u002Fcover.webp",{"keyword_difficulty":2313,"readingTime":2314},"medium",{"text":2315,"minutes":2316,"time":2317,"words":2318},"8 min read",7.815,468900,1563,{"title":101,"description":2309},"qTLX-ZGJknyjiBaFEnSVZW5agI0x06NDuHL0Ca09Qnw",[2322,2324],{"title":97,"path":98,"stem":99,"description":2323,"children":-1},"Monitor LangChain applications in production: track token costs, debug chain failures and measure performance with OpenTelemetry. Includes code examples and deployment guides.",{"title":105,"path":106,"stem":107,"description":2325,"children":-1},"What is microservices architecture and when to use it? 
Explore key design patterns, service decomposition, communication strategies, and monitoring techniques for building scalable and observable systems.",{"id":2327,"node_id":2328,"name":2329,"full_name":2330,"private":2331,"owner":2332,"html_url":2349,"description":2350,"fork":2331,"url":2351,"forks_url":2352,"keys_url":2353,"collaborators_url":2354,"teams_url":2355,"hooks_url":2356,"issue_events_url":2357,"events_url":2358,"assignees_url":2359,"branches_url":2360,"tags_url":2361,"blobs_url":2362,"git_tags_url":2363,"git_refs_url":2364,"trees_url":2365,"statuses_url":2366,"languages_url":2367,"stargazers_url":2368,"contributors_url":2369,"subscribers_url":2370,"subscription_url":2371,"commits_url":2372,"git_commits_url":2373,"comments_url":2374,"issue_comment_url":2375,"contents_url":2376,"compare_url":2377,"merges_url":2378,"archive_url":2379,"downloads_url":2380,"issues_url":2381,"pulls_url":2382,"milestones_url":2383,"notifications_url":2384,"labels_url":2385,"releases_url":2386,"deployments_url":2387,"created_at":2388,"updated_at":2389,"pushed_at":2390,"git_url":2391,"ssh_url":2392,"clone_url":2393,"svn_url":2349,"homepage":2394,"size":2395,"stargazers_count":2396,"watchers_count":2396,"language":2397,"has_issues":567,"has_projects":567,"has_downloads":567,"has_wiki":567,"has_pages":2331,"has_discussions":567,"forks_count":2398,"mirror_url":236,"archived":2331,"disabled":2331,"open_issues_count":1289,"license":2399,"allow_forking":567,"is_template":2331,"web_commit_signoff_required":2331,"has_pull_requests":567,"pull_request_creation_policy":2405,"topics":2406,"visibility":2348,"forks":2398,"open_issues":1289,"watchers":2396,"default_branch":2421,"temp_clone_token":236,"custom_properties":2422,"organization":2423,"network_count":2398,"subscribers_count":1218},440841550,"R_kgDOGka1Tg","uptrace","uptrace\u002Fuptrace",false,{"login":2329,"id":2333,"node_id":2334,"avatar_url":2335,"gravatar_id":488,"url":2336,"html_url":2337,"followers_url":2338,"following_url":2339,"gists_url":2340,"starred_url":2341,"subscriptions_url":2342,"organizations_url":2343,"repos_url":2344,"events_url":2345,"received_events_url":2346,"type":2347,"user_view_type":2348,"site_admin":2331},64948717,"MDEyOk9yZ2FuaXphdGlvbjY0OTQ4NzE3","https:\u002F\u002Favatars.githubusercontent.com\u002Fu\u002F64948717?v=4","https:\u002F\u002Fapi.github.com\u002Fusers\u002Fuptrace","https:\u002F\u002Fgithub.com\u002Fuptrace","https:\u002F\u002Fapi.github.com\u002Fusers\u002Fuptrace\u002Ffollowers","https:\u002F\u002Fapi.github.com\u002Fusers\u002Fuptrace\u002Ffollowing{\u002Fother_user}","https:\u002F\u002Fapi.github.com\u002Fusers\u002Fuptrace\u002Fgists{\u002Fgist_id}","https:\u002F\u002Fapi.github.com\u002Fusers\u002Fuptrace\u002Fstarred{\u002Fowner}{\u002Frepo}","https:\u002F\u002Fapi.github.com\u002Fusers\u002Fuptrace\u002Fsubscriptions","https:\u002F\u002Fapi.github.com\u002Fusers\u002Fuptrace\u002Forgs","https:\u002F\u002Fapi.github.com\u002Fusers\u002Fuptrace\u002Frepos","https:\u002F\u002Fapi.github.com\u002Fusers\u002Fuptrace\u002Fevents{\u002Fprivacy}","https:\u002F\u002Fapi.github.com\u002Fusers\u002Fuptrace\u002Freceived_events","Organization","public","https:\u002F\u002Fgithub.com\u002Fuptrace\u002Fuptrace","Open source APM: OpenTelemetry traces, metrics, and 
logs","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fforks","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fkeys{\u002Fkey_id}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fcollaborators{\u002Fcollaborator}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fteams","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fhooks","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fissues\u002Fevents{\u002Fnumber}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fevents","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fassignees{\u002Fuser}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fbranches{\u002Fbranch}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Ftags","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fgit\u002Fblobs{\u002Fsha}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fgit\u002Ftags{\u002Fsha}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fgit\u002Frefs{\u002Fsha}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fgit\u002Ftrees{\u002Fsha}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fstatuses\u002F{sha}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Flanguages","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fstargazers","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fcontributors","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fsubscribers","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fsubscription","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fcommits{\u002Fsha}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fgit\u002Fcommits{\u002Fsha}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fcomments{\u002Fnumber}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fissues\u002Fcomments{\u002Fnumber}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fcontents\u002F{+path}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fcompare\u002F{base}...{head}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fmerges","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002F{archive_format}{\u002Fref}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fdownloads","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fissues{\u002Fnumber}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fpulls{\u002Fnumber}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fmilestones{\u002Fnumber}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fnotifications{?since,all,participating}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Flabels{\u002Fname}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Freleases{\u002Fid}","https:\u002F\u002Fapi.github.com\u002Frepos\u002Fuptrace\u002Fuptrace\u002Fdeployments","2021-
12-22T11:53:18Z","2026-04-08T09:57:56Z","2026-03-13T23:34:49Z","git:\u002F\u002Fgithub.com\u002Fuptrace\u002Fuptrace.git","git@github.com:uptrace\u002Fuptrace.git","https:\u002F\u002Fgithub.com\u002Fuptrace\u002Fuptrace.git","https:\u002F\u002Fuptrace.dev\u002Fget\u002Fhosted\u002Fopen-source-apm",5919,4163,"Go",202,{"key":2400,"name":2401,"spdx_id":2402,"url":2403,"node_id":2404},"agpl-3.0","GNU Affero General Public License v3.0","AGPL-3.0","https:\u002F\u002Fapi.github.com\u002Flicenses\u002Fagpl-3.0","MDc6TGljZW5zZTE=","all",[2407,2408,2409,2410,2411,2412,2413,2414,2415,2416,2417,2418,2419,2420],"apm","application-monitoring","clickhouse","distributed-tracing","golang","logs","metrics","monitoring","observability","opentelemetry","performance-monitoring","self-hosted","tracing","vue","master",{},{"login":2329,"id":2333,"node_id":2334,"avatar_url":2335,"gravatar_id":488,"url":2336,"html_url":2337,"followers_url":2338,"following_url":2339,"gists_url":2340,"starred_url":2341,"subscriptions_url":2342,"organizations_url":2343,"repos_url":2344,"events_url":2345,"received_events_url":2346,"type":2347,"user_view_type":2348,"site_admin":2331},1775654762189]