Skip to content

Commit 17978c6

Browse files
jaderiverstokes, gr2m, and claude
authored
fix: add automatic caching for anthropic (#12743)
## Background

Anthropic now supports automatic caching. The top-level provider options support it already; however, it gets stripped before being sent to the provider.

## Summary

Pass the `cacheControl` flag through to the provider.

## Manual Verification

Adds a unit test.

## Checklist

- [X] Tests have been added / updated (for bug fixes / features)
- [ ] Documentation has been added / updated (for bug fixes / features)
- [ ] A _patch_ changeset for relevant packages has been added (for bug fixes / features - run `pnpm changeset` in the project root)
- [X] I have reviewed this pull request (self-review)

## Related Issues

Fixes #12712

---------

Co-authored-by: Gregor Martynus <39992+gr2m@users.noreply.github.com>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 704abfb commit 17978c6

File tree

3 files changed

+81
-0
lines changed

3 files changed

+81
-0
lines changed
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
---
'@ai-sdk/anthropic': patch
---

Pass `cacheControl` provider option as top-level `cache_control` in Anthropic API request body to support automatic caching.

packages/anthropic/src/anthropic-messages-language-model.test.ts

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4168,6 +4168,79 @@ describe('AnthropicMessagesLanguageModel', () => {
41684168
expect(result.warnings).toStrictEqual([]);
41694169
});
41704170

4171+
it('should pass cache_control to request body', async () => {
4172+
prepareJsonFixtureResponse('anthropic-text');
4173+
4174+
const result = await model.doGenerate({
4175+
prompt: TEST_PROMPT,
4176+
providerOptions: {
4177+
anthropic: {
4178+
cacheControl: { type: 'ephemeral' },
4179+
} satisfies AnthropicLanguageModelOptions,
4180+
},
4181+
});
4182+
4183+
expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
4184+
{
4185+
"cache_control": {
4186+
"type": "ephemeral",
4187+
},
4188+
"max_tokens": 4096,
4189+
"messages": [
4190+
{
4191+
"content": [
4192+
{
4193+
"text": "Hello",
4194+
"type": "text",
4195+
},
4196+
],
4197+
"role": "user",
4198+
},
4199+
],
4200+
"model": "claude-3-haiku-20240307",
4201+
}
4202+
`);
4203+
4204+
expect(result.warnings).toStrictEqual([]);
4205+
});
4206+
4207+
it('should pass cache_control with ttl to request body', async () => {
4208+
prepareJsonFixtureResponse('anthropic-text');
4209+
4210+
const result = await model.doGenerate({
4211+
prompt: TEST_PROMPT,
4212+
providerOptions: {
4213+
anthropic: {
4214+
cacheControl: { type: 'ephemeral', ttl: '1h' },
4215+
} satisfies AnthropicLanguageModelOptions,
4216+
},
4217+
});
4218+
4219+
expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
4220+
{
4221+
"cache_control": {
4222+
"ttl": "1h",
4223+
"type": "ephemeral",
4224+
},
4225+
"max_tokens": 4096,
4226+
"messages": [
4227+
{
4228+
"content": [
4229+
{
4230+
"text": "Hello",
4231+
"type": "text",
4232+
},
4233+
],
4234+
"role": "user",
4235+
},
4236+
],
4237+
"model": "claude-3-haiku-20240307",
4238+
}
4239+
`);
4240+
4241+
expect(result.warnings).toStrictEqual([]);
4242+
});
4243+
41714244
describe('context management', () => {
41724245
it('should send context_management in request body', async () => {
41734246
prepareJsonFixtureResponse('anthropic-text');

packages/anthropic/src/anthropic-messages-language-model.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -353,6 +353,9 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV3 {
353353
...(anthropicOptions?.speed && {
354354
speed: anthropicOptions.speed,
355355
}),
356+
...(anthropicOptions?.cacheControl && {
357+
cache_control: anthropicOptions.cacheControl,
358+
}),
356359

357360
// structured output:
358361
...(useStructuredOutput &&

0 commit comments

Comments (0)