Skip to content

Commit 01c9c16

Browse files
vercel-ai-sdk[bot], seojcarlos, and aayush-kapoor
authored
Backport: fix(openai-compatible): honor camelCase providerOptions key in chat and completion models (#14172)
This is an automated backport of #14135 to the release-v6.0 branch. FYI @seojcarlos This backport has conflicts that need to be resolved manually. ### `git cherry-pick` output ``` Auto-merging packages/openai-compatible/src/chat/openai-compatible-chat-language-model.test.ts Auto-merging packages/openai-compatible/src/chat/openai-compatible-chat-language-model.ts CONFLICT (content): Merge conflict in packages/openai-compatible/src/chat/openai-compatible-chat-language-model.ts Auto-merging packages/openai-compatible/src/completion/openai-compatible-completion-language-model.test.ts Auto-merging packages/openai-compatible/src/completion/openai-compatible-completion-language-model.ts Auto-merging packages/openai-compatible/src/image/openai-compatible-image-model.ts error: could not apply 816ff67... fix(openai-compatible): honor camelCase providerOptions key in chat and completion models (#14135) hint: After resolving the conflicts, mark them with hint: "git add/rm <pathspec>", then run hint: "git cherry-pick --continue". hint: You can instead skip this commit with "git cherry-pick --skip". hint: To abort and get back to the state before "git cherry-pick", hint: run "git cherry-pick --abort". hint: Disable this message with "git config set advice.mergeConflict false" ``` --------- Co-authored-by: Juan Carlos Díaz <jcarlos@convertiam.com> Co-authored-by: Aayush Kapoor <aayushkapoor34@gmail.com>
1 parent 7f79020 commit 01c9c16

File tree

8 files changed

+479
-17
lines changed

8 files changed

+479
-17
lines changed

.changeset/fast-ears-beam.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"@ai-sdk/openai-compatible": patch
3+
---
4+
5+
fix(openai-compatible): honor camelCase providerOptions key in chat and completion models

packages/openai-compatible/src/chat/openai-compatible-chat-language-model.test.ts

Lines changed: 275 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -520,6 +520,172 @@ describe('doGenerate', () => {
520520
`);
521521
});
522522

523+
describe('camelCase provider options', () => {
524+
it('should accept camelCase provider options key for hyphenated provider name', async () => {
525+
prepareJsonResponse({ content: 'Hello!' });
526+
527+
await provider('grok-3').doGenerate({
528+
providerOptions: {
529+
testProvider: {
530+
someCustomOption: 'test-value',
531+
},
532+
},
533+
prompt: TEST_PROMPT,
534+
});
535+
536+
expect(await server.calls[0].requestBodyJson).toMatchObject({
537+
someCustomOption: 'test-value',
538+
});
539+
});
540+
541+
it('should prefer camelCase options over raw-name options', async () => {
542+
prepareJsonResponse({ content: 'Hello!' });
543+
544+
await provider('grok-3').doGenerate({
545+
providerOptions: {
546+
'test-provider': {
547+
someCustomOption: 'raw-value',
548+
},
549+
testProvider: {
550+
someCustomOption: 'camel-value',
551+
},
552+
},
553+
prompt: TEST_PROMPT,
554+
});
555+
556+
expect(await server.calls[0].requestBodyJson).toMatchObject({
557+
someCustomOption: 'camel-value',
558+
});
559+
});
560+
561+
it('should use camelCase metadata key when camelCase provider options are used', async () => {
562+
prepareJsonResponse({
563+
content: 'Hello!',
564+
usage: {
565+
prompt_tokens: 20,
566+
completion_tokens: 30,
567+
total_tokens: 50,
568+
completion_tokens_details: {
569+
accepted_prediction_tokens: 15,
570+
},
571+
},
572+
});
573+
574+
const result = await provider('grok-3').doGenerate({
575+
providerOptions: {
576+
testProvider: { reasoningEffort: 'high' },
577+
},
578+
prompt: TEST_PROMPT,
579+
});
580+
581+
expect(result.providerMetadata).toHaveProperty('testProvider');
582+
expect(result.providerMetadata).not.toHaveProperty('test-provider');
583+
expect(result.providerMetadata!['testProvider']).toMatchObject({
584+
acceptedPredictionTokens: 15,
585+
});
586+
});
587+
588+
it('should use raw metadata key when raw provider options are used', async () => {
589+
prepareJsonResponse({
590+
content: 'Hello!',
591+
usage: {
592+
prompt_tokens: 20,
593+
completion_tokens: 30,
594+
total_tokens: 50,
595+
completion_tokens_details: {
596+
accepted_prediction_tokens: 15,
597+
},
598+
},
599+
});
600+
601+
const result = await provider('grok-3').doGenerate({
602+
providerOptions: {
603+
'test-provider': { reasoningEffort: 'high' },
604+
},
605+
prompt: TEST_PROMPT,
606+
});
607+
608+
expect(result.providerMetadata).toHaveProperty('test-provider');
609+
expect(result.providerMetadata!['test-provider']).toMatchObject({
610+
acceptedPredictionTokens: 15,
611+
});
612+
});
613+
614+
it('should use raw metadata key when no provider options are passed', async () => {
615+
prepareJsonResponse({ content: 'Hello!' });
616+
617+
const result = await provider('grok-3').doGenerate({
618+
prompt: TEST_PROMPT,
619+
});
620+
621+
expect(result.providerMetadata).toHaveProperty('test-provider');
622+
});
623+
624+
it('should include thought signature in providerMetadata with camelCase key', async () => {
625+
prepareJsonResponse({
626+
tool_calls: [
627+
{
628+
id: 'call-1',
629+
type: 'function' as const,
630+
function: {
631+
name: 'test_tool',
632+
arguments: '{"arg":"value"}',
633+
},
634+
extra_content: {
635+
google: { thought_signature: '<Sig>' },
636+
},
637+
},
638+
],
639+
});
640+
641+
const result = await provider('grok-3').doGenerate({
642+
providerOptions: { testProvider: {} },
643+
prompt: TEST_PROMPT,
644+
});
645+
646+
expect(result.content).toMatchObject([
647+
{
648+
type: 'tool-call',
649+
providerMetadata: {
650+
testProvider: { thoughtSignature: '<Sig>' },
651+
},
652+
},
653+
]);
654+
});
655+
656+
it('should include thought signature in providerMetadata with raw key', async () => {
657+
prepareJsonResponse({
658+
tool_calls: [
659+
{
660+
id: 'call-1',
661+
type: 'function' as const,
662+
function: {
663+
name: 'test_tool',
664+
arguments: '{"arg":"value"}',
665+
},
666+
extra_content: {
667+
google: { thought_signature: '<Sig>' },
668+
},
669+
},
670+
],
671+
});
672+
673+
const result = await provider('grok-3').doGenerate({
674+
providerOptions: { 'test-provider': {} },
675+
prompt: TEST_PROMPT,
676+
});
677+
678+
expect(result.content).toMatchObject([
679+
{
680+
type: 'tool-call',
681+
providerMetadata: {
682+
'test-provider': { thoughtSignature: '<Sig>' },
683+
},
684+
},
685+
]);
686+
});
687+
});
688+
523689
it('should pass tools and toolChoice', async () => {
524690
prepareJsonResponse({ content: '' });
525691

@@ -2874,6 +3040,115 @@ describe('doStream', () => {
28743040
`);
28753041
});
28763042

3043+
describe('camelCase provider options', () => {
3044+
it('should accept camelCase provider options key for hyphenated provider name', async () => {
3045+
prepareStreamResponse({ content: [] });
3046+
3047+
await provider('grok-3').doStream({
3048+
providerOptions: {
3049+
testProvider: {
3050+
someCustomOption: 'test-value',
3051+
},
3052+
},
3053+
prompt: TEST_PROMPT,
3054+
includeRawChunks: false,
3055+
});
3056+
3057+
expect(await server.calls[0].requestBodyJson).toMatchObject({
3058+
someCustomOption: 'test-value',
3059+
});
3060+
});
3061+
3062+
it('should prefer camelCase options over raw-name options', async () => {
3063+
prepareStreamResponse({ content: [] });
3064+
3065+
await provider('grok-3').doStream({
3066+
providerOptions: {
3067+
'test-provider': { someCustomOption: 'raw-value' },
3068+
testProvider: { someCustomOption: 'camel-value' },
3069+
},
3070+
prompt: TEST_PROMPT,
3071+
includeRawChunks: false,
3072+
});
3073+
3074+
expect(await server.calls[0].requestBodyJson).toMatchObject({
3075+
someCustomOption: 'camel-value',
3076+
});
3077+
});
3078+
3079+
it('should use camelCase metadata key in finish event when camelCase options are used', async () => {
3080+
prepareStreamResponse({ content: ['Hello'] });
3081+
3082+
const { stream } = await provider('grok-3').doStream({
3083+
providerOptions: { testProvider: {} },
3084+
prompt: TEST_PROMPT,
3085+
includeRawChunks: false,
3086+
});
3087+
3088+
const parts = await convertReadableStreamToArray(stream);
3089+
const finishPart = parts.find(part => part.type === 'finish');
3090+
3091+
expect(finishPart?.providerMetadata).toHaveProperty('testProvider');
3092+
expect(finishPart?.providerMetadata).not.toHaveProperty('test-provider');
3093+
});
3094+
3095+
it('should use raw metadata key in finish event when raw options are used', async () => {
3096+
prepareStreamResponse({ content: ['Hello'] });
3097+
3098+
const { stream } = await provider('grok-3').doStream({
3099+
providerOptions: { 'test-provider': {} },
3100+
prompt: TEST_PROMPT,
3101+
includeRawChunks: false,
3102+
});
3103+
3104+
const parts = await convertReadableStreamToArray(stream);
3105+
const finishPart = parts.find(part => part.type === 'finish');
3106+
3107+
expect(finishPart?.providerMetadata).toHaveProperty('test-provider');
3108+
});
3109+
3110+
it('should use raw metadata key in finish event when no provider options are passed', async () => {
3111+
prepareStreamResponse({ content: ['Hello'] });
3112+
3113+
const { stream } = await provider('grok-3').doStream({
3114+
prompt: TEST_PROMPT,
3115+
includeRawChunks: false,
3116+
});
3117+
3118+
const parts = await convertReadableStreamToArray(stream);
3119+
const finishPart = parts.find(part => part.type === 'finish');
3120+
3121+
expect(finishPart?.providerMetadata).toHaveProperty('test-provider');
3122+
});
3123+
3124+
it('should use camelCase metadata key for thought signatures in streamed tool calls', async () => {
3125+
server.urls['https://my.api.com/v1/chat/completions'].response = {
3126+
type: 'stream-chunks',
3127+
chunks: [
3128+
`data: {"id":"chat-id","choices":[{"index":0,"delta":{"role":"assistant","tool_calls":[{"index":0,"id":"call-1","type":"function","function":{"name":"test_tool","arguments":"{\\"a\\":1}"},"extra_content":{"google":{"thought_signature":"<Sig>"}}}]},"finish_reason":null}]}\n\n`,
3129+
`data: {"id":"chat-id","choices":[{"index":0,"delta":{},"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":10,"completion_tokens":5,"total_tokens":15}}\n\n`,
3130+
'data: [DONE]\n\n',
3131+
],
3132+
};
3133+
3134+
const { stream } = await provider('grok-3').doStream({
3135+
providerOptions: { testProvider: {} },
3136+
prompt: TEST_PROMPT,
3137+
includeRawChunks: false,
3138+
});
3139+
3140+
const parts = await convertReadableStreamToArray(stream);
3141+
const toolCallEvent = parts.find(part => part.type === 'tool-call');
3142+
3143+
expect(toolCallEvent).toMatchObject({
3144+
type: 'tool-call',
3145+
providerMetadata: {
3146+
testProvider: { thoughtSignature: '<Sig>' },
3147+
},
3148+
});
3149+
});
3150+
});
3151+
28773152
it('should send request body', async () => {
28783153
prepareStreamResponse({ content: [] });
28793154

0 commit comments

Comments
 (0)