Skip to content

Commit 370bbcd

Browse files
tyler6204 and w1kke authored
Model: add strict gpt-5.3-codex fallback for OpenAI Codex (fixes #9989) (#9995)
* Model: allow forward-compatible OpenAI Codex GPT-5 IDs

* Model: scope Codex fallback to gpt-5.3-codex

* fix: reorder codex fallback before providerCfg, add ordering test, changelog (#9989) (thanks @w1kke)

---------

Co-authored-by: Robin <4robinlehmann@gmail.com>
1 parent 6f4665d commit 370bbcd

File tree

3 files changed

+134
-1
lines changed

3 files changed

+134
-1
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ Docs: https://docs.openclaw.ai
3333

3434
### Fixes
3535

36+
- Models: add forward-compat fallback for `openai-codex/gpt-5.3-codex` when model registry hasn't discovered it yet. (#9989) Thanks @w1kke.
3637
- Auto-reply/Docs: normalize `extra-high` (and spaced variants) to `xhigh` for Codex thinking levels, and align Codex 5.3 FAQ examples. (#9976) Thanks @slonce70.
3738
- Compaction: remove orphaned `tool_result` messages during history pruning to prevent session corruption from aborted tool calls. (#9868, fixes #9769, #9724, #9672)
3839
- Telegram: pass `parentPeer` for forum topic binding inheritance so group-level bindings apply to all topics within the group. (#9789, fixes #9545, #9351)

src/agents/pi-embedded-runner/model.test.ts

Lines changed: 78 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,12 @@
1-
import { describe, expect, it, vi } from "vitest";
1+
import { beforeEach, describe, expect, it, vi } from "vitest";
22

33
vi.mock("../pi-model-discovery.js", () => ({
44
discoverAuthStorage: vi.fn(() => ({ mocked: true })),
55
discoverModels: vi.fn(() => ({ find: vi.fn(() => null) })),
66
}));
77

88
import type { OpenClawConfig } from "../../config/config.js";
9+
import { discoverModels } from "../pi-model-discovery.js";
910
import { buildInlineProviderModels, resolveModel } from "./model.js";
1011

1112
const makeModel = (id: string) => ({
@@ -18,6 +19,12 @@ const makeModel = (id: string) => ({
1819
maxTokens: 1,
1920
});
2021

22+
beforeEach(() => {
23+
vi.mocked(discoverModels).mockReturnValue({
24+
find: vi.fn(() => null),
25+
} as unknown as ReturnType<typeof discoverModels>);
26+
});
27+
2128
describe("buildInlineProviderModels", () => {
2229
it("attaches provider ids to inline models", () => {
2330
const providers = {
@@ -127,4 +134,74 @@ describe("resolveModel", () => {
127134
expect(result.model?.provider).toBe("custom");
128135
expect(result.model?.id).toBe("missing-model");
129136
});
137+
138+
it("builds an openai-codex fallback for gpt-5.3-codex", () => {
139+
const templateModel = {
140+
id: "gpt-5.2-codex",
141+
name: "GPT-5.2 Codex",
142+
provider: "openai-codex",
143+
api: "openai-codex-responses",
144+
baseUrl: "https://chatgpt.com/backend-api",
145+
reasoning: true,
146+
input: ["text", "image"] as const,
147+
cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 },
148+
contextWindow: 272000,
149+
maxTokens: 128000,
150+
};
151+
152+
vi.mocked(discoverModels).mockReturnValue({
153+
find: vi.fn((provider: string, modelId: string) => {
154+
if (provider === "openai-codex" && modelId === "gpt-5.2-codex") {
155+
return templateModel;
156+
}
157+
return null;
158+
}),
159+
} as unknown as ReturnType<typeof discoverModels>);
160+
161+
const result = resolveModel("openai-codex", "gpt-5.3-codex", "/tmp/agent");
162+
163+
expect(result.error).toBeUndefined();
164+
expect(result.model).toMatchObject({
165+
provider: "openai-codex",
166+
id: "gpt-5.3-codex",
167+
api: "openai-codex-responses",
168+
baseUrl: "https://chatgpt.com/backend-api",
169+
reasoning: true,
170+
contextWindow: 272000,
171+
maxTokens: 128000,
172+
});
173+
});
174+
175+
it("keeps unknown-model errors for non-gpt-5 openai-codex ids", () => {
176+
const result = resolveModel("openai-codex", "gpt-4.1-mini", "/tmp/agent");
177+
expect(result.model).toBeUndefined();
178+
expect(result.error).toBe("Unknown model: openai-codex/gpt-4.1-mini");
179+
});
180+
181+
it("uses codex fallback even when openai-codex provider is configured", () => {
182+
// This test verifies the ordering: codex fallback must fire BEFORE the generic providerCfg fallback.
183+
// If ordering is wrong, the generic fallback would use api: "openai-responses" (the default)
184+
// instead of "openai-codex-responses".
185+
const cfg: OpenClawConfig = {
186+
models: {
187+
providers: {
188+
"openai-codex": {
189+
baseUrl: "https://custom.example.com",
190+
// No models array, or models without gpt-5.3-codex
191+
},
192+
},
193+
},
194+
} as OpenClawConfig;
195+
196+
vi.mocked(discoverModels).mockReturnValue({
197+
find: vi.fn(() => null),
198+
} as unknown as ReturnType<typeof discoverModels>);
199+
200+
const result = resolveModel("openai-codex", "gpt-5.3-codex", "/tmp/agent", cfg);
201+
202+
expect(result.error).toBeUndefined();
203+
expect(result.model?.api).toBe("openai-codex-responses");
204+
expect(result.model?.id).toBe("gpt-5.3-codex");
205+
expect(result.model?.provider).toBe("openai-codex");
206+
});
130207
});

src/agents/pi-embedded-runner/model.ts

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,50 @@ type InlineProviderConfig = {
1919
models?: ModelDefinitionConfig[];
2020
};
2121

22+
const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
23+
24+
const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
25+
26+
function resolveOpenAICodexGpt53FallbackModel(
27+
provider: string,
28+
modelId: string,
29+
modelRegistry: ModelRegistry,
30+
): Model<Api> | undefined {
31+
const normalizedProvider = normalizeProviderId(provider);
32+
const trimmedModelId = modelId.trim();
33+
if (normalizedProvider !== "openai-codex") {
34+
return undefined;
35+
}
36+
if (trimmedModelId.toLowerCase() !== OPENAI_CODEX_GPT_53_MODEL_ID) {
37+
return undefined;
38+
}
39+
40+
for (const templateId of OPENAI_CODEX_TEMPLATE_MODEL_IDS) {
41+
const template = modelRegistry.find(normalizedProvider, templateId) as Model<Api> | null;
42+
if (!template) {
43+
continue;
44+
}
45+
return normalizeModelCompat({
46+
...template,
47+
id: trimmedModelId,
48+
name: trimmedModelId,
49+
} as Model<Api>);
50+
}
51+
52+
return normalizeModelCompat({
53+
id: trimmedModelId,
54+
name: trimmedModelId,
55+
api: "openai-codex-responses",
56+
provider: normalizedProvider,
57+
baseUrl: "https://chatgpt.com/backend-api",
58+
reasoning: true,
59+
input: ["text", "image"],
60+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
61+
contextWindow: DEFAULT_CONTEXT_TOKENS,
62+
maxTokens: DEFAULT_CONTEXT_TOKENS,
63+
} as Model<Api>);
64+
}
65+
2266
export function buildInlineProviderModels(
2367
providers: Record<string, InlineProviderConfig>,
2468
): InlineModelEntry[] {
@@ -85,6 +129,17 @@ export function resolveModel(
85129
modelRegistry,
86130
};
87131
}
132+
// Codex gpt-5.3 forward-compat fallback must be checked BEFORE the generic providerCfg fallback.
133+
// Otherwise, if cfg.models.providers["openai-codex"] is configured, the generic fallback fires
134+
// with api: "openai-responses" instead of the correct "openai-codex-responses".
135+
const codexForwardCompat = resolveOpenAICodexGpt53FallbackModel(
136+
provider,
137+
modelId,
138+
modelRegistry,
139+
);
140+
if (codexForwardCompat) {
141+
return { model: codexForwardCompat, authStorage, modelRegistry };
142+
}
88143
const providerCfg = providers[provider];
89144
if (providerCfg || modelId.startsWith("mock-")) {
90145
const fallbackModel: Model<Api> = normalizeModelCompat({

0 commit comments

Comments (0)