Skip to content

Commit 3baf93f

Browse files
committed
feat(cli): add runtime model discovery
1 parent d0f8f4d commit 3baf93f

16 files changed

Lines changed: 1231 additions & 21 deletions

packages/cli/src/commands/get.ts

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ import {
66
formatBoard,
77
formatBoardList,
88
formatLabelList,
9+
formatModelList,
910
formatRepository,
1011
formatRepositoryList,
1112
formatTask,
@@ -15,6 +16,7 @@ import {
1516
getOutputFormat,
1617
output,
1718
} from "../output.js";
19+
import { getProvider, normalizeRuntime } from "../providers/registry.js";
1820

1921
type AgentRef = {
2022
id: string;
@@ -149,6 +151,22 @@ export function registerGetCommand(program: Command) {
149151
}
150152
});
151153

154+
getCmd
155+
.command("model")
156+
.description("List available models for a runtime")
157+
.requiredOption("--runtime <runtime>", "Runtime name")
158+
.option("-o, --output <format>", "Output format (json, yaml, text)")
159+
.action(async (opts) => {
160+
const fmt = getOutputFormat(opts.output);
161+
const provider = getProvider(normalizeRuntime(opts.runtime));
162+
if (!provider.listModels) {
163+
console.error(`Model listing is not supported by ${provider.label}.`);
164+
process.exit(1);
165+
}
166+
const models = await provider.listModels();
167+
output(models, fmt, formatModelList, { kind: "model" });
168+
});
169+
152170
getCmd
153171
.command("repo [id]")
154172
.description("Get a repository or list repositories")

packages/cli/src/output.ts

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,19 @@ export function formatAgentList(agents: any[]): string {
9595
return lines.join("\n");
9696
}
9797

98+
export function formatModelList(models: any[]): string {
99+
if (models.length === 0) return "No models found.";
100+
101+
return models
102+
.map((model) => {
103+
const name = model.name && model.name !== model.id ? ` ${model.name}` : "";
104+
const efforts = model.supported_reasoning_efforts?.length ? ` efforts=${model.supported_reasoning_efforts.join(",")}` : "";
105+
const context = model.context_window ? ` context=${model.context_window}` : "";
106+
return ` ${model.id}${name}${context}${efforts}`;
107+
})
108+
.join("\n");
109+
}
110+
98111
export function formatBoardList(boards: any[]): string {
99112
if (boards.length === 0) return "No boards found.";
100113

packages/cli/src/providers/claude.ts

Lines changed: 40 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,17 @@ import { ToolName } from "@agent-kanban/shared";
77
import type { SDKAssistantMessage, SDKMessage, SDKPartialAssistantMessage, SDKUserMessage } from "@anthropic-ai/claude-agent-sdk";
88
import { getSessionMessages, query } from "@anthropic-ai/claude-agent-sdk";
99
import { createLogger } from "../logger.js";
10-
import type { AgentEvent, AgentHandle, AgentProvider, ContentBlock, ExecuteOpts, HistoryEvent, UsageInfo, UsageWindow } from "./types.js";
10+
import type {
11+
AgentEvent,
12+
AgentHandle,
13+
AgentProvider,
14+
ContentBlock,
15+
ExecuteOpts,
16+
HistoryEvent,
17+
RuntimeModel,
18+
UsageInfo,
19+
UsageWindow,
20+
} from "./types.js";
1121
import { availabilityFromUsage, availabilityFromUsageError, parseRetryAfterMs, UsageFetchError } from "./types.js";
1222

1323
const SUBTASK_STATUSES: readonly SubtaskStatus[] = ["completed", "failed", "stopped"] as const;
@@ -387,6 +397,35 @@ export const claudeProvider: AgentProvider = {
387397
}
388398
},
389399

400+
/**
 * Enumerate the models the Claude runtime reports as available.
 * A throwaway SDK query (empty prompt) is created solely to call
 * supportedModels(), and is always closed afterwards.
 */
async listModels(): Promise<RuntimeModel[]> {
  const session = query({
    prompt: "",
    options: {
      cwd: process.cwd(),
      env: process.env as Record<string, string>,
      permissionMode: "bypassPermissions",
      allowDangerouslySkipPermissions: true,
    },
  });
  try {
    const supported = await session.supportedModels();
    // Map each SDK model descriptor to the provider-neutral RuntimeModel shape;
    // missing capability flags default to false.
    const toRuntimeModel = (m: (typeof supported)[number]): RuntimeModel => ({
      id: m.value,
      name: m.displayName,
      description: m.description,
      supports: {
        effort: m.supportsEffort ?? false,
        adaptive_thinking: m.supportsAdaptiveThinking ?? false,
        fast_mode: m.supportsFastMode ?? false,
        auto_mode: m.supportsAutoMode ?? false,
      },
      supported_reasoning_efforts: m.supportedEffortLevels,
    });
    return supported.map(toRuntimeModel);
  } finally {
    session.close();
  }
},
428+
390429
execute(opts: ExecuteOpts): Promise<AgentHandle> {
391430
const systemPrompt = opts.systemPromptFile ? readFileSync(opts.systemPromptFile, "utf-8") : undefined;
392431
const abortController = new AbortController();

packages/cli/src/providers/codex.ts

Lines changed: 53 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,26 @@
11
import { execSync } from "node:child_process";
2-
import { readdirSync, readFileSync } from "node:fs";
2+
import { existsSync, readdirSync, readFileSync } from "node:fs";
33
import { homedir } from "node:os";
44
import { join } from "node:path";
55
import type { BashArgs, ReadArgs } from "@agent-kanban/shared";
66
import { ToolName } from "@agent-kanban/shared";
77
import { Codex, type ThreadEvent } from "@openai/codex-sdk";
8-
import type { AgentEvent, AgentHandle, AgentProvider, ContentBlock, ExecuteOpts, HistoryEvent, UsageInfo, UsageWindow } from "./types.js";
8+
import type {
9+
AgentEvent,
10+
AgentHandle,
11+
AgentProvider,
12+
ContentBlock,
13+
ExecuteOpts,
14+
HistoryEvent,
15+
RuntimeModel,
16+
UsageInfo,
17+
UsageWindow,
18+
} from "./types.js";
919
import { availabilityFromUsage, availabilityFromUsageError, parseRetryAfterMs, UsageFetchError } from "./types.js";
1020

1121
const AUTH_PATH = join(homedir(), ".codex", "auth.json");
1222
const CODEX_SESSIONS_DIR = join(homedir(), ".codex", "sessions");
23+
const CODEX_MODELS_CACHE_PATH = join(homedir(), ".codex", "models_cache.json");
1324
const USAGE_API = "https://chatgpt.com/backend-api/wham/usage";
1425

1526
function readAccessToken(): string | null {
@@ -78,6 +89,39 @@ function resolveCodexModel(opts: ExecuteOpts): string | undefined {
7889
return opts.model;
7990
}
8091

92+
type CodexCachedModel = {
93+
slug: string;
94+
display_name?: string;
95+
description?: string;
96+
visibility?: string;
97+
priority?: number;
98+
context_window?: number;
99+
max_context_window?: number;
100+
supported_reasoning_levels?: { effort: string }[];
101+
default_reasoning_level?: string;
102+
support_verbosity?: boolean;
103+
};
104+
105+
function readCodexModelCache(): CodexCachedModel[] {
106+
if (!existsSync(CODEX_MODELS_CACHE_PATH)) throw new Error("Codex models cache not found; start Codex CLI once to populate it");
107+
const data = JSON.parse(readFileSync(CODEX_MODELS_CACHE_PATH, "utf-8")) as { models?: CodexCachedModel[] };
108+
return data.models ?? [];
109+
}
110+
111+
function normalizeCodexCachedModel(model: CodexCachedModel): RuntimeModel {
112+
return {
113+
id: model.slug,
114+
name: model.display_name,
115+
description: model.description,
116+
context_window: model.context_window,
117+
supports: {
118+
verbosity: model.support_verbosity ?? false,
119+
},
120+
supported_reasoning_efforts: model.supported_reasoning_levels?.map((level) => level.effort),
121+
default_reasoning_effort: model.default_reasoning_level,
122+
};
123+
}
124+
81125
/** Map a single Codex thread event to an AgentEvent (or null to skip). */
82126
export function mapThreadEvent(event: ThreadEvent, model = "o3"): AgentEvent | null {
83127
switch (event.type) {
@@ -218,6 +262,13 @@ export const codexProvider: AgentProvider = {
218262
}
219263
},
220264

265+
/**
 * List Codex models from the CLI's local cache: entries marked hidden are
 * dropped and the rest are ordered by ascending priority (missing priority
 * sorts as 0).
 */
async listModels(): Promise<RuntimeModel[]> {
  const visible = readCodexModelCache().filter((entry) => entry.visibility !== "hide");
  // filter() returned a fresh array, so sorting in place is safe.
  visible.sort((left, right) => (left.priority ?? 0) - (right.priority ?? 0));
  return visible.map((entry) => normalizeCodexCachedModel(entry));
},
271+
221272
async execute(opts: ExecuteOpts): Promise<AgentHandle> {
222273
const model = resolveCodexModel(opts) ?? "o3";
223274
let resumeToken: string | undefined = opts.resumeToken;

packages/cli/src/providers/copilot.ts

Lines changed: 32 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,17 @@ import { ToolName } from "@agent-kanban/shared";
55
import type { CopilotSession, SessionEvent } from "@github/copilot-sdk";
66
import { approveAll, CopilotClient } from "@github/copilot-sdk";
77
import { createLogger } from "../logger.js";
8-
import type { AgentEvent, AgentHandle, AgentProvider, ContentBlock, ExecuteOpts, HistoryEvent, UsageInfo, UsageWindow } from "./types.js";
8+
import type {
9+
AgentEvent,
10+
AgentHandle,
11+
AgentProvider,
12+
ContentBlock,
13+
ExecuteOpts,
14+
HistoryEvent,
15+
RuntimeModel,
16+
UsageInfo,
17+
UsageWindow,
18+
} from "./types.js";
919
import { availabilityFromUsage, availabilityFromUsageError, parseRetryAfterMs, UsageFetchError } from "./types.js";
1020

1121
const logger = createLogger("copilot");
@@ -286,6 +296,27 @@ export const copilotProvider: AgentProvider = {
286296
}
287297
},
288298

299+
/**
 * Query the Copilot SDK for the available models using the logged-in user's
 * credentials. The client is started for this call and stopped afterwards,
 * even when listing fails.
 */
async listModels(): Promise<RuntimeModel[]> {
  const client = new CopilotClient({ useLoggedInUser: true });
  await client.start();
  try {
    const raw = await client.listModels();
    return raw.map((entry) => {
      const { supports, limits } = entry.capabilities;
      return {
        id: entry.id,
        name: entry.name,
        context_window: limits.max_context_window_tokens,
        supports: {
          vision: supports.vision,
          reasoning_effort: supports.reasoningEffort,
        },
        supported_reasoning_efforts: entry.supportedReasoningEfforts,
        default_reasoning_effort: entry.defaultReasoningEffort,
      };
    });
  } finally {
    await client.stop();
  }
},
319+
289320
async execute(opts: ExecuteOpts): Promise<AgentHandle> {
290321
const systemPrompt = opts.systemPromptFile ? readFileSync(opts.systemPromptFile, "utf-8") : undefined;
291322

0 commit comments

Comments
 (0)