3 files changed: 13 additions, 17 deletions (changed paths include src/main/presenter/llmProviderPresenter/providers).
Dependency manifest:

@@ -68,7 +68,7 @@
     "@electron-toolkit/utils": "^4.0.0",
     "@google/genai": "^1.30.0",
     "@jxa/run": "^1.4.0",
-    "@modelcontextprotocol/sdk": "^1.22.0",
+    "@modelcontextprotocol/sdk": "^1.25.1",
     "axios": "^1.13.2",
     "better-sqlite3-multiple-ciphers": "12.4.1",
     "cheerio": "^1.1.2",
OpenAICompatibleProvider:

@@ -47,13 +47,11 @@ const OPENAI_REASONING_MODELS = [
   'gpt-5-nano',
   'gpt-5-chat'
 ]
-const OPENAI_IMAGE_GENERATION_MODELS = [
-  'gpt-4o-all',
-  'gpt-4o-image',
-  'gpt-image-1',
-  'dall-e-3',
-  'dall-e-2'
-]
+const OPENAI_IMAGE_GENERATION_MODELS = ['gpt-4o-all', 'gpt-4o-image']
+const OPENAI_IMAGE_GENERATION_MODEL_PREFIXES = ['dall-e-', 'gpt-image-']
+const isOpenAIImageGenerationModel = (modelId: string): boolean =>
+  OPENAI_IMAGE_GENERATION_MODELS.includes(modelId) ||
+  OPENAI_IMAGE_GENERATION_MODEL_PREFIXES.some((prefix) => modelId.startsWith(prefix))

 // Add supported image size constants
 const SUPPORTED_IMAGE_SIZES = {
@@ -1524,7 +1522,7 @@ export class OpenAICompatibleProvider extends BaseLLMProvider {
     if (!this.isInitialized) throw new Error('Provider not initialized')
     if (!modelId) throw new Error('Model ID is required')

-    if (OPENAI_IMAGE_GENERATION_MODELS.includes(modelId)) {
+    if (isOpenAIImageGenerationModel(modelId)) {
       yield* this.handleImgGeneration(messages, modelId)
     } else {
       yield* this.handleChatCompletion(
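
The two hunks above replace the exact-match model list with a short exact list plus prefix matching. Below is a minimal standalone sketch of that routing, using the constants and helper from the diff; the sample model IDs fed to the loop (for instance 'gpt-image-1-mini') are illustrative assumptions, not taken from the change:

// Classifier as introduced in the diff above.
const OPENAI_IMAGE_GENERATION_MODELS = ['gpt-4o-all', 'gpt-4o-image']
const OPENAI_IMAGE_GENERATION_MODEL_PREFIXES = ['dall-e-', 'gpt-image-']

const isOpenAIImageGenerationModel = (modelId: string): boolean =>
  OPENAI_IMAGE_GENERATION_MODELS.includes(modelId) ||
  OPENAI_IMAGE_GENERATION_MODEL_PREFIXES.some((prefix) => modelId.startsWith(prefix))

// Illustrative model IDs (assumed for this sketch, not part of the diff):
for (const id of ['dall-e-3', 'gpt-image-1', 'gpt-image-1-mini', 'gpt-4o-image', 'gpt-5-nano']) {
  console.log(id, isOpenAIImageGenerationModel(id))
}
// dall-e-3         -> true  (matches prefix 'dall-e-')
// gpt-image-1      -> true  (matches prefix 'gpt-image-')
// gpt-image-1-mini -> true  (prefix match; the old exact-match list would have missed it)
// gpt-4o-image     -> true  (exact match)
// gpt-5-nano       -> false (falls through to handleChatCompletion in the provider)

The practical effect is that 'dall-e-' and 'gpt-image-' variants route to image generation without each model ID having to be enumerated.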
OpenAIResponsesProvider:

@@ -39,13 +39,11 @@ const OPENAI_REASONING_MODELS = [
   'gpt-5-nano',
   'gpt-5-chat'
 ]
-const OPENAI_IMAGE_GENERATION_MODELS = [
-  'gpt-4o-all',
-  'gpt-4o-image',
-  'gpt-image-1',
-  'dall-e-3',
-  'dall-e-2'
-]
+const OPENAI_IMAGE_GENERATION_MODELS = ['gpt-4o-all', 'gpt-4o-image']
+const OPENAI_IMAGE_GENERATION_MODEL_PREFIXES = ['dall-e-', 'gpt-image-']
+const isOpenAIImageGenerationModel = (modelId: string): boolean =>
+  OPENAI_IMAGE_GENERATION_MODELS.includes(modelId) ||
+  OPENAI_IMAGE_GENERATION_MODEL_PREFIXES.some((prefix) => modelId.startsWith(prefix))

 // Add supported image size constants
 const SUPPORTED_IMAGE_SIZES = {
@@ -303,7 +301,7 @@ export class OpenAIResponsesProvider extends BaseLLMProvider {
     if (!this.isInitialized) throw new Error('Provider not initialized')
     if (!modelId) throw new Error('Model ID is required')

-    if (OPENAI_IMAGE_GENERATION_MODELS.includes(modelId)) {
+    if (isOpenAIImageGenerationModel(modelId)) {
       yield* this.handleImgGeneration(messages, modelId)
     } else {
       yield* this.handleChatCompletion(
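
The OpenAIResponsesProvider change mirrors the OpenAICompatibleProvider one, so the constants and helper now exist in two copies. A possible follow-up, not part of this diff, would be to hoist the classifier into a shared module; the module name and location below are hypothetical assumptions:

// imageGenerationModels.ts — hypothetical shared module (name and path are assumptions)
export const OPENAI_IMAGE_GENERATION_MODELS = ['gpt-4o-all', 'gpt-4o-image']
export const OPENAI_IMAGE_GENERATION_MODEL_PREFIXES = ['dall-e-', 'gpt-image-']

export const isOpenAIImageGenerationModel = (modelId: string): boolean =>
  OPENAI_IMAGE_GENERATION_MODELS.includes(modelId) ||
  OPENAI_IMAGE_GENERATION_MODEL_PREFIXES.some((prefix) => modelId.startsWith(prefix))

// Both providers could then import isOpenAIImageGenerationModel from this single
// definition instead of maintaining duplicate copies.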