From 461d61adfe38b184f1a729655fee7b85e259d99e Mon Sep 17 00:00:00 2001 From: Gerome-Elassaad Date: Mon, 15 Dec 2025 15:15:12 +1100 Subject: [PATCH 1/6] Add Claude Opus 4.5 model to Anthropic provider --- app/lib/modules/llm/providers/anthropic.ts | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/app/lib/modules/llm/providers/anthropic.ts b/app/lib/modules/llm/providers/anthropic.ts index 1e105115..34f8272e 100644 --- a/app/lib/modules/llm/providers/anthropic.ts +++ b/app/lib/modules/llm/providers/anthropic.ts @@ -16,8 +16,17 @@ export default class AnthropicProvider extends BaseProvider { staticModels: ModelInfo[] = [ /* * Essential fallback models - only the most stable/reliable ones - * Claude Sonnet 4.5: 200k context, 64k output, best balance of intelligence and speed + * Claude Opus 4.5: 200k context, 64k output, maximum intelligence with practical performance */ + { + name: 'claude-opus-4-5-20251101', + label: 'Claude Opus 4.5', + provider: 'Anthropic', + maxTokenAllowed: 200000, + maxCompletionTokens: 64000, + }, + + // Claude Sonnet 4.5: 200k context, 64k output, best balance of intelligence and speed { name: 'claude-sonnet-4-5-20250929', label: 'Claude Sonnet 4.5', @@ -94,7 +103,9 @@ export default class AnthropicProvider extends BaseProvider { // Determine completion token limits based on specific model let maxCompletionTokens = 128000; // default for older Claude 3 models - if (m.id?.includes('claude-sonnet-4-5') || m.id?.includes('claude-haiku-4-5')) { + if (m.id?.includes('claude-opus-4-5')) { + maxCompletionTokens = 64000; // Claude Opus 4.5: 64K output limit + } else if (m.id?.includes('claude-sonnet-4-5') || m.id?.includes('claude-haiku-4-5')) { maxCompletionTokens = 64000; // Claude 4.5 Sonnet/Haiku: 64K output limit } else if (m.id?.includes('claude-opus-4-1') || m.id?.includes('claude-opus-4')) { maxCompletionTokens = 32000; // Claude 4 Opus: 32K output limit From 5e927669a31612b9942c0c314fef96a050b3032d Mon Sep 17 
00:00:00 2001 From: Gerome-Elassaad Date: Mon, 15 Dec 2025 15:16:21 +1100 Subject: [PATCH 2/6] Add Claude Opus 4.5 model to OpenRouter provider --- app/lib/modules/llm/providers/open-router.ts | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/app/lib/modules/llm/providers/open-router.ts b/app/lib/modules/llm/providers/open-router.ts index 0e034776..a3ce7e56 100644 --- a/app/lib/modules/llm/providers/open-router.ts +++ b/app/lib/modules/llm/providers/open-router.ts @@ -30,8 +30,17 @@ export default class OpenRouterProvider extends BaseProvider { staticModels: ModelInfo[] = [ /* * Essential fallback models - only the most stable/reliable ones - * Claude Sonnet 4.5 via OpenRouter: 200k context + * Claude Opus 4.5 via OpenRouter: 200k context, maximum intelligence */ + { + name: 'anthropic/claude-opus-4-5', + label: 'Claude Opus 4.5', + provider: 'OpenRouter', + maxTokenAllowed: 200000, + maxCompletionTokens: 64000, + }, + + // Claude Sonnet 4.5 via OpenRouter: 200k context { name: 'anthropic/claude-sonnet-4-5', label: 'Claude Sonnet 4.5', From 8782bb8589be637c6f7d4413104a4faa9621ba0c Mon Sep 17 00:00:00 2001 From: Gerome-Elassaad Date: Mon, 15 Dec 2025 15:17:40 +1100 Subject: [PATCH 3/6] Add Claude 4.5 models (Opus, Sonnet, Haiku) to Amazon Bedrock provider --- .../modules/llm/providers/amazon-bedrock.ts | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/app/lib/modules/llm/providers/amazon-bedrock.ts b/app/lib/modules/llm/providers/amazon-bedrock.ts index 778093e3..cabeb754 100644 --- a/app/lib/modules/llm/providers/amazon-bedrock.ts +++ b/app/lib/modules/llm/providers/amazon-bedrock.ts @@ -21,6 +21,27 @@ export default class AmazonBedrockProvider extends BaseProvider { }; staticModels: ModelInfo[] = [ + { + name: 'anthropic.claude-opus-4-5-20251101-v1:0', + label: 'Claude Opus 4.5 (Bedrock)', + provider: 'AmazonBedrock', + maxTokenAllowed: 200000, + maxCompletionTokens: 64000, + }, + { + name: 
'anthropic.claude-sonnet-4-5-20250929-v1:0', + label: 'Claude Sonnet 4.5 (Bedrock)', + provider: 'AmazonBedrock', + maxTokenAllowed: 200000, + maxCompletionTokens: 64000, + }, + { + name: 'anthropic.claude-haiku-4-5-20251001-v1:0', + label: 'Claude Haiku 4.5 (Bedrock)', + provider: 'AmazonBedrock', + maxTokenAllowed: 200000, + maxCompletionTokens: 64000, + }, { name: 'anthropic.claude-3-5-sonnet-20241022-v2:0', label: 'Claude 3.5 Sonnet v2 (Bedrock)', From b1591b03a80597c0d20af701e00b689f3cd71f8c Mon Sep 17 00:00:00 2001 From: Gerome-Elassaad Date: Mon, 15 Dec 2025 15:22:25 +1100 Subject: [PATCH 4/6] Add GPT-5.2 models (Pro, Thinking, Instant) to OpenAI provider --- app/lib/modules/llm/providers/openai.ts | 39 +++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/app/lib/modules/llm/providers/openai.ts b/app/lib/modules/llm/providers/openai.ts index 5301ae41..a88a8e84 100644 --- a/app/lib/modules/llm/providers/openai.ts +++ b/app/lib/modules/llm/providers/openai.ts @@ -16,8 +16,35 @@ export default class OpenAIProvider extends BaseProvider { staticModels: ModelInfo[] = [ /* * Essential fallback models - only the most stable/reliable ones - * GPT-5.1: 128k context, 16k output limit (best for coding and agentic tasks) + * GPT-5.2 Pro: 400k context, 128k output, highest accuracy and quality */ + { + name: 'gpt-5.2-pro', + label: 'GPT-5.2 Pro', + provider: 'OpenAI', + maxTokenAllowed: 400000, + maxCompletionTokens: 128000, + }, + + // GPT-5.2 Thinking: 400k context, 128k output, for complex reasoning and coding + { + name: 'gpt-5.2-thinking', + label: 'GPT-5.2 Thinking', + provider: 'OpenAI', + maxTokenAllowed: 400000, + maxCompletionTokens: 128000, + }, + + // GPT-5.2 Instant: 400k context, 128k output, optimized for speed + { + name: 'gpt-5.2-instant', + label: 'GPT-5.2 Instant', + provider: 'OpenAI', + maxTokenAllowed: 400000, + maxCompletionTokens: 128000, + }, + + // GPT-5.1: 128k context, 16k output limit (best for coding and 
agentic tasks) { name: 'gpt-5.1', label: 'GPT-5.1', @@ -112,6 +139,12 @@ export default class OpenAIProvider extends BaseProvider { // OpenAI provides context_length in their API response if (m.context_length) { contextWindow = m.context_length; + } else if (m.id?.includes('gpt-5.2')) { + contextWindow = 400000; // GPT-5.2 has 400k context + } else if (m.id?.includes('gpt-5.1')) { + contextWindow = 128000; // GPT-5.1 has 128k context + } else if (m.id?.includes('gpt-5')) { + contextWindow = 128000; // Other GPT-5 models have 128k context } else if (m.id?.includes('gpt-4o')) { contextWindow = 128000; // GPT-4o has 128k context } else if (m.id?.includes('gpt-4-turbo') || m.id?.includes('gpt-4-1106')) { @@ -135,6 +168,8 @@ export default class OpenAIProvider extends BaseProvider { maxCompletionTokens = 32000; // Other o1 models: 32K limit } else if (m.id?.includes('o3') || m.id?.includes('o4')) { maxCompletionTokens = 100000; // o3/o4 models: 100K output limit + } else if (m.id?.includes('gpt-5.2')) { + maxCompletionTokens = 128000; // GPT-5.2: 128K output limit } else if (m.id?.includes('gpt-5.1')) { maxCompletionTokens = 16384; // GPT-5.1: 16K output limit } else if (m.id?.includes('gpt-5-mini')) { @@ -155,7 +190,7 @@ export default class OpenAIProvider extends BaseProvider { name: m.id, label: `${m.id} (${Math.floor(contextWindow / 1000)}k context)`, provider: this.name, - maxTokenAllowed: Math.min(contextWindow, 128000), // Cap at 128k for safety + maxTokenAllowed: Math.min(contextWindow, 400000), // Cap at 400k for safety maxCompletionTokens, }; }); From 7e54110e9c19c58caff7f9ac6b0bd424a67da164 Mon Sep 17 00:00:00 2001 From: Gerome-Elassaad Date: Mon, 15 Dec 2025 15:23:49 +1100 Subject: [PATCH 5/6] Add GPT-5.2 models (Pro, Thinking, Instant) to OpenRouter provider --- app/lib/modules/llm/providers/open-router.ts | 27 ++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/app/lib/modules/llm/providers/open-router.ts 
b/app/lib/modules/llm/providers/open-router.ts index a3ce7e56..8ffbacab 100644 --- a/app/lib/modules/llm/providers/open-router.ts +++ b/app/lib/modules/llm/providers/open-router.ts @@ -49,6 +49,33 @@ export default class OpenRouterProvider extends BaseProvider { maxCompletionTokens: 64000, }, + // GPT-5.2 Pro via OpenRouter: 400k context, highest accuracy + { + name: 'openai/gpt-5.2-pro', + label: 'GPT-5.2 Pro', + provider: 'OpenRouter', + maxTokenAllowed: 400000, + maxCompletionTokens: 128000, + }, + + // GPT-5.2 Thinking via OpenRouter: 400k context, complex reasoning + { + name: 'openai/gpt-5.2-thinking', + label: 'GPT-5.2 Thinking', + provider: 'OpenRouter', + maxTokenAllowed: 400000, + maxCompletionTokens: 128000, + }, + + // GPT-5.2 Instant via OpenRouter: 400k context, optimized for speed + { + name: 'openai/gpt-5.2-instant', + label: 'GPT-5.2 Instant', + provider: 'OpenRouter', + maxTokenAllowed: 400000, + maxCompletionTokens: 128000, + }, + // GPT-5.1 via OpenRouter: 128k context { name: 'openai/gpt-5.1', From 2e6cb05915031250f8a24760e4eb2efaf4c5fb14 Mon Sep 17 00:00:00 2001 From: Gerome-Elassaad Date: Mon, 15 Dec 2025 15:27:58 +1100 Subject: [PATCH 6/6] updated default model --- app/utils/constants.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/utils/constants.ts b/app/utils/constants.ts index 1afc3e2b..dc604e85 100644 --- a/app/utils/constants.ts +++ b/app/utils/constants.ts @@ -6,7 +6,7 @@ export const WORK_DIR = `/home/${WORK_DIR_NAME}`; export const MODIFICATIONS_TAG_NAME = 'codinit_file_modifications'; export const MODEL_REGEX = /^\[Model: (.*?)\]\n\n/; export const PROVIDER_REGEX = /\[Provider: (.*?)\]\n\n/; -export const DEFAULT_MODEL = 'claude-3-5-sonnet-latest'; +export const DEFAULT_MODEL = 'claude-sonnet-4-5-20250929'; export const PROMPT_COOKIE_KEY = 'cachedPrompt'; export const TOOL_EXECUTION_APPROVAL = {