add anthropic opus latest by Gerome-Elassaad · Pull Request #43 · codinit-dev/codinit-dev · GitHub
Merged
21 changes: 21 additions & 0 deletions app/lib/modules/llm/providers/amazon-bedrock.ts
@@ -21,6 +21,27 @@ export default class AmazonBedrockProvider extends BaseProvider {
};

staticModels: ModelInfo[] = [
{
name: 'anthropic.claude-opus-4-5-20251101-v1:0',
label: 'Claude Opus 4.5 (Bedrock)',
provider: 'AmazonBedrock',
maxTokenAllowed: 200000,
maxCompletionTokens: 64000,
},
{
name: 'anthropic.claude-sonnet-4-5-20250929-v1:0',
label: 'Claude Sonnet 4.5 (Bedrock)',
provider: 'AmazonBedrock',
maxTokenAllowed: 200000,
maxCompletionTokens: 64000,
},
{
name: 'anthropic.claude-haiku-4-5-20251001-v1:0',
label: 'Claude Haiku 4.5 (Bedrock)',
provider: 'AmazonBedrock',
maxTokenAllowed: 200000,
maxCompletionTokens: 64000,
},
{
name: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
label: 'Claude 3.5 Sonnet v2 (Bedrock)',
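For context, the entries added throughout this PR all follow the same ModelInfo shape. A minimal sketch, assuming the interface roughly matches the fields visible in the diff (the real definition lives elsewhere in the repo and may carry additional fields):

// Hypothetical sketch of the ModelInfo shape assumed by the entries above.
// Field names are taken from the diff; the actual interface may differ.
interface ModelInfo {
  name: string; // provider-specific model id
  label: string; // human-readable name shown in the UI
  provider: string; // provider key, e.g. 'AmazonBedrock'
  maxTokenAllowed: number; // total context window
  maxCompletionTokens: number; // output/completion token limit
}

const opusOnBedrock: ModelInfo = {
  name: 'anthropic.claude-opus-4-5-20251101-v1:0',
  label: 'Claude Opus 4.5 (Bedrock)',
  provider: 'AmazonBedrock',
  maxTokenAllowed: 200000,
  maxCompletionTokens: 64000,
};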
15 changes: 13 additions & 2 deletions app/lib/modules/llm/providers/anthropic.ts
@@ -16,8 +16,17 @@ export default class AnthropicProvider extends BaseProvider {
staticModels: ModelInfo[] = [
/*
* Essential fallback models - only the most stable/reliable ones
* Claude Sonnet 4.5: 200k context, 64k output, best balance of intelligence and speed
* Claude Opus 4.5: 200k context, 64k output, maximum intelligence with practical performance
*/
{
name: 'claude-opus-4-5-20251101',
label: 'Claude Opus 4.5',
provider: 'Anthropic',
maxTokenAllowed: 200000,
maxCompletionTokens: 64000,
},

// Claude Sonnet 4.5: 200k context, 64k output, best balance of intelligence and speed
{
name: 'claude-sonnet-4-5-20250929',
label: 'Claude Sonnet 4.5',
@@ -94,7 +103,9 @@ export default class AnthropicProvider extends BaseProvider {
// Determine completion token limits based on specific model
let maxCompletionTokens = 128000; // default for older Claude 3 models

if (m.id?.includes('claude-sonnet-4-5') || m.id?.includes('claude-haiku-4-5')) {
if (m.id?.includes('claude-opus-4-5')) {
maxCompletionTokens = 64000; // Claude Opus 4.5: 64K output limit
} else if (m.id?.includes('claude-sonnet-4-5') || m.id?.includes('claude-haiku-4-5')) {
maxCompletionTokens = 64000; // Claude 4.5 Sonnet/Haiku: 64K output limit
} else if (m.id?.includes('claude-opus-4-1') || m.id?.includes('claude-opus-4')) {
maxCompletionTokens = 32000; // Claude 4 Opus: 32K output limit
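The branch order in the hunk above matters: because includes('claude-opus-4') would also match Opus 4.5 ids, the new 'claude-opus-4-5' check has to run before the existing Opus 4 / 4.1 check. A standalone sketch of that logic, with the limits taken from the diff:

// Standalone sketch of the branch ordering above (not the code as it appears
// in the provider): the 'claude-opus-4-5' check must come first, since
// includes('claude-opus-4') also matches Opus 4.5 ids.
function completionLimitForClaude(id: string): number {
  if (id.includes('claude-opus-4-5')) {
    return 64000; // Claude Opus 4.5: 64K output limit
  } else if (id.includes('claude-sonnet-4-5') || id.includes('claude-haiku-4-5')) {
    return 64000; // Claude 4.5 Sonnet/Haiku: 64K output limit
  } else if (id.includes('claude-opus-4-1') || id.includes('claude-opus-4')) {
    return 32000; // Claude 4 Opus: 32K output limit
  }
  return 128000; // default for older Claude 3 models
}

// e.g. completionLimitForClaude('claude-opus-4-5-20251101') === 64000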
38 changes: 37 additions & 1 deletion app/lib/modules/llm/providers/open-router.ts
@@ -30,8 +30,17 @@ export default class OpenRouterProvider extends BaseProvider {
staticModels: ModelInfo[] = [
/*
* Essential fallback models - only the most stable/reliable ones
* Claude Sonnet 4.5 via OpenRouter: 200k context
* Claude Opus 4.5 via OpenRouter: 200k context, maximum intelligence
*/
{
name: 'anthropic/claude-opus-4-5',
label: 'Claude Opus 4.5',
provider: 'OpenRouter',
maxTokenAllowed: 200000,
maxCompletionTokens: 64000,
},

// Claude Sonnet 4.5 via OpenRouter: 200k context
{
name: 'anthropic/claude-sonnet-4-5',
label: 'Claude Sonnet 4.5',
@@ -40,6 +49,33 @@
maxCompletionTokens: 64000,
},

// GPT-5.2 Pro via OpenRouter: 400k context, highest accuracy
{
name: 'openai/gpt-5.2-pro',
label: 'GPT-5.2 Pro',
provider: 'OpenRouter',
maxTokenAllowed: 400000,
maxCompletionTokens: 128000,
},

// GPT-5.2 Thinking via OpenRouter: 400k context, complex reasoning
{
name: 'openai/gpt-5.2-thinking',
label: 'GPT-5.2 Thinking',
provider: 'OpenRouter',
maxTokenAllowed: 400000,
maxCompletionTokens: 128000,
},

// GPT-5.2 Instant via OpenRouter: 400k context, optimized for speed
{
name: 'openai/gpt-5.2-instant',
label: 'GPT-5.2 Instant',
provider: 'OpenRouter',
maxTokenAllowed: 400000,
maxCompletionTokens: 128000,
},

// GPT-5.1 via OpenRouter: 128k context
{
name: 'openai/gpt-5.1',
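As an aside on the two limits carried by each entry above: maxTokenAllowed is the total context window, while maxCompletionTokens reserves room for the model's output. A hypothetical illustration (not code from the PR) of how the prompt budget falls out of the two:

// Hypothetical illustration, not part of this PR: the prompt budget is what
// remains of the context window after reserving room for the completion.
function promptBudget(model: { maxTokenAllowed: number; maxCompletionTokens: number }): number {
  return model.maxTokenAllowed - model.maxCompletionTokens;
}

// e.g. for 'openai/gpt-5.2-pro':
// promptBudget({ maxTokenAllowed: 400000, maxCompletionTokens: 128000 }) === 272000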
39 changes: 37 additions & 2 deletions app/lib/modules/llm/providers/openai.ts
@@ -16,8 +16,35 @@ export default class OpenAIProvider extends BaseProvider {
staticModels: ModelInfo[] = [
/*
* Essential fallback models - only the most stable/reliable ones
* GPT-5.1: 128k context, 16k output limit (best for coding and agentic tasks)
* GPT-5.2 Pro: 400k context, 128k output, highest accuracy and quality
*/
{
name: 'gpt-5.2-pro',
label: 'GPT-5.2 Pro',
provider: 'OpenAI',
maxTokenAllowed: 400000,
maxCompletionTokens: 128000,
},

// GPT-5.2 Thinking: 400k context, 128k output, for complex reasoning and coding
{
name: 'gpt-5.2-thinking',
label: 'GPT-5.2 Thinking',
provider: 'OpenAI',
maxTokenAllowed: 400000,
maxCompletionTokens: 128000,
},

// GPT-5.2 Instant: 400k context, 128k output, optimized for speed
{
name: 'gpt-5.2-instant',
label: 'GPT-5.2 Instant',
provider: 'OpenAI',
maxTokenAllowed: 400000,
maxCompletionTokens: 128000,
},

// GPT-5.1: 128k context, 16k output limit (best for coding and agentic tasks)
{
name: 'gpt-5.1',
label: 'GPT-5.1',
@@ -112,6 +139,12 @@ export default class OpenAIProvider extends BaseProvider {
// OpenAI provides context_length in their API response
if (m.context_length) {
contextWindow = m.context_length;
} else if (m.id?.includes('gpt-5.2')) {
contextWindow = 400000; // GPT-5.2 has 400k context
} else if (m.id?.includes('gpt-5.1')) {
contextWindow = 128000; // GPT-5.1 has 128k context
} else if (m.id?.includes('gpt-5')) {
contextWindow = 128000; // Other GPT-5 models have 128k context
} else if (m.id?.includes('gpt-4o')) {
contextWindow = 128000; // GPT-4o has 128k context
} else if (m.id?.includes('gpt-4-turbo') || m.id?.includes('gpt-4-1106')) {
@@ -135,6 +168,8 @@ export default class OpenAIProvider extends BaseProvider {
maxCompletionTokens = 32000; // Other o1 models: 32K limit
} else if (m.id?.includes('o3') || m.id?.includes('o4')) {
maxCompletionTokens = 100000; // o3/o4 models: 100K output limit
} else if (m.id?.includes('gpt-5.2')) {
maxCompletionTokens = 128000; // GPT-5.2: 128K output limit
} else if (m.id?.includes('gpt-5.1')) {
maxCompletionTokens = 16384; // GPT-5.1: 16K output limit
} else if (m.id?.includes('gpt-5-mini')) {
@@ -155,7 +190,7 @@
name: m.id,
label: `${m.id} (${Math.floor(contextWindow / 1000)}k context)`,
provider: this.name,
maxTokenAllowed: Math.min(contextWindow, 128000), // Cap at 128k for safety
maxTokenAllowed: Math.min(contextWindow, 400000), // Cap at 400k for safety
maxCompletionTokens,
};
});
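Putting the openai.ts hunks together: dynamic models prefer the API-reported context_length, fall back to id-based heuristics, and are now capped at 400k rather than 128k. A condensed, standalone sketch of that flow, with the defaults not visible in the diff marked as assumptions:

// Condensed sketch of the dynamic-model sizing above; the fallback defaults
// (8192 context, 4096 completion) are assumptions, not values from the diff.
function sizeOpenAIModel(id: string, contextLength?: number) {
  let contextWindow = 8192; // assumed conservative default

  if (contextLength) {
    contextWindow = contextLength; // OpenAI reports context_length in its API response
  } else if (id.includes('gpt-5.2')) {
    contextWindow = 400000; // GPT-5.2 has 400k context
  } else if (id.includes('gpt-5.1') || id.includes('gpt-5')) {
    contextWindow = 128000; // GPT-5.1 and other GPT-5 models have 128k context
  } else if (id.includes('gpt-4o')) {
    contextWindow = 128000; // GPT-4o has 128k context
  }

  let maxCompletionTokens = 4096; // assumed default
  if (id.includes('gpt-5.2')) {
    maxCompletionTokens = 128000; // GPT-5.2: 128K output limit
  } else if (id.includes('gpt-5.1')) {
    maxCompletionTokens = 16384; // GPT-5.1: 16K output limit
  }

  return {
    maxTokenAllowed: Math.min(contextWindow, 400000), // cap raised from 128k to 400k in this PR
    maxCompletionTokens,
  };
}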
2 changes: 1 addition & 1 deletion app/utils/constants.ts
@@ -6,7 +6,7 @@ export const WORK_DIR = `/home/${WORK_DIR_NAME}`;
export const MODIFICATIONS_TAG_NAME = 'codinit_file_modifications';
export const MODEL_REGEX = /^\[Model: (.*?)\]\n\n/;
export const PROVIDER_REGEX = /\[Provider: (.*?)\]\n\n/;
export const DEFAULT_MODEL = 'claude-3-5-sonnet-latest';
export const DEFAULT_MODEL = 'claude-4-5-sonnet-latest';
export const PROMPT_COOKIE_KEY = 'cachedPrompt';

export const TOOL_EXECUTION_APPROVAL = {
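For reference, DEFAULT_MODEL is the fallback used when a prompt carries no explicit model override matched by MODEL_REGEX. A hypothetical usage sketch (not part of the PR) combining the regex and the new default:

// Hypothetical usage sketch: resolve the model for an incoming prompt,
// falling back to the new default when no [Model: ...] prefix is present.
const MODEL_REGEX = /^\[Model: (.*?)\]\n\n/;
const DEFAULT_MODEL = 'claude-4-5-sonnet-latest';

function resolveModel(message: string): string {
  const match = message.match(MODEL_REGEX);
  return match ? match[1] : DEFAULT_MODEL;
}

// resolveModel('[Model: gpt-5.2-pro]\n\nBuild me a todo app') === 'gpt-5.2-pro'
// resolveModel('Build me a todo app') === 'claude-4-5-sonnet-latest'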