diff --git a/.env.example b/.env.example index 1e1755a..cce0da1 100644 --- a/.env.example +++ b/.env.example @@ -14,6 +14,7 @@ NEXT_PUBLIC_N8N_DEFAULT_WORKFLOW=/webhook/data-engineering-agent NEXT_PUBLIC_N8N_OPENAI_WORKFLOW=/webhook/data-engineering-agent NEXT_PUBLIC_N8N_OLLAMA_WORKFLOW=/webhook/data-engineering-agent NEXT_PUBLIC_N8N_CLAUDE_WORKFLOW=/webhook/data-engineering-agent +NEXT_PUBLIC_N8N_GEMINI_WORKFLOW=/webhook/data-engineering-agent # OpenAI configuration NEXT_PUBLIC_OPENAI_API_KEY=your_openai_key_here @@ -24,6 +25,9 @@ NEXT_PUBLIC_OLLAMA_API_URL=http://localhost:11434 # Claude (Anthropic) configuration NEXT_PUBLIC_ANTHROPIC_API_KEY=your_anthropic_key_here +# Gemini configuration +NEXT_PUBLIC_GEMINI_API_KEY=your_gemini_key_here + # Azure OpenAI configuration NEXT_PUBLIC_AZURE_OPENAI_API_KEY=your_azure_openai_key_here NEXT_PUBLIC_AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/ \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 5699b04..787d583 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,13 +38,15 @@ ARG NEXT_PUBLIC_N8N_DEFAULT_WORKFLOW ARG NEXT_PUBLIC_N8N_OPENAI_WORKFLOW ARG NEXT_PUBLIC_N8N_OLLAMA_WORKFLOW ARG NEXT_PUBLIC_N8N_CLAUDE_WORKFLOW +ARG NEXT_PUBLIC_N8N_GEMINI_WORKFLOW -# OpenAI / Ollama / Claude / Azure +# OpenAI / Ollama / Claude / Azure / Gemini ARG NEXT_PUBLIC_OPENAI_API_KEY ARG NEXT_PUBLIC_OLLAMA_API_URL ARG NEXT_PUBLIC_ANTHROPIC_API_KEY ARG NEXT_PUBLIC_AZURE_OPENAI_API_KEY ARG NEXT_PUBLIC_AZURE_OPENAI_ENDPOINT +ARG NEXT_PUBLIC_GEMINI_API_KEY # Make them ENV so that Next.js sees them during build ENV DATABASE_URL=$DATABASE_URL @@ -58,12 +60,14 @@ ENV NEXT_PUBLIC_N8N_DEFAULT_WORKFLOW=$NEXT_PUBLIC_N8N_DEFAULT_WORKFLOW ENV NEXT_PUBLIC_N8N_OPENAI_WORKFLOW=$NEXT_PUBLIC_N8N_OPENAI_WORKFLOW ENV NEXT_PUBLIC_N8N_OLLAMA_WORKFLOW=$NEXT_PUBLIC_N8N_OLLAMA_WORKFLOW ENV NEXT_PUBLIC_N8N_CLAUDE_WORKFLOW=$NEXT_PUBLIC_N8N_CLAUDE_WORKFLOW +ENV NEXT_PUBLIC_N8N_GEMINI_WORKFLOW=$NEXT_PUBLIC_N8N_GEMINI_WORKFLOW ENV 
NEXT_PUBLIC_OPENAI_API_KEY=$NEXT_PUBLIC_OPENAI_API_KEY ENV NEXT_PUBLIC_OLLAMA_API_URL=$NEXT_PUBLIC_OLLAMA_API_URL ENV NEXT_PUBLIC_ANTHROPIC_API_KEY=$NEXT_PUBLIC_ANTHROPIC_API_KEY ENV NEXT_PUBLIC_AZURE_OPENAI_API_KEY=$NEXT_PUBLIC_AZURE_OPENAI_API_KEY ENV NEXT_PUBLIC_AZURE_OPENAI_ENDPOINT=$NEXT_PUBLIC_AZURE_OPENAI_ENDPOINT +ENV NEXT_PUBLIC_GEMINI_API_KEY=$NEXT_PUBLIC_GEMINI_API_KEY # Copy deps from previous stage COPY --from=deps /app/node_modules ./node_modules diff --git a/README.md b/README.md index 22d72a6..9ef1a7d 100644 --- a/README.md +++ b/README.md @@ -18,13 +18,14 @@ https://github.com/user-attachments/assets/f591bc23-3a19-43eb-9c92-e4b5bb3ba57f

💬 Data Agents, Really!

-

Building a Practical Assistant that empowers Data Engineers to deliver results with speed and efficiency.

+

Data Agent is an agentic AI harnessing GenAI to automate and streamline data engineering workflows. + By delivering complete, well-prepared data requests, it saves time and reduces bottlenecks across teams.

## ✨ Features - 🤖 **Multi-agent collaboration** - Engage with specialized data engineering agents -- 🔄 **Multiple backend support** - Connect to OpenAI, Claude, or Ollama for private deployments +- 🔄 **Multiple backend support** - Connect to OpenAI, Claude, Gemini or Ollama for private deployments - 🔗 **n8n integration** - Use n8n workflows for agent orchestration - 🎯 **Strategy-based approach** - Different strategies for various data engineering tasks - 🌙 **Modern dark UI** - Beautiful, responsive interface inspired by LobeChat @@ -48,8 +49,8 @@ docker-compose up -d 1. Once the containers are running, go to n8n at http://localhost:5678 2. Upload the workflow from the `agents/n8n/conversations` directory 3. Configure your API keys: - - In Docker Compose: update OpenAI/Claude key - - In n8n workflow: click on OpenAI/Claude model block and add your key + - In Docker Compose: update OpenAI/Claude/Gemini key + - In n8n workflow: click on OpenAI/Claude/Gemini model block and add your key - See [n8n documentation](https://docs.n8n.io/integrations/builtin/credentials/openai/#using-api-key) for more details 4. Visit http://localhost:3000 and start interacting with your agents! 
@@ -103,7 +104,7 @@ The application is built with a modern stack: - **Frontend**: Next.js 14 with App Router, TypeScript, Tailwind CSS, Shadcn UI - **State Management**: Zustand for global state - **Orchestration**: n8n for workflow management -- **AI Integration**: OpenAI, Claude, and Ollama support +- **AI Integration**: OpenAI, Claude, Gemini and Ollama support ## 🤝 Contributing diff --git a/data/n8n/conversation/1_workflow.json b/data/n8n/conversation/1_workflow.json index 34cb7f4..bdfb57e 100644 --- a/data/n8n/conversation/1_workflow.json +++ b/data/n8n/conversation/1_workflow.json @@ -9,10 +9,10 @@ "type": "n8n-nodes-base.code", "typeVersion": 2, "position": [ - 680, - 600 + -2260, + 1380 ], - "id": "51fd4bca-973b-4ce1-996d-1fb045415854", + "id": "11ef38c9-f240-42d8-b41e-f7cbd6e5f432", "name": "Process Agent Request" }, { @@ -23,10 +23,10 @@ "type": "n8n-nodes-base.respondToWebhook", "typeVersion": 1.1, "position": [ - 4840, - 40 + 1900, + 820 ], - "id": "add21e35-ee03-4ad5-bfda-76156712f5a6", + "id": "0b32d575-1bb8-460d-8b9c-37b607777f89", "name": "Respond to Webhook" }, { @@ -35,12 +35,12 @@ "destinationFieldName": "context", "options": {} }, - "id": "c53c0b58-4266-44f6-bb90-c3bdd309cd09", + "id": "a8502f19-6f41-48de-800c-72af0808f278", "name": "Aggregate", "type": "n8n-nodes-base.aggregate", "position": [ - 2200, - 180 + -740, + 960 ], "typeVersion": 1, "alwaysOutputData": true @@ -52,12 +52,12 @@ "width": 755, "color": 7 }, - "id": "8ab6d46f-379d-458a-a26f-81dd41c3ec93", + "id": "ccddd2bb-5459-4e83-b39b-5fc1f31d6412", "name": "Sticky Note", "type": "n8n-nodes-base.stickyNote", "position": [ - 1660, - -40 + -1280, + 740 ], "typeVersion": 1 }, @@ -65,12 +65,12 @@ "parameters": { "options": {} }, - "id": "c313b5a9-870a-47d8-958e-871704385ec8", + "id": "20267b49-6e29-460e-87c6-a3082b6d1356", "name": "Chat Memory Manager", "type": "@n8n/n8n-nodes-langchain.memoryManager", "position": [ - 1820, - 180 + -1120, + 960 ], "typeVersion": 1, "alwaysOutputData": true 
@@ -83,10 +83,10 @@ "type": "@n8n/n8n-nodes-langchain.memoryBufferWindow", "typeVersion": 1.3, "position": [ - 2060, - 840 + -880, + 1620 ], - "id": "a267ebba-a09a-49f0-98cc-4ebceb003ba7", + "id": "a1b797c4-9904-422a-a955-547ca574c619", "name": "Window Buffer Memory" }, { @@ -99,10 +99,10 @@ "type": "@n8n/n8n-nodes-langchain.chatTrigger", "typeVersion": 1.1, "position": [ - 1220, - -20 + -1720, + 760 ], - "id": "baf20a9f-1cf6-4a5f-ae33-9509ba6cd8e6", + "id": "859a839a-3a40-4af2-a7a1-a6d781dfddeb", "name": "Chat Trigger", "webhookId": "b2f479b7-83af-4ddf-a494-621127b5f96c" }, @@ -122,12 +122,12 @@ ] } }, - "id": "50bac42e-9077-4d6e-87b7-0a571bbb7304", + "id": "81258ae9-095d-4c22-86a2-fe7e0bfac38a", "name": "Insert Chat", "type": "@n8n/n8n-nodes-langchain.memoryManager", "position": [ - 4620, - -660 + 1680, + 120 ], "typeVersion": 1, "alwaysOutputData": true @@ -139,12 +139,12 @@ "width": 441, "color": 6 }, - "id": "3f86f96c-806d-47da-ba7e-3d95901a627d", + "id": "2678aab1-6809-4a44-8399-22c77ce08568", "name": "Sticky Note1", "type": "n8n-nodes-base.stickyNote", "position": [ - 1900, - 760 + -1040, + 1540 ], "typeVersion": 1 }, @@ -155,12 +155,12 @@ "width": 487.4293487597613, "color": 6 }, - "id": "cf35624a-130d-4846-a043-592e247f7bfe", + "id": "ea143540-c79e-4796-90c3-a3ce4b6b2526", "name": "Sticky Note2", "type": "n8n-nodes-base.stickyNote", "position": [ - 1680, - 0 + -1260, + 780 ], "typeVersion": 1 }, @@ -184,13 +184,13 @@ } } }, - "id": "44bbf86e-0f31-494e-9b86-cda9113425ca", + "id": "112abbe2-716e-494e-afc0-a3ab53a64536", "name": "start", "type": "n8n-nodes-base.webhook", "typeVersion": 1, "position": [ - -360, - 840 + -3300, + 1620 ], "webhookId": "87f6c8c3-55de-4661-8fd9-2727490b076f" }, @@ -202,10 +202,10 @@ "type": "n8n-nodes-base.readWriteFile", "typeVersion": 1, "position": [ - 20, - 500 + -2920, + 1280 ], - "id": "36ed94dd-dc41-4395-a213-7c094803cbff", + "id": "8e05a504-fcd1-4319-9aaf-8db221719133", "name": "Read/Write Files from Disk", 
"alwaysOutputData": true }, @@ -217,10 +217,10 @@ "type": "n8n-nodes-base.extractFromFile", "typeVersion": 1, "position": [ - 220, - 500 + -2720, + 1280 ], - "id": "8b7cdb0e-42ec-4edd-8beb-09379099e31d", + "id": "8561e860-8077-45af-8d4e-b821054e59bf", "name": "extract" }, { @@ -325,6 +325,31 @@ }, "renameOutput": true, "outputKey": "defaultoopenai" + }, + { + "conditions": { + "options": { + "caseSensitive": true, + "leftValue": "", + "typeValidation": "strict", + "version": 2 + }, + "conditions": [ + { + "id": "5897cde4-1838-4a03-bea9-232d49f6df88", + "leftValue": "={{ $('start').item.json.body.modelType }}", + "rightValue": "gemini", + "operator": { + "type": "string", + "operation": "equals", + "name": "filter.operator.equals" + } + } + ], + "combinator": "and" + }, + "renameOutput": true, + "outputKey": "gemini" } ] }, @@ -333,10 +358,10 @@ "type": "n8n-nodes-base.switch", "typeVersion": 3.2, "position": [ - 2620, - 160 + -320, + 940 ], - "id": "7e4cb809-1251-4272-bcbf-5465db48ef6e", + "id": "8667bd1d-9741-4bfc-b32f-c85d519cf1fb", "name": "Switch" }, { @@ -348,10 +373,10 @@ "type": "n8n-nodes-base.stickyNote", "typeVersion": 1, "position": [ - 3100, - -820 + 160, + -40 ], - "id": "abb4e80e-0ae2-4662-8e39-ddd5f4da7bef", + "id": "b83b7b6a-9b40-4376-9032-150a7bf91c40", "name": "Sticky Note3" }, { @@ -367,17 +392,11 @@ "type": "@n8n/n8n-nodes-langchain.lmChatAnthropic", "typeVersion": 1.3, "position": [ - 3280, - 380 + 340, + 1160 ], - "id": "92aa9948-8ac8-4674-b54f-3d1ebfc3346b", - "name": "Anthropic Chat Model1", - "credentials": { - "anthropicApi": { - "id": "0mssuZzBnJ7BB9lE", - "name": "Anthropic account" - } - } + "id": "a8208a3c-bd7b-41e4-96c5-8eed27d5ba7f", + "name": "Anthropic Chat Model1" }, { "parameters": { @@ -386,10 +405,10 @@ "type": "@n8n/n8n-nodes-langchain.outputParserStructured", "typeVersion": 1.2, "position": [ - 3460, - 380 + 520, + 1160 ], - "id": "9c5d7ac2-75ee-43f1-80b5-9f0a737d85c3", + "id": "1dde314f-973a-445f-a745-a66244d689f9", "name": 
"Structured Output Parser" }, { @@ -402,33 +421,33 @@ "type": "n8n-nodes-base.stickyNote", "typeVersion": 1, "position": [ - 3100, - -20 + 160, + 760 ], - "id": "1696fc1f-18ce-4072-9216-9ca6f17cec0f", + "id": "9c7ea518-de27-4918-bff7-b43a7b1b3c50", "name": "Sticky Note4" }, { "parameters": { "model": { "__rl": true, - "value": "o3-mini", + "value": "gpt-3.5-turbo", "mode": "list", - "cachedResultName": "o3-mini" + "cachedResultName": "gpt-3.5-turbo" }, "options": {} }, "type": "@n8n/n8n-nodes-langchain.lmChatOpenAi", "typeVersion": 1.2, "position": [ - 3260, - -420 + 320, + 360 ], - "id": "7ec4f1a7-db6e-4325-ada7-766365eb34eb", + "id": "791f4907-7d8f-46e6-9e0c-6a0e58d82fbd", "name": "OpenAI Chat Model", "credentials": { "openAiApi": { - "id": "J3VsZCs00ThQuWaN", + "id": "WnhCFsAFZPHsGqpO", "name": "OpenAi account" } } @@ -440,10 +459,10 @@ "type": "@n8n/n8n-nodes-langchain.outputParserStructured", "typeVersion": 1.2, "position": [ - 3460, - -420 + 520, + 360 ], - "id": "0dcc1e35-834d-4539-b41c-bba07529fb35", + "id": "c590fdb8-43e3-40b2-8a19-8d5d0a51541f", "name": "Structured Output Parser1" }, { @@ -462,10 +481,10 @@ "type": "@n8n/n8n-nodes-langchain.chainLlm", "typeVersion": 1.5, "position": [ - 3280, - -680 + 340, + 100 ], - "id": "077761ce-13f5-469f-b0d4-bf8b4429c266", + "id": "219a8138-1426-49fc-b4e5-04081657e988", "name": "OpenAI LLM Chain", "onError": "continueErrorOutput" }, @@ -485,10 +504,10 @@ "type": "@n8n/n8n-nodes-langchain.chainLlm", "typeVersion": 1.5, "position": [ - 3280, - 120 + 340, + 900 ], - "id": "20aaa476-d96d-4f13-b6cb-5c20e96d0af0", + "id": "a7ef8f75-d08c-4206-b35b-960be840b530", "name": "Claude LLM Chain", "onError": "continueErrorOutput" }, @@ -499,10 +518,10 @@ "type": "@n8n/n8n-nodes-langchain.outputParserStructured", "typeVersion": 1.2, "position": [ - 3480, - 1140 + 540, + 1920 ], - "id": "9886b0cf-a866-497a-9c3b-098b35c2cf9f", + "id": "305e945a-0bdc-4e0e-aa7c-10baaffd4bea", "name": "Structured Output Parser2" }, { @@ -515,12 +534,83 
@@ "type": "n8n-nodes-base.stickyNote", "typeVersion": 1, "position": [ - 3100, - 760 + 160, + 1540 ], - "id": "23e8cb29-c1fe-459b-8b4d-83d9796015b0", + "id": "7c48b387-6001-4c4a-8409-bb11cb5948b4", "name": "Sticky Note5" }, + { + "parameters": { + "content": "## Gemini\nAdd your Google API Key credentials to the Chat Model.", + "height": 620, + "width": 620 + }, + "type": "n8n-nodes-base.stickyNote", + "typeVersion": 1, + "position": [ + 160, + 2340 + ], + "id": "a6b53704-2dbc-4f77-acc4-815345dba169", + "name": "Sticky Note7" + }, + { + "parameters": { + "promptType": "define", + "text": "={{ $('Process Agent Request').item.json.prompt }}", + "hasOutputParser": true, + "messages": { + "messageValues": [ + { + "message": "=To maintain context and fully understand the user's question, always review the previous conversation between you and him before providing an answer. Always ensure generating output without a JSON Parsing ERROR. Avoid ASCII. Avoid embedded markdown code fences. Avoid unescaped newlines. 
To represent a newline within the string's value, it must be escaped as \\\\n.\\nThis is the previous conversation: {{ $('Aggregate').item.json[\"context\"].map(m => ` Human: ${m.human || 'undefined'} AI Assistant: ${m.ai || 'undefined'} `).join('') }}" + } + ] + } + }, + "type": "@n8n/n8n-nodes-langchain.chainLlm", + "typeVersion": 1.5, + "position": [ + 340, + 2520 + ], + "id": "e2ad9bb8-d8cc-4985-b5a5-cdaed3b4c1f2", + "name": "Gemini LLM Chain", + "onError": "continueErrorOutput" + }, + { + "parameters": { + "jsonSchemaExample": "{\n \"summary\": \"\",\n \"agents\": [\n {\n \"agentId\": \"\",\n \"agentName\": \"\",\n \"content\": \"\"\n }\n ]\n}\n" + }, + "type": "@n8n/n8n-nodes-langchain.outputParserStructured", + "typeVersion": 1.2, + "position": [ + 520, + 2740 + ], + "id": "524c9ec9-1fa8-4d0a-8e1f-f27ddd31d931", + "name": "Structured Output Parser3" + }, + { + "parameters": { + "modelName": "models/gemini-2.0-flash", + "options": {} + }, + "type": "@n8n/n8n-nodes-langchain.lmChatGoogleGemini", + "typeVersion": 1, + "position": [ + 320, + 2740 + ], + "id": "eb09c2cd-d4ad-476a-b9cf-9e72a621c36d", + "name": "Google Gemini Chat Model", + "credentials": { + "googlePalmApi": { + "id": "7C1dNitocYmgSoeE", + "name": "Google Gemini(PaLM) Api" + } + } + }, { "parameters": { "promptType": "define", @@ -537,10 +627,10 @@ "type": "@n8n/n8n-nodes-langchain.chainLlm", "typeVersion": 1.5, "position": [ - 3280, - 900 + 340, + 1680 ], - "id": "a11315ec-7ea6-453f-8c28-8b89df449160", + "id": "c460509d-8a81-41d0-8c1a-fc5e7a457509", "name": "Ollama LLM Chain", "onError": "continueErrorOutput" }, @@ -552,17 +642,11 @@ "type": "@n8n/n8n-nodes-langchain.lmChatOllama", "typeVersion": 1, "position": [ - 3280, - 1140 + 340, + 1920 ], - "id": "816fc7d9-3af6-4b64-bb47-c41f6c77044d", - "name": "Ollama Chat Model", - "credentials": { - "ollamaApi": { - "id": "yPA707CsQLhWpvLV", - "name": "Ollama account" - } - } + "id": "a97b62b4-ebf1-49ee-b8c8-33c2a5fb01f5", + "name": "Ollama Chat Model" 
}, { "parameters": { @@ -573,10 +657,10 @@ "type": "n8n-nodes-base.stickyNote", "typeVersion": 1, "position": [ - -60, - 380 + -3000, + 1160 ], - "id": "ce38dfca-680a-447a-93b0-1fc9ed79ab19", + "id": "006f3139-3b95-4abd-8ac8-dfa725c5b66e", "name": "Sticky Note6" }, { @@ -586,10 +670,10 @@ "type": "n8n-nodes-base.respondToWebhook", "typeVersion": 1.1, "position": [ - 300, - 1060 + -2640, + 1840 ], - "id": "2e5e8c4b-16aa-4a84-bcda-63cc9c214803", + "id": "94177afe-b45e-4638-8c8d-db5fb8d78815", "name": "Healthcheck" } ], @@ -755,6 +839,13 @@ "type": "main", "index": 0 } + ], + [ + { + "node": "Gemini LLM Chain", + "type": "main", + "index": 0 + } ] ] }, @@ -871,29 +962,66 @@ } ] ] + }, + "Google Gemini Chat Model": { + "ai_languageModel": [ + [ + { + "node": "Gemini LLM Chain", + "type": "ai_languageModel", + "index": 0 + } + ] + ] + }, + "Structured Output Parser3": { + "ai_outputParser": [ + [ + { + "node": "Gemini LLM Chain", + "type": "ai_outputParser", + "index": 0 + } + ] + ] + }, + "Gemini LLM Chain": { + "main": [ + [ + { + "node": "Respond to Webhook", + "type": "main", + "index": 0 + }, + { + "node": "Insert Chat", + "type": "main", + "index": 0 + } + ] + ] } }, "active": true, "settings": { "executionOrder": "v1" }, - "versionId": "ab6e7234-06c9-4cfa-a31d-cb62f1d1b20a", + "versionId": "6227f362-32d6-4c8c-b59c-a106ff25982d", "meta": { - "templateCredsSetupCompleted": true, - "instanceId": "9cd4b56a8550f03129538b7ebc5456172e10a568573c8b63463f9ce7f70cad21" + "instanceId": "692531ee2a13fa33af74f7a5ea5609474d5a0f7c8c56e5778101f322976ad391" }, - "id": "xHGNEWurOgKeq7rN", + "id": "qvRujB5Z7fFaul7f", "tags": [ { - "createdAt": "2025-03-27T12:18:10.994Z", - "updatedAt": "2025-03-27T12:18:10.994Z", - "id": "TnX5QP1h95ttxM9c", + "createdAt": "2025-04-01T04:51:28.575Z", + "updatedAt": "2025-04-01T04:51:28.575Z", + "id": "cOQpbgeAnJqWsyqb", "name": "claude" }, { - "createdAt": "2025-03-27T12:17:50.289Z", - "updatedAt": "2025-03-27T12:17:50.289Z", - "id": 
"gYrnrGlauCXT9LkQ", + "createdAt": "2025-04-01T04:51:28.581Z", + "updatedAt": "2025-04-01T04:51:28.581Z", + "id": "lmJWAgftXdqaGCuk", "name": "dataengg" } ] diff --git a/docker-compose.yml b/docker-compose.yml index c457d51..6e1bdde 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -17,11 +17,13 @@ services: NEXT_PUBLIC_N8N_OPENAI_WORKFLOW: "/webhook/dataengineering-common" NEXT_PUBLIC_N8N_OLLAMA_WORKFLOW: "/webhook/dataengineering-common" NEXT_PUBLIC_N8N_CLAUDE_WORKFLOW: "/webhook/dataengineering-common" + NEXT_PUBLIC_N8N_GEMINI_WORKFLOW: "/webhook/dataengineering-common" # IMPORTANT: Use your own key, you can use the .env file to set this # CRITICAL: You can run it without keys by using Ollama NEXT_PUBLIC_OPENAI_API_KEY: ${NEXT_PUBLIC_OPENAI_API_KEY} NEXT_PUBLIC_ANTHROPIC_API_KEY: ${NEXT_PUBLIC_ANTHROPIC_API_KEY} + NEXT_PUBLIC_GEMINI_API_KEY: ${NEXT_PUBLIC_GEMINI_API_KEY} # CRITICAL FIX: Use container name instead of localhost NEXT_PUBLIC_OLLAMA_API_URL: "http://ollama:11434" ports: @@ -39,9 +41,11 @@ services: NEXT_PUBLIC_N8N_DEFAULT_WORKFLOW: "/webhook/dataengineering-common" NEXT_PUBLIC_N8N_OPENAI_WORKFLOW: "/webhook/dataengineering-common" NEXT_PUBLIC_N8N_OLLAMA_WORKFLOW: "/webhook/dataengineering-common" - NEXT_PUBLIC_N8N_CLAUDE_WORKFLOW: "webhook/dataengineering-common" + NEXT_PUBLIC_N8N_CLAUDE_WORKFLOW: "/webhook/dataengineering-common" + NEXT_PUBLIC_N8N_GEMINI_WORKFLOW: "/webhook/dataengineering-common" NEXT_PUBLIC_OPENAI_API_KEY: ${NEXT_PUBLIC_OPENAI_API_KEY} NEXT_PUBLIC_ANTHROPIC_API_KEY: ${NEXT_PUBLIC_ANTHROPIC_API_KEY} + NEXT_PUBLIC_GEMINI_API_KEY: ${NEXT_PUBLIC_GEMINI_API_KEY} # CRITICAL FIX: Use container name instead of localhost NEXT_PUBLIC_OLLAMA_API_URL: "http://ollama:11434" @@ -55,7 +59,7 @@ services: networks: - data-engineering-network healthcheck: - test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/healthz"] + test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", 
"http://localhost:3000/healthz" ] interval: 30s timeout: 5s retries: 3 @@ -72,6 +76,8 @@ services: - N8N_PROTOCOL=http - NODE_ENV=production - N8N_LOG_LEVEL=info + # Pass API keys to n8n + - NEXT_PUBLIC_GEMINI_API_KEY=${NEXT_PUBLIC_GEMINI_API_KEY} volumes: - n8n_data:/home/node/.n8n # Add the volume mount for avatar definitions @@ -81,7 +87,7 @@ services: - data-engineering-network healthcheck: # "localhost" inside the n8n container is fine for the n8n process itself - test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5678/healthz"] + test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5678/healthz" ] interval: 30s timeout: 5s retries: 3 diff --git a/src/app/api/gemini/validate/route.ts b/src/app/api/gemini/validate/route.ts new file mode 100644 index 0000000..4542aa7 --- /dev/null +++ b/src/app/api/gemini/validate/route.ts @@ -0,0 +1,81 @@ +// src/app/api/gemini/validate/route.ts +import { NextResponse } from 'next/server'; + +/** + * API route to validate Gemini API key + * This acts as a proxy to avoid exposing API keys to the client + */ +export async function GET() { + try { + const apiKey = process.env.NEXT_PUBLIC_GEMINI_API_KEY; + + console.log('Validating Gemini API key, exists:', !!apiKey); + + if (!apiKey || apiKey.trim() === '') { + console.error('Gemini API key not found in environment variables'); + return NextResponse.json( + { success: false, message: 'Gemini API key not found in environment variables' }, + { status: 400 } + ); + } + + // Check if the API key is valid by making a request to the models endpoint + // We specifically check for the gemini-2.0-flash model which is what we want to use + const response = await fetch(`https://generativelanguage.googleapis.com/v1/models?key=${apiKey}`, { + method: 'GET', + headers: { + 'Content-Type': 'application/json' + } + }); + + console.log(`Gemini API response status: ${response.status}`); + + if (!response.ok) { + const error = await 
response.json().catch((e) => { + console.error('Error parsing Gemini API error response:', e); + return { error: { message: 'Unknown error' } }; + }); + console.error('Gemini API validation error:', error); + return NextResponse.json( + { success: false, message: error.error?.message || response.statusText }, + { status: response.status } + ); + } + + const models = await response.json(); + + console.log('Gemini API models response:', JSON.stringify(models).substring(0, 200) + '...'); + console.log('Available models count:', models.models?.length || 0); + + // Check if gemini-2.0-flash model is available + const hasGemini2Flash = models.models?.some((model: any) => { + console.log('Checking model:', model.name); + return model.name === 'models/gemini-2.0-flash'; + }); + + if (!hasGemini2Flash) { + console.warn('Gemini API key is valid but gemini-2.0-flash model is not available'); + } else { + console.log('gemini-2.0-flash model is available!'); + } + + return NextResponse.json({ + success: true, + message: hasGemini2Flash + ? 'Gemini API key is valid with gemini-2.0-flash model available' + : 'Gemini API key is valid but gemini-2.0-flash model may not be available', + // Optionally return a limited subset of models to avoid large payloads + modelCount: models.models?.length || 0, + hasGemini2Flash + }); + } catch (error) { + console.error('Error validating Gemini API key:', error); + return NextResponse.json( + { + success: false, + message: `Error validating Gemini API key: ${error instanceof Error ? 
error.message : 'Unknown error'}` + }, + { status: 500 } + ); + } +} diff --git a/src/app/api/n8n/health/[type]/route.ts b/src/app/api/n8n/health/[type]/route.ts index fc39bff..e1ec1fd 100644 --- a/src/app/api/n8n/health/[type]/route.ts +++ b/src/app/api/n8n/health/[type]/route.ts @@ -8,6 +8,7 @@ const DEFAULT_WORKFLOW_ENDPOINT = process.env.NEXT_PUBLIC_N8N_DEFAULT_WORKFLOW | const OPENAI_WORKFLOW_ENDPOINT = process.env.NEXT_PUBLIC_N8N_OPENAI_WORKFLOW || '/webhook/dataengineering-common'; const OLLAMA_WORKFLOW_ENDPOINT = process.env.NEXT_PUBLIC_N8N_OLLAMA_WORKFLOW || '/webhook/dataengineering-common'; const CLAUDE_WORKFLOW_ENDPOINT = process.env.NEXT_PUBLIC_N8N_CLAUDE_WORKFLOW || '/webhook/dataengineering-common'; +const GEMINI_WORKFLOW_ENDPOINT = process.env.NEXT_PUBLIC_N8N_GEMINI_WORKFLOW || '/webhook/dataengineering-common'; // Define workflow endpoints mapping const WORKFLOW_ENDPOINTS: Record = { @@ -15,6 +16,7 @@ const WORKFLOW_ENDPOINTS: Record = { 'openai': OPENAI_WORKFLOW_ENDPOINT, 'ollama': OLLAMA_WORKFLOW_ENDPOINT, 'claude': CLAUDE_WORKFLOW_ENDPOINT, + 'gemini': GEMINI_WORKFLOW_ENDPOINT, 'demo': DEFAULT_WORKFLOW_ENDPOINT // Fallback for demo mode }; @@ -87,6 +89,9 @@ export async function GET( case 'claude': modelCheckResult = await checkClaudeAvailability(); break; + case 'gemini': + modelCheckResult = await checkGeminiAvailability(); + break; } console.log(`Model check result for ${workflowType}:`, modelCheckResult); } @@ -259,4 +264,56 @@ async function checkClaudeAvailability() { message: `Error validating Claude/Anthropic API key: ${error instanceof Error ? 
error.message : 'Unknown error'}` }; } +} + +/** + * Helper function to check Gemini API availability + */ +async function checkGeminiAvailability() { + const geminiKey = process.env.NEXT_PUBLIC_GEMINI_API_KEY; + + if (!geminiKey || geminiKey.trim() === '') { + return { + available: false, + message: 'Gemini API key not found in environment variables' + }; + } + + try { + console.log('Checking Gemini API key validity'); + // Check if Gemini API key is valid by making a simple request to the models endpoint + const response = await fetch(`https://generativelanguage.googleapis.com/v1/models?key=${geminiKey}`, { + method: 'GET', + headers: { + 'Content-Type': 'application/json' + } + }); + + console.log(`Gemini API response status: ${response.status}`); + + if (response.ok) { + const data = await response.json(); + console.log('Gemini models available:', data.models?.length || 0); + return { + available: true, + message: 'Gemini API key is valid' + }; + } else { + const error = await response.json().catch(e => { + console.error('Error parsing Gemini API response:', e); + return { error: { message: 'Unknown error parsing response' } }; + }); + console.error('Gemini API error:', error); + return { + available: false, + message: `Gemini API key is invalid: ${error.error?.message || response.statusText}` + }; + } + } catch (error) { + console.error('Exception in checkGeminiAvailability:', error); + return { + available: false, + message: `Error validating Gemini API key: ${error instanceof Error ? 
error.message : 'Unknown error'}` + }; + } } \ No newline at end of file diff --git a/src/app/api/n8n/health/[type]/validate/route.ts b/src/app/api/n8n/health/[type]/validate/route.ts index bc22684..9a9d281 100644 --- a/src/app/api/n8n/health/[type]/validate/route.ts +++ b/src/app/api/n8n/health/[type]/validate/route.ts @@ -87,7 +87,17 @@ export async function GET( }; } break; - + + case 'gemini': + // Check Gemini API key + if (!process.env.NEXT_PUBLIC_GEMINI_API_KEY) { + validationResult = { + success: true, // Still allow connection to n8n + message: `n8n connected, but gemini warning: Gemini API key not found in environment variables` + }; + } + break; + case 'demo': // No validation needed for demo validationResult = { diff --git a/src/app/api/n8n/workflows/[type]/route.ts b/src/app/api/n8n/workflows/[type]/route.ts index c01bbb2..b897adc 100644 --- a/src/app/api/n8n/workflows/[type]/route.ts +++ b/src/app/api/n8n/workflows/[type]/route.ts @@ -8,6 +8,7 @@ const DEFAULT_WORKFLOW_ENDPOINT = process.env.NEXT_PUBLIC_N8N_DEFAULT_WORKFLOW | const OPENAI_WORKFLOW_ENDPOINT = process.env.NEXT_PUBLIC_N8N_OPENAI_WORKFLOW || '/webhook/dataengineering-common'; const OLLAMA_WORKFLOW_ENDPOINT = process.env.NEXT_PUBLIC_N8N_OLLAMA_WORKFLOW || '/webhook/dataengineering-common'; const CLAUDE_WORKFLOW_ENDPOINT = process.env.NEXT_PUBLIC_N8N_CLAUDE_WORKFLOW || '/webhook/dataengineering-common'; +const GEMINI_WORKFLOW_ENDPOINT = process.env.NEXT_PUBLIC_N8N_GEMINI_WORKFLOW || '/webhook/dataengineering-common'; // Define workflow endpoints mapping const WORKFLOW_ENDPOINTS: Record = { @@ -15,6 +16,7 @@ const WORKFLOW_ENDPOINTS: Record = { 'openai': OPENAI_WORKFLOW_ENDPOINT, 'ollama': OLLAMA_WORKFLOW_ENDPOINT, 'claude': CLAUDE_WORKFLOW_ENDPOINT, + 'gemini': GEMINI_WORKFLOW_ENDPOINT, 'demo': DEFAULT_WORKFLOW_ENDPOINT // Fallback for demo mode }; diff --git a/src/components/ConnectionSelector.tsx b/src/components/ConnectionSelector.tsx index 50ae765..6900db4 100644 --- 
a/src/components/ConnectionSelector.tsx +++ b/src/components/ConnectionSelector.tsx @@ -26,7 +26,7 @@ import { useToast } from '@/hooks/use-toast'; import { ConnectionStatusTooltip } from './ConnectionStatusTooltip'; import { StoreState, ApiSettings } from '@/types'; -type BackendType = 'demo' | 'n8n-openai' | 'n8n-ollama' | 'n8n-claude' | 'n8n-default'; +type BackendType = 'demo' | 'n8n-openai' | 'n8n-ollama' | 'n8n-claude' | 'n8n-gemini' | 'n8n-default'; interface ConnectionOption { id: BackendType; @@ -67,6 +67,7 @@ export function ConnectionSelector({ console.debug("NEXT_PUBLIC_OLLAMA_API_URL exists:", !!process.env.NEXT_PUBLIC_OLLAMA_API_URL); console.debug("NEXT_PUBLIC_OLLAMA_API_URL value:", process.env.NEXT_PUBLIC_OLLAMA_API_URL); console.debug("NEXT_PUBLIC_ANTHROPIC_API_KEY exists:", !!process.env.NEXT_PUBLIC_ANTHROPIC_API_KEY); + console.debug("NEXT_PUBLIC_GEMINI_API_KEY exists:", !!process.env.NEXT_PUBLIC_GEMINI_API_KEY); console.debug("NEXT_PUBLIC_AGENT_API_URL:", process.env.NEXT_PUBLIC_AGENT_API_URL); console.debug("NEXT_PUBLIC_N8N_DEFAULT_WORKFLOW:", process.env.NEXT_PUBLIC_N8N_DEFAULT_WORKFLOW); console.debug("All NEXT_PUBLIC env variables:", Object.keys(process.env).filter(key => key.startsWith('NEXT_PUBLIC_'))); @@ -91,7 +92,21 @@ export function ConnectionSelector({ console.debug("Claude Key check:", hasKey); return hasKey; }; - + + const hasGeminiKey = (): boolean => { + const hasKey = !!process.env.NEXT_PUBLIC_GEMINI_API_KEY; + console.debug("Gemini Key check:", hasKey); + if (!hasKey) { + console.error("NEXT_PUBLIC_GEMINI_API_KEY is missing or empty"); + } else { + // Log a masked version of the key for debugging + const maskedKey = process.env.NEXT_PUBLIC_GEMINI_API_KEY?.substring(0, 4) + '...' 
+ + process.env.NEXT_PUBLIC_GEMINI_API_KEY?.substring(process.env.NEXT_PUBLIC_GEMINI_API_KEY.length - 4); + console.debug("Gemini Key (masked):", maskedKey); + } + return hasKey; + }; + // Define connection options const backendOptions: ConnectionOption[] = [ { @@ -131,6 +146,14 @@ export function ConnectionSelector({ description: 'Connect to Claude via n8n workflow', workflowType: 'claude', envCheck: hasClaudeKey + }, + { + id: 'n8n-gemini', + name: 'n8n Gemini', + icon: , + description: 'Connect to Gemini via n8n workflow', + workflowType: 'gemini', + envCheck: hasGeminiKey } ]; @@ -172,6 +195,7 @@ export function ConnectionSelector({ case 'openai': return 'n8n-openai'; case 'ollama': return 'n8n-ollama'; case 'claude': return 'n8n-claude'; + case 'gemini': return 'n8n-gemini'; default: return 'n8n-default'; } } diff --git a/src/components/chat/ChatInterface.tsx b/src/components/chat/ChatInterface.tsx index 6557f16..ff8f731 100644 --- a/src/components/chat/ChatInterface.tsx +++ b/src/components/chat/ChatInterface.tsx @@ -5,7 +5,7 @@ import { useState, useRef, useEffect } from "react"; import { v4 as uuidv4 } from 'uuid'; import { Loader2, SendHorizontal, Paperclip, Mic, Code, - Brain, Clock, MessageSquare, CornerDownRight, Sparkles, + Brain, MessageSquare, CornerDownRight, Sparkles, RefreshCw, XCircle } from "lucide-react"; import { ScrollArea } from "@/components/ui/scroll-area"; @@ -17,7 +17,7 @@ import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/comp import { motion, AnimatePresence } from "framer-motion"; import { ChatMessage } from "./ChatMessage"; import { useStore, useSelectedAgents, useSelectedStrategy } from "@/store"; -import { Agent, Message } from '@/types'; +import { Message, Agent } from "@/types"; interface NextQuestion { id: string; @@ -67,6 +67,16 @@ export function ChatInterface({ isLoading = false }: ChatInterfaceProps) { } }, [isLoading, conversationStatus]); + // Auto-resize textarea within its container + 
useEffect(() => { + if (textareaRef.current) { + textareaRef.current.style.height = "inherit"; + const maxHeight = Math.floor(window.innerHeight * 0.15); // Max 15% of viewport height + const newHeight = Math.min(textareaRef.current.scrollHeight, maxHeight); + textareaRef.current.style.height = `${newHeight}px`; + } + }, [inputValue]); + // Handle removing the thinking message when responses come in useEffect(() => { if (isProcessing) { @@ -303,15 +313,15 @@ export function ChatInterface({ isLoading = false }: ChatInterfaceProps) { - {/* Structure the layout with fixed heights */} -
- {/* Messages area - Takes exactly 80% of the height */} -
+ {/* Main layout with fixed input at bottom */} +
{/* Adjust height as needed */} + {/* Chat messages area */} +
{processedMessages.length === 0 ? ( ) : ( -
+
{filterMessages(processedMessages).map(msg => (
- {/* Input area - Fixed at 20% of the container height */} -
+ {/* Fixed input area - Always at bottom */} +
{conversationStatus === "idle" || conversationStatus === "active" ? (