diff --git a/docs.json b/docs.json index f2a3fe3..faca2c5 100644 --- a/docs.json +++ b/docs.json @@ -31,7 +31,10 @@ "group": "Introduction", "icon": "book-open", "expanded": false, - "pages": ["/", "introduction/welcome"] + "pages": [ + "/", + "introduction/welcome" + ] }, { "group": "Getting Started", @@ -41,12 +44,18 @@ { "group": "Setup", "expanded": false, - "pages": ["quickstart", "getting-started/installation"] + "pages": [ + "quickstart", + "getting-started/installation" + ] }, { "group": "Configuration", "expanded": false, - "pages": ["getting-started/select-your-model", "getting-started/your-first-project"] + "pages": [ + "getting-started/select-your-model", + "getting-started/your-first-project" + ] } ] }, @@ -77,7 +86,7 @@ } ] }, - { + { "group": "Providers", "icon": "cloud", "expanded": false, @@ -113,7 +122,10 @@ "group": "Comparisons", "icon": "scale", "expanded": false, - "pages": ["comparisons/bolt-vs-codinit", "comparisons/lovable-vs-codinit"] + "pages": [ + "comparisons/bolt-vs-codinit", + "comparisons/lovable-vs-codinit" + ] }, { "group": "Prompting", @@ -123,12 +135,19 @@ { "group": "Techniques", "expanded": false, - "pages": ["prompting/discussion-mode", "prompting/prompt-engineering-guide", "prompting/prompting-effectively"] + "pages": [ + "prompting/discussion-mode", + "prompting/prompt-engineering-guide", + "prompting/prompting-effectively" + ] }, { "group": "Optimization", "expanded": false, - "pages": ["prompting/maximize-token-efficiency", "prompting/plan-your-app"] + "pages": [ + "prompting/maximize-token-efficiency", + "prompting/plan-your-app" + ] } ] }, @@ -136,13 +155,20 @@ "group": "Model Configuration", "icon": "settings", "expanded": false, - "pages": ["model-config/context-windows", "model-config/model-comparison"] + "pages": [ + "model-config/context-windows", + "model-config/model-comparison" + ] }, { "group": "Hosting", "icon": "globe", "expanded": false, - "pages": ["integrations/vercel", "integrations/netlify", 
"integrations/cloudflare"] + "pages": [ + "integrations/vercel", + "integrations/netlify", + "integrations/cloudflare" + ] }, { "group": "Running Models Locally", @@ -158,6 +184,7 @@ }, { "tab": "Essentials", + "hidden": true, "groups": [ { "group": "Essentials", @@ -167,12 +194,17 @@ { "group": "AI Features", "expanded": false, - "pages": ["essentials/ai-chat-commands", "essentials/project-templates"] + "pages": [ + "essentials/ai-chat-commands", + "essentials/project-templates" + ] }, { "group": "Configuration", "expanded": false, - "pages": ["essentials/customization"] + "pages": [ + "essentials/customization" + ] } ] }, @@ -180,7 +212,12 @@ "group": "Integrations", "icon": "plug", "expanded": false, - "pages": ["integrations/deployments", "integrations/git", "integrations/supabase", "mcp/mcp-overview"] + "pages": [ + "integrations/deployments", + "integrations/git", + "integrations/supabase", + "mcp/mcp-overview" + ] } ] }, @@ -191,7 +228,12 @@ "group": "Support", "icon": "life-buoy", "expanded": false, - "pages": ["support/frequently-asked-questions", "support/integration-issues", "support/troubleshooting", "changelog"] + "pages": [ + "support/frequently-asked-questions", + "support/integration-issues", + "support/troubleshooting", + "changelog" + ] } ] }, diff --git a/essentials/ai-chat-commands.mdx b/essentials/ai-chat-commands.mdx index 6646270..43b542a 100644 --- a/essentials/ai-chat-commands.mdx +++ b/essentials/ai-chat-commands.mdx @@ -237,17 +237,17 @@ Different models excel at different tasks: - Refactoring large codebases - Architectural decisions - **Recommended model:** Claude 4.5 Sonnet + **Recommended model:** Claude 3.5 Sonnet - + **Best for:** - General-purpose coding - Quick iterations - Documentation generation - Code completion - **Recommended model:** GPT-5 + **Recommended model:** GPT-4o diff --git a/essentials/customization.mdx b/essentials/customization.mdx index 608433a..d2cf6ac 100644 --- a/essentials/customization.mdx +++ 
b/essentials/customization.mdx @@ -232,15 +232,23 @@ Configure local AI inference: - **OpenAI-compatible**: Custom OpenAI-compatible endpoints **Local provider settings:** -- Custom base URL -- Connection testing -- Model discovery +- Automatic detection of running providers +- Custom base URL configuration +- Real-time connection status +- Model discovery and listing - Enable/disable toggle -Local providers require the respective software running on your machine. Connection status is shown in real-time. +CodinIT automatically detects when Ollama (port 11434) or LM Studio (port 1234) are running on your machine. Detected providers are automatically enabled and appear in your model selector. Connection status updates in real-time. +**Auto-detection features:** +- Scans for local providers on startup +- Automatically enables detected providers +- Disables providers when they go offline +- Shows loading states during detection +- Integrates seamlessly with model selection + ### API keys Centralized API key management for all AI providers: diff --git a/features/overview.mdx b/features/overview.mdx index c67516f..5f20dbb 100644 --- a/features/overview.mdx +++ b/features/overview.mdx @@ -21,7 +21,7 @@ Transform your coding experience with intelligent AI assistance powered by LLMs - Connect with 19+ AI model providers including OpenAI GPT-4, Anthropic Claude, Google Gemini, Groq, and local LLMs like Ollama for flexible AI coding. + Connect with 18+ AI model providers including OpenAI GPT-4, Anthropic Claude, Google Gemini, Groq, and local LLMs like Ollama for flexible AI coding. @@ -76,7 +76,7 @@ Deploy AI-generated applications anywhere with comprehensive platform support an ## AI development platform benefits - + Connect with industry-leading AI models like Claude, GPT-4, Gemini and local LLM endpoints for maximum flexibility in AI code generation. @@ -112,5 +112,5 @@ Ready to begin your AI-powered development journey? 
Follow these simple steps to - **New to AI coding?** Visit our [AI Quickstart Guide](/quickstart) to get up and running with AI code generation in minutes, or explore our [LLM Providers Guide](/providers/cloud-providers) to learn about integrating with 19+ AI model providers including Claude, GPT-4, and Gemini. + **New to AI coding?** Visit our [AI Quickstart Guide](/quickstart) to get up and running with AI code generation in minutes, or explore our [LLM Providers Guide](/providers/cloud-providers) to learn about integrating with 18+ AI model providers including Claude, GPT-4, and Gemini. diff --git a/getting-started/installation.mdx b/getting-started/installation.mdx index 06394b4..1ffaf25 100644 --- a/getting-started/installation.mdx +++ b/getting-started/installation.mdx @@ -46,20 +46,11 @@ The easiest way to get started with AI-powered development is to download the Co If you prefer to build the AI coding assistant from source or contribute to open-source AI development: -### Local Version (Web Containers) - ```bash git clone https://github.com/codinit-dev/codinit-dev.git cd codinit-dev ``` -### Web Version (E2B) - -```bash -git clone https://github.com/Gerome-Elassaad/codingit.git -cd codingit -``` - ## Install dependencies Install all required dependencies using your preferred package manager: diff --git a/integrations/deployments.mdx b/integrations/deployments.mdx index 9b3fb2b..a74e424 100644 --- a/integrations/deployments.mdx +++ b/integrations/deployments.mdx @@ -1,6 +1,6 @@ --- title: 'Deployments' -description: 'Deploy AI-generated applications to Netlify and Vercel with seamless API integration, automated workflows, and intelligent configuration for full-stack apps.' +description: 'Deploy AI-generated applications to Netlify, Vercel, and Cloudflare with seamless API integration, automated workflows, and intelligent configuration for full-stack apps.' 
--- # AI-powered deployment integrations @@ -9,13 +9,16 @@ CodinIT provides direct API integration with leading deployment platforms, enabl ## AI-assisted deployment platforms - + Deploy AI-generated static sites, SPAs, and serverless functions with global CDN and intelligent continuous deployment. Optimized for AI-built Next.js, React apps with edge computing and intelligent framework detection. + + Deploy to Cloudflare's global edge network with automatic SSL and lightning-fast performance. + ### Core Deployment Features @@ -227,6 +230,13 @@ The Vercel integration uses the following API endpoints: - Projects needing instant deployments and preview environments - Teams building with Jamstack architecture +**Use Cloudflare Pages for:** + +- Global edge network deployment with 200+ locations +- Lightning-fast static site hosting with automatic optimization +- Projects requiring DDoS protection and security features +- Applications needing edge computing with Workers integration + ### Deployment Process diff --git a/introduction/welcome.mdx b/introduction/welcome.mdx index d6be2e5..dd8ff7f 100644 --- a/introduction/welcome.mdx +++ b/introduction/welcome.mdx @@ -12,7 +12,7 @@ CodinIT is an open-source AI coding agent and AI-powered IDE that brings frontie Install CodinIT AI IDE and start building with AI code generation - Connect Claude, GPT-4, Gemini, and 19+ LLM providers for AI-powered development + Connect Claude, GPT-4, Gemini, and 18+ LLM providers for AI-powered development Master AI prompting techniques for better code generation and software development diff --git a/openapi.json b/openapi.json index 39765a8..7bcf468 100644 --- a/openapi.json +++ b/openapi.json @@ -808,6 +808,149 @@ } } }, + "/api/mcp-validate-config": { + "post": { + "tags": ["MCP"], + "summary": "Validate MCP server configuration", + "description": "Validates the configuration for a Model Context Protocol server before applying it.", + "operationId": "validateMCPConfig", + "requestBody": { + 
"required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "required": ["serverName", "config"], + "properties": { + "serverName": { + "type": "string", + "description": "Name of the MCP server to validate" + }, + "config": { + "$ref": "#/components/schemas/MCPServerConfig" + } + } + } + } + } + }, + "responses": { + "200": { + "description": "Validation result", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "valid": { + "type": "boolean" + }, + "errors": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/ValidationError" + }, + "500": { + "description": "Server error", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + } + } + } + } + } + }, + "/api/mcp-retry": { + "post": { + "tags": ["MCP"], + "summary": "Retry MCP server connection", + "description": "Attempts to reconnect to a failed or disconnected MCP server and retrieves available tools.", + "operationId": "retryMCPConnection", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "required": ["serverName"], + "properties": { + "serverName": { + "type": "string", + "description": "Name of the MCP server to retry" + } + } + } + } + } + }, + "responses": { + "200": { + "description": "Server tools after successful reconnection", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "tools": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "inputSchema": { + "type": "object" + } + } + } + } + } + } + } + } + }, + "400": { + "$ref": "#/components/responses/ValidationError" + }, + "500": { + "description": "Server error", + "content": { + "application/json": { + "schema": { 
+ "type": "object", + "properties": { + "error": { + "type": "string" + } + } + } + } + } + } + } + } + }, "/api/check-env-key": { "get": { "tags": ["Utilities"], diff --git a/prompting/discussion-mode.mdx b/prompting/discussion-mode.mdx index a7bdc8a..4cb3f32 100644 --- a/prompting/discussion-mode.mdx +++ b/prompting/discussion-mode.mdx @@ -3,11 +3,11 @@ title: Discussion Mode description: Technical consultant mode for collaborative problem-solving and guidance --- -Discussion mode transforms the AI into a technical consultant who provides guidance, plans, and structured approaches to development challenges without directly implementing code. +Discussion mode transforms the AI into a technical consultant who provides guidance, plans, and structured approaches to development challenges without directly implementing code. When activated, CodinIT switches to a specialized system prompt designed specifically for consultative interactions. ## Overview -In discussion mode, the AI acts as an experienced senior software engineer and technical consultant, offering strategic advice, architectural guidance, and detailed planning for your development projects. +In discussion mode, the AI acts as an experienced senior software engineer and technical consultant, offering strategic advice, architectural guidance, and detailed planning for your development projects. Unlike build mode, discussion mode focuses on planning and guidance rather than code generation. 
## Key Features @@ -19,12 +19,15 @@ In discussion mode, the AI acts as an experienced senior software engineer and t ## How It Works -When you activate discussion mode, the AI switches to a specialized prompt that focuses on: +When you activate discussion mode, CodinIT switches to a specialized system prompt (`discussPrompt`) that fundamentally changes how the AI responds: -- **Planning Over Implementation**: Providing detailed plans rather than writing code -- **Educational Approach**: Explaining concepts and teaching best practices -- **Strategic Thinking**: Considering long-term implications and scalability -- **Collaborative Problem-Solving**: Working with you to develop optimal solutions +- **Planning Over Implementation**: Provides detailed numbered plans with file references and plain English descriptions instead of code +- **Educational Approach**: Explains concepts, reasoning, and best practices with "why" behind recommendations +- **Strategic Thinking**: Uses chain-of-thought reasoning to analyze problems before providing solutions +- **Collaborative Problem-Solving**: Offers quick action buttons for implementing plans, continuing discussions, or opening referenced files +- **No Code Generation**: Responses describe changes in plain English rather than providing code snippets + +The system prompt explicitly instructs the AI to use phrases like "You should add..." instead of "I will implement..." to maintain the consultative role. 
## When to Use Discussion Mode @@ -64,13 +67,20 @@ Click the "Discuss" button in the chat interface to switch to technical consulta ### Response Format -In discussion mode, responses typically include: +In discussion mode, responses follow a specific structure: + +- **Chain of Thought**: Visible reasoning process showing how the AI analyzes the problem +- **Clear Plans**: Numbered steps starting with "## The Plan" heading, describing changes in plain English only +- **File References**: Specific file paths with corresponding quick action buttons to open them +- **Technology Recommendations**: Specific tools and approaches with reasoning +- **Best Practice Guidance**: Industry-standard approaches with explanations +- **Quick Actions**: Interactive buttons at the end of every response: + - **Implement**: For executing the outlined plan in build mode + - **Message**: For continuing the conversation with specific follow-ups + - **Link**: For opening external documentation or resources + - **File**: For opening referenced files in the editor -- **Clear Plans**: Numbered steps for implementing solutions -- **Technology Recommendations**: Specific tools and approaches -- **Best Practice Guidance**: Industry-standard approaches -- **Educational Context**: Explanations of why certain approaches are recommended -- **Quick Actions**: Interactive buttons for common next steps +The AI is explicitly instructed to never include code snippets in plans, only plain English descriptions of what needs to change. 
## Best Practices for Discussion Mode @@ -124,38 +134,62 @@ Use the AI's recommendations as a foundation for more detailed questions about i Discussion mode responses emphasize planning and strategy: -- **Step-by-step implementation plans** -- **Technology recommendations with reasoning** -- **Consideration of trade-offs and alternatives** -- **Scalability and maintainability considerations** +- **Step-by-step implementation plans** with "## The Plan" heading +- **Technology recommendations with reasoning** explaining why specific tools are suggested +- **Consideration of trade-offs and alternatives** for different approaches +- **Scalability and maintainability considerations** for long-term success +- **File-specific guidance** indicating exactly which files need modification ### Educational Approach Responses include educational elements: -- **Explanation of concepts and principles** -- **Rationale behind recommendations** -- **Industry best practices and standards** -- **Common pitfalls and how to avoid them** +- **Chain of thought reasoning** visible to users before solutions +- **Explanation of concepts and principles** with context +- **Rationale behind recommendations** explaining the "why" +- **Industry best practices and standards** with current information +- **Common pitfalls and how to avoid them** based on experience ### Interactive Elements -Discussion mode often includes interactive elements: +Discussion mode always includes interactive quick action buttons: -- **Quick action buttons** for common next steps -- **File references** for examining specific code -- **Link suggestions** for additional resources -- **Structured recommendations** with clear priorities\*\* +- **Implement buttons** to execute plans in build mode (labeled "Implement this plan", "Fix this bug", "Fix these issues", or database-specific actions) +- **Message buttons** for continuing conversations with specific prompts +- **Link buttons** for opening external documentation 
(especially for topics in support resources) +- **File buttons** for opening referenced files in the editor +- **Ordered by priority**: Implement actions first, then message, link, and file actions +- **Limited to 4-5 actions** to avoid overwhelming users ## Integration with Other Features -### Combining with Regular Chat +### Combining with Build Mode -You can switch between discussion mode and regular chat mode: +You can seamlessly switch between discussion mode and build mode: 1. Use **discussion mode** for planning and architectural decisions -2. Switch to **regular chat** for implementation and code writing +2. Click **"Implement this plan"** quick action to switch to build mode and execute the plan 3. Return to **discussion mode** for reviewing and refining implemented code +4. The AI maintains context across mode switches + +### Search Grounding + +Discussion mode has access to web search capabilities: + +- **Automatic search** when uncertain about technical information, package details, or API specifications +- **First-party documentation** search for faster, more accurate results +- **Current information** rather than relying on potentially outdated knowledge +- **URL content fetching** when users share links for context + +### Support Resources Integration + +Discussion mode automatically redirects to official documentation for specific topics: + +- **Token efficiency**: Redirects to maximize-token-efficiency guide +- **Effective prompting**: Links to prompting-effectively documentation +- **Supabase integration**: Points to Supabase integration docs +- **Deployment/hosting**: Directs to Netlify and hosting FAQs +- Uses link quick actions to open documentation in new tabs ### Using with Project Context @@ -164,8 +198,8 @@ Discussion mode works best when you provide: - **Current project structure** and technology stack - **Existing code snippets** for review - **Performance requirements** and constraints -- **Team size and expertise** levels -- **Timeline 
and resource** considerations +- **Specific file paths** for targeted guidance +- **Running processes** (automatically detected by CodinIT) ## Common Use Cases @@ -211,16 +245,40 @@ Analyzing existing codebases, identifying improvement opportunities, planning re mode when you're ready to implement the plans and write code. -## How to use Discussion Mode +## How to Use Discussion Mode + +Discussion mode lets you explore ideas by chatting with CodinIT without making changes to your code. It is versatile and works well for planning, learning, and problem-solving. + +### Activating Discussion Mode + +1. Open your CodinIT project +2. In the bottom-right corner of the chatbox, click the **Discuss** button (chat icon) +3. The button highlights when discussion mode is active +4. Click again to return to build mode + +### Using Discussion Mode Effectively + +Once activated, you can: + +1. **Ask planning questions** about architecture, design patterns, or implementation approaches +2. **Request code reviews** for guidance on improving existing code +3. **Explore technologies** to learn about frameworks, libraries, and best practices +4. **Debug strategically** by discussing error patterns and systematic approaches +5. **Use quick actions** to: + - Implement the plan in build mode + - Continue the discussion with suggested follow-ups + - Open referenced files in the editor + - Access external documentation -Discussion Mode lets you explore ideas by chatting with CodinIT without making changes to your code. It is versatile and works well for a wide range of topics. You can use it any time you want to brainstorm or think through ideas. +### Response Behavior -Follow the steps below to use Discussion Mode in a CodinIT project: +In discussion mode, the AI will: -1. Open your CodinIT project. -2. In the bottom-right corner of the chatbox, click **Discuss**. -3. Enter your question or prompt, and read the response. You can then either: - - Continue the discussion. 
- - Use one of the quick action buttons to implement the suggestion. +- Show its reasoning process using chain-of-thought +- Provide plans with numbered steps in plain English +- Reference specific files with quick action buttons +- Avoid generating code snippets (descriptions only) +- Always include interactive quick actions at the end +- Redirect to official documentation for specific topics -Discussion Mode highlights blue when active. Click it again to turn it off and return to Build mode. +Discussion mode is ideal for planning before implementation. Use the "Implement this plan" button to seamlessly switch to build mode and execute your plan. diff --git a/prompting/maximize-token-efficiency.mdx b/prompting/maximize-token-efficiency.mdx index aa0ef3a..2e01c93 100644 --- a/prompting/maximize-token-efficiency.mdx +++ b/prompting/maximize-token-efficiency.mdx @@ -7,22 +7,29 @@ Learn how to use AI smartly so you don't run out of credits or money. Think of t ## What Are Tokens? -CodinIT uses AI that runs on "tokens." Tokens are small pieces of text that the AI reads and writes. +CodinIT uses AI that runs on "tokens." Tokens are small pieces of text that the AI reads and writes. Understanding token usage helps you optimize costs and stay within model limits. 
### How Tokens Get Used -Tokens are used when: +Tokens are consumed in several ways: -- **You ask questions**: Your messages to the AI -- **AI answers**: The code and explanations the AI gives you -- **Context**: The AI reading your project files to understand what you're building +- **System prompts**: CodinIT's built-in prompts (default, fine-tuned, or experimental) that guide AI behavior +- **Your messages**: The questions and requests you send to the AI +- **AI responses**: The code, explanations, and artifacts the AI generates +- **Project context**: File contents, file changes, and running processes the AI reads +- **Chain of thought**: The reasoning process shown in `<thinking>` tags +- **Conversation history**: Previous messages in the chat thread ### What Affects Token Usage -- **Which AI model**: Some models cost more than others -- **Project size**: Bigger projects use more tokens -- **Answer length**: Long explanations use more tokens than short ones -- **Chat length**: Longer conversations use more tokens +- **Which AI model**: Some models cost more per token (Claude vs GPT vs DeepSeek) +- **System prompt choice**: Fine-tuned prompt uses more tokens than experimental +- **Project size**: Bigger projects with more files use more context tokens +- **Answer length**: Long explanations and code use more tokens than short ones +- **Chat length**: Longer conversations accumulate more history tokens +- **Mode selection**: Discussion mode may use fewer tokens (no code generation) +- **Chain of thought**: Visible reasoning adds tokens but improves quality +- **File context**: The AI reading multiple files to understand your project **Token Limits**: Each AI has a maximum amount of text it can handle at once. If you go over, you might get errors. 
@@ -65,12 +72,15 @@ CodinIT has buttons and menus that don't use tokens: ### Use Discussion Mode for Planning -When you just want to talk and plan (not write code), use discussion mode: +When you just want to talk and plan (not write code), use discussion mode to save tokens: -- **Planning**: Talk about features before building them -- **Getting advice**: Ask which tools to use +- **Planning**: Talk about features before building them (no code artifacts generated) +- **Getting advice**: Ask which tools to use (plain English responses) - **Code review**: Discuss improvements without changing code - **Learning**: Ask questions without generating code +- **Architecture decisions**: Get guidance on system design + +Discussion mode uses a different system prompt that focuses on planning rather than code generation, which can reduce token usage while still providing valuable guidance. Use the "Implement this plan" button when ready to switch to build mode. ### Plan Before You Build @@ -121,43 +131,67 @@ When you just want to talk and plan (not write code), use discussion mode: - Focus on one part of your app at a time - **Discussion Mode**: Use this when you want to talk and plan without writing code. It saves tokens! + **Discussion Mode**: Use this when you want to talk and plan without writing code. It uses a different system prompt focused on guidance rather than code generation, which can save tokens. - **Use Git**: Save your work with Git instead of asking AI to undo things. It's free! + **Start New Chats**: When conversations get long, start a new chat to reduce context tokens. CodinIT maintains your project files, so you won't lose work. -## Choosing the Right AI Model + + **Use Git**: Save your work with Git instead of asking AI to undo things. It's free and doesn't use tokens! 
+ + +## Choosing the Right AI Model and Prompt ### Pick the Right Model for the Job -Different AI models cost different amounts: +CodinIT supports multiple AI providers with different token costs: -- **Cheaper models** for simple tasks and quick questions -- **Expensive models** for hard problems and important code -- **Check limits** - some models can't handle huge projects +- **Cheaper models** (GPT-3.5, DeepSeek) for simple tasks and quick questions +- **Mid-range models** (GPT-4, Claude Sonnet) for most development work +- **Premium models** (Claude Opus) for complex problems and important code +- **Check context limits** - some models can't handle huge projects - **Balance cost and quality** based on what you're doing ### Different AI Models **Claude (Anthropic):** -- Great at understanding and writing code -- Costs more but gives better results -- Use for complex projects +- Excellent at reasoning and complex code +- Larger context windows (200K+ tokens) +- Higher cost per token but better quality +- Works well with CodinIT's chain-of-thought prompting +- Best for: Complex projects, refactoring, architecture **GPT (OpenAI):** -- Fast and cheaper for many tasks -- Good for trying things out quickly -- Use GPT-4 for harder problems +- Fast and cost-effective for many tasks +- Good for iterative development +- GPT-4 for harder problems, GPT-3.5 for simple tasks +- Best for: Quick iterations, simple features, prototyping + +**DeepSeek:** + +- Very cost-effective for code generation +- Good code quality at lower cost +- Best for: Budget-conscious development, learning + +**Other Models (Gemini, Groq, etc.):** + +- Check provider-specific strengths +- Consider regional availability +- Compare pricing for your use case + +### System Prompt Selection + +CodinIT offers three prompt variants that affect token usage: -**Other Models:** +1. **Default Prompt**: Balanced approach with comprehensive guidelines +2. 
**Fine-Tuned Prompt**: More detailed instructions, higher token usage, better results +3. **Experimental Prompt**: Optimized for lower token usage (may sacrifice some quality) -- Check what they're good at -- Some work better in certain countries -- Compare prices for what you need +Choose the experimental prompt if token efficiency is your top priority. ## Advanced Tips diff --git a/prompting/prompt-engineering-guide.mdx b/prompting/prompt-engineering-guide.mdx index d5a2957..e22514d 100644 --- a/prompting/prompt-engineering-guide.mdx +++ b/prompting/prompt-engineering-guide.mdx @@ -3,11 +3,18 @@ title: 'Prompt Engineering Guide' description: 'How to talk to AI to get better code' --- -Learn how to ask the AI for what you want in a way that gets you the best results. Think of it like giving clear instructions to a helpful assistant. +Learn how to ask the AI for what you want in a way that gets you the best results. CodinIT uses sophisticated system prompts that guide the AI's behavior, and understanding how to work with these prompts will help you get better results. -## How AI Understands You +## How CodinIT Processes Your Requests -Different AI models are good at different things. The key is to be clear about what you want, just like explaining something to a friend. +CodinIT uses different system prompts depending on the mode and settings you choose: + +- **Build Mode**: Uses either the default, fine-tuned, or experimental prompt to generate code and implement features +- **Discussion Mode**: Uses a specialized consultant prompt focused on planning and guidance +- **Chain of Thought**: The AI shows its reasoning process before providing solutions +- **Search Grounding**: Automatically searches the web for current information when needed + +Different AI models are good at different things. The key is to be clear about what you want and provide sufficient context. --- @@ -15,7 +22,7 @@ Different AI models are good at different things. 
The key is to be clear about w ### Be Specific About Your Tools -The AI works better when you tell it exactly what tools and technologies you want to use. It's like telling a chef what ingredients to cook with. +CodinIT's system prompts include built-in preferences (like using Vite for web servers and Supabase for databases), but you can override these by being explicit. The AI works better when you tell it exactly what tools and technologies you want to use. **Good example:** @@ -27,6 +34,20 @@ Build an online store dashboard with: - React Router to move between pages ``` +### Built-in Technology Preferences + +CodinIT has default preferences configured in its system prompts: + +- **Web servers**: Vite (default) +- **Databases**: Supabase (default), or JavaScript-based alternatives like libsql or sqlite +- **Styling**: Tailwind CSS with shadcn/ui components +- **Icons**: Lucide React +- **Images**: Pexels stock photos (direct URLs only) +- **Package management**: npm +- **Node.js scripts**: Preferred over shell scripts + +You can override these by explicitly specifying different tools in your prompts. 
+ ### Popular Tool Combinations | What You're Building | Good Tools to Use | What It's For | @@ -73,20 +94,31 @@ Build an online store dashboard with: ### Tips for Different AI Models -**For Claude or Gemini:** +CodinIT supports multiple AI providers, each with different strengths: -- Give all the information at once +**For Claude (Anthropic):** + +- Excellent at reasoning and complex problem-solving +- Give all the information at once for best results - Use numbered lists to organize your thoughts - Include examples of what you want -- Say how you want the answer formatted +- Works well with CodinIT's chain-of-thought prompting -**For GPT:** +**For GPT (OpenAI):** +- Fast and versatile for most tasks - Break big requests into smaller pieces - Use simple, clear language - Show examples of what you mean - Be specific about what you want +**For other models (Gemini, DeepSeek, etc.):** + +- Check context window limits for large projects +- Test different models for your specific use case +- Consider cost vs. quality trade-offs +- Some models excel at specific tasks (e.g., code generation vs. explanation) + ### Avoid Common Mistakes **Don't do this:** @@ -104,13 +136,33 @@ Build an online store dashboard with: - Ask for checkpoints along the way - **Version Numbers**: If you know which version of a tool you're using, tell the AI. This helps it give you code that works. + **Version Numbers**: If you know which version of a tool you're using, tell the AI. CodinIT can use search grounding to find current documentation and best practices for specific versions. - **Start Simple**: Begin with a clear request, then add more details in follow-up messages. + **Use Discussion Mode**: For planning and architecture decisions, switch to discussion mode to get guidance without code generation. Then use the "Implement this plan" button to execute in build mode. 
+## Understanding CodinIT's System Constraints + +CodinIT operates in WebContainer, an in-browser Node.js runtime with specific limitations: + +### What Works + +- JavaScript and WebAssembly code +- Node.js scripts and npm packages +- Vite and other JavaScript-based tools +- Python (standard library only) + +### What Doesn't Work + +- Native binaries (C/C++ compiled code) +- Git commands (use CodinIT's built-in Git integration instead) +- Python pip packages (standard library only) +- Supabase CLI (use CodinIT's Supabase integration) + +The AI is aware of these constraints and will suggest compatible alternatives automatically. + ## Quick Tips Summary ### The Main Rules diff --git a/prompting/prompting-effectively.mdx b/prompting/prompting-effectively.mdx index d3acbc1..572f8fd 100644 --- a/prompting/prompting-effectively.mdx +++ b/prompting/prompting-effectively.mdx @@ -3,16 +3,24 @@ title: 'Prompt Effectively' description: 'Master the art of clear, effective communication with AI models' --- -The quality of your results depends heavily on how clearly and effectively you communicate your intentions. Good prompting is a skill that combines clarity, specificity, and understanding of how AI models process information. +The quality of your results depends heavily on how clearly and effectively you communicate your intentions. CodinIT uses sophisticated system prompts that guide the AI's behavior, and understanding how to work with these prompts will help you get better results. ## Understanding AI Communication -AI models process your requests through text, so the way you phrase your questions and instructions directly impacts the quality of responses. 
Effective prompting involves: +CodinIT processes your requests through specialized system prompts that include: + +- **Chain of thought reasoning**: The AI shows its thinking process before providing solutions +- **Artifact-based responses**: Code and commands are wrapped in structured artifacts +- **Context awareness**: The AI understands your project structure, running processes, and file changes +- **Search grounding**: Automatic web search for current information when needed + +Effective prompting involves: - **Clarity**: Being specific about what you want -- **Context**: Providing necessary background information +- **Context**: Providing necessary background information (file paths, error messages, requirements) - **Structure**: Organizing your requests logically - **Iteration**: Refining your approach based on responses +- **Mode selection**: Using discussion mode for planning, build mode for implementation ## Example Prompts @@ -80,59 +88,85 @@ These examples demonstrate effective prompting patterns that you can adapt to yo ### Understanding AI Reasoning -CodinIT provides visual insights into the AI's thinking process: +CodinIT's system prompts include chain-of-thought instructions that make the AI's reasoning visible: -**Thinking Process Display:** +**Chain of Thought (`` tags):** -- See step-by-step reasoning for complex tasks -- Understand how the AI breaks down problems -- Follow the logical flow of solutions +- The AI shows 2-6 concrete steps it will take before implementing +- Helps you understand the approach before code is generated +- Appears at the start of every response in build mode +- Lists specific actions like "Set up Vite + React project structure" or "Implement core functionality" **Thought Artifacts:** -- Expandable reasoning containers +- Expandable reasoning containers in the UI - Detailed explanation of decision-making - Visual representation of problem-solving steps +- Shows the AI's planning process transparently + +This thinking 
process is mandatory in CodinIT's system prompts and helps ensure the AI takes a systematic approach to your requests. ### Using Discussion Mode Effectively **Planning Phase:** -- Use discussion mode for architecture decisions +- Click the "Discuss" button to activate discussion mode +- The AI switches to a specialized consultant prompt - Get guidance without code implementation -- Explore multiple solution approaches +- Receive plans with numbered steps in plain English +- Explore multiple solution approaches with reasoning - Understand trade-offs and implications **Implementation Phase:** -- Switch to regular chat for code generation -- Reference discussion insights in prompts -- Build upon planned architectures +- Click "Implement this plan" quick action button +- Automatically switches to build mode with context +- The AI generates code based on the discussed plan +- Reference discussion insights in follow-up prompts - Iterate based on discussion feedback +**Key Differences:** + +- **Discussion mode**: Plans in plain English, no code snippets, consultative tone +- **Build mode**: Generates code in artifacts, implements features, shows chain of thought + ## Optimizing for Different AI Models ### Understanding Model Capabilities -Different AI providers have different strengths: +CodinIT supports multiple AI providers through its provider system. 
Different models have different strengths: **Claude (Anthropic):** - Excellent at reasoning and analysis - Strong code generation capabilities - Good for complex problem-solving +- Works well with CodinIT's chain-of-thought prompting +- Larger context windows for bigger projects **GPT Models (OpenAI):** - Fast and versatile - Good for creative tasks - Strong at following detailed instructions +- Cost-effective for simpler tasks -**Other Models:** +**Other Models (DeepSeek, Gemini, Groq, etc.):** - Specialized capabilities vary by provider - Consider context limits and pricing - Test different models for your use case +- Some excel at specific tasks (e.g., DeepSeek for code) + +### Prompt Library Options + +CodinIT offers three system prompt variants: + +1. **Default Prompt**: Battle-tested standard prompt with comprehensive guidelines +2. **Fine-Tuned Prompt**: Optimized for better results with advanced techniques +3. **Experimental Prompt**: Optimized for lower token usage (experimental) + +You can select these in the settings to optimize for your needs. ### Adapting Your Prompts @@ -194,16 +228,31 @@ Different AI providers have different strengths: system. -```txt theme={"system"} -For all designs I ask you to make, have them be beautiful, not cookie cutter. Make webpages that are fully featured and worthy for production. +## Custom System Prompts -By default, this template supports JSX syntax with Tailwind CSS classes, the shadcn/ui library, React hooks, and Lucide React for icons. Do not install other packages for UI themes, icons, etc unless absolutely necessary or I request them. +You can enhance CodinIT's behavior by adding custom instructions to your project. These work alongside CodinIT's built-in system prompts. -Use icons from lucide-react for logos. +### Example Custom Instructions + +```txt +For all designs I ask you to make, have them be beautiful, not cookie cutter. +Make webpages that are fully featured and worthy for production. 
-Use stock photos from unsplash where appropriate. +By default, this template supports JSX syntax with Tailwind CSS classes, +the shadcn/ui library, React hooks, and Lucide React for icons. +Do not install other packages for UI themes, icons, etc unless absolutely +necessary or I request them. + +Use icons from lucide-react for logos. +Use stock photos from Pexels where appropriate. ``` -### Tips for the project or system prompts +### Tips for Custom Instructions + +- **Be specific**: Include instructions about your preferred coding style, libraries, or patterns +- **Set boundaries**: Tell CodinIT to only change relevant code, not rewrite entire files +- **Define standards**: Specify naming conventions, file organization, or testing requirements +- **Provide context**: Explain project-specific constraints or requirements +- **Override defaults**: Explicitly state if you want different tools than CodinIT's defaults -- Include instructions to CodinIT.dev to only change relevant code. +Note: Custom instructions complement but don't replace CodinIT's core system prompts, which handle artifact generation, WebContainer constraints, and mode-specific behavior. diff --git a/providers/cloud-providers.mdx b/providers/cloud-providers.mdx index cf5a266..2a5919c 100644 --- a/providers/cloud-providers.mdx +++ b/providers/cloud-providers.mdx @@ -1,6 +1,6 @@ --- title: 'Providers' -description: 'Connect CodinIT AI IDE with 19+ LLM providers including Claude, GPT-4, Gemini, DeepSeek for AI code generation, local inference, and specialized AI coding services.' +description: 'Connect CodinIT AI IDE with 18+ LLM providers including Claude, GPT-4, Gemini, DeepSeek for AI code generation, local inference, and specialized AI coding services.' 
--- ## Enterprise & research AI coding models @@ -38,9 +38,6 @@ description: 'Connect CodinIT AI IDE with 19+ LLM providers including Claude, GP Grok LLMs with large context windows for AI code generation - - Fast LLM inference with 40+ AI models for code generation - ## Open-source AI models & community LLMs @@ -96,7 +93,7 @@ description: 'Connect CodinIT AI IDE with 19+ LLM providers including Claude, GP ## Choosing an AI coding provider **AI performance & speed:** -- Ultra-fast LLM inference: Groq, Together AI, Fireworks for real-time code completion +- Ultra-fast LLM inference: Groq, Together AI for real-time code completion - Best AI reasoning: Anthropic Claude, DeepSeek, OpenAI o1 for complex code generation - Balanced AI models: OpenAI GPT-4, Google Gemini, Cohere for general development diff --git a/providers/fireworks.mdx b/providers/fireworks.mdx deleted file mode 100644 index 579c517..0000000 --- a/providers/fireworks.mdx +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: "Fireworks AI" -description: "Configure Fireworks AI for fast inference with 40+ optimized models." ---- - -Fireworks AI provides optimized inference with up to 4x faster performance than alternatives. - -**Website:** [https://fireworks.ai/](https://fireworks.ai/) - -## Getting an API Key - -1. Go to [Fireworks AI](https://fireworks.ai/) and sign in -2. Navigate to API Keys in your dashboard -3. Create a new API key and name it (e.g., "CodinIT") -4. Copy the key immediately - -## Configuration - -1. Click the settings icon (⚙️) in CodinIT -2. Select "Fireworks" as the API Provider -3. Paste your API key -4. 
Enter the model ID (e.g., "accounts/fireworks/models/llama-v3p1-70b-instruct") - -## Supported Models - -- Llama 3.1 series (8B, 70B, 405B) -- Mixtral 8x7B and 8x22B -- Qwen 2.5 series -- DeepSeek models -- Code Llama models -- Vision models (Llama 3.2, Qwen 2-VL) - -## Key Features - -- **Ultra-fast inference:** Up to 4x faster than alternatives -- **Custom optimizations:** Advanced kernels for maximum performance -- **40+ models:** Wide selection of optimized models -- **Fine-tuning:** Available for custom models -- **OpenAI compatible:** Standard API format - -## Notes - -- **Pricing:** Usage-based, see [Fireworks Pricing](https://fireworks.ai/pricing) -- **Compliance:** HIPAA and SOC 2 Type II certified diff --git a/providers/ollama.mdx b/providers/ollama.mdx index b6a1f39..cf8ee0c 100644 --- a/providers/ollama.mdx +++ b/providers/ollama.mdx @@ -38,9 +38,25 @@ Run models locally using Ollama for privacy, offline access, and control. Requir See [Ollama model library](https://ollama.com/library) for full list. +## Dynamic Context Windows + +CodinIT automatically calculates optimal context windows based on model parameter size: + +- **70B+ models:** 32k context window (e.g., Llama 70B) +- **30B+ models:** 16k context window +- **7B+ models:** 8k context window +- **Smaller models:** 4k context window (default) + +**Special model families:** +- Llama 70B models: 32k context +- Llama 405B models: 128k context + +Model labels in CodinIT show both parameter size and context window (e.g., "qwen2.5-coder:32b (32B, 16k ctx)"). 
+ ## Notes -- **Context window:** Minimum 12,000 tokens recommended, 32,000 ideal +- **Auto-detection:** CodinIT automatically detects Ollama running on port 11434 +- **Context window:** Dynamically calculated based on model capabilities - **Resource demands:** Large models require significant system resources - **Offline capability:** Works without internet after model download - **Performance:** May be slow on average hardware diff --git a/providers/openrouter.mdx b/providers/openrouter.mdx index 01a885e..3ea730b 100644 --- a/providers/openrouter.mdx +++ b/providers/openrouter.mdx @@ -22,7 +22,15 @@ OpenRouter provides access to models from multiple providers through a single AP ## Supported Models -CodinIT automatically fetches available models. See [OpenRouter Models](https://openrouter.ai/models) for the complete list. +CodinIT automatically fetches available models. Featured models include: + +- **Claude Opus 4.5:** 200k context, maximum intelligence +- **Claude Sonnet 4.5:** 1M context, highest intelligence +- **GPT-5.2 Pro:** 400k context, latest GPT model +- **GPT-4o:** 128k context, reliable fallback +- **DeepSeek R1 (Free):** 163k context, free tier available + +See [OpenRouter Models](https://openrouter.ai/models) for the complete list. ## Features diff --git a/quickstart.mdx b/quickstart.mdx index dda11ca..d468df3 100644 --- a/quickstart.mdx +++ b/quickstart.mdx @@ -1,6 +1,6 @@ --- -title: "Quickstart - AI Coding Assistant Setup Guide" -description: "Get started with CodinIT AI-powered IDE in minutes. Install the AI coding assistant, connect LLM providers like Claude and GPT-4, and start building with AI code generation." +title: "Quickstart" +description: "Get started with CodinIT AI-powered IDE in minutes. Install the AI coding assistant, connect LLM providers like Claude and OpenAI, and start building with AI code generation." --- - Deploy AI-generated applications to Vercel, Netlify with one-click deployment. 
+ Deploy AI-generated applications to Vercel, Netlify, and GitHub Pages with one-click deployment.