"""
System Configuration
------------------
Central configuration for the Agentic System including:
1. Local Model Settings
2. Team Settings
3. System Parameters
4. Resource Limits
5. Free API Configurations
"""
import os
from typing import Dict, Any, Optional
from pathlib import Path
import json
import logging
from dataclasses import dataclass, field, fields, MISSING
logger = logging.getLogger(__name__)
@dataclass
class Config:
"""Configuration for the Advanced Agentic System."""
# Core settings
min_confidence: float = 0.7
parallel_threshold: int = 3
learning_rate: float = 0.1
# Model settings
model_backend: str = field(default_factory=lambda: os.getenv('MODEL_BACKEND', 'huggingface'))
groq_api_key: Optional[str] = field(default_factory=lambda: os.getenv('GROQ_API_KEY'))
huggingface_token: Optional[str] = field(default_factory=lambda: os.getenv('HUGGINGFACE_TOKEN'))
# API settings
enable_openai_compatibility: bool = True
api_rate_limit: int = 100
api_timeout: int = 30
# Resource limits
max_parallel_requests: int = field(
default_factory=lambda: int(os.getenv('MAX_PARALLEL_REQUESTS', '10'))
)
request_timeout: int = field(
default_factory=lambda: int(os.getenv('REQUEST_TIMEOUT', '30'))
)
batch_size: int = field(
default_factory=lambda: int(os.getenv('BATCH_SIZE', '4'))
)
# Cache settings
enable_cache: bool = field(
default_factory=lambda: os.getenv('CACHE_MODELS', 'false').lower() == 'true'
)
cache_dir: str = field(
default_factory=lambda: os.getenv('SPACE_CACHE_DIR', '/tmp/models')
)
# Strategy weights
strategy_weights: Dict[str, float] = field(default_factory=lambda: {
"LOCAL_LLM": 2.0,
"CHAIN_OF_THOUGHT": 1.5,
"TREE_OF_THOUGHTS": 1.5,
"META_LEARNING": 1.5,
"TASK_DECOMPOSITION": 1.3,
"RESOURCE_MANAGEMENT": 1.3,
"CONTEXTUAL_PLANNING": 1.3,
"ADAPTIVE_EXECUTION": 1.3,
"FEEDBACK_INTEGRATION": 1.3,
"BAYESIAN": 1.2,
"MARKET_ANALYSIS": 1.2,
"PORTFOLIO_OPTIMIZATION": 1.2,
"VENTURE": 1.2,
"MONETIZATION": 1.0,
"MULTIMODAL": 1.0,
"NEUROSYMBOLIC": 1.0,
"SPECIALIZED": 1.0,
"VENTURE_TYPE": 1.0,
"RECURSIVE": 1.0,
"ANALOGICAL": 1.0
})
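    # Note: these weights are relative priorities; this module does not consume
    # them itself, so how they are normalized or applied is left to the
    # reasoning engine that reads this config (an assumption, not enforced here).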
# Agentic system settings
agentic_system: Dict[str, Any] = field(default_factory=lambda: {
"min_confidence": 0.7,
"parallel_threshold": 3,
"learning_rate": 0.1,
"enable_meta_learning": True,
"enable_self_improvement": True,
"max_agents": 10,
"default_agent_config": {
"learning_rate": 0.1,
"risk_tolerance": 0.5,
"max_retries": 3
}
})
    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize configuration from an optional override dictionary."""
        # This hand-written __init__ replaces the dataclass-generated one, so
        # field defaults/default_factories must be applied explicitly here;
        # otherwise factory-backed fields such as model_backend are never set.
        for f in fields(self):
            if f.default is not MISSING:
                setattr(self, f.name, f.default)
            elif f.default_factory is not MISSING:
                setattr(self, f.name, f.default_factory())
        if config:
            for key, value in config.items():
                if hasattr(self, key):
                    setattr(self, key, value)
        # Validate configuration
        self._validate_config()
def _validate_config(self):
"""Validate configuration values."""
if self.min_confidence < 0 or self.min_confidence > 1:
raise ValueError("min_confidence must be between 0 and 1")
if self.parallel_threshold < 1:
raise ValueError("parallel_threshold must be at least 1")
        if self.learning_rate <= 0 or self.learning_rate > 1:
            raise ValueError("learning_rate must be greater than 0 and at most 1")
if self.model_backend not in ['groq', 'huggingface']:
raise ValueError("model_backend must be either 'groq' or 'huggingface'")
def get(self, key: str, default: Any = None) -> Any:
"""Get configuration value."""
return getattr(self, key, default)
    def to_dict(self) -> Dict[str, Any]:
        """Convert configuration to a dictionary."""
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if hasattr(self, f.name)
        }
@classmethod
def from_file(cls, filepath: str) -> 'Config':
"""Load configuration from file."""
path = Path(filepath)
if not path.exists():
raise FileNotFoundError(f"Configuration file not found: {filepath}")
with open(filepath, 'r') as f:
config = json.load(f)
return cls(config)
def save(self, filepath: str):
"""Save configuration to file."""
with open(filepath, 'w') as f:
json.dump(self.to_dict(), f, indent=2)
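# Example (illustrative sketch; the override values and file path below are
# hypothetical, not defaults used anywhere in this module):
#
#   cfg = Config({"model_backend": "groq", "min_confidence": 0.8})
#   cfg.save("/tmp/agentic_config.json")
#   restored = Config.from_file("/tmp/agentic_config.json")
#   assert restored.get("model_backend") == "groq"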
class SystemConfig:
"""System-wide configuration."""
# Base Paths
BASE_DIR = Path(__file__).parent.absolute()
CACHE_DIR = BASE_DIR / "cache"
LOG_DIR = BASE_DIR / "logs"
DATA_DIR = BASE_DIR / "data"
MODEL_DIR = BASE_DIR / "models"
# System Parameters
DEBUG_MODE = os.getenv("DEBUG_MODE", "False").lower() == "true"
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
MAX_WORKERS = int(os.getenv("MAX_WORKERS", "4"))
ASYNC_TIMEOUT = int(os.getenv("ASYNC_TIMEOUT", "30"))
# Local Model Configurations
MODEL_CONFIG = {
"quick_coder": {
"name": "tugstugi/Qwen2.5-Coder-0.5B-QwQ-draft",
"type": "transformers",
"description": "Fast code completion and simple tasks",
"temperature": 0.2,
"max_tokens": 1000,
"timeout": 30
},
"deep_coder": {
"name": "YorkieOH10/deepseek-coder-6.7B-kexer-Q8_0-GGUF",
"type": "gguf",
"description": "Complex code generation and refactoring",
"temperature": 0.3,
"max_tokens": 2000,
"timeout": 45
},
"text_gen": {
"name": "Orenguteng/Llama-3-8B-Lexi-Uncensored",
"type": "transformers",
"description": "General text generation and reasoning",
"temperature": 0.7,
"max_tokens": 1500,
"timeout": 40
},
"workflow": {
"name": "deepseek-ai/JanusFlow-1.3B",
"type": "transformers",
"description": "Task planning and workflow management",
"temperature": 0.5,
"max_tokens": 1000,
"timeout": 30
}
}
# Team Configurations
TEAM_CONFIG = {
"coders": {
"min_agents": 3,
"max_agents": 7,
"capabilities": [
"full_stack_development",
"cloud_architecture",
"ai_ml",
"blockchain",
"mobile_development"
],
"resource_limits": {
"cpu_percent": 80,
"memory_mb": 4096,
"gpu_memory_mb": 2048
}
},
"business": {
"min_agents": 2,
"max_agents": 5,
"capabilities": [
"market_analysis",
"business_strategy",
"digital_transformation",
"startup_innovation",
"product_management"
],
"resource_limits": {
"cpu_percent": 60,
"memory_mb": 2048,
"api_calls_per_minute": 100
}
},
"research": {
"min_agents": 2,
"max_agents": 6,
"capabilities": [
"deep_research",
"data_analysis",
"trend_forecasting",
"competitive_analysis",
"technology_assessment"
],
"resource_limits": {
"cpu_percent": 70,
"memory_mb": 3072,
"api_calls_per_minute": 150
}
},
"traders": {
"min_agents": 2,
"max_agents": 5,
"capabilities": [
"crypto_trading",
"sports_betting",
"risk_management",
"market_timing",
"portfolio_optimization"
],
"resource_limits": {
"cpu_percent": 60,
"memory_mb": 2048,
"api_calls_per_minute": 200
}
}
}
# Resource Management
RESOURCE_LIMITS = {
"total_cpu_percent": 90,
"total_memory_mb": 8192,
"total_gpu_memory_mb": 4096,
"max_api_calls_per_minute": 500,
"max_concurrent_tasks": 20
}
# Collaboration Settings
COLLABORATION_CONFIG = {
"min_confidence_threshold": 0.6,
"max_team_size": 10,
"max_concurrent_objectives": 5,
"objective_timeout_minutes": 60,
"team_sync_interval_seconds": 30
}
# Error Recovery
ERROR_RECOVERY = {
"max_retries": 3,
"retry_delay_seconds": 5,
"error_threshold": 0.2,
"recovery_timeout": 300
}
# Monitoring
MONITORING = {
"metrics_interval_seconds": 60,
"health_check_interval": 30,
"performance_log_retention_days": 7,
"alert_threshold": {
"cpu": 85,
"memory": 90,
"error_rate": 0.1
}
}
# Free API Configurations (No API Keys Required)
API_CONFIG = {
"search": {
"duckduckgo": {
"base_url": "https://api.duckduckgo.com",
"rate_limit": 100,
"requires_auth": False,
"method": "GET"
},
"wikipedia": {
"base_url": "https://en.wikipedia.org/w/api.php",
"rate_limit": 200,
"requires_auth": False,
"method": "GET"
},
"arxiv": {
"base_url": "http://export.arxiv.org/api/query",
"rate_limit": 60,
"requires_auth": False,
"method": "GET"
},
"crossref": {
"base_url": "https://api.crossref.org/works",
"rate_limit": 50,
"requires_auth": False,
"method": "GET"
},
"unpaywall": {
"base_url": "https://api.unpaywall.org/v2",
"rate_limit": 100,
"requires_auth": False,
"method": "GET"
}
},
"crypto": {
"coincap": {
"base_url": "https://api.coincap.io/v2",
"rate_limit": 200,
"requires_auth": False,
"method": "GET",
"endpoints": {
"assets": "/assets",
"rates": "/rates",
"markets": "/markets"
}
},
"blockchair": {
"base_url": "https://api.blockchair.com",
"rate_limit": 30,
"requires_auth": False,
"method": "GET"
}
},
"news": {
"wikinews": {
"base_url": "https://en.wikinews.org/w/api.php",
"rate_limit": 200,
"requires_auth": False,
"method": "GET"
},
"reddit": {
"base_url": "https://www.reddit.com/r/news/.json",
"rate_limit": 60,
"requires_auth": False,
"method": "GET"
},
"hackernews": {
"base_url": "https://hacker-news.firebaseio.com/v0",
"rate_limit": 100,
"requires_auth": False,
"method": "GET"
}
},
"market_data": {
"yahoo_finance": {
"base_url": "https://query1.finance.yahoo.com/v8/finance",
"rate_limit": 100,
"requires_auth": False,
"method": "GET"
},
"marketstack_free": {
"base_url": "https://api.marketstack.com/v1",
"rate_limit": 100,
"requires_auth": False,
"method": "GET"
}
},
"sports": {
"football_data": {
"base_url": "https://www.football-data.org/v4",
"rate_limit": 10,
"requires_auth": False,
"method": "GET",
"free_endpoints": [
"/competitions",
"/matches"
]
},
"nhl": {
"base_url": "https://statsapi.web.nhl.com/api/v1",
"rate_limit": 50,
"requires_auth": False,
"method": "GET"
},
"mlb": {
"base_url": "https://statsapi.mlb.com/api/v1",
"rate_limit": 50,
"requires_auth": False,
"method": "GET"
}
},
"web_scraping": {
"web_archive": {
"base_url": "https://archive.org/wayback/available",
"rate_limit": 40,
"requires_auth": False,
"method": "GET"
},
"metahtml": {
"base_url": "https://html.spec.whatwg.org/multipage",
"rate_limit": 30,
"requires_auth": False,
"method": "GET"
}
}
}
@classmethod
def get_team_config(cls, team_name: str) -> Dict[str, Any]:
"""Get configuration for a specific team."""
return cls.TEAM_CONFIG.get(team_name, {})
@classmethod
def get_model_config(cls, model_type: str) -> Dict[str, Any]:
"""Get configuration for a specific model type."""
return cls.MODEL_CONFIG.get(model_type, {})
@classmethod
def get_api_config(cls, api_name: str) -> Dict[str, Any]:
"""Get configuration for a specific API."""
for category in cls.API_CONFIG.values():
if api_name in category:
return category[api_name]
return {}
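# Minimal usage sketch for the accessors above; the lookup keys exist in the
# dictionaries defined in this module, and the printed labels are illustrative.
if __name__ == "__main__":
    # Config: construct with an override and read a value back
    cfg = Config({"min_confidence": 0.8})
    print("min_confidence:", cfg.get("min_confidence"))
    # SystemConfig: look up team, model, and API entries by name
    print("coders capabilities:", SystemConfig.get_team_config("coders").get("capabilities"))
    print("deep_coder model:", SystemConfig.get_model_config("deep_coder").get("name"))
    print("duckduckgo rate limit:", SystemConfig.get_api_config("duckduckgo").get("rate_limit"))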