Problem: ImportError or compatibility issues
Solution: Ensure Python 3.11+ is installed
python --version # Should be 3.11 or higher
pip install --upgrade pip

Problem: Package installation fails
Solution:
# Clear pip cache
pip cache purge
# Install with verbose output
pip install -r requirements.txt -v
# For specific package issues
pip install --no-cache-dir package_name

Problem: Packages not found or wrong versions
Solution:
# Recreate virtual environment
rm -rf .venv
python -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt

Problem: ConfigurationError: OPENAI_API_KEY not found
Solution:
# Set environment variable
export OPENAI_API_KEY=sk-your-key-here
# Or add to .env file
echo "OPENAI_API_KEY=sk-your-key-here" >> .env

Problem: ConnectionError or VectorStoreError
Solution:
# For FAISS (local)
export VECTOR_DB=faiss
# For Qdrant
export VECTOR_DB=qdrant
export QDRANT_URL=http://localhost:6334
# For Pinecone
export VECTOR_DB=pinecone
export PINECONE_API_KEY=your-key
export PINECONE_ENVIRONMENT=us-west1-gcp

Problem: MemoryError or system slowdown
Solution:
# Reduce chunk size
export CHUNK_SIZE=256
# Reduce batch size
export QUERY_BATCH_SIZE=5
# Monitor memory usage
python -c "import psutil; print(f'Memory: {psutil.virtual_memory().percent}%')"

Problem: ConnectionTimeout or RequestException
Solution:
# Test connectivity
curl -I https://api.openai.com/v1/models
# Check proxy settings
export HTTP_PROXY=http://proxy:port
export HTTPS_PROXY=http://proxy:port
# Increase timeout
export QUERY_TIMEOUT=60.0

Problem: PermissionError when writing files
Solution:
# Check permissions
ls -la logs/
ls -la faiss_index/
# Fix permissions
chmod 755 logs/
chmod 644 logs/*.log

Problem: SteganographyError during obfuscation
Solution:
# Reduce noise level
export STEGO_NOISE_LEVEL=0.005
# Disable problematic techniques
export STEGO_TECHNIQUES=noise,rotation
# Check embedding dimensions
python -c "from config import Config; print(Config().embedding_dimension)"

Problem: Model compatibility errors
Solution:
# Use compatible models only
export STEGO_FRAGMENT_MODELS=text-embedding-ada-002
# Check model availability
python -c "from openai import OpenAI; client = OpenAI(); print(client.models.list())"

Problem: Irrelevant or no results returned
Solution:
# Enable debug logging
export LOG_LEVEL=DEBUG
# Try different retrieval strategies
python scripts/query.py --strategy semantic
# Increase result count
python scripts/query.py --top-k 20

Problem: Slow query responses
Solution:
# Enable caching
export QUERY_CACHE_ENABLED=true
# Reduce context reconstruction
export QUERY_CONTEXT_RECONSTRUCTION=false
# Use batch processing
python scripts/query.py --batch-size 5

Problem: Docker build fails
Solution:
# Clean build cache
docker system prune -f
# Build with no cache
docker build --no-cache -t vectorsmuggle .
# Check build logs
docker build -t vectorsmuggle . 2>&1 | tee build.log

Problem: Container exits or crashes
Solution:
# Check container logs
docker logs vectorsmuggle
# Run with debug
docker run -it --entrypoint /bin/bash vectorsmuggle
# Check resource limits
docker stats vectorsmuggle

Problem: Pods in CrashLoopBackOff or Error state
Solution:
# Check pod logs
kubectl logs -f deployment/vectorsmuggle
# Describe pod for events
kubectl describe pod vectorsmuggle-xxx
# Check resource constraints
kubectl top pods

Problem: Services not accessible
Solution:
# Check service endpoints
kubectl get endpoints
# Test service connectivity
kubectl exec -it pod-name -- curl http://service-name:port
# Check network policies
kubectl get networkpolicies

# Enable debug-level logging
export LOG_LEVEL=DEBUG
export LOG_FORMAT=text
python scripts/embed.py --debug

import cProfile
import pstats
# Profile embedding operation
cProfile.run('embed_documents()', 'profile_stats')
stats = pstats.Stats('profile_stats')
stats.sort_stats('cumulative').print_stats(10)

import tracemalloc
tracemalloc.start()
# Your code here
current, peak = tracemalloc.get_traced_memory()
print(f"Current memory usage: {current / 1024 / 1024:.1f} MB")
print(f"Peak memory usage: {peak / 1024 / 1024:.1f} MB")

# Monitor network traffic
sudo tcpdump -i any host api.openai.com
# Check DNS resolution
nslookup api.openai.com
# Test SSL/TLS
openssl s_client -connect api.openai.com:443

1001: Missing required environment variable
1002: Invalid configuration value
1003: Configuration validation failed
2001: Embedding obfuscation failed
2002: Fragmentation error
2003: Timing control error
3001: Traffic mimicry failed
3002: Behavioral camouflage error
3003: Network evasion error
4001: Query processing failed
4002: Context reconstruction error
4003: Cross-reference analysis failed
Always include relevant log entries when seeking help:
# Get recent logs
tail -n 100 logs/vectorsmuggle.log
# Filter for errors
grep ERROR logs/vectorsmuggle.log
# Export logs for analysis
python -c "
import json
from pathlib import Path
logs = Path('logs/vectorsmuggle.log').read_text()
print(json.dumps({'logs': logs.split('\n')[-100:]}, indent=2))
" > debug_logs.json

Collect system information for bug reports:
# System info
python -c "
import sys, platform, psutil
print(f'Python: {sys.version}')
print(f'Platform: {platform.platform()}')
print(f'CPU: {psutil.cpu_count()} cores')
print(f'Memory: {psutil.virtual_memory().total // 1024**3} GB')
"
# Package versions
pip list | grep -E "(langchain|openai|numpy|torch)"

# Generate performance report
python analysis/performance_report.py --output performance.json
# Check resource usage
python -c "
import psutil
print(f'CPU: {psutil.cpu_percent()}%')
print(f'Memory: {psutil.virtual_memory().percent}%')
print(f'Disk: {psutil.disk_usage(\"/\").percent}%')
"

- Always use virtual environments
- Pin dependency versions in requirements.txt
- Validate configuration before deployment
- Monitor resource usage
- Implement proper error handling
- Use structured logging
- Regular health checks
# Set up log rotation
echo "logs/*.log {
daily
rotate 7
compress
missingok
notifempty
}" > /etc/logrotate.d/vectorsmuggle
# Monitor disk space
df -h
du -sh logs/