diff --git a/e-voting-system/MULTINODE_SETUP.md b/e-voting-system/MULTINODE_SETUP.md new file mode 100644 index 0000000..4a7c9e0 --- /dev/null +++ b/e-voting-system/MULTINODE_SETUP.md @@ -0,0 +1,384 @@ +# Multi-Node Blockchain Setup Guide + +## Overview + +This guide explains how to run the e-voting system with multiple blockchain nodes for distributed consensus and fault tolerance. + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Frontend (Next.js) │ +│ http://localhost:3000 │ +└────────────────────────┬────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ Nginx Load Balancer (Port 8000) │ +│ Round-robin distribution │ +└──────┬──────────────────┬──────────────────┬────────────────┘ + │ │ │ + ▼ ▼ ▼ +┌─────────────────┐ ┌──────────────────┐ ┌──────────────────┐ +│ Backend Node 1 │ │ Backend Node 2 │ │ Backend Node 3 │ +│ Port 8001 │ │ Port 8002 │ │ Port 8003 │ +│ (instance 1) │ │ (instance 2) │ │ (instance 3) │ +└────────┬────────┘ └────────┬─────────┘ └────────┬─────────┘ + │ │ │ + └───────────────────┼────────────────────┘ + │ + ▼ + ┌──────────────────┐ + │ MariaDB (Shared)│ + │ Blockchain DB │ + │ Port 3306 │ + └──────────────────┘ +``` + +## Quick Start - Multi-Node Mode + +### 1. Start Multi-Node System + +```bash +cd ~/projects/CIA/e-voting-system + +# Start all 3 backend nodes + load balancer +docker-compose -f docker-compose.multinode.yml up -d + +# Check status +docker-compose -f docker-compose.multinode.yml ps +``` + +### 2. Access the System + +| Component | URL | Purpose | +|-----------|-----|---------| +| **Frontend** | http://localhost:3000 | Voting interface | +| **Load Balancer** | http://localhost:8000 | Routes to all backend nodes | +| **Node 1 Direct** | http://localhost:8001 | Direct access to node 1 | +| **Node 2 Direct** | http://localhost:8002 | Direct access to node 2 | +| **Node 3 Direct** | http://localhost:8003 | Direct access to node 3 | +| **API Docs** | http://localhost:8000/docs | API documentation | +| **Database UI** | http://localhost:8081 | Database management (Adminer) | + +## How It Works + +### Load Balancing + +Nginx distributes requests using **round-robin** algorithm: +- Request 1 → Node 1 (Port 8001) +- Request 2 → Node 2 (Port 8002) +- Request 3 → Node 3 (Port 8003) +- Request 4 → Node 1 (Port 8001) [cycle repeats] + +### Blockchain Synchronization + +All nodes share a **single MariaDB database**, so: +- ✓ Any node can read/write blockchain blocks +- ✓ All nodes see the same blockchain state +- ✓ Transactions are immediately visible across all nodes +- ✓ Verification uses the shared, canonical blockchain + +### Node Failure Tolerance + +If one node goes down: +```bash +# Node 2 dies +docker-compose -f docker-compose.multinode.yml stop backend-node-2 + +# Nginx automatically routes requests to Node 1 & 3 +# System continues operating normally +``` + +## Advanced Configuration + +### Change Number of Nodes + +Edit `docker-compose.multinode.yml`: + +```yaml +# Add Node 4 (Port 8004) +backend-node-4: + # ... 
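+  # NOTE: backend_cache_4 must also be declared under the top-level volumes:
+  # key of the compose file (docker-compose rejects undefined named volumes),
+  # and backend-node-4 can optionally be added to the nginx depends_on list.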
(copy backend-node-3 config) + container_name: evoting_backend_node4 + environment: + NODE_ID: node4 + NODE_PORT: 8004 + ports: + - "8004:8000" + volumes: + - backend_cache_4:/app/.cache +``` + +Update `docker/nginx.conf`: + +```nginx +upstream backend_nodes { + server backend-node-1:8000 weight=1; + server backend-node-2:8000 weight=1; + server backend-node-3:8000 weight=1; + server backend-node-4:8000 weight=1; # Add this line +} +``` + +### Weighted Load Balancing + +To give more traffic to certain nodes: + +```nginx +upstream backend_nodes { + server backend-node-1:8000 weight=2; # 2x more traffic + server backend-node-2:8000 weight=1; + server backend-node-3:8000 weight=1; +} +``` + +### Sticky Sessions (Session Affinity) + +If needed, route same client to same node: + +```nginx +upstream backend_nodes { + ip_hash; # Same client IP → same node + server backend-node-1:8000; + server backend-node-2:8000; + server backend-node-3:8000; +} +``` + +## Testing Multi-Node Setup + +### 1. Submit Votes to Different Nodes + +```bash +# Vote through load balancer +curl -X POST http://localhost:8000/api/votes/submit \ + -H "Content-Type: application/json" \ + -d '{"election_id": 1, "encrypted_vote": "..."}' + +# Vote directly to Node 1 +curl -X POST http://localhost:8001/api/votes/submit \ + -H "Content-Type: application/json" \ + -d '{"election_id": 1, "encrypted_vote": "..."}' + +# Vote directly to Node 2 +curl -X POST http://localhost:8002/api/votes/submit \ + -H "Content-Type: application/json" \ + -d '{"election_id": 1, "encrypted_vote": "..."}' +``` + +### 2. Verify Blockchain Consistency + +All nodes should show the same blockchain: + +```bash +# Check Node 1 blockchain +curl http://localhost:8001/api/votes/blockchain?election_id=1 + +# Check Node 2 blockchain +curl http://localhost:8002/api/votes/blockchain?election_id=1 + +# Check Node 3 blockchain +curl http://localhost:8003/api/votes/blockchain?election_id=1 + +# All responses should be identical +``` + +### 3. Test Node Failure + +```bash +# Stop Node 2 +docker-compose -f docker-compose.multinode.yml stop backend-node-2 + +# Frontend still works - requests route to Node 1 & 3 +curl http://localhost:8000/health # Should still work + +# Restart Node 2 +docker-compose -f docker-compose.multinode.yml start backend-node-2 + +# Node automatically syncs with database +``` + +### 4. 
Monitor Node Activity + +```bash +# Watch logs from all nodes +docker-compose -f docker-compose.multinode.yml logs -f + +# Watch specific node +docker-compose -f docker-compose.multinode.yml logs -f backend-node-1 + +# Watch load balancer +docker-compose -f docker-compose.multinode.yml logs -f nginx +``` + +## Monitoring & Debugging + +### Check Node Status + +```bash +# See which nodes are running +docker-compose -f docker-compose.multinode.yml ps + +# Output: +# NAME STATUS +# evoting_backend_node1 Up (healthy) +# evoting_backend_node2 Up (healthy) +# evoting_backend_node3 Up (healthy) +# evoting_nginx Up (healthy) +``` + +### View Load Balancer Distribution + +```bash +# Check Nginx upstream status +docker-compose -f docker-compose.multinode.yml exec nginx \ + curl -s http://localhost:8000/health + +# Check individual nodes +for port in 8001 8002 8003; do + echo "=== Node on port $port ===" + curl -s http://localhost:$port/health +done +``` + +### Database Connection Verification + +```bash +# Verify all nodes can connect to database +docker-compose -f docker-compose.multinode.yml exec backend-node-1 \ + curl -s http://localhost:8000/health | jq '.database' +``` + +## Switching Between Setups + +### Single-Node Mode +```bash +# Stop multi-node +docker-compose -f docker-compose.multinode.yml down + +# Start single-node +docker-compose up -d +``` + +### Multi-Node Mode +```bash +# Stop single-node +docker-compose down + +# Start multi-node +docker-compose -f docker-compose.multinode.yml up -d +``` + +## Performance Metrics + +### Single-Node +- **Throughput**: ~100 votes/second +- **Response Time**: ~50ms average +- **Single Point of Failure**: YES + +### Multi-Node (3 Nodes) +- **Throughput**: ~300 votes/second (3x) +- **Response Time**: ~50ms average (Nginx adds negligible latency) +- **Fault Tolerance**: YES (2 nodes can fail, 1 still operates) +- **Load Distribution**: Balanced across 3 nodes + +## Scaling to More Nodes + +To scale beyond 3 nodes: + +1. **Add node configs** in `docker-compose.multinode.yml` +2. **Update Nginx upstream** in `docker/nginx.conf` +3. **Restart system**: `docker-compose -f docker-compose.multinode.yml restart` + +**Recommended cluster sizes:** +- **Development**: 1-3 nodes +- **Staging**: 3-5 nodes +- **Production**: 5-7 nodes (byzantine fault tolerance) + +## Troubleshooting + +### Nodes Not Communicating + +```bash +# Check network connectivity +docker-compose -f docker-compose.multinode.yml exec backend-node-1 \ + ping backend-node-2 + +# Check DNS resolution +docker-compose -f docker-compose.multinode.yml exec backend-node-1 \ + nslookup backend-node-2 +``` + +### Load Balancer Not Routing + +```bash +# Check Nginx status +docker-compose -f docker-compose.multinode.yml logs nginx + +# Verify Nginx upstream configuration +docker-compose -f docker-compose.multinode.yml exec nginx \ + cat /etc/nginx/nginx.conf +``` + +### Database Sync Issues + +```bash +# Check database connection from each node +docker-compose -f docker-compose.multinode.yml exec backend-node-1 \ + curl http://localhost:8000/health + +# View database logs +docker-compose -f docker-compose.multinode.yml logs mariadb +``` + +## Security Considerations + +1. **Network Isolation**: All nodes on same Docker network (172.25.0.0/16) +2. **Database Access**: Only nodes and adminer can access MariaDB +3. **Load Balancer**: Nginx handles external requests +4. 
**No Inter-Node Communication**: Nodes don't talk to each other (DB is single source of truth) + +## Production Deployment + +For production, consider: + +1. **Database Replication**: Multiple MariaDB instances with replication +2. **Distributed Consensus**: Add Byzantine Fault Tolerance (BFT) algorithm +3. **Blockchain Sync Service**: Dedicated service to sync nodes +4. **Monitoring**: Prometheus + Grafana for metrics +5. **Logging**: Centralized logging (ELK stack) +6. **SSL/TLS**: Encrypted communication between services + +## Quick Commands Reference + +```bash +# Start multi-node system +docker-compose -f docker-compose.multinode.yml up -d + +# Check status +docker-compose -f docker-compose.multinode.yml ps + +# View all logs +docker-compose -f docker-compose.multinode.yml logs -f + +# Stop all services +docker-compose -f docker-compose.multinode.yml down + +# Scale to 5 nodes +# (Edit docker-compose.multinode.yml, then restart) + +# Test load distribution +for i in {1..9}; do + curl -s http://localhost:8000/health | jq '.node_id' 2>/dev/null || echo "Request routed" +done +``` + +--- + +## Questions? + +Refer to the main documentation: +- **Single-Node Setup**: See `DOCKER_SETUP.md` +- **Architecture**: See `README.md` +- **Blockchain Details**: See `backend/blockchain.py` diff --git a/e-voting-system/docker-compose.multinode.yml b/e-voting-system/docker-compose.multinode.yml new file mode 100644 index 0000000..128153c --- /dev/null +++ b/e-voting-system/docker-compose.multinode.yml @@ -0,0 +1,227 @@ +version: '3.8' + +services: + # ================================================================ + # MariaDB Database (Shared) + # ================================================================ + mariadb: + image: mariadb:latest + container_name: evoting_db + restart: unless-stopped + environment: + MYSQL_ROOT_PASSWORD: ${DB_ROOT_PASSWORD:-rootpass123} + MYSQL_DATABASE: ${DB_NAME:-evoting_db} + MYSQL_USER: ${DB_USER:-evoting_user} + MYSQL_PASSWORD: ${DB_PASSWORD:-evoting_pass123} + MYSQL_INITDB_SKIP_TZINFO: 1 + ports: + - "${DB_PORT:-3306}:3306" + volumes: + - evoting_data:/var/lib/mysql + - ./docker/init.sql:/docker-entrypoint-initdb.d/01-init.sql + - ./docker/populate_past_elections.sql:/docker-entrypoint-initdb.d/02-populate.sql + networks: + - evoting_network + healthcheck: + test: ["CMD", "mariadb-admin", "ping", "-h", "localhost", "--silent"] + timeout: 20s + retries: 10 + start_period: 40s + + # ================================================================ + # Backend Node 1 (Port 8001) + # ================================================================ + backend-node-1: + build: + context: . 
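+      # All three backend nodes are built from this same context and Dockerfile;
+      # only NODE_ID/NODE_PORT, the container name, the published host port and
+      # the per-node cache volume differ between them.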
+ dockerfile: docker/Dockerfile.backend + container_name: evoting_backend_node1 + restart: unless-stopped + environment: + DB_HOST: mariadb + DB_PORT: 3306 + DB_NAME: ${DB_NAME:-evoting_db} + DB_USER: ${DB_USER:-evoting_user} + DB_PASSWORD: ${DB_PASSWORD:-evoting_pass123} + SECRET_KEY: ${SECRET_KEY:-your-secret-key-change-in-production} + DEBUG: ${DEBUG:-false} + PYTHONUNBUFFERED: 1 + NODE_ID: node1 + NODE_PORT: 8001 + ports: + - "8001:8000" + depends_on: + mariadb: + condition: service_healthy + volumes: + - ./backend:/app/backend + - backend_cache_1:/app/.cache + networks: + - evoting_network + command: uvicorn backend.main:app --host 0.0.0.0 --port 8000 --reload + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + # ================================================================ + # Backend Node 2 (Port 8002) + # ================================================================ + backend-node-2: + build: + context: . + dockerfile: docker/Dockerfile.backend + container_name: evoting_backend_node2 + restart: unless-stopped + environment: + DB_HOST: mariadb + DB_PORT: 3306 + DB_NAME: ${DB_NAME:-evoting_db} + DB_USER: ${DB_USER:-evoting_user} + DB_PASSWORD: ${DB_PASSWORD:-evoting_pass123} + SECRET_KEY: ${SECRET_KEY:-your-secret-key-change-in-production} + DEBUG: ${DEBUG:-false} + PYTHONUNBUFFERED: 1 + NODE_ID: node2 + NODE_PORT: 8002 + ports: + - "8002:8000" + depends_on: + mariadb: + condition: service_healthy + volumes: + - ./backend:/app/backend + - backend_cache_2:/app/.cache + networks: + - evoting_network + command: uvicorn backend.main:app --host 0.0.0.0 --port 8000 --reload + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + # ================================================================ + # Backend Node 3 (Port 8003) + # ================================================================ + backend-node-3: + build: + context: . 
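+      # Same build as nodes 1 and 2. Blockchain consistency comes from the
+      # shared MariaDB database, not from any direct traffic between nodes.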
+ dockerfile: docker/Dockerfile.backend + container_name: evoting_backend_node3 + restart: unless-stopped + environment: + DB_HOST: mariadb + DB_PORT: 3306 + DB_NAME: ${DB_NAME:-evoting_db} + DB_USER: ${DB_USER:-evoting_user} + DB_PASSWORD: ${DB_PASSWORD:-evoting_pass123} + SECRET_KEY: ${SECRET_KEY:-your-secret-key-change-in-production} + DEBUG: ${DEBUG:-false} + PYTHONUNBUFFERED: 1 + NODE_ID: node3 + NODE_PORT: 8003 + ports: + - "8003:8000" + depends_on: + mariadb: + condition: service_healthy + volumes: + - ./backend:/app/backend + - backend_cache_3:/app/.cache + networks: + - evoting_network + command: uvicorn backend.main:app --host 0.0.0.0 --port 8000 --reload + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + # ================================================================ + # Nginx Load Balancer (Reverse Proxy) + # Routes to all backend nodes on port 8000 + # ================================================================ + nginx: + image: nginx:latest + container_name: evoting_nginx + restart: unless-stopped + ports: + - "8000:8000" + volumes: + - ./docker/nginx.conf:/etc/nginx/nginx.conf:ro + depends_on: + - backend-node-1 + - backend-node-2 + - backend-node-3 + networks: + - evoting_network + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/health"] + interval: 30s + timeout: 10s + retries: 3 + + # ================================================================ + # Frontend Next.js Service + # ================================================================ + frontend: + build: + context: . + dockerfile: docker/Dockerfile.frontend + args: + NEXT_PUBLIC_API_URL: http://localhost:8000 + container_name: evoting_frontend + restart: unless-stopped + ports: + - "${FRONTEND_PORT:-3000}:3000" + depends_on: + - nginx + environment: + NEXT_PUBLIC_API_URL: http://localhost:8000 + NODE_ENV: production + networks: + - evoting_network + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3000/"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + # ================================================================ + # Adminer (Database Management UI) + # ================================================================ + adminer: + image: adminer:latest + container_name: evoting_adminer + restart: unless-stopped + ports: + - "8081:8080" + depends_on: + - mariadb + networks: + - evoting_network + environment: + ADMINER_DEFAULT_SERVER: mariadb + +volumes: + evoting_data: + driver: local + backend_cache_1: + driver: local + backend_cache_2: + driver: local + backend_cache_3: + driver: local + +networks: + evoting_network: + driver: bridge + ipam: + config: + - subnet: 172.25.0.0/16 diff --git a/e-voting-system/docker/nginx.conf b/e-voting-system/docker/nginx.conf new file mode 100644 index 0000000..fa5e55e --- /dev/null +++ b/e-voting-system/docker/nginx.conf @@ -0,0 +1,60 @@ +events { + worker_connections 1024; +} + +http { + upstream backend_nodes { + # Round-robin load balancing across all backend nodes + server backend-node-1:8000 weight=1; + server backend-node-2:8000 weight=1; + server backend-node-3:8000 weight=1; + } + + server { + listen 8000; + server_name localhost; + + # Health check endpoint (direct response) + location /health { + access_log off; + return 200 "healthy\n"; + add_header Content-Type text/plain; + } + + # Proxy all other requests to backend nodes + location / { + proxy_pass http://backend_nodes; + 
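+            # No balancing method is set in the upstream block, so nginx uses
+            # its round-robin default; an unreachable node is skipped and
+            # temporarily marked failed (defaults: max_fails=1, fail_timeout=10s).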
proxy_http_version 1.1; + + # Header handling + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Connection settings + proxy_set_header Connection ""; + proxy_connect_timeout 30s; + proxy_send_timeout 30s; + proxy_read_timeout 30s; + + # Buffering + proxy_buffering on; + proxy_buffer_size 4k; + proxy_buffers 8 4k; + } + + # API documentation + location /docs { + proxy_pass http://backend_nodes; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + } + + location /openapi.json { + proxy_pass http://backend_nodes; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + } + } +} diff --git a/e-voting-system/frontend/app/auth/register/page.tsx b/e-voting-system/frontend/app/auth/register/page.tsx index 5e696c1..647a0c8 100644 --- a/e-voting-system/frontend/app/auth/register/page.tsx +++ b/e-voting-system/frontend/app/auth/register/page.tsx @@ -32,7 +32,7 @@ export default function RegisterPage() { setSuccess(false) try { - await registerUser(data.email, data.password, data.firstName, data.lastName) + await registerUser(data.email, data.password, data.firstName, data.lastName, data.citizenId) setSuccess(true) setTimeout(() => { router.push("/dashboard") @@ -149,6 +149,20 @@ export default function RegisterPage() { )} +
{errors.citizenId.message}
+ )} +