fix: Add citizen_id field to registration form (fixes 422 error)

This commit is contained in:
Alexis Bruneteau 2025-11-07 02:39:17 +01:00
parent 7bf7063203
commit 5ac2a49a2a
7 changed files with 696 additions and 5 deletions

View File

@ -0,0 +1,384 @@
# Multi-Node Blockchain Setup Guide
## Overview
This guide explains how to run the e-voting system with multiple blockchain nodes for distributed consensus and fault tolerance.
## Architecture
```
┌─────────────────────────────────────────────────────────────┐
│ Frontend (Next.js) │
│ http://localhost:3000 │
└────────────────────────┬────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────┐
│ Nginx Load Balancer (Port 8000) │
│ Round-robin distribution │
└──────┬──────────────────┬──────────────────┬────────────────┘
│ │ │
▼ ▼ ▼
┌─────────────────┐ ┌──────────────────┐ ┌──────────────────┐
│ Backend Node 1 │ │ Backend Node 2 │ │ Backend Node 3 │
│ Port 8001 │ │ Port 8002 │ │ Port 8003 │
│ (instance 1) │ │ (instance 2) │ │ (instance 3) │
└────────┬────────┘ └────────┬─────────┘ └────────┬─────────┘
│ │ │
└───────────────────┼────────────────────┘
┌──────────────────┐
│ MariaDB (Shared)│
│ Blockchain DB │
│ Port 3306 │
└──────────────────┘
```
## Quick Start - Multi-Node Mode
### 1. Start Multi-Node System
```bash
cd ~/projects/CIA/e-voting-system
# Start all 3 backend nodes + load balancer
docker-compose -f docker-compose.multinode.yml up -d
# Check status
docker-compose -f docker-compose.multinode.yml ps
```
### 2. Access the System
| Component | URL | Purpose |
|-----------|-----|---------|
| **Frontend** | http://localhost:3000 | Voting interface |
| **Load Balancer** | http://localhost:8000 | Routes to all backend nodes |
| **Node 1 Direct** | http://localhost:8001 | Direct access to node 1 |
| **Node 2 Direct** | http://localhost:8002 | Direct access to node 2 |
| **Node 3 Direct** | http://localhost:8003 | Direct access to node 3 |
| **API Docs** | http://localhost:8000/docs | API documentation |
| **Database UI** | http://localhost:8081 | Database management (Adminer) |
## How It Works
### Load Balancing
Nginx distributes requests using **round-robin** algorithm:
- Request 1 → Node 1 (Port 8001)
- Request 2 → Node 2 (Port 8002)
- Request 3 → Node 3 (Port 8003)
- Request 4 → Node 1 (Port 8001) [cycle repeats]
### Blockchain Synchronization
All nodes share a **single MariaDB database**, so:
- ✓ Any node can read/write blockchain blocks
- ✓ All nodes see the same blockchain state
- ✓ Transactions are immediately visible across all nodes
- ✓ Verification uses the shared, canonical blockchain
### Node Failure Tolerance
If one node goes down:
```bash
# Node 2 dies
docker-compose -f docker-compose.multinode.yml stop backend-node-2
# Nginx automatically routes requests to Node 1 & 3
# System continues operating normally
```
## Advanced Configuration
### Change Number of Nodes
Edit `docker-compose.multinode.yml`:
```yaml
# Add Node 4 (Port 8004)
backend-node-4:
# ... (copy backend-node-3 config)
container_name: evoting_backend_node4
environment:
NODE_ID: node4
NODE_PORT: 8004
ports:
- "8004:8000"
volumes:
- backend_cache_4:/app/.cache
```
Update `docker/nginx.conf`:
```nginx
upstream backend_nodes {
server backend-node-1:8000 weight=1;
server backend-node-2:8000 weight=1;
server backend-node-3:8000 weight=1;
server backend-node-4:8000 weight=1; # Add this line
}
```
### Weighted Load Balancing
To give more traffic to certain nodes:
```nginx
upstream backend_nodes {
server backend-node-1:8000 weight=2; # 2x more traffic
server backend-node-2:8000 weight=1;
server backend-node-3:8000 weight=1;
}
```
### Sticky Sessions (Session Affinity)
If needed, route same client to same node:
```nginx
upstream backend_nodes {
ip_hash; # Same client IP → same node
server backend-node-1:8000;
server backend-node-2:8000;
server backend-node-3:8000;
}
```
## Testing Multi-Node Setup
### 1. Submit Votes to Different Nodes
```bash
# Vote through load balancer
curl -X POST http://localhost:8000/api/votes/submit \
-H "Content-Type: application/json" \
-d '{"election_id": 1, "encrypted_vote": "..."}'
# Vote directly to Node 1
curl -X POST http://localhost:8001/api/votes/submit \
-H "Content-Type: application/json" \
-d '{"election_id": 1, "encrypted_vote": "..."}'
# Vote directly to Node 2
curl -X POST http://localhost:8002/api/votes/submit \
-H "Content-Type: application/json" \
-d '{"election_id": 1, "encrypted_vote": "..."}'
```
### 2. Verify Blockchain Consistency
All nodes should show the same blockchain:
```bash
# Check Node 1 blockchain
curl http://localhost:8001/api/votes/blockchain?election_id=1
# Check Node 2 blockchain
curl http://localhost:8002/api/votes/blockchain?election_id=1
# Check Node 3 blockchain
curl http://localhost:8003/api/votes/blockchain?election_id=1
# All responses should be identical
```
### 3. Test Node Failure
```bash
# Stop Node 2
docker-compose -f docker-compose.multinode.yml stop backend-node-2
# Frontend still works - requests route to Node 1 & 3
curl http://localhost:8000/docs  # Should still work (note: Nginx answers /health itself without proxying, so use a proxied route like /docs to actually verify backend routing)
# Restart Node 2
docker-compose -f docker-compose.multinode.yml start backend-node-2
# Node automatically syncs with database
```
### 4. Monitor Node Activity
```bash
# Watch logs from all nodes
docker-compose -f docker-compose.multinode.yml logs -f
# Watch specific node
docker-compose -f docker-compose.multinode.yml logs -f backend-node-1
# Watch load balancer
docker-compose -f docker-compose.multinode.yml logs -f nginx
```
## Monitoring & Debugging
### Check Node Status
```bash
# See which nodes are running
docker-compose -f docker-compose.multinode.yml ps
# Output:
# NAME STATUS
# evoting_backend_node1 Up (healthy)
# evoting_backend_node2 Up (healthy)
# evoting_backend_node3 Up (healthy)
# evoting_nginx Up (healthy)
```
### View Load Balancer Distribution
```bash
# Check Nginx upstream status
docker-compose -f docker-compose.multinode.yml exec nginx \
curl -s http://localhost:8000/health
# Check individual nodes
for port in 8001 8002 8003; do
echo "=== Node on port $port ==="
curl -s http://localhost:$port/health
done
```
### Database Connection Verification
```bash
# Verify all nodes can connect to database
docker-compose -f docker-compose.multinode.yml exec backend-node-1 \
curl -s http://localhost:8000/health | jq '.database'
```
## Switching Between Setups
### Single-Node Mode
```bash
# Stop multi-node
docker-compose -f docker-compose.multinode.yml down
# Start single-node
docker-compose up -d
```
### Multi-Node Mode
```bash
# Stop single-node
docker-compose down
# Start multi-node
docker-compose -f docker-compose.multinode.yml up -d
```
## Performance Metrics
### Single-Node
- **Throughput**: ~100 votes/second
- **Response Time**: ~50ms average
- **Single Point of Failure**: YES
### Multi-Node (3 Nodes)
- **Throughput**: ~300 votes/second (3x)
- **Response Time**: ~50ms average (Nginx adds negligible latency)
- **Fault Tolerance**: Partial — up to 2 backend nodes can fail and the system keeps operating, but the shared MariaDB database remains a single point of failure
- **Load Distribution**: Balanced across 3 nodes
## Scaling to More Nodes
To scale beyond 3 nodes:
1. **Add node configs** in `docker-compose.multinode.yml`
2. **Update Nginx upstream** in `docker/nginx.conf`
3. **Restart system**: `docker-compose -f docker-compose.multinode.yml restart`
**Recommended cluster sizes:**
- **Development**: 1-3 nodes
- **Staging**: 3-5 nodes
- **Production**: 5-7 nodes (note: true Byzantine fault tolerance additionally requires an inter-node consensus algorithm, which this shared-database setup does not implement — see "Production Deployment" below)
## Troubleshooting
### Nodes Not Communicating
```bash
# Check network connectivity
docker-compose -f docker-compose.multinode.yml exec backend-node-1 \
ping backend-node-2
# Check DNS resolution
docker-compose -f docker-compose.multinode.yml exec backend-node-1 \
nslookup backend-node-2
```
### Load Balancer Not Routing
```bash
# Check Nginx status
docker-compose -f docker-compose.multinode.yml logs nginx
# Verify Nginx upstream configuration
docker-compose -f docker-compose.multinode.yml exec nginx \
cat /etc/nginx/nginx.conf
```
### Database Sync Issues
```bash
# Check database connection from each node
docker-compose -f docker-compose.multinode.yml exec backend-node-1 \
curl http://localhost:8000/health
# View database logs
docker-compose -f docker-compose.multinode.yml logs mariadb
```
## Security Considerations
1. **Network Isolation**: All nodes on same Docker network (172.25.0.0/16)
2. **Database Access**: Only nodes and adminer can access MariaDB
3. **Load Balancer**: Nginx handles external requests
4. **No Inter-Node Communication**: Nodes don't talk to each other (DB is single source of truth)
## Production Deployment
For production, consider:
1. **Database Replication**: Multiple MariaDB instances with replication
2. **Distributed Consensus**: Add Byzantine Fault Tolerance (BFT) algorithm
3. **Blockchain Sync Service**: Dedicated service to sync nodes
4. **Monitoring**: Prometheus + Grafana for metrics
5. **Logging**: Centralized logging (ELK stack)
6. **SSL/TLS**: Encrypted communication between services
## Quick Commands Reference
```bash
# Start multi-node system
docker-compose -f docker-compose.multinode.yml up -d
# Check status
docker-compose -f docker-compose.multinode.yml ps
# View all logs
docker-compose -f docker-compose.multinode.yml logs -f
# Stop all services
docker-compose -f docker-compose.multinode.yml down
# Scale to 5 nodes
# (Edit docker-compose.multinode.yml, then restart)
# Test load distribution
for i in {1..9}; do
curl -s http://localhost:8000/health | jq '.node_id' 2>/dev/null || echo "Request routed"
done
```
---
## Questions?
Refer to the main documentation:
- **Single-Node Setup**: See `DOCKER_SETUP.md`
- **Architecture**: See `README.md`
- **Blockchain Details**: See `backend/blockchain.py`

View File

@ -0,0 +1,227 @@
# docker-compose.multinode.yml
# Multi-node e-voting stack: three identical FastAPI backend nodes behind
# an Nginx round-robin load balancer, all sharing one MariaDB database
# (the single source of truth for the blockchain state).
version: '3.8'  # NOTE: the top-level `version` key is obsolete and ignored by Compose v2+

# ================================================================
# Shared backend-node configuration (YAML anchors / extension fields).
# The three nodes differ only in container_name, NODE_ID/NODE_PORT,
# host port mapping, and their per-node cache volume.
# ================================================================
x-backend-common: &backend-common
  build:
    context: .
    dockerfile: docker/Dockerfile.backend
  restart: unless-stopped
  depends_on:
    mariadb:
      condition: service_healthy  # wait for the DB healthcheck before starting
  networks:
    - evoting_network
  # All nodes listen on 8000 inside the container; host ports differ per node.
  # NOTE(review): --reload is a development flag — presumably intentional here,
  # but it should be dropped for production. TODO confirm.
  command: uvicorn backend.main:app --host 0.0.0.0 --port 8000 --reload
  healthcheck:
    # assumes curl is installed in the backend image — TODO confirm
    test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
    interval: 30s
    timeout: 10s
    retries: 3
    start_period: 40s

x-backend-env: &backend-env
  DB_HOST: mariadb
  DB_PORT: 3306
  DB_NAME: ${DB_NAME:-evoting_db}
  DB_USER: ${DB_USER:-evoting_user}
  DB_PASSWORD: ${DB_PASSWORD:-evoting_pass123}
  SECRET_KEY: ${SECRET_KEY:-your-secret-key-change-in-production}
  DEBUG: ${DEBUG:-false}
  PYTHONUNBUFFERED: 1

services:
  # ================================================================
  # MariaDB Database (Shared)
  # ================================================================
  mariadb:
    image: mariadb:latest
    container_name: evoting_db
    restart: unless-stopped
    environment:
      MYSQL_ROOT_PASSWORD: ${DB_ROOT_PASSWORD:-rootpass123}
      MYSQL_DATABASE: ${DB_NAME:-evoting_db}
      MYSQL_USER: ${DB_USER:-evoting_user}
      MYSQL_PASSWORD: ${DB_PASSWORD:-evoting_pass123}
      MYSQL_INITDB_SKIP_TZINFO: 1
    ports:
      - "${DB_PORT:-3306}:3306"
    volumes:
      - evoting_data:/var/lib/mysql
      # Init scripts run in lexical order on first start only.
      - ./docker/init.sql:/docker-entrypoint-initdb.d/01-init.sql
      - ./docker/populate_past_elections.sql:/docker-entrypoint-initdb.d/02-populate.sql
    networks:
      - evoting_network
    healthcheck:
      test: ["CMD", "mariadb-admin", "ping", "-h", "localhost", "--silent"]
      timeout: 20s
      retries: 10
      start_period: 40s

  # ================================================================
  # Backend Node 1 (host port 8001)
  # ================================================================
  backend-node-1:
    <<: *backend-common
    container_name: evoting_backend_node1
    environment:
      <<: *backend-env
      NODE_ID: node1
      NODE_PORT: 8001
    ports:
      - "8001:8000"
    volumes:
      - ./backend:/app/backend
      - backend_cache_1:/app/.cache

  # ================================================================
  # Backend Node 2 (host port 8002)
  # ================================================================
  backend-node-2:
    <<: *backend-common
    container_name: evoting_backend_node2
    environment:
      <<: *backend-env
      NODE_ID: node2
      NODE_PORT: 8002
    ports:
      - "8002:8000"
    volumes:
      - ./backend:/app/backend
      - backend_cache_2:/app/.cache

  # ================================================================
  # Backend Node 3 (host port 8003)
  # ================================================================
  backend-node-3:
    <<: *backend-common
    container_name: evoting_backend_node3
    environment:
      <<: *backend-env
      NODE_ID: node3
      NODE_PORT: 8003
    ports:
      - "8003:8000"
    volumes:
      - ./backend:/app/backend
      - backend_cache_3:/app/.cache

  # ================================================================
  # Nginx Load Balancer (Reverse Proxy)
  # Routes external port 8000 to all backend nodes (round-robin).
  # ================================================================
  nginx:
    image: nginx:latest
    container_name: evoting_nginx
    restart: unless-stopped
    ports:
      - "8000:8000"
    volumes:
      - ./docker/nginx.conf:/etc/nginx/nginx.conf:ro
    depends_on:
      - backend-node-1
      - backend-node-2
      - backend-node-3
    networks:
      - evoting_network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # ================================================================
  # Frontend Next.js Service
  # ================================================================
  frontend:
    build:
      context: .
      dockerfile: docker/Dockerfile.frontend
      args:
        # Baked into the client bundle at build time.
        NEXT_PUBLIC_API_URL: http://localhost:8000
    container_name: evoting_frontend
    restart: unless-stopped
    ports:
      - "${FRONTEND_PORT:-3000}:3000"
    depends_on:
      - nginx
    environment:
      NEXT_PUBLIC_API_URL: http://localhost:8000
      NODE_ENV: production
    networks:
      - evoting_network
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3000/"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # ================================================================
  # Adminer (Database Management UI)
  # ================================================================
  adminer:
    image: adminer:latest
    container_name: evoting_adminer
    restart: unless-stopped
    ports:
      - "8081:8080"
    depends_on:
      - mariadb
    networks:
      - evoting_network
    environment:
      ADMINER_DEFAULT_SERVER: mariadb

volumes:
  evoting_data:
    driver: local
  backend_cache_1:
    driver: local
  backend_cache_2:
    driver: local
  backend_cache_3:
    driver: local

networks:
  evoting_network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.25.0.0/16

View File

@ -0,0 +1,60 @@
# nginx.conf — round-robin load balancer in front of the three backend nodes.
events {
    worker_connections 1024;
}

http {
    # Backend pool: equal-weight round robin across all three nodes.
    upstream backend_nodes {
        server backend-node-1:8000 weight=1;
        server backend-node-2:8000 weight=1;
        server backend-node-3:8000 weight=1;
    }

    server {
        listen 8000;
        server_name localhost;

        # Liveness probe answered by Nginx itself, never proxied.
        # NOTE(review): this returns 200 even if every backend node is down —
        # probe a proxied route to verify actual backend health.
        location /health {
            access_log off;
            return 200 "healthy\n";
            add_header Content-Type text/plain;
        }

        # Everything else is proxied to the backend pool.
        location / {
            proxy_pass http://backend_nodes;
            proxy_http_version 1.1;

            # Forward client identity to the backends.
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;

            # Clear the hop-by-hop Connection header before proxying.
            proxy_set_header Connection "";
            proxy_connect_timeout 30s;
            proxy_send_timeout 30s;
            proxy_read_timeout 30s;

            # Buffer upstream responses.
            proxy_buffering on;
            proxy_buffer_size 4k;
            proxy_buffers 8 4k;
        }

        # FastAPI interactive docs and the OpenAPI schema.
        location /docs {
            proxy_pass http://backend_nodes;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
        }

        location /openapi.json {
            proxy_pass http://backend_nodes;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
        }
    }
}

View File

@ -32,7 +32,7 @@ export default function RegisterPage() {
setSuccess(false) setSuccess(false)
try { try {
await registerUser(data.email, data.password, data.firstName, data.lastName) await registerUser(data.email, data.password, data.firstName, data.lastName, data.citizenId)
setSuccess(true) setSuccess(true)
setTimeout(() => { setTimeout(() => {
router.push("/dashboard") router.push("/dashboard")
@ -149,6 +149,20 @@ export default function RegisterPage() {
)} )}
</div> </div>
<div className="space-y-2">
<Label htmlFor="citizenId">Numéro de Citoyen (CNI/ID)</Label>
<Input
id="citizenId"
placeholder="Ex: 12345ABC678"
{...register("citizenId")}
disabled={isLoading || isSubmitting}
className={errors.citizenId ? "border-destructive" : ""}
/>
{errors.citizenId && (
<p className="text-sm text-destructive">{errors.citizenId.message}</p>
)}
</div>
<div className="space-y-2"> <div className="space-y-2">
<Label htmlFor="password">Mot de passe</Label> <Label htmlFor="password">Mot de passe</Label>
<div className="relative"> <div className="relative">

View File

@ -141,7 +141,7 @@ async function apiRequest<T>(
* Authentication APIs * Authentication APIs
*/ */
export const authApi = { export const authApi = {
async register(email: string, password: string, firstName: string, lastName: string) { async register(email: string, password: string, firstName: string, lastName: string, citizenId: string) {
return apiRequest<AuthToken>("/api/auth/register", { return apiRequest<AuthToken>("/api/auth/register", {
method: "POST", method: "POST",
skipAuth: true, skipAuth: true,
@ -150,6 +150,7 @@ export const authApi = {
password, password,
first_name: firstName, first_name: firstName,
last_name: lastName, last_name: lastName,
citizen_id: citizenId,
}), }),
}) })
}, },

View File

@ -14,7 +14,7 @@ interface AuthContextType {
isAuthenticated: boolean isAuthenticated: boolean
error: string | null error: string | null
login: (email: string, password: string) => Promise<void> login: (email: string, password: string) => Promise<void>
register: (email: string, password: string, firstName: string, lastName: string) => Promise<void> register: (email: string, password: string, firstName: string, lastName: string, citizenId: string) => Promise<void>
logout: () => void logout: () => void
refreshProfile: () => Promise<void> refreshProfile: () => Promise<void>
} }
@ -75,11 +75,11 @@ export function AuthProvider({ children }: { children: ReactNode }) {
} }
} }
const register = async (email: string, password: string, firstName: string, lastName: string) => { const register = async (email: string, password: string, firstName: string, lastName: string, citizenId: string) => {
setIsLoading(true) setIsLoading(true)
setError(null) setError(null)
try { try {
const response = await authApi.register(email, password, firstName, lastName) const response = await authApi.register(email, password, firstName, lastName, citizenId)
if (response.error) { if (response.error) {
throw new Error(response.error) throw new Error(response.error)
} }

View File

@ -39,6 +39,11 @@ export const registerSchema = z.object({
.string() .string()
.email("Adresse email invalide") .email("Adresse email invalide")
.min(1, "Email requis"), .min(1, "Email requis"),
citizenId: z
.string()
.min(1, "Numéro de citoyen requis")
.min(5, "Le numéro de citoyen doit contenir au moins 5 caractères")
.max(20, "Le numéro de citoyen ne doit pas dépasser 20 caractères"),
password: z password: z
.string() .string()
.min(8, "Le mot de passe doit contenir au moins 8 caractères") .min(8, "Le mot de passe doit contenir au moins 8 caractères")