Compare commits

..

No commits in common. "main" and "rust" have entirely different histories.
main ... rust

76 changed files with 97 additions and 5976 deletions

View File

@ -31,5 +31,5 @@ jobs:
file: ./Dockerfile file: ./Dockerfile
push: true push: true
tags: git.kanopo.dev/rhythm/rhythm-backend:latest tags: git.kanopo.dev/rhythm/rhythm-backend:latest
cache-from: type=registry,ref=git.kanopo.dev/rhythm/rhythm-backend:buildcache cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=registry,ref=git.kanopo.dev/rhythm/rhythm-backend:buildcache,mode=max cache-to: type=local,dest=/tmp/.buildx-cache,mode=max

4
.gitignore vendored
View File

@ -1,4 +1,4 @@
.env .env
target /target
logs /logs
postgres-data postgres-data

View File

@ -1,14 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "update refresh_tokens set revoked_at = now() where user_id = $1 and revoked_at is null",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": []
},
"hash": "011508dbe03cb96438e135c460895932bc47b4055ee9329625fba56a73c55f29"
}

View File

@ -1,52 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "select * from refresh_tokens where token_hash = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "user_id",
"type_info": "Uuid"
},
{
"ordinal": 2,
"name": "token_hash",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "expires_at",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "created_at",
"type_info": "Timestamptz"
},
{
"ordinal": 5,
"name": "revoked_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false,
false,
false,
true
]
},
"hash": "164c44b8f2c7c59eeaf4402be845f5788cc0e720ac0799a5457a893b2a070dda"
}

View File

@ -1,46 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "select * from users where email=$1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "email",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "password",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "updated_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false,
false,
false
]
},
"hash": "25b92f656255a863ed7649125d60bd1e309bd64204925cf73f245c8ce63b27b7"
}

View File

@ -1,54 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "insert into refresh_tokens (user_id, token_hash, expires_at) values ($1, $2, $3) returning *",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "user_id",
"type_info": "Uuid"
},
{
"ordinal": 2,
"name": "token_hash",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "expires_at",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "created_at",
"type_info": "Timestamptz"
},
{
"ordinal": 5,
"name": "revoked_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Uuid",
"Varchar",
"Timestamptz"
]
},
"nullable": [
false,
false,
false,
false,
false,
true
]
},
"hash": "784522f0b0c26f78d25290e4cdb8058e68866905cd194b3f485d7563acba884c"
}

View File

@ -1,47 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "insert into organizations (name, slug) values ($1, $2) returning *",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "name",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "slug",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "updated_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Varchar",
"Varchar"
]
},
"nullable": [
false,
false,
false,
false,
false
]
},
"hash": "918e7e43a258341fc3380bc26bb354610a9854ddbf318281446770502b5a0183"
}

View File

@ -1,47 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "insert into users (email, password) values ($1, $2) returning *",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "email",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "password",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "updated_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Varchar",
"Varchar"
]
},
"nullable": [
false,
false,
false,
false,
false
]
},
"hash": "a145c5eb33f466fb3112e6feaf0e1ef56735331624010eda1703f0169271577c"
}

View File

@ -1,46 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "select * from users where id=$1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "email",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "password",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "updated_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": [
false,
false,
false,
false,
false
]
},
"hash": "af5f9eb11f896d65c13da7697dc8c18a2399aa845af63214e39040958e3e5ec4"
}

View File

@ -1,46 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "select * from organizations where id = any($1)",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "name",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "slug",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "updated_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"UuidArray"
]
},
"nullable": [
false,
false,
false,
false,
false
]
},
"hash": "bcd0b53a358d5a167a269bee7726acbb7f878614fd3e460f18efdac41106a3a2"
}

View File

@ -1,14 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "update refresh_tokens set revoked_at = now() where id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": []
},
"hash": "cba04640f6d764123bfb0a857498001bdf0ed3c9fe66e410ccd2ef66d5e2b35a"
}

View File

@ -15,7 +15,7 @@
- **DB Access:** sqlx (compile-time checked queries, hand-written repositories) - **DB Access:** sqlx (compile-time checked queries, hand-written repositories)
- **PostgreSQL Driver/Pool:** sqlx::PgPool - **PostgreSQL Driver/Pool:** sqlx::PgPool
- **Logging:** tracing + tracing-subscriber + tracing-tree (structured, hierarchical) - **Logging:** tracing + tracing-subscriber + tracing-tree (structured, hierarchical)
- **Auth:** jsonwebtoken (HS256 access tokens, 15min) + opaque refresh tokens (7d, stored hashed in DB via SHA-256) + argon2 (password hashing) - **Auth:** jsonwebtoken (HS256) + argon2 (password hashing)
- **API Docs:** utoipa (code-first OpenAPI generation) - **API Docs:** utoipa (code-first OpenAPI generation)
- **Health Probes:** liveness/readiness endpoints - **Health Probes:** liveness/readiness endpoints
- **Deployment:** Docker Compose on self-hosted hardware - **Deployment:** Docker Compose on self-hosted hardware
@ -25,7 +25,7 @@
## Decisions Made ## Decisions Made
1. **JWT signing:** HS256 (shared secret from env, can migrate to RS256 later) 1. **JWT signing:** HS256 (shared secret from env, can migrate to RS256 later)
2. **Token model:** Access = JWT (HS256, 15min, stateless verification). Refresh = opaque UUID v4 string, stored SHA-256 hashed in DB (7 days, revocable). Logout deletes refresh token from DB; access token still valid until expiry. 2. **Token model:** Access + Refresh (short-lived access ~15min, long-lived refresh ~7d, refresh tokens stored hashed in DB)
3. **Roles:** Project-level roles (`admin`, `developer`, `reporter`) + Org-level roles (`owner`, `admin`, `member`) 3. **Roles:** Project-level roles (`admin`, `developer`, `reporter`) + Org-level roles (`owner`, `admin`, `member`)
4. **OpenAPI workflow:** Code-first with utoipa (auto-generate spec from Rust handlers/models) 4. **OpenAPI workflow:** Code-first with utoipa (auto-generate spec from Rust handlers/models)
5. **DB access:** sqlx with hand-written repositories (compile-time checked, no ORM) 5. **DB access:** sqlx with hand-written repositories (compile-time checked, no ORM)
@ -44,37 +44,64 @@
``` ```
src/ src/
├── main.rs # Entrypoint, server, graceful shutdown ├── main.rs # Entrypoint, server, graceful shutdown
├── config.rs # Config (db_url, jwt_secret, socket_address, app_env) ├── config.rs # Config (exists, extend)
├── database.rs # PgPool setup, migration runner ├── logging.rs # Logging (exists)
├── logging.rs # tracing config (dev: pretty console, prod: JSON + file) ├── errors.rs # Unified error types → Axum responses
├── errors.rs # AppError enum → IntoResponse (BadRequest/Unauthorized/Internal) ├── state.rs # AppState (PgPool, config, etc.)
├── state.rs # AppState (PgPool, jwt_secret) ├── routes.rs # Router composition (/api/v1/...)
├── server.rs # Axum server init + graceful shutdown
├── auth/ ├── auth/
│ ├── mod.rs │ ├── jwt.rs # HS256 token creation/validation
│ ├── jwt.rs # HS256 JWT creation/validation (access tokens, 15min) │ ├── hash.rs # argon2 password hashing
│ ├── hash.rs # argon2 password hashing/verification │ ├── handlers.rs # register, login, refresh
│ └── token.rs # Refresh token generation (UUID v4) + SHA-256 hashing for DB storage │ ├── models.rs # auth DTOs
├── db/ │ └── service.rs # auth business logic
│ ├── mod.rs
│ ├── user_repo.rs # find_by_email, create
│ └── token_repo.rs # store, find_by_hash, delete_by_hash, delete_all_for_user
├── service/
│ ├── mod.rs
│ └── auth_service.rs # login, register, refresh, logout business logic
├── controller/
│ ├── mod.rs # Router composition (/api/v1/...)
│ ├── model/
│ │ ├── mod.rs
│ │ └── auth_model.rs # LoginRequest, RegisterRequest, AuthResponse DTOs
│ └── v1/
│ ├── mod.rs # v1 router (nests /auth)
│ └── auth_controller.rs # POST /login, /register, /refresh, /logout handlers
├── middleware/ ├── middleware/
│ ├── auth.rs # JWT extraction from Authorization header, inject CurrentUser │ ├── auth.rs # JWT extraction layer
│ └── rbac.rs # Project-level role guard │ └── rbac.rs # Project-level role guard
├── models/ # (future: org, project, issue, etc.) ├── models/
└── handlers/ # (future: orgs, projects, issues, etc.) │ ├── user.rs
│ ├── org.rs
│ ├── project.rs
│ ├── issue.rs
│ ├── comment.rs
│ ├── tag.rs
│ ├── sprint.rs
│ ├── stage.rs
│ ├── time_entry.rs
│ └── role.rs # OrgRole, ProjectRole enums
├── handlers/
│ ├── health.rs
│ ├── orgs.rs
│ ├── projects.rs
│ ├── issues.rs
│ ├── comments.rs
│ ├── tags.rs
│ ├── sprints.rs
│ ├── stages.rs
│ └── time_entries.rs
├── services/
│ ├── org.rs
│ ├── project.rs
│ ├── issue.rs
│ ├── comment.rs
│ ├── tag.rs
│ ├── sprint.rs
│ ├── stage.rs
│ └── time_entry.rs
└── db/
├── mod.rs # Pool setup, migration runner
└── repos/
├── users.rs
├── orgs.rs
├── projects.rs
├── issues.rs
├── comments.rs
├── tags.rs
├── sprints.rs
├── stages.rs
├── time_entries.rs
├── memberships.rs
└── refresh_tokens.rs
migrations/ migrations/
├── 001_create_users.sql ├── 001_create_users.sql
├── 002_create_organizations.sql ├── 002_create_organizations.sql
@ -463,36 +490,23 @@ CREATE INDEX idx_comments_issue ON comments(issue_id);
5. Wire up Axum router with `routes()`, shared state, and graceful shutdown via `tokio::signal` 5. Wire up Axum router with `routes()`, shared state, and graceful shutdown via `tokio::signal`
6. Create first migration: `users` table 6. Create first migration: `users` table
### Phase 2: Auth (Hybrid JWT Access + DB-stored Refresh Tokens) ### Phase 2: Auth (JWT + Register/Login/Refresh)
**Token model:** Access tokens are signed JWTs (15min, no DB lookup). Refresh tokens are opaque strings stored hashed in DB (7 days, revocable). **Additional dependencies:** `jsonwebtoken`, `argon2`, `validator`
**Additional dependencies:** `jsonwebtoken`, `argon2`, `sha2`, `uuid` (v4), `chrono`
**Current progress:**
- ✅ Config, AppState (PgPool), DB migrations runner, router setup, users migration
- ✅ Error types (`AppError` with `IntoResponse` — BadRequest/Unauthorized/Internal)
- ✅ Auth controller routes wired (`POST /api/v1/auth/login`, `/register`)
- ✅ Auth DTOs (`LoginRequest`, `RegisterRequest`, `AuthResponse`) in `controller/model/auth_model.rs`
- ✅ Service stubs (`auth_service.rs``login()`, `register()` with `todo!()`)
- ⚠️ Handler parameter order needs fix (State before Json)
**Tasks:** **Tasks:**
1. ~~Create `users` migration~~ (done — `0001_create_users_table.sql`) 1. Create `users` migration (if not done in Phase 1)
2. Fix handler parameter order — `State` before `Json` in `auth_controller.rs` 2. Create `refresh_tokens` migration
3. Create `tokens` migration — `0002_create_tokens_table.sql` (id, user_id, token_hash, expires_at, created_at) 3. Implement `auth::hash` — argon2 password hashing/verification
4. Add dependencies to `Cargo.toml``argon2`, `jsonwebtoken`, `sha2`, `uuid` (v4), `chrono` 4. Implement `auth::jwt` — HS256 access + refresh token creation/validation
5. Create `src/db/` module — `user_repo.rs` (find_by_email, create) + `token_repo.rs` (store, find_by_hash, delete_by_hash, delete_all_for_user) 5. Implement `db::repos::users` — create, get by email, get by id
6. Create `src/auth/hash.rs` — argon2 password hashing (`hash_password`, `verify_password`) 6. Implement `db::repos::refresh_tokens` — store, find, delete
7. Create `src/auth/jwt.rs` — HS256 JWT access token creation/validation with 15min expiry (`create_access_token`, `verify_access_token`, `Claims` struct) 7. Implement `auth::service` — register, login, refresh logic
8. Create `src/auth/token.rs` — generate random refresh token (UUID v4), SHA-256 hash for DB storage 8. Implement `auth::handlers``POST /api/v1/auth/register`, `/login`, `/refresh`
9. Add `jwt_secret` to `AppState` / `Config` (loaded from `JWT_SECRET` env var) 9. Implement `auth::models` — request/response DTOs with `validator` checks
10. Implement `auth_service.rs` — login (find user → verify password → create JWT access token + generate/store refresh token) and register (check email → hash password → create user → create tokens) 10. Create `errors.rs` — unified `AppError` enum → `IntoResponse`
11. Add input validation — email/password not empty → `AppError::Validation` 11. Create `middleware::auth` — JWT extraction, inject `CurrentUser`
12. Create `middleware::auth` — JWT extraction from `Authorization: Bearer <token>`, inject `CurrentUser` 12. Protect routes with auth layer
13. Add `POST /api/v1/auth/refresh` — accepts refresh token → hash → look up in DB → if valid, delete old + create new token pair
14. Add `POST /api/v1/auth/logout` — accepts refresh token → hash → delete from DB
15. Protect routes with auth layer
### Phase 3: RBAC Layer ### Phase 3: RBAC Layer

3743
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -9,29 +9,3 @@ tracing = "0.1.44"
tracing-appender = "0.2.5" tracing-appender = "0.2.5"
tracing-subscriber = {version="0.3.23", features = ["env-filter", "json"]} tracing-subscriber = {version="0.3.23", features = ["env-filter", "json"]}
tracing-tree = "0.4.1" tracing-tree = "0.4.1"
tokio = { version = "1.52.1", features = ["rt-multi-thread", "macros", "signal"] }
sqlx = { version = "0.8", features = [ "runtime-tokio", "postgres", "chrono", "uuid" ] }
axum = "0.8.9"
thiserror = "2"
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.149"
argon2 = "0.5.3"
jsonwebtoken = { version = "10.3.0", features = ["rand", "rust_crypto"] }
chrono = { version = "0.4.44", features = ["serde"] }
uuid = { version = "1.23.1", features = ["serde", "v4"] }
rand = "0.10.1"
sha2 = "0.11.0"
hex = "0.4.3"
tower-cookies = "0.11.0"
tower-http = { version = "0.6.8", features = ["trace"] }
time = "0.3.47"
tower = "0.5.3"
futures-util = "0.3.32"
dashmap = "6.1.0"
zxcvbn = "3.1.1"
validator = { version = "0.20.0", features = ["derive"] }
[dev-dependencies]
testcontainers = "0.23.1"
testcontainers-modules = { version = "0.11.4", features = ["postgres"] }
reqwest = { version = "0.12", features = ["json", "cookies"] }

View File

@ -1,20 +1,14 @@
FROM rust:1.95.0-alpine3.22 AS builder FROM rust:1.95.0-alpine3.22 AS builder
WORKDIR /app WORKDIR /app
# Cache dependencies by building a dummy project first ARG DB_URL
COPY Cargo.toml Cargo.lock ./ ARG APP_ENV
RUN mkdir src && echo "fn main() {}" > src/main.rs
RUN cargo build --release
RUN rm -rf src
# Copy real source code and build
COPY . . COPY . .
# Touch the main file to ensure cargo sees it as newer than the dummy build
RUN touch src/main.rs
RUN cargo build --release RUN cargo build --release
# Small runtime image # Small runtime image
FROM alpine:3.22 FROM alpine:3.22.4
WORKDIR /app WORKDIR /app
COPY --from=builder /app/target/release/rhythm-backend /app/executable COPY --from=builder /app/target/release/rhythm-backend /app/executable

View File

@ -1,13 +0,0 @@
.PHONY: prepare build run clean
prepare:
cargo sqlx prepare
build: prepare
cargo build
run: build
cargo run
clean:
rm -dfR target

View File

@ -1,66 +0,0 @@
# Rhythm Backend API Documentation
## Authentication System Overview
The authentication system is built with a security-first approach, featuring multi-layered protection against common web vulnerabilities.
### Security Layers (Middleware)
1. **Rate Limiting (Anti-Spam Bucket)**
- **Mechanism:** Token Bucket (in-memory `DashMap`).
- **Logic:** Identifies users via the `X-Client-IP` header (trusted from proxy).
- **Config:** 5 attempts per minute, refilling 1 token every 12 seconds.
- **Response:** `429 Too Many Requests`.
2. **Anti-Enumeration (Timing Protection)**
- **Mechanism:** Variable response delay.
- **Logic:** Ensures every authentication request takes between 150ms and 500ms (matching `MIN_DELAY_MS`/`MAX_DELAY_MS` in the middleware).
- **Purpose:** Hides whether an account exists or a password was correct from timing analysis.
### Current API Endpoints
#### `POST /api/v1/auth/register`
Registers a new user.
- **Payload:** `RegisterRequest { email, password }`
- **Response:** `200 OK` with `AuthResponse { access_token }`
- **Side Effect:** Sets an `HttpOnly`, `Secure`, `SameSite=Strict` cookie named `refresh_token`.
#### `POST /api/v1/auth/login`
Authenticates a user.
- **Payload:** `LoginRequest { email, password }`
- **Response:** `200 OK` with `AuthResponse { access_token }`
- **Side Effect:** Sets a new `refresh_token` cookie.
#### `POST /api/v1/auth/refresh`
Rotates tokens for an active session.
- **Requirement:** Valid `refresh_token` cookie.
- **Response:** `200 OK` with new `access_token`.
- **Rotation Logic:** Revokes the old refresh token and issues a completely new one (Rotation) to prevent session hijacking.
#### `POST /api/v1/auth/logout`
Invalidates the current session.
- **Requirement:** Valid `refresh_token` cookie.
- **Response:** `200 OK`.
- **Logic:** Revokes the refresh token in the database and clears the HttpOnly cookie.
---
## Security Features Detail
### 1. Rate Limiting (Anti-Spam)
Protects against brute-force and DoS attacks by limiting requests per IP address. Uses an in-memory Token Bucket algorithm.
### 2. Anti-Enumeration (Timing Protection)
Ensures that the time taken to process an auth request is independent of the result (e.g., whether a user exists or not). This prevents attackers from using timing differences to discover valid emails.
### 3. Password Strength (zxcvbn)
Uses Dropbox's `zxcvbn` algorithm to estimate password entropy. Registration requires a score of at least 3/4.
### 4. Refresh Token Rotation
Every time a refresh token is used to get a new access token, the old refresh token is invalidated and a new one is issued. This limits the window of opportunity if a refresh token is leaked.
test cache docker build and gitea

View File

@ -1,35 +0,0 @@
services:
api-prod:
image: git.kanopo.dev/rhythm/rhythm-backend:latest
container_name: rhythm-api-prod
restart: unless-stopped
ports:
- "6969:6969"
env_file:
- ".env"
depends_on:
db-prod:
condition: service_healthy
profiles:
- prod
db-prod:
image: postgres:18.0-alpine
restart: unless-stopped
container_name: rhythm-db-prod
environment:
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_DB: ${DB_NAME}
volumes:
- db:/var/lib/postgresql/data
profiles:
- prod
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USERNAME} -d ${DB_NAME}"]
interval: 5s
timeout: 10s
retries: 3
volumes:
db:

View File

@ -1,14 +0,0 @@
DB_USERNAME=user
# openssl rand -base64 32 | wl-copy
DB_PASSWORD=password
DB_NAME=rhythm-dev
DB_PORT=5432
DB_HOST=localhost
DATABASE_URL=postgres://${DB_USERNAME}:${DB_PASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=public&sslmode=disable
# app env can be one of the following [prod, dev]
APP_ENV=dev
# APP_ENV=prod
SOCKET_ADDRESS=0.0.0.0:6969

View File

@ -1,15 +0,0 @@
info:
name: base ping health
type: http
seq: 2
http:
method: GET
url: "{{base_url}}/"
auth: inherit
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -1,8 +0,0 @@
name: test
variables:
- name: base_url
value: http://localhost:6969
- name: access_token
value: ""
- name: refresh_token
value: ""

View File

@ -1,37 +0,0 @@
info:
name: login
type: http
seq: 3
http:
method: POST
url: "{{base_url}}/api/v1/auth/login"
body:
type: json
data: |
{
"email": "a@a.it",
"password": "Password1!6969_"
}
auth: inherit
runtime:
scripts:
- type: after-response
code: |-
const response = res.getBody();
const token = response.access_token;
bru.setEnvVar("access_token", token);
console.log("login - access_token:", token);
const cookies = res.getHeaders()['set-cookie'];
if (cookies) {
bru.setEnvVar("refresh_token", cookies[0]);
console.log("login - refresh_token:", cookies[0]);
}
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -1,28 +0,0 @@
info:
name: logout
type: http
seq: 6
http:
method: POST
url: "{{base_url}}/api/v1/protected/auth/logout"
auth:
type: bearer
token: "{{access_token}}"
runtime:
scripts:
- type: after-response
code: |-
const status = res.getStatus();
if (status === 200 || status === 204) {
bru.setEnvVar("access_token", "");
bru.setEnvVar("refresh_token", "");
console.log("logout - tokens cleared");
}
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -1,28 +0,0 @@
info:
name: logout all
type: http
seq: 7
http:
method: POST
url: "{{base_url}}/api/v1/protected/auth/logout-all"
auth:
type: bearer
token: "{{access_token}}"
runtime:
scripts:
- type: after-response
code: |-
const status = res.getStatus();
if (status === 200 || status === 204) {
bru.setEnvVar("access_token", "");
bru.setEnvVar("refresh_token", "");
console.log("logout_all - all sessions revoked, tokens cleared");
}
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -1,10 +0,0 @@
opencollection: 1.0.0
info:
name: rhythm
bundled: false
extensions:
bruno:
ignore:
- node_modules
- .git

View File

@ -1,30 +0,0 @@
info:
name: refresh
type: http
seq: 5
http:
method: POST
url: "{{base_url}}/api/v1/auth/refresh"
auth: inherit
runtime:
scripts:
- type: after-response
code: |-
const response = res.getBody();
const token = response.access_token;
bru.setEnvVar("access_token", token);
console.log("refresh - access_token:", token);
const cookies = res.getHeaders()['set-cookie'];
if (cookies) {
bru.setEnvVar("refresh_token", cookies[0]);
console.log("refresh - refresh_token:", cookies[0]);
}
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -1,37 +0,0 @@
info:
name: register
type: http
seq: 1
http:
method: POST
url: "{{base_url}}/api/v1/auth/register"
body:
type: json
data: |
{
"email": "a@a.it",
"password": "Password1!6969_"
}
auth: inherit
runtime:
scripts:
- type: after-response
code: |-
const response = res.getBody();
const token = response.access_token;
bru.setEnvVar("access_token", token);
console.log("register - access_token:", token);
const cookies = res.getHeaders()['set-cookie'];
if (cookies) {
bru.setEnvVar("refresh_token", cookies[0]);
console.log("register - refresh_token:", cookies[0]);
}
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -1,17 +0,0 @@
info:
name: test protected routes
type: http
seq: 4
http:
method: GET
url: "{{base_url}}/api/v1/protected/ping"
auth:
type: bearer
token: "{{access_token}}"
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -1,7 +0,0 @@
-- Application user accounts.
create table users (
id uuid primary key default uuidv4(), -- uuidv4() is built in from PostgreSQL 18
email varchar(255) unique not null, -- login identifier; uniqueness enforced here
password varchar(255) not null, -- argon2 password hash (never plaintext)
created_at timestamptz not null default now(),
updated_at timestamptz not null default now() -- NOTE(review): no trigger visible to bump this on update — confirm it is set by application code
);

View File

@ -1,10 +0,0 @@
-- Issued refresh tokens, one row per token.
-- Revocation is soft: revoked_at is set instead of deleting the row.
create table refresh_tokens (
id uuid primary key default uuidv4(), -- uuidv4() is built in from PostgreSQL 18
user_id uuid not null references users(id) on delete cascade, -- deleting a user wipes their sessions
token_hash varchar(255) not null, -- SHA-256 hash of the plain token
expires_at timestamptz not null,
created_at timestamptz not null default now(),
revoked_at timestamptz, -- null while the token is still valid
constraint unique_token_hash unique (token_hash) -- also gives an index for lookup-by-hash
);

View File

@ -1,7 +0,0 @@
-- Tenant organizations.
create table organizations (
id uuid primary key default uuidv4(), -- uuidv4() is built in from PostgreSQL 18
name varchar(255) not null,
slug varchar(255) not null unique, -- URL-safe unique identifier, e.g. acme-corp-a7x9
created_at timestamptz not null default now(),
updated_at timestamptz not null default now()
);

View File

@ -1,13 +0,0 @@
-- Org-level role of a member.
-- NOTE(review): the project docs list only owner/admin/member — 'viewer' is extra here; confirm which is authoritative.
CREATE TYPE org_role AS ENUM ('owner', 'admin', 'member', 'viewer');
-- Many-to-many user <-> organization link, one role per membership.
CREATE TABLE org_memberships (
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
org_id UUID NOT NULL REFERENCES organizations(id) ON DELETE CASCADE,
role org_role NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
PRIMARY KEY (user_id, org_id) -- at most one membership per user per org
);

View File

@ -2,8 +2,6 @@ use std::env;
use dotenvy::dotenv; use dotenvy::dotenv;
use crate::errors::StartupError;
#[derive(Debug, Clone, Copy, PartialEq)] #[derive(Debug, Clone, Copy, PartialEq)]
pub enum AppEnv { pub enum AppEnv {
Development, Development,
@ -11,15 +9,12 @@ pub enum AppEnv {
} }
impl AppEnv { impl AppEnv {
pub fn from_env() -> Result<Self, StartupError> { pub fn from_env() -> Self {
match env::var("APP_ENV").as_deref() { match std::env::var("APP_ENV").as_deref() {
Ok("prod") => Ok(AppEnv::Production), Ok("prod") => AppEnv::Production,
Ok("dev") => Ok(AppEnv::Development), Ok("dev") => AppEnv::Development,
Ok(other) => Err(StartupError::InvalidConfig(format!( Ok(other) => panic!("Invalid APP_ENV: {}", other),
"Invalid APP_ENV: {}", Err(_) => panic!("APP_ENV must be set"),
other
))),
Err(_) => Err(StartupError::InvalidConfig("APP_ENV must be set".to_string())),
} }
} }
} }
@ -28,18 +23,14 @@ impl AppEnv {
pub struct Config { pub struct Config {
pub db_url: String, pub db_url: String,
pub app_env: AppEnv, pub app_env: AppEnv,
pub socket_address: String,
pub jwt_secret: String,
} }
impl Config { impl Config {
pub fn load() -> Result<Self, StartupError> { pub fn load() -> Self {
dotenv().ok(); dotenv().ok();
Ok(Self { Self {
db_url: env::var("DATABASE_URL")?, db_url: env::var("DB_URL").expect("DB_URL is not configured"),
socket_address: env::var("SOCKET_ADDRESS")?, app_env: AppEnv::from_env(),
app_env: AppEnv::from_env()?, }
jwt_secret: env::var("JWT_SECRET")?,
})
} }
} }

View File

@ -1,52 +0,0 @@
use axum::{
Json,
extract::{FromRequest, FromRequestParts, Request},
http::request::Parts,
};
use serde::de::DeserializeOwned;
use uuid::Uuid;
use validator::Validate;
use crate::errors::ApiError;
/// JSON extractor that runs `validator` checks after deserialization.
///
/// Wraps `axum::Json<T>`; both a deserialization failure and a failed
/// `Validate` check are surfaced as `ApiError::Validation`.
#[derive(Debug, Clone)]
pub struct ValidJson<T>(pub T);

impl<T, S> FromRequest<S> for ValidJson<T>
where
    T: DeserializeOwned + Validate,
    S: Send + Sync,
    Json<T>: FromRequest<S, Rejection = axum::extract::rejection::JsonRejection>,
{
    type Rejection = ApiError;

    async fn from_request(req: Request, state: &S) -> Result<Self, Self::Rejection> {
        // Deserialize first; a malformed body never reaches validation.
        let payload = match Json::<T>::from_request(req, state).await {
            Ok(Json(inner)) => inner,
            Err(rejection) => return Err(ApiError::Validation(rejection.to_string())),
        };

        if let Err(errors) = payload.validate() {
            return Err(ApiError::Validation(errors.to_string()));
        }

        Ok(Self(payload))
    }
}
/// Extractor yielding the authenticated user's id.
///
/// Reads the `Uuid` that the auth middleware stored in the request
/// extensions; rejects with `ApiError::Unauthorized` when it is absent
/// (i.e. the route was not behind the auth layer).
pub struct CurrentUser(pub Uuid);

impl<S> FromRequestParts<S> for CurrentUser
where
    S: Send + Sync,
{
    type Rejection = ApiError;

    async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
        match parts.extensions.get::<Uuid>() {
            Some(id) => Ok(Self(*id)),
            None => Err(ApiError::Unauthorized),
        }
    }
}

View File

@ -1,19 +0,0 @@
use axum::{extract::Request, middleware::Next, response::Response};
use rand::RngExt;
use std::time::{Duration, Instant};
use tokio::time::sleep;
/// Lower bound (ms) on the total handling time of a request through this layer.
const MIN_DELAY_MS: u64 = 150;
/// Upper bound (ms) on the padding target.
const MAX_DELAY_MS: u64 = 500;

/// Pads every response to a random duration in `[MIN_DELAY_MS, MAX_DELAY_MS]`
/// so response timing does not reveal whether an account exists or a
/// password matched (anti-enumeration).
pub async fn random_delay_middleware(request: Request, next: Next) -> Response {
    let started_at = Instant::now();
    let padding = Duration::from_millis(rand::rng().random_range(MIN_DELAY_MS..=MAX_DELAY_MS));

    let response = next.run(request).await;

    // Sleep only for the remainder if the handler finished early; a slow
    // handler that already exceeded the target is passed through untouched.
    let remaining = padding.saturating_sub(started_at.elapsed());
    if !remaining.is_zero() {
        sleep(remaining).await;
    }
    response
}

View File

@ -1,25 +0,0 @@
use crate::{errors::ApiError, state::AppState, utils::jwt::verify_access_token};
use axum::{
extract::{Request, State},
middleware::Next,
response::Response,
};
/// Axum middleware authenticating requests via a Bearer access token.
///
/// Expects `Authorization: Bearer <jwt>`; verifies the token with the
/// shared secret from `AppState` and injects the claims' subject (`Uuid`
/// user id) into the request extensions for `CurrentUser` to pick up.
///
/// # Errors
/// `ApiError::Unauthorized` when the header is missing or malformed, or
/// whatever `verify_access_token` returns for an invalid/expired token.
pub async fn auth_middleware(
    State(state): State<AppState>,
    mut request: Request,
    next: Next,
) -> Result<Response, ApiError> {
    let auth_header = request
        .headers()
        .get(axum::http::header::AUTHORIZATION)
        .and_then(|h| h.to_str().ok())
        .ok_or(ApiError::Unauthorized)?;

    // strip_prefix replaces the starts_with check + magic-number slice
    // (`&auth_header[7..]`): it validates the scheme and yields the token
    // in one step, with no index to keep in sync with the prefix length.
    let token = auth_header
        .strip_prefix("Bearer ")
        .ok_or(ApiError::Unauthorized)?;

    let claims = verify_access_token(token, &state.jwt_secret)?;

    // Inject the user ID into extensions for downstream handlers
    request.extensions_mut().insert(claims.sub);
    Ok(next.run(request).await)
}

View File

@ -1,3 +0,0 @@
// HTTP-layer middleware for the controllers:
// random response delay (anti-enumeration), bearer-token auth, per-IP rate limiting.
pub mod anti_enumeration_middleware;
pub mod auth_middleware;
pub mod rate_limiting_middleware;

View File

@ -1,44 +0,0 @@
use crate::state::AppState;
use axum::{
extract::{ConnectInfo, Request, State},
http::StatusCode,
middleware::Next,
response::{IntoResponse, Response},
};
use std::net::SocketAddr;
/// Per-IP token-bucket rate limiter.
///
/// Identifies the caller by the `x-client-ip` header (expected to be set by
/// the reverse proxy) with the peer socket address as fallback; a request
/// that cannot drain a token gets `429 Too Many Requests`.
pub async fn rate_limiting_middleware(
    State(state): State<AppState>,
    request: Request,
    next: Next,
) -> Response {
    // 1. Identify client by IP (x-client-ip header or socket address)
    let client_ip = request
        .headers()
        .get("x-client-ip")
        .and_then(|h| h.to_str().ok())
        .map(|s| s.to_string())
        .unwrap_or_else(|| {
            request
                .extensions()
                .get::<ConnectInfo<SocketAddr>>()
                .map(|ci| ci.0.ip().to_string())
                // NOTE(review): every unidentifiable client shares the single
                // "unknown" bucket — confirm that is the intended behavior.
                .unwrap_or_else(|| "unknown".to_string())
        });
    // 2. Retrieve or create a TokenBucket for this IP and try to drain 1 token
    // The inner block scopes the DashMap entry guard so it is dropped before
    // the `.await` below; holding a shard lock across an await point could
    // stall other tasks touching the same shard.
    let has_tokens = {
        let mut entry = state
            .rate_limit
            .entry(client_ip)
            .or_insert_with(crate::state::TokenBucket::new);
        entry.value_mut().try_drain()
    };
    // 3. If successful, proceed; else return 429 Too Many Requests
    if has_tokens {
        next.run(request).await
    } else {
        StatusCode::TOO_MANY_REQUESTS.into_response()
    }
}

View File

@ -1,16 +0,0 @@
use axum::{Router, routing::get};
use tower_http::trace::TraceLayer;
use crate::state::AppState;
pub mod extractor;
mod middleware;
pub mod model;
mod v1;
/// Top-level application router: a plain-text ping at `/`, the versioned
/// API nested under `/api/v1`, and HTTP tracing layered over everything.
pub fn router(state: AppState) -> Router<AppState> {
    let api_v1 = v1::router_v1(state);
    Router::new()
        .route("/", get("Server is going brr 🚀"))
        .nest("/api/v1", api_v1)
        .layer(TraceLayer::new_for_http())
}

View File

@ -1,23 +0,0 @@
use serde::{Deserialize, Serialize};
use validator::Validate;
/// Body for `POST /api/v1/auth/login`.
#[derive(Deserialize, Validate)]
pub struct LoginRequest {
    // Must be a syntactically valid e-mail address.
    #[validate(email)]
    pub email: String,
    // Any non-empty password is accepted at login; length/strength rules
    // only apply at registration.
    #[validate(length(min = 1))]
    pub password: String,
}
/// Body for `POST /api/v1/auth/register`.
#[derive(Deserialize, Validate)]
pub struct RegisterRequest {
    #[validate(email)]
    pub email: String,
    // Minimum 8 characters at registration.
    #[validate(length(min = 8))]
    pub password: String,
}
/// Successful auth response: the short-lived access JWT.
/// The refresh token is not in the body — it travels as an HttpOnly cookie.
#[derive(Serialize)]
pub struct AuthResponse {
    pub access_token: String,
}

View File

@ -1 +0,0 @@
pub mod auth_model;

View File

@ -1,47 +0,0 @@
use axum::extract::State;
use axum::middleware::{from_fn, from_fn_with_state};
use axum::{Json, Router, routing::post};
use tower_cookies::{CookieManagerLayer, Cookies};
use crate::{
controller::extractor::ValidJson,
controller::middleware::anti_enumeration_middleware::random_delay_middleware,
controller::middleware::rate_limiting_middleware::rate_limiting_middleware,
controller::model::auth_model::{AuthResponse, LoginRequest, RegisterRequest},
errors::ApiError,
service::auth_service::{login, refresh, register},
state::AppState,
};
/// Public authentication routes: login, register, refresh.
///
/// Layers added later wrap earlier ones, so an incoming request passes the
/// cookie manager, then the rate limiter, then the random-delay
/// (anti-enumeration) middleware before reaching a handler.
pub fn auth_router(state: AppState) -> Router<AppState> {
    Router::new()
        .route("/login", post(login_handler))
        .route("/register", post(register_handler))
        .route("/refresh", post(refresh_handler))
        .layer(from_fn(random_delay_middleware))
        .layer(from_fn_with_state(state.clone(), rate_limiting_middleware))
        .layer(CookieManagerLayer::new())
}
/// `POST /login` — validates the JSON body, then delegates to the service.
async fn login_handler(
    State(s): State<AppState>,
    cookies: Cookies,
    ValidJson(payload): ValidJson<LoginRequest>,
) -> Result<Json<AuthResponse>, ApiError> {
    login(&s, cookies, payload).await
}
/// `POST /register` — validates the JSON body, then delegates to the service.
async fn register_handler(
    State(s): State<AppState>,
    cookies: Cookies,
    ValidJson(payload): ValidJson<RegisterRequest>,
) -> Result<Json<AuthResponse>, ApiError> {
    register(&s, cookies, payload).await
}
/// `POST /refresh` — rotates the refresh-token cookie into a new access token.
async fn refresh_handler(
    State(s): State<AppState>,
    cookies: Cookies,
) -> Result<Json<AuthResponse>, ApiError> {
    refresh(&s, cookies).await
}

View File

@ -1,24 +0,0 @@
use axum::{
Router,
middleware::from_fn_with_state,
};
use crate::{
controller::middleware::auth_middleware::auth_middleware,
state::AppState,
};
mod auth_controller;
mod protected;
/// Version-1 API router: public `/auth` routes merged with `/protected`
/// routes, the latter gated by `auth_middleware`.
pub fn router_v1(state: AppState) -> Router<AppState> {
    let public_routes = Router::new().nest("/auth", auth_controller::auth_router(state.clone()));
    let protected_routes = Router::new().nest(
        "/protected",
        protected::protected_router(state.clone())
            // Auth check wraps every protected route.
            .layer(from_fn_with_state(state, auth_middleware)),
    );
    Router::new().merge(public_routes).merge(protected_routes)
}

View File

@ -1,25 +0,0 @@
use axum::{Router, extract::State, routing::post};
use tower_cookies::{CookieManagerLayer, Cookies};
use crate::{
controller::extractor::CurrentUser, errors::ApiError, service::auth_service, state::AppState,
};
/// Session-management routes mounted under the authenticated `/protected`
/// subtree.
pub fn router() -> Router<AppState> {
    Router::new()
        .route("/logout", post(logout_handler))
        .route("/logout-all", post(logout_all_handler))
        .layer(CookieManagerLayer::new())
}
/// `POST /logout` — revokes the refresh token held in the cookie, if any.
async fn logout_handler(State(s): State<AppState>, cookies: Cookies) -> Result<(), ApiError> {
    auth_service::logout(&s, cookies).await
}
/// `POST /logout-all` — revokes every active refresh token for the caller.
async fn logout_all_handler(
    State(s): State<AppState>,
    cookies: Cookies,
    CurrentUser(user_id): CurrentUser,
) -> Result<(), ApiError> {
    auth_service::logout_all(&s, cookies, user_id).await
}

View File

@ -1,11 +0,0 @@
use axum::Router;
use crate::state::AppState;
mod auth_protected_controller;
/// Routes that require authentication (the auth layer is applied by the
/// caller in `router_v1`). `_state` is currently unused but keeps the
/// signature consistent with the other router constructors.
pub fn protected_router(_state: AppState) -> Router<AppState> {
    Router::new()
        .nest("/auth", auth_protected_controller::router())
        .route("/ping", axum::routing::get("pong"))
}

View File

@ -1,17 +0,0 @@
use sqlx::{Pool, Postgres, postgres::PgPoolOptions};
use crate::errors::StartupError;
/// Connects to Postgres and runs the embedded `./migrations` before handing
/// the pool back to the caller.
///
/// # Errors
/// Returns `StartupError` when the connection or a migration fails.
pub async fn init(db_url: &str) -> Result<Pool<Postgres>, StartupError> {
    let db = PgPoolOptions::new()
        .connect(db_url)
        .await?;
    sqlx::migrate!("./migrations")
        .run(&db)
        .await?;
    tracing::info!("Migration completed successfully");
    Ok(db)
}

View File

@ -1,2 +0,0 @@
pub mod model;
pub mod repository;

View File

@ -1,3 +0,0 @@
pub mod organization;
pub mod refresh_token;
pub mod user;

View File

@ -1,34 +0,0 @@
use sqlx::{
prelude::FromRow,
types::{
Uuid,
chrono::{DateTime, Utc},
},
};
/// Row of the `organizations` table.
#[derive(Debug, FromRow)]
pub struct Organization {
    pub id: Uuid,
    pub name: String,
    // URL-safe identifier; presumably unique — confirm against the migration.
    pub slug: String,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}
/// Maps the Postgres enum `org_role` (values stored lowercase).
#[derive(sqlx::Type, Debug, Clone, PartialEq)]
#[sqlx(type_name = "org_role", rename_all = "lowercase")]
pub enum OrgRole {
    Owner,
    Admin,
    Member,
    Viewer,
}
/// Membership row linking a user to an organization with a role.
#[derive(Debug, FromRow)]
pub struct OrgMember {
    pub user_id: Uuid,
    pub org_id: Uuid,
    pub role: OrgRole,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}

View File

@ -1,14 +0,0 @@
use sqlx::types::{
Uuid,
chrono::{DateTime, Utc},
};
/// Row of the `refresh_tokens` table. Only a SHA-256 hash of the token is
/// stored (see `utils::refresh_token`); the plaintext lives solely in the
/// client cookie.
#[derive(Debug)]
pub struct RefreshToken {
    pub id: Uuid,
    pub user_id: Uuid,
    pub token_hash: String,
    pub expires_at: DateTime<Utc>,
    pub created_at: DateTime<Utc>,
    // `Some` once the token has been rotated or revoked.
    pub revoked_at: Option<DateTime<Utc>>,
}

View File

@ -1,16 +0,0 @@
use sqlx::{
prelude::FromRow,
types::{
Uuid,
chrono::{DateTime, Utc},
},
};
/// Row of the `users` table.
#[derive(Debug, FromRow)]
pub struct User {
    pub id: Uuid,
    pub email: String,
    // Argon2 hash of the password (see `utils::hash`), never the plaintext.
    pub password: String,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}

View File

@ -1,3 +0,0 @@
pub mod organization_repository;
pub mod refresh_token_repository;
pub mod user_repository;

View File

@ -1,44 +0,0 @@
use sqlx::{Executor, Postgres};
use uuid::Uuid;
use crate::{db::model::organization::Organization, errors::ApiError};
/// Inserts a new organization and returns the stored row.
///
/// Takes any sqlx executor (pool, connection, or transaction) so callers can
/// compose it into larger transactions.
pub async fn create_organization<'e, E>(
    executor: E,
    name: String,
    slug: String,
) -> Result<Organization, ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    let org = sqlx::query_as!(
        Organization,
        "insert into organizations (name, slug) values ($1, $2) returning *",
        name,
        slug
    )
    .fetch_one(executor)
    .await
    .map_err(ApiError::from)?;
    Ok(org)
}
/// Fetches every organization whose id appears in `ids` (result order is
/// whatever Postgres returns; no ORDER BY is applied).
pub async fn get_organizations_by_id_list<'e, E>(
    executor: E,
    ids: &[Uuid],
) -> Result<Vec<Organization>, ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    let org = sqlx::query_as!(
        Organization,
        "select * from organizations where id = any($1)",
        ids
    )
    .fetch_all(executor)
    .await
    .map_err(ApiError::from)?;
    Ok(org)
}

View File

@ -1,76 +0,0 @@
use sqlx::{
Executor, Postgres,
types::{
Uuid,
chrono::{DateTime, Utc},
},
};
use crate::{db::model::refresh_token::RefreshToken, errors::ApiError};
/// Persists a new refresh token (hash only) for `user_id` and returns the row.
pub async fn create_refresh_token<'e, E>(
    executor: E,
    user_id: Uuid,
    token_hash: String,
    expires_at: DateTime<Utc>,
) -> Result<RefreshToken, ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    sqlx::query_as!(
        RefreshToken,
        "insert into refresh_tokens (user_id, token_hash, expires_at) values ($1, $2, $3) returning *",
        user_id,
        token_hash,
        expires_at
    ).fetch_one(executor)
    .await
    .map_err(ApiError::from)
}
/// Looks a token up by its hash; returns `None` when no row matches.
/// Expiry/revocation checks are the caller's responsibility.
pub async fn find_by_hash<'e, E>(
    executor: E,
    token_hash: &str,
) -> Result<Option<RefreshToken>, ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    sqlx::query_as!(
        RefreshToken,
        "select * from refresh_tokens where token_hash = $1",
        token_hash
    )
    .fetch_optional(executor)
    .await
    .map_err(ApiError::from)
}
/// Marks one token as revoked (sets `revoked_at = now()`); idempotent in
/// effect, since re-running just overwrites the timestamp.
pub async fn revoke<'e, E>(executor: E, id: Uuid) -> Result<(), ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    sqlx::query!(
        "update refresh_tokens set revoked_at = now() where id = $1",
        id
    )
    .execute(executor)
    .await
    .map_err(ApiError::from)?;
    Ok(())
}
/// Revokes every still-active token for a user ("logout everywhere").
pub async fn revoke_all_for_user<'e, E>(
    executor: E,
    user_id: Uuid,
) -> Result<(), ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    sqlx::query!(
        "update refresh_tokens set revoked_at = now() where user_id = $1 and revoked_at is null",
        user_id
    )
    .execute(executor)
    .await
    .map_err(ApiError::from)?;
    Ok(())
}

View File

@ -1,48 +0,0 @@
use sqlx::{Executor, Postgres, types::Uuid};
use crate::{db::model::user::User, errors::ApiError};
/// Inserts a new user; `password` is expected to already be hashed by the
/// caller (the service layer hashes with Argon2 before calling this).
pub async fn create_user<'e, E>(
    executor: E,
    email: String,
    password: String,
) -> Result<User, ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    let user = sqlx::query_as!(
        User,
        "insert into users (email, password) values ($1, $2) returning *",
        email,
        password
    )
    .fetch_one(executor)
    .await
    .map_err(ApiError::from)?;
    Ok(user)
}
/// Finds a user by exact email match; `None` when absent.
pub async fn get_user_by_email<'e, E>(executor: E, email: &str) -> Result<Option<User>, ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    let user = sqlx::query_as!(User, "select * from users where email=$1", email)
        .fetch_optional(executor)
        .await
        .map_err(ApiError::from)?;
    Ok(user)
}
/// Finds a user by primary key; `None` when absent.
pub async fn get_user_by_id<'e, E>(executor: E, id: Uuid) -> Result<Option<User>, ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    let user = sqlx::query_as!(User, "select * from users where id=$1", id)
        .fetch_optional(executor)
        .await
        .map_err(ApiError::from)?;
    Ok(user)
}

View File

@ -1,71 +0,0 @@
use axum::{
Json,
http::StatusCode,
response::{IntoResponse, Response},
};
use thiserror::Error;
/// Errors that can abort process startup (configuration, database
/// connection, socket bind, migrations).
#[derive(Debug, Error)]
pub enum StartupError {
    #[error("Failed to load configuration: {0}")]
    Config(#[from] std::env::VarError),
    #[error("Invalid configuration value: {0}")]
    InvalidConfig(String),
    #[error("Failed to connect to database: {0}")]
    DbConnect(#[from] sqlx::Error),
    #[error("Failed to bind to address: {0}")]
    Bind(#[from] std::io::Error),
    #[error("Migration error: {0}")]
    Migration(#[from] sqlx::migrate::MigrateError),
}
/// Request-level errors; the `IntoResponse` impl below maps each variant to
/// an HTTP status and a JSON body.
#[derive(Debug, Error)]
pub enum ApiError {
    #[error("Failed to connect to database: {0}")]
    Database(#[from] sqlx::Error),
    #[error("Invalid credentials")]
    InvalidCredentials,
    #[error("Validation error: {0}")]
    Validation(String),
    #[error("Internal server error")]
    Internal,
    #[error("Request not authorized")]
    Unauthorized,
    #[error("Not Found")]
    NotFound,
}
impl IntoResponse for ApiError {
    // Database and Internal details are logged server-side and replaced with
    // a generic message so internals never leak to clients.
    fn into_response(self) -> Response {
        let (status, message) = match &self {
            ApiError::InvalidCredentials => (StatusCode::UNAUTHORIZED, self.to_string()),
            ApiError::Validation(_) => (StatusCode::BAD_REQUEST, self.to_string()),
            ApiError::Unauthorized => (StatusCode::UNAUTHORIZED, self.to_string()),
            ApiError::NotFound => (StatusCode::NOT_FOUND, self.to_string()),
            ApiError::Database(err) => {
                tracing::error!("Database error: {}", err);
                (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    "Internal server error".to_string(),
                )
            }
            ApiError::Internal => {
                tracing::error!("Internal server error");
                (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    "Internal server error".to_string(),
                )
            }
        };
        (status, Json(serde_json::json!({ "error": message }))).into_response()
    }
}

View File

@ -1,10 +0,0 @@
pub mod config;
pub mod controller;
pub mod database;
pub mod db;
pub mod errors;
pub mod logging;
pub mod server;
pub mod service;
pub mod state;
pub mod utils;

View File

@ -1,4 +1,3 @@
use tokio::signal;
use tracing_appender::{ use tracing_appender::{
non_blocking::WorkerGuard, non_blocking::WorkerGuard,
rolling::{RollingFileAppender, Rotation}, rolling::{RollingFileAppender, Rotation},
@ -61,27 +60,3 @@ impl LoggerConfig {
guard guard
} }
} }
/// Resolves when the process receives Ctrl+C or (on unix) SIGTERM; intended
/// for axum's `with_graceful_shutdown`.
pub async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };
    // SIGTERM only exists on unix; on other targets this branch never fires.
    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };
    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();
    tokio::select! {
        _ = ctrl_c => {tracing::info!("Process killed with CTRL+C")},
        _ = terminate => {tracing::info!("Process killed")},
    }
}

View File

@ -1,11 +1,10 @@
use rhythm_backend::{config, database, errors::StartupError, logging, server}; mod config;
mod logging;
#[tokio::main] fn main() {
async fn main() -> Result<(), StartupError> { let cfg = config::Config::load();
let cfg = config::Config::load()?;
let _logging_guard = logging::LoggerConfig::init(cfg.app_env); let _logging_guard = logging::LoggerConfig::init(cfg.app_env);
let db = database::init(&cfg.db_url).await?;
server::init(&cfg, db).await?;
Ok(()) tracing::info!("ciao");
tracing::debug!("{:?}", cfg);
} }

View File

@ -1,27 +0,0 @@
use axum::Router;
use sqlx::PgPool;
use crate::{config, controller, errors::StartupError, logging, state::AppState};
/// Builds the application state and router, binds the listener, and serves
/// until a shutdown signal arrives.
///
/// `into_make_service_with_connect_info` is required so the rate-limiting
/// middleware can read the client socket address via `ConnectInfo`.
pub async fn init(cfg: &config::Config, db: PgPool) -> Result<(), StartupError> {
    let state = AppState {
        db,
        jwt_secret: cfg.jwt_secret.clone(),
        rate_limit: std::sync::Arc::new(dashmap::DashMap::new()),
    };
    let app = Router::new()
        .merge(controller::router(state.clone()))
        .with_state(state);
    let listener = tokio::net::TcpListener::bind(&cfg.socket_address).await?;
    tracing::info!("Server started on {}", cfg.socket_address);
    axum::serve(
        listener,
        app.into_make_service_with_connect_info::<std::net::SocketAddr>(),
    )
    .with_graceful_shutdown(logging::shutdown_signal())
    .await?;
    Ok(())
}

View File

@ -1,180 +0,0 @@
use axum::Json;
use chrono::Duration;
use tower_cookies::cookie::SameSite;
use tower_cookies::{Cookie, Cookies};
use crate::controller::model::auth_model::*;
use crate::db::repository::refresh_token_repository::{create_refresh_token, find_by_hash, revoke};
use crate::db::repository::user_repository;
use crate::errors::ApiError;
use crate::state::AppState;
use crate::utils::hash;
use crate::utils::jwt::generate_access_token;
use crate::utils::refresh_token::{generate_refresh_token, hash_refresh_token};
/// Authenticates a user and starts a session.
///
/// Looks the user up by email, verifies the Argon2 password hash, then issues
/// a JWT access token plus a 7-day refresh token — the token's hash goes to
/// the DB, the plaintext into an HttpOnly cookie. Lookup and insert share one
/// transaction.
///
/// # Errors
/// `ApiError::InvalidCredentials` for an unknown email or wrong password.
pub async fn login(
    state: &AppState,
    cookies: Cookies,
    req: LoginRequest,
) -> Result<Json<AuthResponse>, ApiError> {
    let mut tx = state.db.begin().await?;
    let user = user_repository::get_user_by_email(&mut *tx, &req.email)
        .await?
        .ok_or_else(|| {
            tracing::warn!(email = %req.email, "Login failed: user not found");
            ApiError::InvalidCredentials
        })?;
    if !hash::verify(&req.password, &user.password)? {
        tracing::warn!(email = %req.email, "Login failed: invalid password");
        return Err(ApiError::InvalidCredentials);
    }
    let access_token = generate_access_token(user.id, &state.jwt_secret)?;
    let (refresh_plain, refresh_hash) = generate_refresh_token();
    let expires_at = chrono::Utc::now() + Duration::days(7);
    create_refresh_token(&mut *tx, user.id, refresh_hash, expires_at).await?;
    tx.commit().await?;
    // Cookie is only set after the commit succeeds.
    set_refresh_cookie(&cookies, &refresh_plain);
    Ok(Json(AuthResponse { access_token }))
}
/// Registers a new user and immediately starts a session.
///
/// Rejects passwords with a zxcvbn score below 3, rejects duplicate emails
/// with a deliberately generic "bad request" (presumably to avoid confirming
/// which emails exist — the timing middleware covers the other channel), then
/// stores the Argon2 hash and issues access + refresh tokens like `login`.
pub async fn register(
    state: &AppState,
    cookies: Cookies,
    req: RegisterRequest,
) -> Result<Json<AuthResponse>, ApiError> {
    // Password strength gate runs before touching the database.
    let estimate = zxcvbn::zxcvbn(&req.password, &[]);
    if (estimate.score() as u8) < 3 {
        tracing::warn!(email = %req.email, score = ?estimate.score(), "Registration failed: password too weak");
        return Err(ApiError::Validation(
            "Password is too weak. Please use a more complex password.".to_string(),
        ));
    }
    let mut tx = state.db.begin().await?;
    if user_repository::get_user_by_email(&mut *tx, &req.email)
        .await?
        .is_some()
    {
        tracing::warn!(email = %req.email, "Registration failed: email already exists");
        return Err(ApiError::Validation("bad request".to_string()));
    }
    let h = hash::hash(&req.password)?;
    let user = user_repository::create_user(&mut *tx, req.email, h).await?;
    let access_token = generate_access_token(user.id, &state.jwt_secret)?;
    let (refresh_plain, refresh_hash) = generate_refresh_token();
    let expires_at = chrono::Utc::now() + Duration::days(7);
    create_refresh_token(&mut *tx, user.id, refresh_hash, expires_at).await?;
    tx.commit().await?;
    set_refresh_cookie(&cookies, &refresh_plain);
    Ok(Json(AuthResponse { access_token }))
}
/// Rotates a refresh token: validates the cookie's token (exists, not
/// revoked, not expired), revokes it, and issues a fresh access token plus a
/// new 7-day refresh token inside one transaction.
///
/// NOTE(review): reuse of an already-revoked token only returns 401 here; it
/// does not revoke the user's other sessions — consider reuse detection.
pub async fn refresh(state: &AppState, cookies: Cookies) -> Result<Json<AuthResponse>, ApiError> {
    let refresh_token = get_refresh_cookie(&cookies).ok_or(ApiError::InvalidCredentials)?;
    let mut tx = state.db.begin().await?;
    // Only the SHA-256 hash is stored, so hash before lookup.
    let hash = hash_refresh_token(&refresh_token);
    let token_data = find_by_hash(&mut *tx, &hash)
        .await?
        .ok_or(ApiError::InvalidCredentials)?;
    if token_data.revoked_at.is_some() || token_data.expires_at < chrono::Utc::now() {
        return Err(ApiError::InvalidCredentials);
    }
    // Rotation: the old token is revoked in the same transaction that
    // creates its replacement.
    revoke(&mut *tx, token_data.id).await?;
    let access_token = generate_access_token(token_data.user_id, &state.jwt_secret)?;
    let (refresh_plain, refresh_hash) = generate_refresh_token();
    let expires_at = chrono::Utc::now() + Duration::days(7);
    create_refresh_token(&mut *tx, token_data.user_id, refresh_hash, expires_at).await?;
    tx.commit().await?;
    set_refresh_cookie(&cookies, &refresh_plain);
    Ok(Json(AuthResponse { access_token }))
}
use crate::db::repository::refresh_token_repository::revoke_all_for_user;
/// Ends the current session: revokes the cookie's refresh token (if it still
/// exists) and clears the cookie. Succeeds silently when no cookie is set.
pub async fn logout(state: &AppState, cookies: Cookies) -> Result<(), ApiError> {
    let refresh_token = match get_refresh_cookie(&cookies) {
        Some(t) => t,
        None => return Ok(()), // Already logged out
    };
    let mut tx = state.db.begin().await?;
    let hash = hash_refresh_token(&refresh_token);
    if let Some(token_data) = find_by_hash(&mut *tx, &hash).await? {
        revoke(&mut *tx, token_data.id).await?;
    }
    tx.commit().await?;
    remove_refresh_cookie(&cookies);
    Ok(())
}
/// Ends every session for `user_id` by revoking all of their active refresh
/// tokens, then clears this client's cookie.
pub async fn logout_all(
    state: &AppState,
    cookies: Cookies,
    user_id: uuid::Uuid,
) -> Result<(), ApiError> {
    let mut tx = state.db.begin().await?;
    revoke_all_for_user(&mut *tx, user_id).await?;
    tx.commit().await?;
    remove_refresh_cookie(&cookies);
    Ok(())
}
/// Name of the HttpOnly cookie that carries the plaintext refresh token.
const REFRESH_COOKIE_NAME: &str = "refresh_token";
/// Clears the refresh cookie on the client by expiring it immediately.
fn remove_refresh_cookie(cookies: &Cookies) {
    let cookie = Cookie::build((REFRESH_COOKIE_NAME, ""))
        .path("/")
        .max_age(time::Duration::ZERO) // Expire immediately
        .build();
    cookies.remove(cookie);
}
/// Sets the refresh cookie: HttpOnly + Secure + SameSite=Strict, scoped to
/// the whole site, with a 7-day lifetime matching the DB expiry.
fn set_refresh_cookie(cookies: &Cookies, token: &str) {
    let cookie = Cookie::build((REFRESH_COOKIE_NAME, token.to_owned()))
        .http_only(true)
        .secure(true)
        .same_site(SameSite::Strict)
        .path("/")
        .max_age(time::Duration::days(7))
        .build();
    cookies.add(cookie);
}
/// Reads the refresh cookie's value, if present.
fn get_refresh_cookie(cookies: &Cookies) -> Option<String> {
    cookies
        .get(REFRESH_COOKIE_NAME)
        .map(|c| c.value().to_string())
}

View File

@ -1 +0,0 @@
pub mod auth_service;

View File

@ -1,55 +0,0 @@
use axum::extract::FromRef;
use dashmap::DashMap;
use sqlx::PgPool;
use std::sync::Arc;
use std::time::Instant;
/// Shared application state, cloned into every handler and middleware.
#[derive(Clone)]
pub struct AppState {
    pub db: PgPool,
    // Symmetric secret used to sign and verify JWT access tokens.
    pub jwt_secret: String,
    // Per-client token buckets used by the rate-limiting middleware,
    // keyed by client IP string.
    pub rate_limit: Arc<DashMap<String, TokenBucket>>,
}
/// Lets extractors that only need the pool take `State<PgPool>` directly.
impl FromRef<AppState> for PgPool {
    fn from_ref(state: &AppState) -> Self {
        state.db.clone()
    }
}
/// Token-bucket state for one rate-limit key (e.g. a client IP).
///
/// `tokens` is fractional so refill can accrue smoothly between requests;
/// `last_refill` records when the bucket was last topped up.
pub struct TokenBucket {
    pub tokens: f64,
    pub last_refill: Instant,
}
// --- Rate Limiting Configuration ---
/// Steady-state refill rate, in tokens per minute.
const REQUESTS_PER_MINUTE: f64 = 5.0;
/// Maximum burst size; the bucket never holds more than this.
const BUCKET_CAPACITY: f64 = 5.0;
impl TokenBucket {
    /// Creates a full bucket, allowing an initial burst of
    /// `BUCKET_CAPACITY` requests.
    pub fn new() -> Self {
        Self {
            tokens: BUCKET_CAPACITY,
            last_refill: Instant::now(),
        }
    }
    /// Tops the bucket up proportionally to the time elapsed since the last
    /// refill, capped at `BUCKET_CAPACITY`.
    fn refill(&mut self) {
        let now = Instant::now();
        let elapsed = now.duration_since(self.last_refill).as_secs_f64();
        let tokens_per_second = REQUESTS_PER_MINUTE / 60.0;
        self.tokens = (self.tokens + elapsed * tokens_per_second).min(BUCKET_CAPACITY);
        self.last_refill = now;
    }
    /// Attempts to consume one token; returns `true` when the request is
    /// allowed, `false` when the bucket is empty (rate-limited).
    pub fn try_drain(&mut self) -> bool {
        self.refill();
        if self.tokens >= 1.0 {
            self.tokens -= 1.0;
            true
        } else {
            false
        }
    }
}
// `Default` mirrors `new()` so call sites can use `entry(..).or_default()`;
// also satisfies clippy's `new_without_default`.
impl Default for TokenBucket {
    fn default() -> Self {
        Self::new()
    }
}

View File

@ -1,31 +0,0 @@
use argon2::{
Argon2, PasswordHash, PasswordHasher, PasswordVerifier,
password_hash::{SaltString, rand_core::OsRng},
};
use crate::errors::ApiError;
/// Hashes `text` with Argon2 (default parameters) and a fresh random salt,
/// returning the PHC-format string suitable for storage.
///
/// # Errors
/// `ApiError::Internal` when hashing fails; the cause is logged server-side.
pub fn hash(text: &str) -> Result<String, ApiError> {
    let salt = SaltString::generate(&mut OsRng);
    let argon2 = Argon2::default();
    let password_hash = argon2
        .hash_password(text.as_bytes(), &salt)
        .map_err(|e| {
            tracing::error!("Hash error: {}", e);
            ApiError::Internal
        })?;
    Ok(password_hash.to_string())
}
/// Verifies `text` against a stored PHC-format `hash`.
///
/// Returns `Ok(false)` for a mismatch (any verification failure is treated
/// as "no match"); `Err(Internal)` only when the stored hash cannot be
/// parsed at all.
pub fn verify(text: &str, hash: &str) -> Result<bool, ApiError> {
    let parsed_hash = PasswordHash::new(hash)
        .map_err(|e| {
            tracing::error!("Hash parsing error: {}", e);
            ApiError::Internal
        })?;
    let argon2 = Argon2::default();
    Ok(argon2.verify_password(text.as_bytes(), &parsed_hash).is_ok())
}

View File

@ -1,45 +0,0 @@
use chrono::{Duration, Utc};
use jsonwebtoken::{EncodingKey, Header, encode};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::errors::ApiError;
/// JWT claims carried by the access token.
#[derive(Debug, Serialize, Deserialize)]
pub struct Claims {
    // Subject: the authenticated user's id.
    pub sub: Uuid,
    // Issued-at, unix seconds.
    pub iat: i64,
    // Expiry, unix seconds (15 minutes after issue).
    pub exp: i64,
    // Unique token id, useful for future revocation/audit.
    pub jti: Uuid,
}
/// Signs a 15-minute access token for `user_id` with HMAC (the library's
/// default header algorithm) over `secret`.
///
/// # Errors
/// `ApiError::Internal` when encoding fails.
pub fn generate_access_token(user_id: Uuid, secret: &str) -> Result<String, ApiError> {
    let now = Utc::now();
    let expires_at = now + Duration::minutes(15);
    let claims = Claims {
        sub: user_id,
        iat: now.timestamp(),
        exp: expires_at.timestamp(),
        jti: Uuid::new_v4(),
    };
    encode(
        &Header::default(),
        &claims,
        &EncodingKey::from_secret(secret.as_bytes()),
    )
    .map_err(|_| ApiError::Internal)
}
/// Decodes and validates an access token, returning its claims.
///
/// # Errors
/// `ApiError::Unauthorized` for any invalid, tampered, or expired token.
pub fn verify_access_token(token: &str, secret: &str) -> Result<Claims, ApiError> {
    let mut validation = jsonwebtoken::Validation::default();
    // Explicit, although expiry validation is also jsonwebtoken's default.
    validation.validate_exp = true; // Ensure expired tokens are rejected
    jsonwebtoken::decode::<Claims>(
        token,
        &jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()),
        &validation,
    )
    .map(|data| data.claims)
    .map_err(|_| ApiError::Unauthorized) // Map any JWT error to 401
}

View File

@ -1,3 +0,0 @@
pub mod hash;
pub mod jwt;
pub mod refresh_token;

View File

@ -1,20 +0,0 @@
use rand::Rng;
use sha2::{Digest, Sha256};
/// Generates a refresh token, returning `(plaintext, hash)`: the plaintext
/// (64 hex chars from 32 random bytes) goes to the client cookie, the
/// SHA-256 hash to the database.
pub fn generate_refresh_token() -> (String, String) {
    let mut bytes = [0u8; 32];
    let mut thread_rng = rand::rng();
    thread_rng.fill_bytes(&mut bytes);
    let plain = hex::encode(bytes); // 64 hex chars for user
    let hash = hash_refresh_token(&plain);
    (plain, hash)
}
/// Hashes a plaintext refresh token for storage/lookup. SHA-256 (unsalted)
/// is used here rather than Argon2 since the input is already high-entropy.
pub fn hash_refresh_token(plain: &str) -> String {
    // SHA-256 for DB storage
    let mut hasher = Sha256::new();
    hasher.update(&plain);
    hex::encode(hasher.finalize())
}

View File

@ -1,3 +0,0 @@
pub mod registration_login;
pub mod security;
pub mod session;

View File

@ -1,34 +0,0 @@
use crate::common::{setup_app, spawn_server};
/// End-to-end happy path: register a fresh user, log in with the right
/// password (200 + access_token), then with a wrong password (401).
#[tokio::test]
async fn test_register_and_login() {
    let (app, _db) = setup_app().await;
    let (base_url, client) = spawn_server(app).await;
    // Unique email avoids collisions with earlier test runs.
    let email = format!("user_{}@test.com", uuid::Uuid::new_v4());
    // Register
    let resp = client
        .post(format!("{}/api/v1/auth/register", base_url))
        .json(&serde_json::json!({"email": email, "password": "SuperSecureP@ssw0rd2024!"}))
        .send().await.unwrap();
    assert!(resp.status().is_success(), "Register failed: {}", resp.text().await.unwrap_or_default());
    let body: serde_json::Value = resp.json().await.unwrap();
    assert!(body["access_token"].is_string());
    // Login success
    let resp = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .json(&serde_json::json!({"email": email, "password": "SuperSecureP@ssw0rd2024!"}))
        .send().await.unwrap();
    assert!(resp.status().is_success());
    // Login failure
    let resp = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .json(&serde_json::json!({"email": email, "password": "WrongPassword"}))
        .send().await.unwrap();
    assert_eq!(resp.status(), 401);
}

View File

@ -1,83 +0,0 @@
use crate::common::{setup_app, spawn_server};
use std::time::{Duration, Instant};
/// With bucket capacity 5, six concurrent requests from one IP must include
/// at least one 429 and at most five passes, while a different IP is
/// unaffected.
#[tokio::test]
async fn test_rate_limiting_blocks_spam() {
    let (app, _db) = setup_app().await;
    let (base_url, client) = spawn_server(app).await;
    // Send 6 requests CONCURRENTLY so they arrive nearly simultaneously
    // Capacity is 5, so exactly 5 should pass and 1 should fail.
    let mut handles = Vec::new();
    for _ in 0..6 {
        let c = client.clone();
        let url = format!("{}/api/v1/auth/login", base_url);
        handles.push(tokio::spawn(async move {
            c.post(&url)
                .header("x-client-ip", "1.2.3.4")
                .json(&serde_json::json!({"email": "a@test.com", "password": "b"}))
                .send().await.unwrap()
                .status().as_u16()
        }));
    }
    let statuses: Vec<u16> = futures_util::future::join_all(handles).await
        .into_iter().map(|r| r.unwrap()).collect();
    let successes = statuses.iter().filter(|&&s| s != 429).count();
    let blocked = statuses.iter().filter(|&&s| s == 429).count();
    assert!(successes <= 5, "At most 5 rapid requests should be allowed, got {} passing", successes);
    assert!(blocked >= 1, "At least 1 request should be rate-limited (429)");
    // Different IP should still work
    let ok = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .header("x-client-ip", "5.6.7.8")
        .json(&serde_json::json!({"email": "a@test.com", "password": "b"}))
        .send().await.unwrap();
    assert_ne!(ok.status(), 429, "Different IP should not be rate-limited");
}
/// Checks that the random-delay middleware pads both "user not found" and
/// "wrong password" login paths to at least 150ms, keeping their timing
/// difference small enough to frustrate account enumeration.
#[tokio::test]
async fn test_anti_enumeration_timing() {
    let (app, _db) = setup_app().await;
    let (base_url, client) = spawn_server(app).await;
    // Time a login against a non-existent account.
    let start = Instant::now();
    let _ = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .header("x-client-ip", "1.1.1.1") // Different IP to avoid rate limits from previous requests in other tests if run concurrently
        .json(&serde_json::json!({"email": "ghost_not_real@test.com", "password": "irrelevant"}))
        .send().await.unwrap();
    let duration_nonexistent = start.elapsed();
    // Create a real account, then time a wrong-password login against it.
    let email = format!("timing_{}@test.com", uuid::Uuid::new_v4());
    let resp = client
        .post(format!("{}/api/v1/auth/register", base_url))
        .header("x-client-ip", "2.2.2.2")
        .json(&serde_json::json!({"email": email, "password": "SuperSecureP@ssw0rd2024!"}))
        .send().await.unwrap();
    assert!(resp.status().is_success());
    let start = Instant::now();
    let _ = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .header("x-client-ip", "3.3.3.3")
        .json(&serde_json::json!({"email": email, "password": "WrongPassword123!"}))
        .send().await.unwrap();
    let duration_existent = start.elapsed();
    // Anti-enumeration middleware ensures BOTH take >= 150ms
    assert!(duration_nonexistent >= Duration::from_millis(150), "Fast path should be padded to >= 150ms");
    assert!(duration_existent >= Duration::from_millis(150), "Slow path should be padded to >= 150ms");
    let diff = if duration_nonexistent > duration_existent {
        duration_nonexistent - duration_existent
    } else {
        duration_existent - duration_nonexistent
    };
    assert!(diff < Duration::from_millis(300),
        "Timing difference should be <300ms because both paths are padded by random delay: got {:?}", diff);
}

View File

@ -1,62 +0,0 @@
use crate::common::{setup_app, spawn_server};
/// Protected routes must reject unauthenticated requests (401) and accept a
/// bearer token obtained via registration (200).
#[tokio::test]
async fn test_protected_route_requires_auth() {
    let (app, _db) = setup_app().await;
    let (base_url, client) = spawn_server(app).await;
    // No token → 401
    let resp = client
        .get(format!("{}/api/v1/protected/ping", base_url))
        .send().await.unwrap();
    assert_eq!(resp.status(), 401, "Protected route should require auth");
    // With token → 200
    let email = format!("protected_{}@test.com", uuid::Uuid::new_v4());
    let reg = client
        .post(format!("{}/api/v1/auth/register", base_url))
        .json(&serde_json::json!({"email": email, "password": "SuperSecureP@ssw0rd2024!"}))
        .send().await.unwrap();
    let token: serde_json::Value = reg.json().await.unwrap();
    let resp = client
        .get(format!("{}/api/v1/protected/ping", base_url))
        .bearer_auth(token["access_token"].as_str().unwrap())
        .send().await.unwrap();
    assert_eq!(resp.status(), 200, "Protected route should succeed with valid token");
}
/// Session lifecycle: refresh succeeds while the cookie's token is live,
/// and fails with 401 after `logout-all` revokes every token.
/// Relies on the reqwest client's cookie store carrying the refresh cookie.
#[tokio::test]
async fn test_refresh_and_logout_all() {
    let (app, _db) = setup_app().await;
    let (base_url, client) = spawn_server(app).await;
    // Register + login to get a valid session
    let email = format!("refresh_{}@test.com", uuid::Uuid::new_v4());
    let reg = client
        .post(format!("{}/api/v1/auth/register", base_url))
        .json(&serde_json::json!({"email": email, "password": "SuperSecureP@ssw0rd2024!"}))
        .send().await.unwrap();
    let _token: serde_json::Value = reg.json().await.unwrap();
    // Refresh should work
    let refreshed = client
        .post(format!("{}/api/v1/auth/refresh", base_url))
        .send().await.unwrap();
    assert!(refreshed.status().is_success(), "Refresh should succeed with cookie");
    let new_token: serde_json::Value = refreshed.json().await.unwrap();
    assert!(new_token["access_token"].is_string());
    // Logout all
    let resp = client
        .post(format!("{}/api/v1/protected/auth/logout-all", base_url))
        .bearer_auth(new_token["access_token"].as_str().unwrap())
        .send().await.unwrap();
    assert!(resp.status().is_success(), "logout-all should succeed");
    // After logout-all, refresh should fail
    let fail = client
        .post(format!("{}/api/v1/auth/refresh", base_url))
        .send().await.unwrap();
    assert_eq!(fail.status(), 401, "Refresh should fail after logout-all");
}

View File

@ -1,2 +0,0 @@
mod common;
mod auth;

View File

@ -1,103 +0,0 @@
use reqwest::Client;
use rhythm_backend::{
controller, state::AppState,
};
use axum::Router;
use sqlx::PgPool;
use std::sync::Arc;
use dashmap::DashMap;
/// Builds the app router against a locally running Postgres instance,
/// running migrations first. Requires the hard-coded dev database below to
/// be up; panics otherwise.
async fn setup_app() -> (Router, PgPool) {
    let db_url = "postgres://user:password@localhost:5432/rhythm-dev?sslmode=disable";
    let db = PgPool::connect(db_url)
        .await
        .expect("Failed to connect to Postgres at localhost:5432");
    // Run migrations
    sqlx::migrate!("./migrations")
        .run(&db)
        .await
        .expect("Failed to run migrations");
    let state = AppState {
        db: db.clone(),
        jwt_secret: "test-secret-key-12345678901234567890".to_string(),
        rate_limit: Arc::new(DashMap::new()),
    };
    let app = controller::router(state.clone()).with_state(state);
    (app, db)
}
/// Register/login/wrong-password flow, with server spawning done inline.
/// NOTE(review): duplicates the common `spawn_server` helper and the shorter
/// registration_login test — candidate for consolidation.
#[tokio::test]
async fn test_register_and_login() {
    let (app, _db) = setup_app().await;
    // Start the server on a random port
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
    let port = listener.local_addr().unwrap().port();
    tokio::spawn(async move {
        axum::serve(listener, app.into_make_service()).await.unwrap();
    });
    // Give server a moment to start
    tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
    let base_url = format!("http://127.0.0.1:{}", port);
    let client = Client::new();
    // Use a unique email so we don't conflict with previous test runs
    let unique_email = format!("testuser_{}@test.com", uuid::Uuid::new_v4());
    // Test 1: Register
    let reg_resp = client
        .post(format!("{}/api/v1/auth/register", base_url))
        .json(&serde_json::json!({
            "email": unique_email,
            "password": "SuperSecureP@ssw0rd2024!"
        }))
        .send()
        .await
        .unwrap();
    println!("Register status: {}", reg_resp.status());
    assert!(
        reg_resp.status().is_success(),
        "Register should succeed: {}",
        reg_resp.text().await.unwrap_or_default()
    );
    // Test 2: Login with correct password
    let login_resp = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .json(&serde_json::json!({
            "email": unique_email,
            "password": "SuperSecureP@ssw0rd2024!"
        }))
        .send()
        .await
        .unwrap();
    println!("Login status: {}", login_resp.status());
    assert!(login_resp.status().is_success(), "Login should succeed with correct password: {}", login_resp.text().await.unwrap_or_default());
    let body: serde_json::Value = login_resp.json().await.unwrap();
    assert!(body.get("access_token").is_some(), "Login response should have access_token");
    // Test 3: Login with wrong password
    let bad_login = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .json(&serde_json::json!({
            "email": unique_email,
            "password": "WrongPassword1!"
        }))
        .send()
        .await
        .unwrap();
    println!("Bad login status: {}", bad_login.status());
    assert_eq!(bad_login.status(), 401, "Login with wrong password should return 401");
}

View File

@ -1,48 +0,0 @@
use reqwest::Client;
use std::time::Duration;
use rhythm_backend::{controller, state::AppState};
use axum::Router;
use sqlx::PgPool;
use std::sync::Arc;
use dashmap::DashMap;
/// Shared integration-test fixture: connects to the local dev Postgres,
/// runs migrations, and returns the app router plus the pool.
/// Panics when the hard-coded database is unreachable.
pub async fn setup_app() -> (Router, PgPool) {
    let db_url = "postgres://user:password@localhost:5432/rhythm-dev?sslmode=disable";
    let db = PgPool::connect(db_url)
        .await
        .expect("Failed to connect to Postgres at localhost:5432");
    sqlx::migrate!("./migrations")
        .run(&db)
        .await
        .expect("Failed to run migrations");
    let state = AppState {
        db: db.clone(),
        jwt_secret: "test-secret-key-12345678901234567890".to_string(),
        rate_limit: Arc::new(DashMap::new()),
    };
    let app = controller::router(state.clone()).with_state(state);
    (app, db)
}
/// Serves `app` on an ephemeral localhost port in a background task and
/// returns its base URL plus a cookie-enabled reqwest client (the cookie
/// store is required for the refresh-token tests).
pub async fn spawn_server(app: axum::Router) -> (String, Client) {
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
    let port = listener.local_addr().unwrap().port();
    tokio::spawn(async move {
        axum::serve(listener, app.into_make_service()).await.unwrap();
    });
    // Brief pause so the accept loop is running before tests fire requests.
    tokio::time::sleep(Duration::from_millis(100)).await;
    let base_url = format!("http://127.0.0.1:{}", port);
    let client = Client::builder()
        .cookie_store(true)
        .build()
        .unwrap();
    (base_url, client)
}

19
todo.md
View File

@ -1,19 +0,0 @@
# Project Roadmap TODO
## 1. Organizations & Roles (Next Up)
- [ ] Create `0004_create_org_memberships_table.sql` with `org_role` ENUM (owner, admin, member, viewer).
- [ ] Implement `src/db/repository/organization_repository.rs` with `create_org_with_owner` (using `&mut sqlx::Transaction`).
- [ ] Add slug generation utility (name -> `slug-a7x9`).
- [ ] Build `POST /api/v1/orgs` and `GET /api/v1/orgs` endpoints using the new `CurrentUser` and `ValidJson` extractors.
## 2. Projects Layer
- [ ] Create projects table (belongs to `org_id`).
- [ ] Create `project_memberships` table for specific project access (inherits downward from Org roles).
- [ ] CRUD endpoints for projects nested under `/api/v1/orgs/{org_slug}/projects`.
## 3. Core Issue Tracking
- [ ] Create issues table (belongs to `project_id`).
- [ ] Allow Projects to define their own custom workflow stages (e.g., Todo, In Progress, QA, Done).
## 4. Power-User Features
- [ ] Build "Agglomeration Views": Allow creating Org-level Kanban boards that span multiple projects and map project-specific stages to unified columns.