Compare commits

...

17 Commits

Author SHA1 Message Date
8b69b59485
fixed old todos
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 13m48s
2026-05-04 23:49:33 +02:00
0173e01f49
added sqlx prepares
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 2m36s
2026-05-04 23:32:33 +02:00
a846093fbf
initial work for organizations
Some checks failed
Build and Push Docker Image / build-and-push (push) Failing after 1m30s
2026-05-04 23:30:41 +02:00
4cd399f2a0
todos
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 2m51s
2026-05-03 22:57:55 +02:00
fce2d67c69
test cache 2026-05-03 22:50:28 +02:00
4fd9c85474
cache
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 13m49s
2026-05-03 22:42:14 +02:00
ed7fff4983
cache
Some checks are pending
Build and Push Docker Image / build-and-push (push) Waiting to run
2026-05-03 22:39:22 +02:00
adb95de2a9
sqlx prepare
Some checks failed
Build and Push Docker Image / build-and-push (push) Has been cancelled
2026-05-03 22:34:54 +02:00
e3d4f8eac8
authentication for the backend
Some checks failed
Build and Push Docker Image / build-and-push (push) Failing after 11m16s
2026-05-03 22:23:11 +02:00
f78054fecd
initial auth stuff
Some checks failed
Build and Push Docker Image / build-and-push (push) Failing after 10m55s
2026-05-03 19:26:29 +02:00
eb436cf14c
fix
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 13m11s
2026-05-03 12:11:50 +02:00
308639e418
rate limiting and anti enumeration
Some checks failed
Build and Push Docker Image / build-and-push (push) Has been cancelled
2026-05-03 12:10:21 +02:00
bc7866b4fb
removed middlewares
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 12m56s
2026-05-03 11:22:37 +02:00
8996161cc9
anti enumeration adn rate limit
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 12m50s
2026-05-02 14:06:54 +02:00
f1ddaf5f2d
intial login stuff
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 11m52s
2026-05-02 11:01:47 +02:00
07c3da2b71
new cookie register flow
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 11m36s
2026-04-30 18:40:26 +02:00
505100d930
added tracing per req and initial cookies
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 10m25s
2026-04-30 17:58:52 +02:00
58 changed files with 2989 additions and 194 deletions

View File

@ -31,5 +31,5 @@ jobs:
file: ./Dockerfile
push: true
tags: git.kanopo.dev/rhythm/rhythm-backend:latest
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache,mode=max
cache-from: type=registry,ref=git.kanopo.dev/rhythm/rhythm-backend:buildcache
cache-to: type=registry,ref=git.kanopo.dev/rhythm/rhythm-backend:buildcache,mode=max

View File

@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "update refresh_tokens set revoked_at = now() where user_id = $1 and revoked_at is null",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Uuid"
]
},
"nullable": []
},
"hash": "011508dbe03cb96438e135c460895932bc47b4055ee9329625fba56a73c55f29"
}

View File

@ -0,0 +1,47 @@
{
"db_name": "PostgreSQL",
"query": "insert into organizations (name, slug) values ($1, $2) returning *",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "name",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "slug",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "updated_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"Varchar",
"Varchar"
]
},
"nullable": [
false,
false,
false,
false,
false
]
},
"hash": "918e7e43a258341fc3380bc26bb354610a9854ddbf318281446770502b5a0183"
}

View File

@ -0,0 +1,46 @@
{
"db_name": "PostgreSQL",
"query": "select * from organizations where id = any($1)",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Uuid"
},
{
"ordinal": 1,
"name": "name",
"type_info": "Varchar"
},
{
"ordinal": 2,
"name": "slug",
"type_info": "Varchar"
},
{
"ordinal": 3,
"name": "created_at",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "updated_at",
"type_info": "Timestamptz"
}
],
"parameters": {
"Left": [
"UuidArray"
]
},
"nullable": [
false,
false,
false,
false,
false
]
},
"hash": "bcd0b53a358d5a167a269bee7726acbb7f878614fd3e460f18efdac41106a3a2"
}

1554
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -16,9 +16,22 @@ thiserror = "2"
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.149"
argon2 = "0.5.3"
jsonwebtoken = { version = "10.3.0", features = ["rand"] }
jsonwebtoken = { version = "10.3.0", features = ["rand", "rust_crypto"] }
chrono = { version = "0.4.44", features = ["serde"] }
uuid = { version = "1.23.1", features = ["serde", "v4"] }
rand = "0.10.1"
sha2 = "0.11.0"
hex = "0.4.3"
tower-cookies = "0.11.0"
tower-http = { version = "0.6.8", features = ["trace"] }
time = "0.3.47"
tower = "0.5.3"
futures-util = "0.3.32"
dashmap = "6.1.0"
zxcvbn = "3.1.1"
validator = { version = "0.20.0", features = ["derive"] }
[dev-dependencies]
testcontainers = "0.23.1"
testcontainers-modules = { version = "0.11.4", features = ["postgres"] }
reqwest = { version = "0.12", features = ["json", "cookies"] }

View File

@ -1,7 +1,16 @@
FROM rust:1.95.0-alpine3.22 AS builder
WORKDIR /app
# Cache dependencies by building a dummy project first
COPY Cargo.toml Cargo.lock ./
RUN mkdir src && echo "fn main() {}" > src/main.rs
RUN cargo build --release
RUN rm -rf src
# Copy real source code and build
COPY . .
# Touch the main file to ensure cargo sees it as newer than the dummy build
RUN touch src/main.rs
RUN cargo build --release
# Small runtime image

66
README.md Normal file
View File

@ -0,0 +1,66 @@
# Rhythm Backend API Documentation
## Authentication System Overview
The authentication system is built with a security-first approach, featuring multi-layered protection against common web vulnerabilities.
### Security Layers (Middleware)
1. **Rate Limiting (Anti-Spam Bucket)**
- **Mechanism:** Token Bucket (in-memory `DashMap`).
- **Logic:** Identifies users via the `X-Client-IP` header (trusted from proxy).
- **Config:** 5 attempts per minute, refilling 1 token every 12 seconds.
- **Response:** `429 Too Many Requests`.
2. **Anti-Enumeration (Timing Protection)**
- **Mechanism:** Variable response delay.
- **Logic:** Ensures every authentication request takes between 150ms and 500ms.
- **Purpose:** Hides whether an account exists or a password was correct from timing analysis.
### Current API Endpoints
#### `POST /api/v1/auth/register`
Registers a new user.
- **Payload:** `RegisterRequest { email, password }`
- **Response:** `200 OK` with `AuthResponse { access_token }`
- **Side Effect:** Sets an `HttpOnly`, `Secure`, `SameSite=Strict` cookie named `refresh_token`.
#### `POST /api/v1/auth/login`
Authenticates a user.
- **Payload:** `LoginRequest { email, password }`
- **Response:** `200 OK` with `AuthResponse { access_token }`
- **Side Effect:** Sets a new `refresh_token` cookie.
#### `POST /api/v1/auth/refresh`
Rotates tokens for an active session.
- **Requirement:** Valid `refresh_token` cookie.
- **Response:** `200 OK` with new `access_token`.
- **Rotation Logic:** Revokes the old refresh token and issues a completely new one (Rotation) to prevent session hijacking.
#### `POST /api/v1/auth/logout`
Invalidates the current session.
- **Requirement:** Valid `refresh_token` cookie.
- **Response:** `200 OK`.
- **Logic:** Revokes the refresh token in the database and clears the HttpOnly cookie.
---
## Security Features Detail
### 1. Rate Limiting (Anti-Spam)
Protects against brute-force and DoS attacks by limiting requests per IP address. Uses an in-memory Token Bucket algorithm.
### 2. Anti-Enumeration (Timing Protection)
Ensures that the time taken to process an auth request is independent of the result (e.g., whether a user exists or not). This prevents attackers from using timing differences to discover valid emails.
### 3. Password Strength (zxcvbn)
Uses Dropbox's `zxcvbn` algorithm to estimate password entropy. Registration requires a score of at least 3/4.
### 4. Refresh Token Rotation
Every time a refresh token is used to get a new access token, the old refresh token is invalidated and a new one is issued. This limits the window of opportunity if a refresh token is leaked.
test cache docker build and gitea

View File

@ -0,0 +1,15 @@
info:
name: base ping health
type: http
seq: 2
http:
method: GET
url: "{{base_url}}/"
auth: inherit
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -0,0 +1,8 @@
name: test
variables:
- name: base_url
value: http://localhost:6969
- name: access_token
value: ""
- name: refresh_token
value: ""

37
http_client/login.yml Normal file
View File

@ -0,0 +1,37 @@
info:
name: login
type: http
seq: 3
http:
method: POST
url: "{{base_url}}/api/v1/auth/login"
body:
type: json
data: |
{
"email": "a@a.it",
"password": "Password1!6969_"
}
auth: inherit
runtime:
scripts:
- type: after-response
code: |-
const response = res.getBody();
const token = response.access_token;
bru.setEnvVar("access_token", token);
console.log("login - access_token:", token);
const cookies = res.getHeaders()['set-cookie'];
if (cookies) {
bru.setEnvVar("refresh_token", cookies[0]);
console.log("login - refresh_token:", cookies[0]);
}
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

28
http_client/logout.yml Normal file
View File

@ -0,0 +1,28 @@
info:
name: logout
type: http
seq: 6
http:
method: POST
url: "{{base_url}}/api/v1/protected/auth/logout"
auth:
type: bearer
token: "{{access_token}}"
runtime:
scripts:
- type: after-response
code: |-
const status = res.getStatus();
if (status === 200 || status === 204) {
bru.setEnvVar("access_token", "");
bru.setEnvVar("refresh_token", "");
console.log("logout - tokens cleared");
}
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -0,0 +1,28 @@
info:
name: logout all
type: http
seq: 7
http:
method: POST
url: "{{base_url}}/api/v1/protected/auth/logout-all"
auth:
type: bearer
token: "{{access_token}}"
runtime:
scripts:
- type: after-response
code: |-
const status = res.getStatus();
if (status === 200 || status === 204) {
bru.setEnvVar("access_token", "");
bru.setEnvVar("refresh_token", "");
console.log("logout_all - all sessions revoked, tokens cleared");
}
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -0,0 +1,10 @@
opencollection: 1.0.0
info:
name: rhythm
bundled: false
extensions:
bruno:
ignore:
- node_modules
- .git

30
http_client/refresh.yml Normal file
View File

@ -0,0 +1,30 @@
info:
name: refresh
type: http
seq: 5
http:
method: POST
url: "{{base_url}}/api/v1/auth/refresh"
auth: inherit
runtime:
scripts:
- type: after-response
code: |-
const response = res.getBody();
const token = response.access_token;
bru.setEnvVar("access_token", token);
console.log("refresh - access_token:", token);
const cookies = res.getHeaders()['set-cookie'];
if (cookies) {
bru.setEnvVar("refresh_token", cookies[0]);
console.log("refresh - refresh_token:", cookies[0]);
}
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

37
http_client/register.yml Normal file
View File

@ -0,0 +1,37 @@
info:
name: register
type: http
seq: 1
http:
method: POST
url: "{{base_url}}/api/v1/auth/register"
body:
type: json
data: |
{
"email": "a@a.it",
"password": "Password1!6969_"
}
auth: inherit
runtime:
scripts:
- type: after-response
code: |-
const response = res.getBody();
const token = response.access_token;
bru.setEnvVar("access_token", token);
console.log("register - access_token:", token);
const cookies = res.getHeaders()['set-cookie'];
if (cookies) {
bru.setEnvVar("refresh_token", cookies[0]);
console.log("register - refresh_token:", cookies[0]);
}
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -0,0 +1,17 @@
info:
name: test protected routes
type: http
seq: 4
http:
method: GET
url: "{{base_url}}/api/v1/protected/ping"
auth:
type: bearer
token: "{{access_token}}"
settings:
encodeUrl: true
timeout: 0
followRedirects: true
maxRedirects: 5

View File

@ -0,0 +1,7 @@
-- Migration: create the organizations table (tenant/workspace entity).
create table organizations (
id uuid primary key default uuidv4(), -- NOTE(review): uuidv4() requires PostgreSQL 18+; confirm server version or use gen_random_uuid()
name varchar(255) not null,
slug varchar(255) not null unique, -- acme-corp-a7x9
created_at timestamptz not null default now(),
updated_at timestamptz not null default now() -- NOTE(review): no trigger visible that auto-updates this on UPDATE — confirm it is set by application code
);

View File

@ -0,0 +1,13 @@
-- Migration: membership join table linking users to organizations with a role.
CREATE TYPE org_role AS ENUM ('owner', 'admin', 'member', 'viewer');
CREATE TABLE org_memberships (
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, -- membership rows vanish with the user
org_id UUID NOT NULL REFERENCES organizations(id) ON DELETE CASCADE, -- and with the organization
role org_role NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(), -- NOTE(review): no auto-update trigger visible — confirm set by application code
PRIMARY KEY (user_id, org_id) -- at most one membership per (user, org) pair
);

View File

@ -2,7 +2,7 @@ use std::env;
use dotenvy::dotenv;
use crate::errors::AppError;
use crate::errors::StartupError;
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum AppEnv {
@ -11,15 +11,15 @@ pub enum AppEnv {
}
impl AppEnv {
pub fn from_env() -> Result<Self, AppError> {
pub fn from_env() -> Result<Self, StartupError> {
match env::var("APP_ENV").as_deref() {
Ok("prod") => Ok(AppEnv::Production),
Ok("dev") => Ok(AppEnv::Development),
Ok(other) => Err(AppError::InvalidConfig(format!(
Ok(other) => Err(StartupError::InvalidConfig(format!(
"Invalid APP_ENV: {}",
other
))),
Err(_) => Err(AppError::InvalidConfig("APP_ENV must be set".to_string())),
Err(_) => Err(StartupError::InvalidConfig("APP_ENV must be set".to_string())),
}
}
}
@ -33,7 +33,7 @@ pub struct Config {
}
impl Config {
pub fn load() -> Result<Self, AppError> {
pub fn load() -> Result<Self, StartupError> {
dotenv().ok();
Ok(Self {
db_url: env::var("DATABASE_URL")?,

View File

@ -0,0 +1,52 @@
use axum::{
Json,
extract::{FromRequest, FromRequestParts, Request},
http::request::Parts,
};
use serde::de::DeserializeOwned;
use uuid::Uuid;
use validator::Validate;
use crate::errors::ApiError;
/// JSON extractor that additionally runs `validator::Validate` on the payload.
///
/// Both deserialization failures and validation failures surface as
/// `ApiError::Validation` carrying the underlying error message.
#[derive(Debug, Clone)]
pub struct ValidJson<T>(pub T);

impl<T, S> FromRequest<S> for ValidJson<T>
where
    T: DeserializeOwned + Validate,
    S: Send + Sync,
    Json<T>: FromRequest<S, Rejection = axum::extract::rejection::JsonRejection>,
{
    type Rejection = ApiError;

    async fn from_request(req: Request, state: &S) -> Result<Self, Self::Rejection> {
        // First let axum's plain Json extractor deserialize the body.
        let payload = match Json::<T>::from_request(req, state).await {
            Ok(Json(inner)) => inner,
            Err(rejection) => return Err(ApiError::Validation(rejection.to_string())),
        };
        // Then apply the derive(Validate) rules declared on the model type.
        if let Err(errors) = payload.validate() {
            return Err(ApiError::Validation(errors.to_string()));
        }
        Ok(Self(payload))
    }
}
/// Extractor yielding the authenticated user's id.
///
/// Reads the `Uuid` that the auth middleware injected into request
/// extensions; rejects with `ApiError::Unauthorized` when absent (e.g. the
/// route is not behind the auth layer).
pub struct CurrentUser(pub Uuid);

impl<S> FromRequestParts<S> for CurrentUser
where
    S: Send + Sync,
{
    type Rejection = ApiError;

    async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
        parts
            .extensions
            .get::<Uuid>()
            .copied()
            .map(CurrentUser)
            .ok_or(ApiError::Unauthorized)
    }
}

View File

@ -0,0 +1,19 @@
use axum::{extract::Request, middleware::Next, response::Response};
use rand::RngExt;
use std::time::{Duration, Instant};
use tokio::time::sleep;
// Bounds (milliseconds) of the randomized total response time.
const MIN_DELAY_MS: u64 = 150;
const MAX_DELAY_MS: u64 = 500;

/// Middleware that pads every response so the total time falls at a random
/// point between `MIN_DELAY_MS` and `MAX_DELAY_MS`, hiding how long the
/// handler itself took (anti-enumeration timing protection).
pub async fn random_delay_middleware(request: Request, next: Next) -> Response {
    let began = Instant::now();
    // Pick the target total duration up front, before running the handler.
    let padded_total =
        Duration::from_millis(rand::rng().random_range(MIN_DELAY_MS..=MAX_DELAY_MS));

    let response = next.run(request).await;

    // Sleep away whatever remains of the target window; if the handler was
    // slower than the target, respond immediately.
    let spent = began.elapsed();
    if spent < padded_total {
        sleep(padded_total - spent).await;
    }
    response
}

View File

@ -0,0 +1,25 @@
use crate::{errors::ApiError, state::AppState, utils::jwt::verify_access_token};
use axum::{
extract::{Request, State},
middleware::Next,
response::Response,
};
/// Bearer-token authentication middleware.
///
/// Reads the `Authorization: Bearer <token>` header, verifies the access
/// token against the configured secret, and injects the token subject into
/// request extensions for downstream extractors (e.g. `CurrentUser`).
///
/// Returns `ApiError::Unauthorized` when the header is missing or malformed;
/// token-verification errors propagate from `verify_access_token`.
pub async fn auth_middleware(
    State(state): State<AppState>,
    mut request: Request,
    next: Next,
) -> Result<Response, ApiError> {
    let auth_header = request
        .headers()
        .get(axum::http::header::AUTHORIZATION)
        .and_then(|h| h.to_str().ok())
        .ok_or(ApiError::Unauthorized)?;

    // strip_prefix both checks the scheme and yields the token in one step,
    // replacing the magic-number slice `&auth_header[7..]`.
    let token = auth_header
        .strip_prefix("Bearer ")
        .ok_or(ApiError::Unauthorized)?;

    let claims = verify_access_token(token, &state.jwt_secret)?;

    // Inject the user ID into extensions for downstream handlers
    request.extensions_mut().insert(claims.sub);

    Ok(next.run(request).await)
}

View File

@ -0,0 +1,3 @@
// Middleware layers for the HTTP controller stack.
pub mod anti_enumeration_middleware; // pads auth responses to a random total duration
pub mod auth_middleware; // Bearer access-token verification for protected routes
pub mod rate_limiting_middleware; // per-client token-bucket rate limiting

View File

@ -0,0 +1,44 @@
use crate::state::AppState;
use axum::{
extract::{ConnectInfo, Request, State},
http::StatusCode,
middleware::Next,
response::{IntoResponse, Response},
};
use std::net::SocketAddr;
/// Per-client token-bucket rate limiting.
///
/// The client is identified by the `x-client-ip` header when present
/// (expected to be set by a trusted proxy), otherwise by the socket peer
/// address, otherwise the literal "unknown".
/// NOTE(review): the header is client-controllable unless the proxy strips
/// it, and buckets accumulate in the in-memory map per distinct key with no
/// visible eviction — confirm both assumptions.
pub async fn rate_limiting_middleware(
    State(state): State<AppState>,
    request: Request,
    next: Next,
) -> Response {
    let key = client_key(&request);

    // Fetch (or create) this client's bucket and try to take one token.
    // Scoped so the DashMap guard is released before awaiting the service.
    let allowed = {
        let mut bucket = state
            .rate_limit
            .entry(key)
            .or_insert_with(crate::state::TokenBucket::new);
        bucket.value_mut().try_drain()
    };

    if !allowed {
        return StatusCode::TOO_MANY_REQUESTS.into_response();
    }
    next.run(request).await
}

/// Resolve the rate-limit key: `x-client-ip` header, then the connected
/// socket's IP, then "unknown".
fn client_key(request: &Request) -> String {
    if let Some(ip) = request
        .headers()
        .get("x-client-ip")
        .and_then(|h| h.to_str().ok())
    {
        return ip.to_string();
    }
    request
        .extensions()
        .get::<ConnectInfo<SocketAddr>>()
        .map(|ci| ci.0.ip().to_string())
        .unwrap_or_else(|| "unknown".to_string())
}

View File

@ -1,12 +1,16 @@
use axum::{Router, routing::get};
use tower_http::trace::TraceLayer;
use crate::state::AppState;
pub mod extractor;
mod middleware;
pub mod model;
mod v1;
pub fn router() -> Router<AppState> {
pub fn router(state: AppState) -> Router<AppState> {
Router::new()
.route("/", get("Server is going brr 🚀"))
.nest("/api/v1", v1::router_v1())
.nest("/api/v1", v1::router_v1(state))
.layer(TraceLayer::new_for_http())
}

View File

@ -1,18 +1,23 @@
use serde::{Deserialize, Serialize};
use validator::Validate;
#[derive(Deserialize)]
#[derive(Deserialize, Validate)]
pub struct LoginRequest {
#[validate(email)]
pub email: String,
#[validate(length(min = 1))]
pub password: String,
}
#[derive(Deserialize)]
#[derive(Deserialize, Validate)]
pub struct RegisterRequest {
#[validate(email)]
pub email: String,
#[validate(length(min = 8))]
pub password: String,
}
#[derive(Serialize)]
pub struct AuthResponse {
pub access_token: String,
pub refresh_token: String,
}

View File

@ -1,28 +1,47 @@
use axum::extract::State;
use axum::middleware::{from_fn, from_fn_with_state};
use axum::{Json, Router, routing::post};
use tower_cookies::{CookieManagerLayer, Cookies};
use crate::{
controller::extractor::ValidJson,
controller::middleware::anti_enumeration_middleware::random_delay_middleware,
controller::middleware::rate_limiting_middleware::rate_limiting_middleware,
controller::model::auth_model::{AuthResponse, LoginRequest, RegisterRequest},
errors::AppError,
service::auth_service::{login, register},
errors::ApiError,
service::auth_service::{login, refresh, register},
state::AppState,
};
pub fn auth_router() -> Router<AppState> {
pub fn auth_router(state: AppState) -> Router<AppState> {
Router::new()
.route("/login", post(login_handler))
.route("/register", post(register_handler))
.route("/refresh", post(refresh_handler))
.layer(from_fn(random_delay_middleware))
.layer(from_fn_with_state(state.clone(), rate_limiting_middleware))
.layer(CookieManagerLayer::new())
}
async fn login_handler(
State(s): State<AppState>,
Json(payload): Json<LoginRequest>,
) -> Result<Json<AuthResponse>, AppError> {
login(&s, payload).await
cookies: Cookies,
ValidJson(payload): ValidJson<LoginRequest>,
) -> Result<Json<AuthResponse>, ApiError> {
login(&s, cookies, payload).await
}
async fn register_handler(
State(s): State<AppState>,
Json(payload): Json<RegisterRequest>,
) -> Result<Json<AuthResponse>, AppError> {
register(&s, payload).await
cookies: Cookies,
ValidJson(payload): ValidJson<RegisterRequest>,
) -> Result<Json<AuthResponse>, ApiError> {
register(&s, cookies, payload).await
}
async fn refresh_handler(
State(s): State<AppState>,
cookies: Cookies,
) -> Result<Json<AuthResponse>, ApiError> {
refresh(&s, cookies).await
}

View File

@ -1,10 +1,24 @@
use axum::Router;
use axum::{
Router,
middleware::from_fn_with_state,
};
use crate::state::AppState;
use crate::{
controller::middleware::auth_middleware::auth_middleware,
state::AppState,
};
pub mod auth_controller;
mod auth_controller;
mod protected;
pub fn router_v1() -> Router<AppState> {
Router::new().nest("/auth", auth_controller::auth_router())
pub fn router_v1(state: AppState) -> Router<AppState> {
let public_routes = Router::new().nest("/auth", auth_controller::auth_router(state.clone()));
let protected_routes = Router::new().nest(
"/protected",
protected::protected_router(state.clone())
.layer(from_fn_with_state(state, auth_middleware)),
);
Router::new().merge(public_routes).merge(protected_routes)
}

View File

@ -0,0 +1,25 @@
use axum::{Router, extract::State, routing::post};
use tower_cookies::{CookieManagerLayer, Cookies};
use crate::{
controller::extractor::CurrentUser, errors::ApiError, service::auth_service, state::AppState,
};
/// Session-termination routes; nested under the protected tree and therefore
/// behind the auth middleware applied in `router_v1`.
pub fn router() -> Router<AppState> {
    Router::new()
        .route("/logout", post(logout_handler))
        .route("/logout-all", post(logout_all_handler))
        // Cookie access is required to read/clear the refresh-token cookie.
        .layer(CookieManagerLayer::new())
}
/// POST /logout — end the current session. Thin wrapper that hands the
/// cookie jar to `auth_service::logout`, where the revocation logic lives.
async fn logout_handler(State(s): State<AppState>, cookies: Cookies) -> Result<(), ApiError> {
    auth_service::logout(&s, cookies).await
}
/// POST /logout-all — end every session for the authenticated user.
/// The user id comes from the `CurrentUser` extractor, i.e. the verified
/// access token, not from the request body.
async fn logout_all_handler(
    State(s): State<AppState>,
    cookies: Cookies,
    CurrentUser(user_id): CurrentUser,
) -> Result<(), ApiError> {
    auth_service::logout_all(&s, cookies, user_id).await
}

View File

@ -0,0 +1,11 @@
use axum::Router;
use crate::state::AppState;
mod auth_protected_controller;
/// Router for authenticated-only endpoints. The caller (`router_v1`) wraps
/// this with the auth middleware, so routes here can rely on a verified user
/// id being present in request extensions.
pub fn protected_router(_state: AppState) -> Router<AppState> {
    Router::new()
        .nest("/auth", auth_protected_controller::router())
        // Simple check that a bearer token is accepted.
        .route("/ping", axum::routing::get("pong"))
}

View File

@ -1,17 +1,15 @@
use sqlx::{Pool, Postgres, migrate::MigrateError, postgres::PgPoolOptions};
use sqlx::{Pool, Postgres, postgres::PgPoolOptions};
use crate::errors::AppError;
use crate::errors::StartupError;
pub async fn init(db_url: &str) -> Result<Pool<Postgres>, AppError> {
pub async fn init(db_url: &str) -> Result<Pool<Postgres>, StartupError> {
let db = PgPoolOptions::new()
.connect(db_url)
.await
.map_err(AppError::DbConnect)?;
.await?;
sqlx::migrate!("./migrations")
.run(&db)
.await
.map_err(|e: MigrateError| AppError::InvalidConfig(format!("Migration failed: {}", e)))?;
.await?;
tracing::info!("Migration completed successfully");

View File

@ -1,2 +1,3 @@
pub mod organization;
pub mod refresh_token;
pub mod user;

View File

@ -0,0 +1,34 @@
use sqlx::{
prelude::FromRow,
types::{
Uuid,
chrono::{DateTime, Utc},
},
};
/// Row of the `organizations` table.
// Clone/PartialEq added: cheap to derive, needed for tests and for handing
// copies across service boundaries; backward compatible.
#[derive(Debug, Clone, PartialEq, FromRow)]
pub struct Organization {
    pub id: Uuid,
    pub name: String,
    // URL-friendly unique identifier (e.g. "acme-corp-a7x9" per the migration).
    pub slug: String,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}
/// Role of a user inside an organization; maps to the Postgres `org_role`
/// enum (variant names lowercased by `rename_all`).
// Copy/Eq added: a fieldless enum is trivially copyable and equality is
// total; backward compatible for all existing callers.
#[derive(sqlx::Type, Debug, Clone, Copy, PartialEq, Eq)]
#[sqlx(type_name = "org_role", rename_all = "lowercase")]
pub enum OrgRole {
    Owner,
    Admin,
    Member,
    Viewer,
}
/// Row of the `org_memberships` join table: a user's role within an
/// organization (composite key user_id + org_id in the schema).
// Clone/PartialEq added for parity with the other models; backward compatible.
#[derive(Debug, Clone, PartialEq, FromRow)]
pub struct OrgMember {
    pub user_id: Uuid,
    pub org_id: Uuid,
    pub role: OrgRole,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}

View File

@ -1,2 +1,3 @@
pub mod organization_repository;
pub mod refresh_token_repository;
pub mod user_repository;

View File

@ -0,0 +1,44 @@
use sqlx::{Executor, Postgres};
use uuid::Uuid;
use crate::{db::model::organization::Organization, errors::ApiError};
/// Insert a new organization and return the created row.
///
/// Generic over the executor so it works with both a pool (`&PgPool`) and a
/// transaction (`&mut PgConnection`).
///
/// # Errors
/// Database failures convert to `ApiError` via its `#[from] sqlx::Error`.
pub async fn create_organization<'e, E>(
    executor: E,
    name: String,
    slug: String,
) -> Result<Organization, ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    // `?` already routes sqlx::Error through From<sqlx::Error> for ApiError;
    // the previous explicit map_err(ApiError::from) was redundant.
    let org = sqlx::query_as!(
        Organization,
        "insert into organizations (name, slug) values ($1, $2) returning *",
        name,
        slug
    )
    .fetch_one(executor)
    .await?;
    Ok(org)
}
/// Fetch all organizations whose id appears in `ids`.
///
/// Missing ids are silently absent from the result; the returned order is
/// whatever the database yields, not the order of `ids`.
///
/// # Errors
/// Database failures convert to `ApiError` via its `#[from] sqlx::Error`.
pub async fn get_organizations_by_id_list<'e, E>(
    executor: E,
    ids: &[Uuid],
) -> Result<Vec<Organization>, ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    // Plural local name (was `org`) since this is a Vec; `?` handles the
    // sqlx::Error -> ApiError conversion, dropping the redundant map_err.
    let orgs = sqlx::query_as!(
        Organization,
        "select * from organizations where id = any($1)",
        ids
    )
    .fetch_all(executor)
    .await?;
    Ok(orgs)
}

View File

@ -6,14 +6,14 @@ use sqlx::{
},
};
use crate::{db::model::refresh_token::RefreshToken, errors::AppError};
use crate::{db::model::refresh_token::RefreshToken, errors::ApiError};
pub async fn create_refresh_token<'e, E>(
executor: E,
user_id: Uuid,
token_hash: String,
expires_at: DateTime<Utc>,
) -> Result<RefreshToken, AppError>
) -> Result<RefreshToken, ApiError>
where
E: Executor<'e, Database = Postgres>,
{
@ -25,13 +25,13 @@ where
expires_at
).fetch_one(executor)
.await
.map_err(AppError::from)
.map_err(ApiError::from)
}
pub async fn find_by_hash<'e, E>(
executor: E,
token_hash: &str,
) -> Result<Option<RefreshToken>, AppError>
) -> Result<Option<RefreshToken>, ApiError>
where
E: Executor<'e, Database = Postgres>,
{
@ -42,9 +42,9 @@ where
)
.fetch_optional(executor)
.await
.map_err(AppError::from)
.map_err(ApiError::from)
}
pub async fn revoke<'e, E>(executor: E, id: Uuid) -> Result<(), AppError>
pub async fn revoke<'e, E>(executor: E, id: Uuid) -> Result<(), ApiError>
where
E: Executor<'e, Database = Postgres>,
{
@ -54,6 +54,23 @@ where
)
.execute(executor)
.await
.map_err(AppError::from)?;
.map_err(ApiError::from)?;
Ok(())
}
/// Revoke every still-active refresh token for the given user (logout-all).
/// Already-revoked rows are untouched (`revoked_at is null` filter), so the
/// operation is idempotent at the SQL level.
///
/// # Errors
/// Database failures convert to `ApiError` via its `#[from] sqlx::Error`.
pub async fn revoke_all_for_user<'e, E>(
    executor: E,
    user_id: Uuid,
) -> Result<(), ApiError>
where
    E: Executor<'e, Database = Postgres>,
{
    // `?` converts sqlx::Error via From; explicit map_err was redundant.
    sqlx::query!(
        "update refresh_tokens set revoked_at = now() where user_id = $1 and revoked_at is null",
        user_id
    )
    .execute(executor)
    .await?;
    Ok(())
}

View File

@ -1,12 +1,12 @@
use sqlx::{Executor, PgPool, Postgres, types::Uuid};
use sqlx::{Executor, Postgres, types::Uuid};
use crate::{db::model::user::User, errors::AppError};
use crate::{db::model::user::User, errors::ApiError};
pub async fn create_user<'e, E>(
executor: E,
email: String,
password: String,
) -> Result<User, AppError>
) -> Result<User, ApiError>
where
E: Executor<'e, Database = Postgres>,
{
@ -18,36 +18,31 @@ where
)
.fetch_one(executor)
.await
.map_err(AppError::from)?;
.map_err(ApiError::from)?;
Ok(user)
}
/*
*And these two call patterns both work:
- Pool: user_repo::create_user(&state.db, ...) E = &'static PgPool
- Transaction: user_repo::create_user(&mut *tx, ...) E = &mut PgConnection
*/
pub async fn get_user_by_email<'e, E>(executor: E, email: &str) -> Result<Option<User>, AppError>
pub async fn get_user_by_email<'e, E>(executor: E, email: &str) -> Result<Option<User>, ApiError>
where
E: Executor<'e, Database = Postgres>,
{
let user = sqlx::query_as!(User, "select * from users where email=$1", email)
.fetch_optional(executor)
.await
.map_err(AppError::from)?;
.map_err(ApiError::from)?;
Ok(user)
}
pub async fn get_user_by_id<'e, E>(executor: E, id: Uuid) -> Result<Option<User>, AppError>
pub async fn get_user_by_id<'e, E>(executor: E, id: Uuid) -> Result<Option<User>, ApiError>
where
E: Executor<'e, Database = Postgres>,
{
let user = sqlx::query_as!(User, "select * from users where id=$1", id)
.fetch_optional(executor)
.await
.map_err(AppError::from)?;
.map_err(ApiError::from)?;
Ok(user)
}

View File

@ -6,50 +6,65 @@ use axum::{
use thiserror::Error;
#[derive(Debug, Error)]
pub enum AppError {
pub enum StartupError {
#[error("Failed to load configuration: {0}")]
Config(#[from] std::env::VarError),
#[error("Invalid configuration value: {0}")]
InvalidConfig(String),
#[error("Failed to connect to database")]
#[error("Failed to connect to database: {0}")]
DbConnect(#[from] sqlx::Error),
#[error("Failed to bind to address")]
#[error("Failed to bind to address: {0}")]
Bind(#[from] std::io::Error),
#[error("Invalid credentials")]
InvalidCredentials,
#[error("Validation error: {0}")]
Validation(String),
#[error("Internal server error")]
Internal,
#[error("Migration error: {0}")]
Migration(#[from] sqlx::migrate::MigrateError),
}
#[derive(Debug, Error)]
#[error("Application error: {0}")]
pub struct MainError(pub AppError);
pub enum ApiError {
#[error("Failed to connect to database: {0}")]
Database(#[from] sqlx::Error),
impl From<AppError> for MainError {
fn from(err: AppError) -> Self {
Self(err)
}
#[error("Invalid credentials")]
InvalidCredentials,
#[error("Validation error: {0}")]
Validation(String),
#[error("Internal server error")]
Internal,
#[error("Request not authorized")]
Unauthorized,
#[error("Not Found")]
NotFound,
}
impl IntoResponse for AppError {
impl IntoResponse for ApiError {
fn into_response(self) -> Response {
let (status, message) = match &self {
AppError::InvalidCredentials => (StatusCode::UNAUTHORIZED, self.to_string()),
AppError::Validation(_) => (StatusCode::BAD_REQUEST, self.to_string()),
AppError::DbConnect(_) | AppError::Bind(_) | AppError::Internal => (
StatusCode::INTERNAL_SERVER_ERROR,
"Internal server error".to_string(),
),
AppError::Config(_) | AppError::InvalidConfig(_) => (
StatusCode::INTERNAL_SERVER_ERROR,
"Internal server error".to_string(),
),
ApiError::InvalidCredentials => (StatusCode::UNAUTHORIZED, self.to_string()),
ApiError::Validation(_) => (StatusCode::BAD_REQUEST, self.to_string()),
ApiError::Unauthorized => (StatusCode::UNAUTHORIZED, self.to_string()),
ApiError::NotFound => (StatusCode::NOT_FOUND, self.to_string()),
ApiError::Database(err) => {
tracing::error!("Database error: {}", err);
(
StatusCode::INTERNAL_SERVER_ERROR,
"Internal server error".to_string(),
)
}
ApiError::Internal => {
tracing::error!("Internal server error");
(
StatusCode::INTERNAL_SERVER_ERROR,
"Internal server error".to_string(),
)
}
};
(status, Json(serde_json::json!({ "error": message }))).into_response()
}

10
src/lib.rs Normal file
View File

@ -0,0 +1,10 @@
// Crate root: exposes the application's modules so main.rs and integration
// tests can reach them via `rhythm_backend::...`.
pub mod config; // env-driven configuration loading (APP_ENV, DATABASE_URL, ...)
pub mod controller; // axum routers, extractors and middleware
pub mod database; // pool creation and migration runner
pub mod db; // models and repositories
pub mod errors; // StartupError / ApiError definitions
pub mod logging;
pub mod server; // server bootstrap with graceful shutdown
pub mod service; // business logic (auth flows)
pub mod state; // shared AppState (db pool, jwt secret, rate-limit map)

View File

@ -1,17 +1,7 @@
mod config;
mod controller;
mod database;
mod db;
mod errors;
mod logging;
mod server;
mod service;
mod state;
mod utils;
use errors::MainError;
use rhythm_backend::{config, database, errors::StartupError, logging, server};
#[tokio::main]
async fn main() -> Result<(), MainError> {
async fn main() -> Result<(), StartupError> {
let cfg = config::Config::load()?;
let _logging_guard = logging::LoggerConfig::init(cfg.app_env);
let db = database::init(&cfg.db_url).await?;

View File

@ -1,24 +1,27 @@
use axum::Router;
use sqlx::PgPool;
use crate::{config, controller, errors::AppError, logging, state::AppState};
use crate::{config, controller, errors::StartupError, logging, state::AppState};
pub async fn init(cfg: &config::Config, db: PgPool) -> Result<(), AppError> {
pub async fn init(cfg: &config::Config, db: PgPool) -> Result<(), StartupError> {
let state = AppState {
db,
jwt_secret: cfg.jwt_secret.clone(),
rate_limit: std::sync::Arc::new(dashmap::DashMap::new()),
};
let app = Router::new().merge(controller::router()).with_state(state);
let app = Router::new()
.merge(controller::router(state.clone()))
.with_state(state);
let listener = tokio::net::TcpListener::bind(&cfg.socket_address)
.await
.map_err(AppError::Bind)?;
let listener = tokio::net::TcpListener::bind(&cfg.socket_address).await?;
tracing::info!("Server started on {}", cfg.socket_address);
axum::serve(listener, app)
.with_graceful_shutdown(logging::shutdown_signal())
.await
.map_err(AppError::Bind)?;
axum::serve(
listener,
app.into_make_service_with_connect_info::<std::net::SocketAddr>(),
)
.with_graceful_shutdown(logging::shutdown_signal())
.await?;
Ok(())
}

View File

@ -1,35 +1,72 @@
use std::time::Instant;
use axum::Json;
use chrono::{Duration, Utc};
use chrono::Duration;
use tower_cookies::cookie::SameSite;
use tower_cookies::{Cookie, Cookies};
use crate::controller::model::auth_model::*;
use crate::db::repository::refresh_token_repository::create_refresh_token;
use crate::db::repository::refresh_token_repository::{create_refresh_token, find_by_hash, revoke};
use crate::db::repository::user_repository;
use crate::errors::AppError;
use crate::errors::ApiError;
use crate::state::AppState;
use crate::utils::anti_enumeration::anti_enumeration_delay;
use crate::utils::hash;
use crate::utils::jwt::generate_access_token;
use crate::utils::refresh_token::generate_refresh_token;
use crate::utils::refresh_token::{generate_refresh_token, hash_refresh_token};
pub async fn login(state: &AppState, req: LoginRequest) -> Result<Json<AuthResponse>, AppError> {
todo!()
/// Authenticates a user by email/password and establishes a session:
/// returns a short-lived JWT access token in the body and sets a
/// refresh-token cookie. All DB work happens in one transaction.
///
/// NOTE(review): unlike the hashed-password path, the "user not found" path
/// skips Argon2 verification, so it is faster — presumably response-time
/// padding is applied by middleware (see the anti-enumeration timing test);
/// confirm that middleware covers this route.
pub async fn login(
    state: &AppState,
    cookies: Cookies,
    req: LoginRequest,
) -> Result<Json<AuthResponse>, ApiError> {
    let mut tx = state.db.begin().await?;
    // Unknown email and wrong password both map to InvalidCredentials so the
    // response body does not reveal whether the account exists.
    let user = user_repository::get_user_by_email(&mut *tx, &req.email)
        .await?
        .ok_or_else(|| {
            tracing::warn!(email = %req.email, "Login failed: user not found");
            ApiError::InvalidCredentials
        })?;
    if !hash::verify(&req.password, &user.password)? {
        tracing::warn!(email = %req.email, "Login failed: invalid password");
        return Err(ApiError::InvalidCredentials);
    }
    let access_token = generate_access_token(user.id, &state.jwt_secret)?;
    // Only the hash is persisted; the plaintext goes to the client cookie.
    let (refresh_plain, refresh_hash) = generate_refresh_token();
    let expires_at = chrono::Utc::now() + Duration::days(7);
    create_refresh_token(&mut *tx, user.id, refresh_hash, expires_at).await?;
    tx.commit().await?;
    // Cookie is only set after the commit succeeds.
    set_refresh_cookie(&cookies, &refresh_plain);
    Ok(Json(AuthResponse { access_token }))
}
pub async fn register(
state: &AppState,
cookies: Cookies,
req: RegisterRequest,
) -> Result<Json<AuthResponse>, AppError> {
let start = Instant::now();
let mut tx = state.db.begin().await?;
{
let user = user_repository::get_user_by_email(&mut *tx, &req.email).await?;
if user.is_some() {
// user already registered
anti_enumeration_delay(start, 150, 300).await;
return Err(AppError::Internal);
}
) -> Result<Json<AuthResponse>, ApiError> {
let estimate = zxcvbn::zxcvbn(&req.password, &[]);
if (estimate.score() as u8) < 3 {
tracing::warn!(email = %req.email, score = ?estimate.score(), "Registration failed: password too weak");
return Err(ApiError::Validation(
"Password is too weak. Please use a more complex password.".to_string(),
));
}
let mut tx = state.db.begin().await?;
if user_repository::get_user_by_email(&mut *tx, &req.email)
.await?
.is_some()
{
tracing::warn!(email = %req.email, "Registration failed: email already exists");
return Err(ApiError::Validation("bad request".to_string()));
}
let h = hash::hash(&req.password)?;
let user = user_repository::create_user(&mut *tx, req.email, h).await?;
let access_token = generate_access_token(user.id, &state.jwt_secret)?;
@ -39,10 +76,105 @@ pub async fn register(
create_refresh_token(&mut *tx, user.id, refresh_hash, expires_at).await?;
tx.commit().await?;
anti_enumeration_delay(start, 150, 300).await;
// TODO: put refresh token in cookie
Ok(Json(AuthResponse {
access_token: access_token,
refresh_token: refresh_plain,
}))
set_refresh_cookie(&cookies, &refresh_plain);
Ok(Json(AuthResponse { access_token }))
}
/// Rotates the refresh token: validates the token presented in the cookie,
/// revokes it, and issues a new access token plus a new refresh cookie.
/// Revoke + re-issue happen in a single transaction, so a token is never
/// usable twice.
pub async fn refresh(state: &AppState, cookies: Cookies) -> Result<Json<AuthResponse>, ApiError> {
    // Missing cookie → nothing to refresh; treat as unauthenticated.
    let refresh_token = get_refresh_cookie(&cookies).ok_or(ApiError::InvalidCredentials)?;
    let mut tx = state.db.begin().await?;
    // Tokens are stored hashed; hash the presented plaintext to look it up.
    let hash = hash_refresh_token(&refresh_token);
    let token_data = find_by_hash(&mut *tx, &hash)
        .await?
        .ok_or(ApiError::InvalidCredentials)?;
    // Reject already-revoked or expired tokens.
    if token_data.revoked_at.is_some() || token_data.expires_at < chrono::Utc::now() {
        return Err(ApiError::InvalidCredentials);
    }
    // Single-use rotation: revoke the old token before minting the new pair.
    revoke(&mut *tx, token_data.id).await?;
    let access_token = generate_access_token(token_data.user_id, &state.jwt_secret)?;
    let (refresh_plain, refresh_hash) = generate_refresh_token();
    let expires_at = chrono::Utc::now() + Duration::days(7);
    create_refresh_token(&mut *tx, token_data.user_id, refresh_hash, expires_at).await?;
    tx.commit().await?;
    // The plaintext token only ever lives in the HTTP-only cookie.
    set_refresh_cookie(&cookies, &refresh_plain);
    Ok(Json(AuthResponse { access_token }))
}
use crate::db::repository::refresh_token_repository::revoke_all_for_user;
/// Revokes the refresh token presented by this client (if any) and clears
/// the refresh cookie. Idempotent: a missing cookie is treated as success.
pub async fn logout(state: &AppState, cookies: Cookies) -> Result<(), ApiError> {
    // No cookie → nothing to revoke; the client is already logged out.
    let Some(presented) = get_refresh_cookie(&cookies) else {
        return Ok(());
    };
    let mut tx = state.db.begin().await?;
    let token_hash = hash_refresh_token(&presented);
    // Revoke only if the token is actually known; unknown tokens are ignored
    // so logout never leaks whether a token existed.
    if let Some(stored) = find_by_hash(&mut *tx, &token_hash).await? {
        revoke(&mut *tx, stored.id).await?;
    }
    tx.commit().await?;
    remove_refresh_cookie(&cookies);
    Ok(())
}
/// Revokes every refresh token belonging to `user_id` — logging the user out
/// of all sessions on all devices — and clears this client's refresh cookie.
/// (`user_id` comes from the authenticated caller, not from the cookie.)
pub async fn logout_all(
    state: &AppState,
    cookies: Cookies,
    user_id: uuid::Uuid,
) -> Result<(), ApiError> {
    let mut tx = state.db.begin().await?;
    revoke_all_for_user(&mut *tx, user_id).await?;
    tx.commit().await?;
    // Only this client's cookie can be cleared; other devices keep their
    // (now-revoked) cookies until their next refresh attempt fails.
    remove_refresh_cookie(&cookies);
    Ok(())
}
const REFRESH_COOKIE_NAME: &str = "refresh_token";
/// Instructs the client to drop the refresh-token cookie.
fn remove_refresh_cookie(cookies: &Cookies) {
    // Same name/path as the cookie set in `set_refresh_cookie`, with an
    // empty value and zero max-age so the browser expires it immediately.
    let mut removal = Cookie::new(REFRESH_COOKIE_NAME, "");
    removal.set_path("/");
    removal.set_max_age(time::Duration::ZERO);
    cookies.remove(removal);
}
/// Stores the plaintext refresh token in a hardened cookie:
/// HTTP-only (no JS access), Secure (HTTPS only), SameSite=Strict (no
/// cross-site sends), valid for 7 days to match the DB expiry.
fn set_refresh_cookie(cookies: &Cookies, token: &str) {
    let mut cookie = Cookie::new(REFRESH_COOKIE_NAME, token.to_owned());
    cookie.set_http_only(true);
    cookie.set_secure(true);
    cookie.set_same_site(SameSite::Strict);
    cookie.set_path("/");
    cookie.set_max_age(time::Duration::days(7));
    cookies.add(cookie);
}
/// Reads the refresh token from the request's cookies, if present.
fn get_refresh_cookie(cookies: &Cookies) -> Option<String> {
    let cookie = cookies.get(REFRESH_COOKIE_NAME)?;
    Some(cookie.value().to_owned())
}

View File

@ -1,7 +1,55 @@
use axum::extract::FromRef;
use dashmap::DashMap;
use sqlx::PgPool;
use std::sync::Arc;
use std::time::Instant;
// Shared application state, cloned into every request handler.
#[derive(Clone)]
pub struct AppState {
    // sqlx Postgres pool; cloning a pool is cheap (internally ref-counted).
    pub db: PgPool,
    // Secret used to sign and verify JWT access tokens.
    pub jwt_secret: String,
    // Per-client token buckets for rate limiting. Keyed by a client
    // identifier string — presumably the client IP (the tests send an
    // `x-client-ip` header); confirm against the rate-limit middleware.
    pub rate_limit: Arc<DashMap<String, TokenBucket>>,
}
// Lets axum extractors that want a bare `PgPool` pull it out of `AppState`.
impl FromRef<AppState> for PgPool {
    fn from_ref(state: &AppState) -> Self {
        state.db.clone()
    }
}
/// Token-bucket rate-limiter state for a single client.
/// The bucket holds up to `BUCKET_CAPACITY` tokens and refills continuously
/// at `REQUESTS_PER_MINUTE / 60` tokens per second; each allowed request
/// drains one token.
pub struct TokenBucket {
    // Current token count; fractional because refill is continuous.
    pub tokens: f64,
    // Timestamp of the last refill, used to compute elapsed time.
    pub last_refill: Instant,
}

// --- Rate Limiting Configuration ---
const REQUESTS_PER_MINUTE: f64 = 5.0;
const BUCKET_CAPACITY: f64 = 5.0;

impl TokenBucket {
    /// Creates a full bucket, allowing an initial burst of `BUCKET_CAPACITY`
    /// requests.
    pub fn new() -> Self {
        Self {
            tokens: BUCKET_CAPACITY,
            last_refill: Instant::now(),
        }
    }

    /// Adds tokens proportional to the time elapsed since the last refill,
    /// capped at `BUCKET_CAPACITY`.
    fn refill(&mut self) {
        let now = Instant::now();
        let elapsed = now.duration_since(self.last_refill).as_secs_f64();
        let tokens_per_second = REQUESTS_PER_MINUTE / 60.0;
        self.tokens = (self.tokens + elapsed * tokens_per_second).min(BUCKET_CAPACITY);
        self.last_refill = now;
    }

    /// Refills, then consumes one token if available.
    /// Returns `true` when the request is allowed, `false` when rate-limited.
    pub fn try_drain(&mut self) -> bool {
        self.refill();
        if self.tokens >= 1.0 {
            self.tokens -= 1.0;
            true
        } else {
            false
        }
    }
}

// Clippy `new_without_default`: types with a zero-argument `new` should also
// implement `Default` so generic code (e.g. `entry().or_default()`) works.
impl Default for TokenBucket {
    fn default() -> Self {
        Self::new()
    }
}

View File

@ -1,21 +0,0 @@
use std::time::{Duration, Instant};
use rand::RngExt;
use tokio::time::sleep;
/// Anti-enumeration: ensures consistent response timing regardless of outcome.
/// Call at the end of request handler, before returning. Pads the total
/// handling time to a random target in `[min_ms, max_ms]`; if processing
/// already took longer than the target, no extra delay is added.
///
/// # Arguments
/// * `start` - The Instant when request processing began
/// * `min_ms` - Minimum delay in milliseconds (e.g. 150)
/// * `max_ms` - Maximum delay in milliseconds (e.g. 300)
pub async fn anti_enumeration_delay(start: Instant, min_ms: u64, max_ms: u64) {
    // Guard: the original `% (max_ms - min_ms)` panicked (division by zero)
    // when max_ms == min_ms and could never produce max_ms itself; the `+ 1`
    // makes the upper bound inclusive.
    let target = if max_ms > min_ms {
        min_ms + rand::rng().random::<u64>() % (max_ms - min_ms + 1)
    } else {
        min_ms
    };
    let target_duration = Duration::from_millis(target);
    if let Some(remaining) = target_duration.checked_sub(start.elapsed()) {
        sleep(remaining).await;
    }
}

View File

@ -3,26 +3,29 @@ use argon2::{
password_hash::{SaltString, rand_core::OsRng},
};
use crate::errors::AppError;
use crate::errors::ApiError;
pub fn hash(text: &str) -> Result<String, AppError> {
pub fn hash(text: &str) -> Result<String, ApiError> {
let salt = SaltString::generate(&mut OsRng);
let argon2 = Argon2::default();
let res = argon2
let password_hash = argon2
.hash_password(text.as_bytes(), &salt)
.map_err(|e| AppError::InvalidConfig(format!("Invalid hash {}", e)))?;
.map_err(|e| {
tracing::error!("Hash error: {}", e);
ApiError::Internal
})?;
Ok(res.to_string())
Ok(password_hash.to_string())
}
pub fn verify(text: &str, hash: &str) -> Result<bool, AppError> {
pub fn verify(text: &str, hash: &str) -> Result<bool, ApiError> {
let parsed_hash = PasswordHash::new(hash)
.map_err(|e| AppError::InvalidConfig(format!("Invalid hash {}", e)))?;
.map_err(|e| {
tracing::error!("Hash parsing error: {}", e);
ApiError::Internal
})?;
let argon2 = Argon2::default();
let res = argon2
.verify_password(text.as_bytes(), &parsed_hash)
.is_ok();
Ok(res)
Ok(argon2.verify_password(text.as_bytes(), &parsed_hash).is_ok())
}

View File

@ -3,7 +3,7 @@ use jsonwebtoken::{EncodingKey, Header, encode};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::errors::AppError;
use crate::errors::ApiError;
#[derive(Debug, Serialize, Deserialize)]
pub struct Claims {
@ -13,7 +13,7 @@ pub struct Claims {
pub jti: Uuid,
}
pub fn generate_access_token(user_id: Uuid, secret: &str) -> Result<String, AppError> {
pub fn generate_access_token(user_id: Uuid, secret: &str) -> Result<String, ApiError> {
let now = Utc::now();
let expires_at = now + Duration::minutes(15);
@ -29,5 +29,17 @@ pub fn generate_access_token(user_id: Uuid, secret: &str) -> Result<String, AppE
&claims,
&EncodingKey::from_secret(secret.as_bytes()),
)
.map_err(|_| AppError::Internal)
.map_err(|_| ApiError::Internal)
}
/// Decodes and validates a JWT access token, returning its claims.
/// Any failure (bad signature, malformed token, expiry, …) is collapsed
/// into `ApiError::Unauthorized` so callers respond with a uniform 401.
pub fn verify_access_token(token: &str, secret: &str) -> Result<Claims, ApiError> {
    let key = jsonwebtoken::DecodingKey::from_secret(secret.as_bytes());
    let mut validation = jsonwebtoken::Validation::default();
    validation.validate_exp = true; // make expiry checking explicit
    match jsonwebtoken::decode::<Claims>(token, &key, &validation) {
        Ok(data) => Ok(data.claims),
        Err(_) => Err(ApiError::Unauthorized),
    }
}

View File

@ -1,4 +1,3 @@
pub mod anti_enumeration;
pub mod hash;
pub mod jwt;
pub mod refresh_token;

View File

@ -6,13 +6,15 @@ pub fn generate_refresh_token() -> (String, String) {
let mut thread_rng = rand::rng();
thread_rng.fill_bytes(&mut bytes);
let plain = hex::encode(&bytes); // 64 hex chars for user
let hash = {
// SHA-256 for DB storage
let mut hasher = Sha256::new();
hasher.update(&plain);
hex::encode(hasher.finalize())
};
let plain = hex::encode(bytes); // 64 hex chars for user
let hash = hash_refresh_token(&plain);
(plain, hash)
}
/// Hashes a plaintext refresh token with SHA-256 and hex-encodes the digest.
/// Only this hash is stored in the DB (see `create_refresh_token` callers),
/// so a database leak does not expose usable tokens.
pub fn hash_refresh_token(plain: &str) -> String {
    let mut hasher = Sha256::new();
    // Feed the bytes directly; `&plain` was a needless `&&str` reference.
    hasher.update(plain.as_bytes());
    hex::encode(hasher.finalize())
}

3
tests/auth/mod.rs Normal file
View File

@ -0,0 +1,3 @@
// Sub-modules of the auth integration-test suite.
pub mod registration_login; // register + login happy/sad paths
pub mod security; // rate limiting and anti-enumeration timing
pub mod session; // protected routes, token refresh, logout-all

View File

@ -0,0 +1,34 @@
use crate::common::{setup_app, spawn_server};
/// End-to-end flow: register a fresh user, log in with the right password,
/// then confirm a wrong password is rejected with 401.
#[tokio::test]
async fn test_register_and_login() {
    let (app, _db) = setup_app().await;
    let (base_url, client) = spawn_server(app).await;

    // Unique address per run so reruns never collide with existing rows.
    let email = format!("user_{}@test.com", uuid::Uuid::new_v4());
    let register_url = format!("{}/api/v1/auth/register", base_url);
    let login_url = format!("{}/api/v1/auth/login", base_url);

    // Registration succeeds and returns an access token.
    let resp = client
        .post(&register_url)
        .json(&serde_json::json!({"email": email, "password": "SuperSecureP@ssw0rd2024!"}))
        .send()
        .await
        .unwrap();
    assert!(
        resp.status().is_success(),
        "Register failed: {}",
        resp.text().await.unwrap_or_default()
    );
    let body: serde_json::Value = resp.json().await.unwrap();
    assert!(body["access_token"].is_string());

    // Correct password → success.
    let resp = client
        .post(&login_url)
        .json(&serde_json::json!({"email": email, "password": "SuperSecureP@ssw0rd2024!"}))
        .send()
        .await
        .unwrap();
    assert!(resp.status().is_success());

    // Wrong password → 401.
    let resp = client
        .post(&login_url)
        .json(&serde_json::json!({"email": email, "password": "WrongPassword"}))
        .send()
        .await
        .unwrap();
    assert_eq!(resp.status(), 401);
}

83
tests/auth/security.rs Normal file
View File

@ -0,0 +1,83 @@
use crate::common::{setup_app, spawn_server};
use std::time::{Duration, Instant};
/// Verifies the per-IP token bucket: with capacity 5, a burst of 6
/// concurrent requests from one IP must allow at most 5 and reject at
/// least 1 with 429, while a different IP remains unaffected.
#[tokio::test]
async fn test_rate_limiting_blocks_spam() {
    let (app, _db) = setup_app().await;
    let (base_url, client) = spawn_server(app).await;
    // Send 6 requests CONCURRENTLY so they arrive nearly simultaneously
    // Capacity is 5, so exactly 5 should pass and 1 should fail.
    let mut handles = Vec::new();
    for _ in 0..6 {
        let c = client.clone();
        let url = format!("{}/api/v1/auth/login", base_url);
        handles.push(tokio::spawn(async move {
            // The x-client-ip header stands in for the real client address;
            // presumably the rate-limit middleware keys on it — confirm.
            c.post(&url)
                .header("x-client-ip", "1.2.3.4")
                .json(&serde_json::json!({"email": "a@test.com", "password": "b"}))
                .send().await.unwrap()
                .status().as_u16()
        }));
    }
    let statuses: Vec<u16> = futures_util::future::join_all(handles).await
        .into_iter().map(|r| r.unwrap()).collect();
    // Anything other than 429 counts as "allowed" here (401 is expected for
    // the bogus credentials — we only care about the limiter).
    let successes = statuses.iter().filter(|&&s| s != 429).count();
    let blocked = statuses.iter().filter(|&&s| s == 429).count();
    assert!(successes <= 5, "At most 5 rapid requests should be allowed, got {} passing", successes);
    assert!(blocked >= 1, "At least 1 request should be rate-limited (429)");
    // Different IP should still work
    let ok = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .header("x-client-ip", "5.6.7.8")
        .json(&serde_json::json!({"email": "a@test.com", "password": "b"}))
        .send().await.unwrap();
    assert_ne!(ok.status(), 429, "Different IP should not be rate-limited");
}
/// Verifies anti-enumeration timing: login attempts against a nonexistent
/// account (fast path) and an existing account with a wrong password (slow
/// path, includes Argon2 verification) must both be padded to >= 150ms and
/// differ by less than 300ms, so an attacker cannot tell the cases apart.
#[tokio::test]
async fn test_anti_enumeration_timing() {
    let (app, _db) = setup_app().await;
    let (base_url, client) = spawn_server(app).await;
    // Time the "user not found" path.
    let start = Instant::now();
    let _ = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .header("x-client-ip", "1.1.1.1") // Different IP to avoid rate limits from previous requests in other tests if run concurrently
        .json(&serde_json::json!({"email": "ghost_not_real@test.com", "password": "irrelevant"}))
        .send().await.unwrap();
    let duration_nonexistent = start.elapsed();
    // Create a real account so the second attempt hits password verification.
    let email = format!("timing_{}@test.com", uuid::Uuid::new_v4());
    let resp = client
        .post(format!("{}/api/v1/auth/register", base_url))
        .header("x-client-ip", "2.2.2.2")
        .json(&serde_json::json!({"email": email, "password": "SuperSecureP@ssw0rd2024!"}))
        .send().await.unwrap();
    assert!(resp.status().is_success());
    // Time the "wrong password" path for the existing account.
    let start = Instant::now();
    let _ = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .header("x-client-ip", "3.3.3.3")
        .json(&serde_json::json!({"email": email, "password": "WrongPassword123!"}))
        .send().await.unwrap();
    let duration_existent = start.elapsed();
    // Anti-enumeration middleware ensures BOTH take >= 150ms
    assert!(duration_nonexistent >= Duration::from_millis(150), "Fast path should be padded to >= 150ms");
    assert!(duration_existent >= Duration::from_millis(150), "Slow path should be padded to >= 150ms");
    // Absolute difference between the two paths (Duration can't go negative).
    let diff = if duration_nonexistent > duration_existent {
        duration_nonexistent - duration_existent
    } else {
        duration_existent - duration_nonexistent
    };
    assert!(diff < Duration::from_millis(300),
        "Timing difference should be <300ms because both paths are padded by random delay: got {:?}", diff);
}

62
tests/auth/session.rs Normal file
View File

@ -0,0 +1,62 @@
use crate::common::{setup_app, spawn_server};
/// Verifies JWT gating on protected routes: no bearer token → 401,
/// a token freshly issued by /register → 200.
#[tokio::test]
async fn test_protected_route_requires_auth() {
    let (app, _db) = setup_app().await;
    let (base_url, client) = spawn_server(app).await;
    // No token → 401
    let resp = client
        .get(format!("{}/api/v1/protected/ping", base_url))
        .send().await.unwrap();
    assert_eq!(resp.status(), 401, "Protected route should require auth");
    // With token → 200
    let email = format!("protected_{}@test.com", uuid::Uuid::new_v4());
    let reg = client
        .post(format!("{}/api/v1/auth/register", base_url))
        .json(&serde_json::json!({"email": email, "password": "SuperSecureP@ssw0rd2024!"}))
        .send().await.unwrap();
    let token: serde_json::Value = reg.json().await.unwrap();
    let resp = client
        .get(format!("{}/api/v1/protected/ping", base_url))
        .bearer_auth(token["access_token"].as_str().unwrap())
        .send().await.unwrap();
    assert_eq!(resp.status(), 200, "Protected route should succeed with valid token");
}
/// Verifies the refresh-token lifecycle: the refresh cookie set at
/// registration lets /refresh mint a new access token, and after
/// /logout-all every refresh token is revoked so /refresh returns 401.
/// Relies on the reqwest client's cookie store (enabled in spawn_server)
/// to round-trip the refresh cookie automatically.
#[tokio::test]
async fn test_refresh_and_logout_all() {
    let (app, _db) = setup_app().await;
    let (base_url, client) = spawn_server(app).await;
    // Register + login to get a valid session
    let email = format!("refresh_{}@test.com", uuid::Uuid::new_v4());
    let reg = client
        .post(format!("{}/api/v1/auth/register", base_url))
        .json(&serde_json::json!({"email": email, "password": "SuperSecureP@ssw0rd2024!"}))
        .send().await.unwrap();
    let _token: serde_json::Value = reg.json().await.unwrap();
    // Refresh should work
    let refreshed = client
        .post(format!("{}/api/v1/auth/refresh", base_url))
        .send().await.unwrap();
    assert!(refreshed.status().is_success(), "Refresh should succeed with cookie");
    let new_token: serde_json::Value = refreshed.json().await.unwrap();
    assert!(new_token["access_token"].is_string());
    // Logout all
    let resp = client
        .post(format!("{}/api/v1/protected/auth/logout-all", base_url))
        .bearer_auth(new_token["access_token"].as_str().unwrap())
        .send().await.unwrap();
    assert!(resp.status().is_success(), "logout-all should succeed");
    // After logout-all, refresh should fail
    let fail = client
        .post(format!("{}/api/v1/auth/refresh", base_url))
        .send().await.unwrap();
    assert_eq!(fail.status(), 401, "Refresh should fail after logout-all");
}

2
tests/auth_tests.rs Normal file
View File

@ -0,0 +1,2 @@
// Integration-test entry point: wires the shared helpers (`tests/common`)
// and the auth suite (`tests/auth`) into one test binary.
mod common;
mod auth;

103
tests/basic_test.rs Normal file
View File

@ -0,0 +1,103 @@
use reqwest::Client;
use rhythm_backend::{
controller, state::AppState,
};
use axum::Router;
use sqlx::PgPool;
use std::sync::Arc;
use dashmap::DashMap;
/// Builds the application router against a real Postgres instance and runs
/// migrations. The connection string can be overridden via the standard
/// `DATABASE_URL` environment variable (useful for CI); otherwise it falls
/// back to the local dev database.
///
/// NOTE(review): duplicates `tests/common/mod.rs::setup_app`; consider
/// declaring `mod common;` here and reusing it.
async fn setup_app() -> (Router, PgPool) {
    let db_url = std::env::var("DATABASE_URL").unwrap_or_else(|_| {
        "postgres://user:password@localhost:5432/rhythm-dev?sslmode=disable".to_string()
    });
    let db = PgPool::connect(&db_url)
        .await
        .expect("Failed to connect to Postgres (set DATABASE_URL to override)");
    // Run migrations so the schema matches the current code.
    sqlx::migrate!("./migrations")
        .run(&db)
        .await
        .expect("Failed to run migrations");
    let state = AppState {
        db: db.clone(),
        // Test-only signing secret; never used outside the test binary.
        jwt_secret: "test-secret-key-12345678901234567890".to_string(),
        rate_limit: Arc::new(DashMap::new()),
    };
    let app = controller::router(state.clone()).with_state(state);
    (app, db)
}
/// Smoke test of register → login → wrong-password against a server bound
/// to a random local port.
/// NOTE(review): largely duplicated by tests/auth/registration_login.rs;
/// consider removing this file once the structured suite is authoritative.
#[tokio::test]
async fn test_register_and_login() {
    let (app, _db) = setup_app().await;
    // Start the server on a random port
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
    let port = listener.local_addr().unwrap().port();
    tokio::spawn(async move {
        axum::serve(listener, app.into_make_service()).await.unwrap();
    });
    // Give server a moment to start
    tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
    let base_url = format!("http://127.0.0.1:{}", port);
    let client = Client::new();
    // Use a unique email so we don't conflict with previous test runs
    let unique_email = format!("testuser_{}@test.com", uuid::Uuid::new_v4());
    // Test 1: Register
    let reg_resp = client
        .post(format!("{}/api/v1/auth/register", base_url))
        .json(&serde_json::json!({
            "email": unique_email,
            "password": "SuperSecureP@ssw0rd2024!"
        }))
        .send()
        .await
        .unwrap();
    println!("Register status: {}", reg_resp.status());
    assert!(
        reg_resp.status().is_success(),
        "Register should succeed: {}",
        reg_resp.text().await.unwrap_or_default()
    );
    // Test 2: Login with correct password
    let login_resp = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .json(&serde_json::json!({
            "email": unique_email,
            "password": "SuperSecureP@ssw0rd2024!"
        }))
        .send()
        .await
        .unwrap();
    println!("Login status: {}", login_resp.status());
    assert!(login_resp.status().is_success(), "Login should succeed with correct password: {}", login_resp.text().await.unwrap_or_default());
    let body: serde_json::Value = login_resp.json().await.unwrap();
    assert!(body.get("access_token").is_some(), "Login response should have access_token");
    // Test 3: Login with wrong password
    let bad_login = client
        .post(format!("{}/api/v1/auth/login", base_url))
        .json(&serde_json::json!({
            "email": unique_email,
            "password": "WrongPassword1!"
        }))
        .send()
        .await
        .unwrap();
    println!("Bad login status: {}", bad_login.status());
    assert_eq!(bad_login.status(), 401, "Login with wrong password should return 401");
}

48
tests/common/mod.rs Normal file
View File

@ -0,0 +1,48 @@
use reqwest::Client;
use std::time::Duration;
use rhythm_backend::{controller, state::AppState};
use axum::Router;
use sqlx::PgPool;
use std::sync::Arc;
use dashmap::DashMap;
/// Builds the application router against a real Postgres instance and runs
/// migrations. The connection string can be overridden via the standard
/// `DATABASE_URL` environment variable (useful for CI); otherwise it falls
/// back to the local dev database.
pub async fn setup_app() -> (Router, PgPool) {
    let db_url = std::env::var("DATABASE_URL").unwrap_or_else(|_| {
        "postgres://user:password@localhost:5432/rhythm-dev?sslmode=disable".to_string()
    });
    let db = PgPool::connect(&db_url)
        .await
        .expect("Failed to connect to Postgres (set DATABASE_URL to override)");
    // Keep the schema in sync with the current code.
    sqlx::migrate!("./migrations")
        .run(&db)
        .await
        .expect("Failed to run migrations");
    let state = AppState {
        db: db.clone(),
        // Test-only signing secret; never used outside the test binary.
        jwt_secret: "test-secret-key-12345678901234567890".to_string(),
        rate_limit: Arc::new(DashMap::new()),
    };
    let app = controller::router(state.clone()).with_state(state);
    (app, db)
}
/// Binds the app to a random local port, serves it on a background task,
/// and returns the base URL plus a cookie-enabled HTTP client.
///
/// The listener is bound *before* the serve task is spawned, so the OS is
/// already queueing incoming connections when this function returns — the
/// original 100ms "warm-up" sleep was unnecessary and only slowed the suite.
pub async fn spawn_server(app: axum::Router) -> (String, Client) {
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
    let port = listener.local_addr().unwrap().port();
    tokio::spawn(async move {
        axum::serve(listener, app.into_make_service()).await.unwrap();
    });
    let base_url = format!("http://127.0.0.1:{}", port);
    // Cookie store enabled so the refresh-token cookie round-trips in tests.
    let client = Client::builder()
        .cookie_store(true)
        .build()
        .unwrap();
    (base_url, client)
}

19
todo.md Normal file
View File

@ -0,0 +1,19 @@
# Project Roadmap TODO
## 1. Organizations & Roles (Next Up)
- [ ] Create `0004_create_org_memberships_table.sql` with `org_role` ENUM (owner, admin, member, viewer).
- [ ] Implement `src/db/repository/organization_repository.rs` with `create_org_with_owner` (using `&mut sqlx::Transaction`).
- [ ] Add slug generation utility (name -> `slug-a7x9`).
- [ ] Build `POST /api/v1/orgs` and `GET /api/v1/orgs` endpoints using the new `CurrentUser` and `ValidJson` extractors.
## 2. Projects Layer
- [ ] Create projects table (belongs to `org_id`).
- [ ] Create `project_memberships` table for specific project access (inherits downward from Org roles).
- [ ] CRUD endpoints for projects nested under `/api/v1/orgs/{org_slug}/projects`.
## 3. Core Issue Tracking
- [ ] Create issues table (belongs to `project_id`).
- [ ] Allow Projects to define their own custom workflow stages (e.g., Todo, In Progress, QA, Done).
## 4. Power-User Features
- [ ] Build "Agglomeration Views": Allow creating Org-level Kanban boards that span multiple projects and map project-specific stages to unified columns.