removed middlewares
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 12m56s
All checks were successful
Build and Push Docker Image / build-and-push (push) Successful in 12m56s
This commit is contained in:
parent
8996161cc9
commit
bc7866b4fb
@ -1,73 +0,0 @@
|
||||
use axum::{extract::Request, response::Response};
|
||||
use futures_util::future::BoxFuture;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::time::sleep;
|
||||
use tower::{Layer, Service};
|
||||
use rand::RngExt;
|
||||
|
||||
/// Middleware Layer that ensures every request takes a minimum amount of time.
/// This prevents "Timing Attacks" where an attacker can determine if a user exists
/// by observing how much faster the server responds when an email is not found
/// versus when a password check (expensive hash) is performed.
#[derive(Clone)]
pub struct AntiEnumerationLayer {
    /// Lower bound (milliseconds) of the randomized response-time window.
    pub min_ms: u64,
    /// Upper bound (milliseconds) of the randomized response-time window.
    pub max_ms: u64,
}
|
||||
|
||||
impl<S> Layer<S> for AntiEnumerationLayer {
|
||||
type Service = AntiEnumerationService<S>;
|
||||
|
||||
fn layer(&self, inner: S) -> Self::Service {
|
||||
AntiEnumerationService {
|
||||
inner,
|
||||
min_ms: self.min_ms,
|
||||
max_ms: self.max_ms,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The Service implementation for AntiEnumeration.
/// It wraps the inner service, records the start time, and sleeps if the
/// inner service finishes too quickly.
#[derive(Clone)]
pub struct AntiEnumerationService<S> {
    // The wrapped inner service that actually handles the request.
    inner: S,
    // Lower bound (ms) of the randomized minimum response time.
    min_ms: u64,
    // Upper bound (ms) of the randomized minimum response time.
    max_ms: u64,
}
|
||||
|
||||
impl<S> Service<Request> for AntiEnumerationService<S>
|
||||
where
|
||||
S: Service<Request, Response = Response> + Send + 'static,
|
||||
S::Future: Send + 'static,
|
||||
{
|
||||
type Response = S::Response;
|
||||
type Error = S::Error;
|
||||
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.inner.poll_ready(cx)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Request) -> Self::Future {
|
||||
let start = Instant::now();
|
||||
let min = self.min_ms;
|
||||
let max = self.max_ms;
|
||||
let future = self.inner.call(req);
|
||||
|
||||
Box::pin(async move {
|
||||
let res = future.await?;
|
||||
let elapsed = start.elapsed();
|
||||
|
||||
// Pick a random target within the window to make timing analysis even harder
|
||||
let target = Duration::from_millis(rand::rng().random_range(min..=max));
|
||||
|
||||
if elapsed < target {
|
||||
sleep(target - elapsed).await;
|
||||
}
|
||||
Ok(res)
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -1,97 +0,0 @@
|
||||
use std::task::{Context, Poll};
|
||||
use axum::{extract::Request, response::Response};
|
||||
use futures_util::future::BoxFuture;
|
||||
use tower::{Layer, Service};
|
||||
use std::time::Instant;
|
||||
use crate::state::AppState;
|
||||
use crate::errors::AppError;
|
||||
|
||||
/// Middleware Layer for Rate Limiting.
/// It implements a Token Bucket algorithm to limit requests per IP.
/// This prevents spam, brute-force attacks, and DoS on expensive endpoints.
#[derive(Clone)]
pub struct RateLimitLayer {
    /// Shared application state holding the per-IP bucket map.
    pub state: AppState,
    /// Bucket capacity, i.e. the maximum burst size allowed per IP.
    pub max_tokens: f64,
    pub refill_rate: f64, // tokens per second
}
|
||||
|
||||
impl<S> Layer<S> for RateLimitLayer {
|
||||
type Service = RateLimitService<S>;
|
||||
|
||||
fn layer(&self, inner: S) -> Self::Service {
|
||||
RateLimitService {
|
||||
inner,
|
||||
state: self.state.clone(),
|
||||
max_tokens: self.max_tokens,
|
||||
refill_rate: self.refill_rate,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The Service implementation for RateLimiting.
/// It identifies the user via 'X-Client-IP' header (injected by the trusted proxy).
#[derive(Clone)]
pub struct RateLimitService<S> {
    // The wrapped inner service that actually handles the request.
    inner: S,
    // Shared state holding the per-IP token-bucket map.
    state: AppState,
    // Bucket capacity (maximum burst size).
    max_tokens: f64,
    // Refill speed in tokens per second.
    refill_rate: f64,
}
|
||||
|
||||
impl<S> Service<Request> for RateLimitService<S>
|
||||
where
|
||||
S: Service<Request, Response = Response> + Send + 'static,
|
||||
S::Future: Send + 'static,
|
||||
{
|
||||
type Response = S::Response;
|
||||
type Error = S::Error;
|
||||
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.inner.poll_ready(cx)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Request) -> Self::Future {
|
||||
// 1. Extract IP from trusted header (X-Client-IP)
|
||||
// Note: In a production Docker setup, Nginx should be configured to set this.
|
||||
let client_ip = req.headers()
|
||||
.get("X-Client-IP")
|
||||
.and_then(|h| h.to_str().ok())
|
||||
.map(|s| s.to_string())
|
||||
.unwrap_or_else(|| "unknown".to_string());
|
||||
|
||||
let state = self.state.clone();
|
||||
let max_tokens = self.max_tokens;
|
||||
let refill_rate = self.refill_rate;
|
||||
|
||||
// 2. Access the shared DashMap for the IP's bucket
|
||||
let mut bucket = state.rate_limit.entry(client_ip).or_insert_with(|| crate::state::TokenBucket::new(max_tokens));
|
||||
|
||||
let now = Instant::now();
|
||||
let elapsed = now.duration_since(bucket.last_refill).as_secs_f64();
|
||||
|
||||
// 3. Token Bucket Refill logic: tokens = current + (time_passed * rate)
|
||||
bucket.tokens = (bucket.tokens + elapsed * refill_rate).min(max_tokens);
|
||||
bucket.last_refill = now;
|
||||
|
||||
// 4. Consumption check
|
||||
if bucket.tokens >= 1.0 {
|
||||
bucket.tokens -= 1.0;
|
||||
drop(bucket); // CRITICAL: Release the lock before proceeding to allow other threads to access the map
|
||||
let future = self.inner.call(req);
|
||||
Box::pin(async move {
|
||||
let res = future.await?;
|
||||
Ok(res)
|
||||
})
|
||||
} else {
|
||||
drop(bucket);
|
||||
// Limit exceeded: return 429 Too Many Requests
|
||||
Box::pin(async move {
|
||||
let err = AppError::Validation("Too many requests".to_string());
|
||||
use axum::response::IntoResponse;
|
||||
Ok(err.into_response())
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,2 +0,0 @@
|
||||
// NOTE(review): Rust modules are conventionally snake_case
// (`anti_enumeration_layer`, `rate_limit_layer`); these CamelCase names will
// trip the `non_snake_case` lint. Renaming would require updating every
// `controller::middleware::…` import path, so it is only flagged here.
pub mod AntiEnumerationLayer;
pub mod RateLimitLayer;
|
||||
@ -3,7 +3,6 @@ use tower_http::trace::TraceLayer;
|
||||
|
||||
use crate::state::AppState;
|
||||
|
||||
mod middleware;
|
||||
pub mod model;
|
||||
mod v1;
|
||||
|
||||
|
||||
@ -3,11 +3,9 @@ use axum::{Json, Router, routing::post};
|
||||
use tower_cookies::{CookieManagerLayer, Cookies};
|
||||
|
||||
use crate::{
|
||||
controller::middleware::AntiEnumerationLayer::AntiEnumerationLayer,
|
||||
controller::middleware::RateLimitLayer::RateLimitLayer,
|
||||
controller::model::auth_model::{AuthResponse, LoginRequest, RegisterRequest},
|
||||
errors::AppError,
|
||||
service::auth_service::{login, register, refresh},
|
||||
service::auth_service::{login, refresh, register},
|
||||
state::AppState,
|
||||
};
|
||||
|
||||
@ -17,19 +15,9 @@ pub fn auth_router(state: AppState) -> Router<AppState> {
|
||||
.route("/register", post(register_handler))
|
||||
.route("/refresh", post(refresh_handler))
|
||||
.route("/logout", post(logout_handler))
|
||||
.layer(AntiEnumerationLayer {
|
||||
min_ms: 150,
|
||||
max_ms: 300,
|
||||
})
|
||||
.layer(RateLimitLayer {
|
||||
state,
|
||||
max_tokens: 5.0,
|
||||
refill_rate: 1.0 / 12.0, // 1 token every 12 seconds = 5 per minute
|
||||
})
|
||||
.layer(CookieManagerLayer::new())
|
||||
}
|
||||
|
||||
|
||||
async fn login_handler(
|
||||
State(s): State<AppState>,
|
||||
cookies: Cookies,
|
||||
@ -52,9 +40,6 @@ async fn refresh_handler(
|
||||
refresh(&s, cookies).await
|
||||
}
|
||||
|
||||
async fn logout_handler(
|
||||
State(s): State<AppState>,
|
||||
cookies: Cookies,
|
||||
) -> Result<(), AppError> {
|
||||
async fn logout_handler(State(s): State<AppState>, cookies: Cookies) -> Result<(), AppError> {
|
||||
crate::service::auth_service::logout(&s, cookies).await
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user