- update: tell consul to use docker dns to resolve CNAME addresses
- add: load balancer for consul services - update: dns lookup to now return the service address - update: docker consul to the latest version
This commit is contained in:
@@ -3,3 +3,4 @@ pub mod null_string;
|
||||
pub mod redis_cache;
|
||||
pub mod service_discovery;
|
||||
pub mod signal_handler;
|
||||
pub mod multi_service_load_balancer;
|
||||
|
||||
135
utils/src/multi_service_load_balancer.rs
Normal file
135
utils/src/multi_service_load_balancer.rs
Normal file
@@ -0,0 +1,135 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use rand::seq::SliceRandom;
|
||||
use crate::service_discovery::get_service_endpoints_by_dns;
|
||||
|
||||
/// Strategy used to pick one endpoint out of a service's endpoint list.
///
/// All variants are unit variants, so the enum is trivially `Copy`;
/// `Debug` is derived because this is a public type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LoadBalancingStrategy {
    /// Pick an endpoint uniformly at random on every call.
    Random,
    /// Cycle through the endpoints in order, wrapping at the end.
    RoundRobin,
}
|
||||
|
||||
/// Key identifying one logical service in the balancer's cache:
/// the service name plus its protocol (e.g. "tcp"/"udp" — the exact
/// values are whatever the DNS lookup layer expects).
///
/// Derives `Hash`/`Eq` so it can key the `HashMap` of per-service state;
/// `Debug` is added because this is a public type.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ServiceId {
    pub name: String,
    pub protocol: String,
}
|
||||
|
||||
impl ServiceId {
|
||||
pub fn new(name: &str, protocol: &str) -> Self {
|
||||
ServiceId {
|
||||
name: name.to_string(),
|
||||
protocol: protocol.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Per-service state: the cached endpoint list for one service plus the
// round-robin cursor into it. `current_index` is only meaningful for the
// RoundRobin strategy and is reset to 0 whenever the state is rebuilt.
struct ServiceState {
    // Resolved socket addresses for the service, in lookup order.
    endpoints: Vec<SocketAddr>,
    // Next position to hand out under RoundRobin; always < endpoints.len()
    // while endpoints is non-empty.
    current_index: usize,
}
|
||||
|
||||
impl ServiceState {
|
||||
fn new(endpoints: Vec<SocketAddr>) -> Self {
|
||||
ServiceState {
|
||||
endpoints,
|
||||
current_index: 0,
|
||||
}
|
||||
}
|
||||
|
||||
fn get_endpoint(&mut self, strategy: &LoadBalancingStrategy) -> Option<SocketAddr> {
|
||||
if self.endpoints.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
match strategy {
|
||||
LoadBalancingStrategy::Random => {
|
||||
let mut rng = rand::thread_rng();
|
||||
self.endpoints.choose(&mut rng).copied()
|
||||
}
|
||||
LoadBalancingStrategy::RoundRobin => {
|
||||
let endpoint = self.endpoints[self.current_index].clone();
|
||||
self.current_index = (self.current_index + 1) % self.endpoints.len();
|
||||
Some(endpoint)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Client-side load balancer caching resolved endpoints for many services.
// Cheap to share: the per-service cache sits behind Arc<Mutex<..>>, so the
// balancer can be used from multiple tasks via &self.
pub struct MultiServiceLoadBalancer {
    // DNS server address handed to the discovery lookup — presumably the
    // Consul DNS endpoint; verify against get_service_endpoints_by_dns.
    consul_url: String,
    // Selection strategy applied to every service's endpoint list.
    strategy: LoadBalancingStrategy,
    // Cached endpoint state, keyed by (name, protocol).
    services: Arc<Mutex<HashMap<ServiceId, ServiceState>>>,
}
|
||||
|
||||
impl MultiServiceLoadBalancer {
    /// Creates a balancer that resolves services against `consul_url`
    /// using `strategy`. The endpoint cache starts empty; endpoints are
    /// fetched lazily on the first `get_endpoint` call per service.
    pub fn new(consul_url: &str, strategy: LoadBalancingStrategy) -> Self {
        MultiServiceLoadBalancer {
            consul_url: consul_url.to_string(),
            strategy,
            services: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    /// Returns one endpoint for `service_name`/`service_protocol`.
    ///
    /// Serves from the cached endpoint list first; on a cache miss (or an
    /// empty cached list) it refreshes via DNS and retries once.
    /// Returns `Ok(None)` when the service resolves to no endpoints.
    ///
    /// # Errors
    /// Propagates DNS lookup failures from the refresh step.
    pub async fn get_endpoint(
        &self,
        service_name: &str,
        service_protocol: &str,
    ) -> Result<Option<SocketAddr>, Box<dyn std::error::Error>> {
        let service_id = ServiceId::new(service_name, service_protocol);

        // Try to get an endpoint from the cache first.
        // NOTE: this block scope is deliberate — the std::sync::Mutex guard
        // must be dropped before the .await below.
        {
            let mut services = self.services.lock().unwrap();
            if let Some(service_state) = services.get_mut(&service_id) {
                if let Some(endpoint) = service_state.get_endpoint(&self.strategy) {
                    return Ok(Some(endpoint));
                }
            }
        }

        // If we don't have endpoints or they're all unavailable, refresh them
        self.refresh_service_endpoints(service_name, service_protocol).await?;

        // Try again after refresh
        let mut services = self.services.lock().unwrap();
        if let Some(service_state) = services.get_mut(&service_id) {
            return Ok(service_state.get_endpoint(&self.strategy));
        }

        Ok(None)
    }

    /// Re-resolves the service via DNS and replaces its cached state.
    ///
    /// Replacing (rather than merging) the entry also resets the
    /// round-robin cursor to the start of the new list.
    ///
    /// # Errors
    /// Propagates failures from `get_service_endpoints_by_dns`.
    pub async fn refresh_service_endpoints(
        &self,
        service_name: &str,
        service_protocol: &str,
    ) -> Result<(), Box<dyn std::error::Error>> {
        // DNS lookup happens before taking the lock, so the mutex is never
        // held across an await point.
        let endpoints = get_service_endpoints_by_dns(
            &self.consul_url,
            service_protocol,
            service_name,
        ).await?;

        let service_id = ServiceId::new(service_name, service_protocol);
        let mut services = self.services.lock().unwrap();

        services.insert(service_id, ServiceState::new(endpoints));
        Ok(())
    }

    /// Refreshes every service currently present in the cache, one at a
    /// time. Stops and returns the first lookup error encountered.
    pub async fn refresh_all_services(&self) -> Result<(), Box<dyn std::error::Error>> {
        // Snapshot the keys first so the lock is released before awaiting
        // the per-service refreshes.
        let service_ids: Vec<ServiceId> = {
            let services = self.services.lock().unwrap();
            services.keys().cloned().collect()
        };

        for service_id in service_ids {
            self.refresh_service_endpoints(&service_id.name, &service_id.protocol).await?;
        }

        Ok(())
    }
}
|
||||
@@ -3,6 +3,7 @@ use hickory_resolver::{Resolver, TokioAsyncResolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::str::FromStr;
|
||||
use tokio::runtime::Runtime;
|
||||
use tracing::log::debug;
|
||||
|
||||
@@ -17,12 +18,12 @@ pub async fn get_service_endpoints_by_dns(consul_url: &str, service_protocol: &s
|
||||
let srv_record = resolver.srv_lookup(&srv_name).await?;
|
||||
|
||||
let mut endpoints = Vec::new();
|
||||
debug!("service records: {:?}", srv_record);
|
||||
for record in srv_record {
|
||||
let hostname = record.target();
|
||||
debug!("hostname: {:?}", hostname);
|
||||
|
||||
// endpoints.push(SocketAddr::new(, record.port()));
|
||||
let lookup_responses = resolver.lookup_ip(hostname.to_string()).await?;
|
||||
for response in lookup_responses {
|
||||
endpoints.push(SocketAddr::from_str(&format!("{}:{}", &response.to_string(), record.port()))?);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(endpoints)
|
||||
|
||||
Reference in New Issue
Block a user