Compare commits
1 commit
main ... 57e0db4950

| Author | SHA1 | Date |
|---|---|---|
|  | 57e0db4950 |  |
Cargo.lock (generated): 4 lines changed

@@ -654,9 +654,9 @@ dependencies = [

[[package]]
name = "libc"
version = "0.2.176"
version = "0.2.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58f929b4d672ea937a23a1ab494143d968337a5f47e56d0815df1e0890ddf174"
checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"

[[package]]
name = "libredox"
Dockerfile: 12 lines changed

@@ -1,12 +0,0 @@
# Build Stage
FROM rust:latest AS builder
WORKDIR /app
COPY . .
RUN cargo build --release

# Runtime Stage
FROM debian:stable-slim
WORKDIR /app
COPY --from=builder /app/target/release/handler .
COPY --from=builder /app/target/release/skubelb .
CMD ["./handler"]
Makefile: 16 lines changed

@@ -1,16 +0,0 @@
MAJOR_VERSION = 0
MINOR_VERSION = 0
PATH_VERSION = 1

TAG = $(MAJOR_VERSION).$(MINOR_VERSION).$(PATH_VERSION)

build:
	docker build . -t skubelb-handler:$(TAG)
	docker tag skubelb-handler:$(TAG) us-west4-docker.pkg.dev/nixernetes/images/skubelb-handler:$(TAG)

kube:
	cat kubernetes.yaml.tmpl | sed 's/TAG/$(TAG)/' > kubernetes.yaml

deploy: build kube
	docker push us-west4-docker.pkg.dev/nixernetes/images/skubelb-handler:$(TAG)
	kubectl apply -f kubernetes.yaml
README.md: 32 lines changed

@@ -144,35 +144,3 @@ spec:
```

NOTE: you will need to make an entry in the firewall to allow this request through. It is very important that the firewall entry has a source filter; registrations should only be allowed from the Kubernetes cluster. Nginx will forward traffic to any host that registers, and this could easily become a MitM vulnerability.

## Other tips

### Use 'upstream' in nginx

Do this:

```
upstream hosts {
    server 10.182.0.36:30004;
    server 10.182.0.39:30004;
}
server {
    server_name git.tipsy.codes tipsy.codes;

    location / {
        proxy_pass http://hosts;
    }
}
```

Rather than just writing the IP out directly in the proxy_pass directive.

### visudo to only allow the nginx reload command

Use `sudo visudo` to update the sudoers file and add this line:

```
skubelb ALL=(root) NOPASSWD: /usr/bin/systemctl reload nginx
```

This prevents the user from running any command other than the nginx reload.
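For reference, the handler applies this reload by splitting a configured command string on whitespace and running it via `std::process::Command` (visible in the `src/main.rs` hunks further down). A minimal sketch of that invocation, assuming the command is configured as `sudo systemctl reload nginx` to match the sudoers rule above:

```
use std::process::Command;

fn main() {
    // Hypothetical value; in the handler this string comes from a CLI flag.
    let reload_cmd = "sudo systemctl reload nginx";
    let parts: Vec<&str> = reload_cmd.split_ascii_whitespace().collect();

    // First token is the program, the rest are its arguments.
    let output = Command::new(parts[0])
        .args(&parts[1..])
        .output()
        .expect("failed to run reload command");
    println!("exit status: {}", output.status);
}
```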

@@ -1,41 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: skubelb
  namespace: skubelb
  labels:
    k8s-app: skubelb
spec:
  selector:
    matchLabels:
      name: skubelb
  template:
    metadata:
      labels:
        name: skubelb
    spec:
      tolerations:
      # these tolerations are to have the daemonset runnable on control plane nodes
      # remove them if your control plane nodes should not run pods
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      containers:
      - name: skubelb
        image: us-west4-docker.pkg.dev/nixernetes/images/skubelb-handler:0.0.1
        env:
        - name: NODE_IP
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        command: ["sh", "-c", "./handler -s 10.128.0.2:8888 -l ${NODE_IP}"]
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 10m
            memory: 100Mi
      terminationGracePeriodSeconds: 30

@@ -1,41 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: skubelb
  namespace: skubelb
  labels:
    k8s-app: skubelb
spec:
  selector:
    matchLabels:
      name: skubelb
  template:
    metadata:
      labels:
        name: skubelb
    spec:
      tolerations:
      # these tolerations are to have the daemonset runnable on control plane nodes
      # remove them if your control plane nodes should not run pods
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      containers:
      - name: skubelb
        image: us-west4-docker.pkg.dev/nixernetes/images/skubelb-handler:TAG
        env:
        - name: NODE_IP
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        command: ["sh", "-c", "./handler -s 10.128.0.2:8888 -l ${NODE_IP}"]
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 10m
            memory: 100Mi
      terminationGracePeriodSeconds: 30
src/main.rs: 78 lines changed

@@ -1,26 +1,22 @@
use std::process::Command;
use std::sync::Arc;
use std::sync::Mutex;
use std::thread;
use std::thread::sleep;
use std::time::Duration;
use std::process::Command;

use clap::Parser;

use skubelb::Rewriter;
use skubelb::Server;

use anyhow::{Result, anyhow};
use env_logger::Env;
use log::{info, warn};
use rouille::{Request, Response, router};
use anyhow::Result;
use rouille::{router, Request, Response};

/// Implements a HTTP server which allows clients to 'register'
/// themselves. Their IP address will be used to replace a
/// needle in a set of config files. This is intended to be
/// used as a low-cost way of enabling Kubernetes ingress
/// using nginx running on a machine that has a public port.
///
///
/// The needle is expected to be a dummy IP address; something
/// fairly unique. The goal is to replace nginx files, where
/// we often repeat lines if we want nginx to load balance between

@@ -41,7 +37,7 @@ struct Args {
    template_dir: String,

    /// The symlink that should be updated each time the config changes.
    ///
    ///
    /// Symlinks are used because file updates are not atomic.
    #[arg(short, long)]
    config_symlink: String,

@@ -65,31 +61,10 @@ fn main() {
    let args = Args::parse();

    let rewriter = Rewriter::new(args.needle);
    let server_impl = Arc::new(Mutex::new(Server::new(
        rewriter,
        args.workspace_dir,
        args.template_dir,
        args.config_symlink,
    )));
    let server_impl = Mutex::new(Server::new(rewriter, args.workspace_dir, args.template_dir, args.config_symlink));
    let reload_command = args.reload_cmd.leak();
    let reload_command: Vec<&str> = reload_command.split_ascii_whitespace().collect();

    // Start cleanup thread
    {
        let server_impl = server_impl.clone();
        thread::spawn(move || {
            loop {
                sleep(Duration::from_secs(30));
                match cleanup_worker(&server_impl) {
                    Ok(_) => (),
                    Err(e) => {
                        warn!("Error cleaning up handlers {}", e);
                    }
                }
            }
        });
    }

    rouille::start_server(args.listen, move |request| {
        info!("Processing request: {:?}", request);
        match handle(request, &server_impl) {

@@ -101,30 +76,19 @@ fn main() {
                    match output {
                        Ok(o) => {
                            info!("Ran {:?}; exit code: {}", reload_command, o.status);
                            info!(
                                "Ran {:?}; stdout: {}",
                                reload_command,
                                String::from_utf8_lossy(&o.stdout)
                            );
                            info!(
                                "Ran {:?}; stderr: {}",
                                reload_command,
                                String::from_utf8_lossy(&o.stderr)
                            );
                        }
                            info!("Ran {:?}; stdout: {}", reload_command, String::from_utf8_lossy(&o.stdout));
                            info!("Ran {:?}; stderr: {}", reload_command, String::from_utf8_lossy(&o.stderr));
                        },
                        Err(e) => {
                            warn!("Failed to run {:?}: {:?}", reload_command, e);
                        }
                    };
                }
                resp
            }
            },
            Err(e) => {
                warn!("{:?}", e);
                Response {
                    status_code: 500,
                    ..Response::empty_400()
                }
                Response{status_code: 500, ..Response::empty_400()}
            }
        }
    });

@@ -133,23 +97,13 @@ fn main() {
fn handle(request: &Request, server_impl: &Mutex<Server>) -> Result<(Response, bool)> {
    router!(request,
        (POST) (/register/{ip: String}) => {
            let mut server_impl = server_impl.lock().map_err(|_| anyhow!("failed to acquire lock"))?;
            server_impl.register(request, &ip)?;
            let must_reload = server_impl.cleanup()?;
            Ok((Response{status_code: 200, ..Response::empty_204()}, must_reload))
            server_impl.lock().unwrap().register(request, &ip)?;
            Ok((Response{status_code: 200, ..Response::empty_204()}, true))
        },
        (DELETE) (/register/{ip: String}) => {
            let mut server_impl = server_impl.lock().map_err(|_| anyhow!("failed to acquire lock"))?;
            server_impl.unregister(request, &ip)?;
            let must_reload = server_impl.cleanup()?;
            Ok((Response{status_code: 200, ..Response::empty_204()}, must_reload))
            server_impl.lock().unwrap().unregister(request, &ip)?;
            Ok((Response{status_code: 200, ..Response::empty_204()}, true))
        },
        _ => Ok((Response::empty_404(), false)),
    )
}

fn cleanup_worker(server_impl: &Mutex<Server>) -> Result<()> {
    let mut server_impl = server_impl.lock().map_err(|_| anyhow!("failed to acquire lock"))?;
    server_impl.cleanup()?;
    Ok(())
}
}
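The `Arc<Mutex<Server>>` handed to a background cleanup thread in the hunks above is a standard shared-state pattern: both the periodic worker and the request handler need a handle to the same `Server`. A minimal, self-contained sketch of the same idea (the names and the `Vec<String>` payload here are illustrative, not from this repo):

```
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

fn main() {
    // Shared state guarded by a mutex; Arc lets each thread hold its own handle.
    let state = Arc::new(Mutex::new(Vec::<String>::new()));

    // The periodic cleanup thread gets a clone of the Arc.
    let cleanup_state = Arc::clone(&state);
    thread::spawn(move || loop {
        thread::sleep(Duration::from_secs(30));
        cleanup_state.lock().unwrap().clear();
    });

    // The "request handler" path keeps using the original handle.
    state.lock().unwrap().push("10.0.0.5".to_string());
}
```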

@@ -1,7 +1,6 @@
use anyhow::Result;
use std::collections::{HashMap, HashSet};
use std::collections::HashSet;
use std::path::Path;
use std::time::Instant;
use std::{
    fs::{self, File},
    io::{BufReader, prelude::*},

@@ -10,8 +9,6 @@ use std::{
pub struct Rewriter {
    source: String,
    replacements: HashSet<String>,
    // When each replacement should be cleaned up
    replacement_cleanup: HashMap<String, Instant>,
}

impl Rewriter {

@@ -19,18 +16,15 @@ impl Rewriter {
        Self {
            source,
            replacements: HashSet::new(),
            replacement_cleanup: HashMap::default(),
        }
    }

    pub fn add_replacement(&mut self, replacement: String, cleanup: Instant) -> bool {
        self.replacement_cleanup.insert(replacement.clone(), cleanup);
        self.replacements.insert(replacement)
    pub fn add_replacement(&mut self, replacement: String) {
        self.replacements.insert(replacement);
    }

    pub fn remove_replacement(&mut self, replacement: &str) -> bool {
        self.replacement_cleanup.remove(replacement);
        self.replacements.remove(replacement)
    pub fn remove_replacement(&mut self, replacement: &str) {
        self.replacements.remove(replacement);
    }

    pub fn rewrite_folder(&self, src: &str, dst: &str) -> Result<()> {

@@ -97,21 +91,6 @@ impl Rewriter {
        }
        Ok(())
    }

    pub fn cleanup(&mut self) -> bool {
        let now = Instant::now();
        let mut to_remove = vec![];
        for (name, when) in self.replacement_cleanup.iter() {
            if when < &now {
                to_remove.push(name.clone());
            }
        }
        let will_cleanup = to_remove.len() > 0;
        for name in to_remove {
            self.remove_replacement(&name);
        }
        will_cleanup
    }
}

fn contains(needle: &[u8], haystack: &[u8]) -> Option<(usize, usize)> {

@@ -131,8 +110,6 @@ fn contains(needle: &[u8], haystack: &[u8]) -> Option<(usize, usize)> {

#[cfg(test)]
mod tests {
    use std::time::{Duration, Instant};

    use include_directory::{Dir, include_directory};
    use tempdir::TempDir;

@@ -146,11 +123,10 @@ mod tests {
        let src = testdata.path().join("testsrc");
        let dst = TempDir::new("").unwrap();

        let now = Instant::now();
        let mut rewriter = Rewriter::new("to_be_replaced".into());
        rewriter.add_replacement("abc".into(), now.checked_add(Duration::new(60*60*24, 0)).unwrap());
        rewriter.add_replacement("def".into(), now);
        rewriter.add_replacement("zyx".into(), now);
        rewriter.add_replacement("abc".into());
        rewriter.add_replacement("def".into());
        rewriter.add_replacement("zyx".into());
        rewriter.remove_replacement("zyx");
        rewriter
            .rewrite_folder(

@@ -163,20 +139,5 @@ mod tests {
        assert!(
            dir_diff::is_different(testdata.path().join("testdst"), dst.path()).unwrap() == false
        );

        // Trigger the cleanup, which should GC abc
        let dst = TempDir::new("").unwrap();
        rewriter.cleanup();
        rewriter
            .rewrite_folder(
                src.as_os_str().to_str().unwrap(),
                dst.path().as_os_str().to_str().unwrap(),
            )
            .unwrap();

        // Validate that everything matches
        assert!(
            dir_diff::is_different(testdata.path().join("testdst_after_gc"), dst.path()).unwrap() == false
        );
    }
}
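One side of this diff gives `Rewriter` a `replacement_cleanup: HashMap<String, Instant>` plus a `cleanup()` pass, while `Server::register` (in the hunks below) stamps each registration with a 60-second deadline. A condensed sketch of that lease mechanism, using illustrative names rather than the repo's exact code:

```
use std::collections::HashMap;
use std::time::{Duration, Instant};

struct Registry {
    deadlines: HashMap<String, Instant>,
}

impl Registry {
    // Register an IP with a 60-second lease.
    fn register(&mut self, ip: &str) {
        let deadline = Instant::now() + Duration::from_secs(60);
        self.deadlines.insert(ip.to_string(), deadline);
    }

    // Drop every lease whose deadline has passed; report whether anything expired.
    fn cleanup(&mut self) -> bool {
        let now = Instant::now();
        let before = self.deadlines.len();
        self.deadlines.retain(|_, when| *when >= now);
        before != self.deadlines.len()
    }
}
```

A periodic worker (the 30-second loop in `main.rs` above) calls the cleanup pass and regenerates the nginx config only when something actually expired.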

@@ -1,13 +1,9 @@
use std::{
    fs,
    path::Path,
    time::{Duration, Instant},
};
use std::{fs, path::Path};

use anyhow::{Context, Result, anyhow};
use chrono::Utc;
use log::info;
use rouille::Request;
use anyhow::{Context, Result};

use crate::Rewriter;

@@ -22,12 +18,7 @@ pub struct Server {
}

impl Server {
    pub fn new(
        rewriter: Rewriter,
        workspace_dir: String,
        template_dir: String,
        config_dir: String,
    ) -> Self {
    pub fn new(rewriter: Rewriter, workspace_dir: String, template_dir: String, config_dir: String) -> Self {
        Self {
            rewriter,
            workspace_dir,

@@ -36,30 +27,17 @@ impl Server {
        }
    }

    pub fn cleanup(&mut self) -> Result<bool> {
        let cleaned_up = self.rewriter.cleanup();
        if cleaned_up {
            self.generate_config()?;
        }
        Ok(cleaned_up)
    }

    pub fn register(&mut self, _request: &Request, ip: &str) -> Result<()> {
        info!("Registering {} as a handler", ip);
        let cleanup_time = Instant::now()
            .checked_add(Duration::from_secs(60))
            .ok_or(anyhow!("failed to convert time"))?;
        if self.rewriter.add_replacement(ip.to_string(), cleanup_time) {
            self.generate_config()?;
        }
        self.rewriter.add_replacement(ip.to_string());
        self.generate_config()?;
        Ok(())
    }

    pub fn unregister(&mut self, _request: &Request, ip: &str) -> Result<()> {
        info!("Deregistering {} as a handler", ip);
        if self.rewriter.remove_replacement(ip) {
            self.generate_config()?;
        }
        self.rewriter.remove_replacement(ip);
        self.generate_config()?;
        Ok(())
    }

@@ -71,13 +49,11 @@ impl Server {
        let path = Path::new(&self.workspace_dir).join(&now.format("%Y/%m/%d/%s").to_string());
        let path = path.as_os_str().to_str().unwrap();
        fs::create_dir_all(path).with_context(|| "creating directory")?;
        self.rewriter
            .rewrite_folder(&self.template_dir, path)
            .with_context(|| "generating configs")?;
        self.rewriter.rewrite_folder(&self.template_dir, path).with_context(|| "generating configs")?;
        // Finally, symlink it to the output folder; only support Linux for now
        let symlink = Path::new(&self.workspace_dir).join("symlink.tmp");
        std::os::unix::fs::symlink(path, &symlink).with_context(|| "creating symlink")?;
        fs::rename(symlink, &self.config_dir).with_context(|| "renaming symlink")?;
        Ok(())
    }
}
}
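For orientation, the routes in `src/main.rs` (`POST /register/{ip}` and `DELETE /register/{ip}`) are what a node calls to announce or withdraw itself; `Server::register`/`unregister` then rewrite the nginx config. The client binary is not part of this diff, so the following is only a hypothetical std-only registration call, assuming the handler is reachable at the `10.128.0.2:8888` address passed via `-s` in the DaemonSet manifests above:

```
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    // Hypothetical addresses: the handler endpoint and this node's IP.
    let handler = "10.128.0.2:8888";
    let node_ip = "10.182.0.36";

    let mut stream = TcpStream::connect(handler)?;
    let request = format!(
        "POST /register/{} HTTP/1.1\r\nHost: {}\r\nContent-Length: 0\r\nConnection: close\r\n\r\n",
        node_ip, handler
    );
    stream.write_all(request.as_bytes())?;

    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    println!("{}", response.lines().next().unwrap_or(""));
    Ok(())
}
```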

src/testdata/testdst_after_gc/hello.txt (vendored): 4 lines changed

@@ -1,4 +0,0 @@
This is a line
This is abc line

This is another line

@@ -1,3 +0,0 @@
This is a abc line.

In a nested directory.