Compare commits

...

37 commits
v0.9.0 ... main

Author SHA1 Message Date
3f6edc7662 Hubris 2024-08-17 08:30:41 -07:00
aad9c74a63 Vanity 2024-08-17 08:29:27 -07:00
eede5b0e50 Support showing and hiding anonymous ports
I'm still not convinced that showing a big list of disabled ports is
the right thing to do so here's the ability to turn it off.
2024-08-17 08:03:41 -07:00
3430cae957 Add fwd-browse to the debian package 2024-08-16 10:36:56 -07:00
a4df8fc588 This is a better name for the build 2024-08-16 10:29:37 -07:00
9e8fa4d0a6 Add debian build to release matrix 2024-08-16 10:26:50 -07:00
940e573468 Support for debian packaging 2024-08-16 10:23:57 -07:00
f13139e79b Remove older documentation notes 2024-08-16 10:23:44 -07:00
ff92002dcf Update year I guess 2024-08-16 10:22:53 -07:00
2d1c8a4ceb Set the github token for the release action 2024-08-15 11:59:00 -07:00
241e8e1eea This is broken because I have consummate vs 2024-08-15 11:52:50 -07:00
666456e456 More release stuff (tools) 2024-08-15 11:50:44 -07:00
73126ba770 Update the release workflow
Use the python automation script instead
2024-08-15 11:43:53 -07:00
9c9f7cfa82 Release automation
There are a lot of steps in preparing the release and so I'm trying to
make sure that we're in a place where I can iterate on it locally.
2024-08-15 11:40:04 -07:00
7a40326719 Re-work config code
Add raw description as a possible config for a port, and update the
documentation appropriately.
2024-08-15 10:14:43 -07:00
74e2da2f29 Man page edits 2024-08-14 11:24:23 -07:00
cfde429786 A man page, somewhat 2024-08-14 11:22:50 -07:00
afa13bf920 This description is out of date
Given the introduction of anonymous ports
2024-08-14 10:52:19 -07:00
38fbfbd918 Move config file to ~/.config/fwd/config.toml
Presumably this also works for MacOS and windows.

While doing this, move away from xdg and home and use this
directories-next crate instead. Reverse connections still seem to
work.
2024-08-14 10:51:19 -07:00
663ce42016 tempdir -> tempfile
According to the documentation of the tempdir crate
2024-08-13 10:59:47 -07:00
e44d4dea7a Also update the fuzzing targets, I guess 2024-08-13 10:56:29 -07:00
4fe255e7d2 Fix colors in the help box
When the lines of the help box overlap with disabled or error'd ports
you might notice that those lines are dark grey or red. That's
surprising!

The bug is that Style::default() means "don't change anything", just
continue being whatever color the current cell is, which is deeply
surprising. What we really want here is `Style::reset()`, which means
"reset the colors to whatever the terminal would show by default."
2024-08-13 10:52:20 -07:00
b381f71692 Move from tui to ratatui
Tui is no longer supported, ratatui is the new hotness. Fortunately
there is very little difference between the two, except I've noticed a
fun new bug in the help screen. (Maybe it's been there the whole time?)
2024-08-13 10:44:58 -07:00
7e047626df Bump to the next version 2024-08-13 07:24:29 -07:00
68f3c4fa4e Experimental updates to release workflow 2024-08-13 07:23:59 -07:00
df914e68f2 I *think* I need something other than macos-12 for aarch64
https://docs.github.com/en/actions/using-github-hosted-runners/using-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories
2024-08-12 17:57:36 -07:00
a7202010d0 Probably we should run tests as part of release? 2024-08-12 17:43:17 -07:00
df0ca4ce31 Remove "users" crate, call libc directly
This is all I actually needed anyways
2024-08-12 17:37:34 -07:00
35dcf93971 Add fuzzing based on serde_json
This test ensures that we can parse anything that serde_json can
produce, which *ought* to ensure reasonable coverage?
2024-08-12 17:18:26 -07:00
43f6b75762 Rename fuzz target to something more meaningful 2024-08-12 11:44:02 -07:00
542127f723 Handle transfer-encoding chunked in docker responses
Yeah, OK, thanks HTTP.
2024-08-12 11:28:59 -07:00
665fccf753 Add trace logging to the docker refresh
That way we can see what's going on with docker responses if they're weird.
2024-08-12 10:07:42 -07:00
e27b788e8f Fuzzing for the json decoder
Hey it seems like it's working!
2024-08-12 09:43:56 -07:00
77cbf1700f Check for unterminated strings properly
Also, public to enable fuzzing. This was the first catch!
2024-08-12 09:41:22 -07:00
9b0a39fa90 Bump crate version 2024-08-12 09:14:22 -07:00
4647226ee7 Handle blank input a little more cleanly 2024-08-12 09:11:21 -07:00
03de4a4661 Yet another tweak to git process for cargo publish 2024-08-10 09:00:20 -07:00
22 changed files with 3448 additions and 505 deletions

View file

@ -1,10 +1,3 @@
# From https://github.com/BurntSushi/ripgrep/blob/master/.github/workflows/release.yml
# Which is also via https://eugene-babichenko.github.io/blog/2020/05/09/github-actions-cross-platform-auto-releases/
# ...both of which are very good.
#
# I'm sure I don't need half the stuff I have in here (around cargo
# customization and whatnot) but.
#
name: release
on:
@ -13,109 +6,96 @@ on:
tags:
- "v[0-9]+.[0-9]+.[0-9]+"
permissions:
contents: write
jobs:
create_release:
name: Create release
runs-on: ubuntu-22.04
outputs:
upload_url: ${{ steps.create_release.outputs.upload_url }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Get the release version
if: env.VERSION == ''
run: echo "VERSION=${{ github.ref_name }}" >> $GITHUB_ENV
- name: Show the version
run: |
echo "version is: $VERSION"
- name: Create GitHub release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: Release ${{ github.ref }}
draft: true
run: gh release create $VERSION --draft --verify-tag --title $VERSION
release_assets:
name: Release assets
outputs:
version: ${{ env.VERSION }}
build_release:
name: Build all the stuff
needs: ['create_release'] # We need to know the upload URL
runs-on: ${{ matrix.os }} # We run many different builds
env:
# For some builds, we use cross to test on 32-bit and big-endian
# systems.
CARGO: cargo
# When CARGO is set to CROSS, this is set to `--target matrix.target`.
TARGET_FLAGS: ""
# When CARGO is set to CROSS, TARGET_DIR includes matrix.target.
TARGET_DIR: ./target
# Emit backtraces on panics.
RUST_BACKTRACE: 1
strategy:
fail-fast: false
matrix:
build: ['linux', 'macos', 'arm-macos', 'windows']
build: ['linux', 'debian', 'macos', 'arm-macos', 'windows']
include:
- build: linux
os: ubuntu-22.04
target: x86_64-unknown-linux-musl
packages: apt
- build: debian
os: ubuntu-22.04
target: x86_64-unknown-linux-musl
packages: apt
- build: macos
os: macos-12
os: macos-latest
target: x86_64-apple-darwin
packages: brew
- build: arm-macos
os: macos-12
os: macos-latest
target: aarch64-apple-darwin
packages: brew
- build: windows
os: windows-2022
target: x86_64-pc-windows-msvc
packages: none
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install Rust
- name: Install packages (linux)
if: matrix.packages == 'apt'
run: |
sudo apt-get update
sudo apt-get install -y pandoc
- name: Install packages (macos)
if: matrix.packages == 'brew'
run: |
brew update
brew install pandoc
- name: Install rust
uses: dtolnay/rust-toolchain@stable
with:
target: ${{ matrix.target }}
- name: Use Cross
- name: Run the release automation
shell: bash
run: |
cargo install cross
echo "CARGO=cross" >> $GITHUB_ENV
echo "TARGET_FLAGS=--target ${{ matrix.target }}" >> $GITHUB_ENV
echo "TARGET_DIR=./target/${{ matrix.target }}" >> $GITHUB_ENV
- name: Build release binary
run: ${{ env.CARGO }} build --verbose --release ${{ env.TARGET_FLAGS }}
- name: Strip release binary (linux and macos)
if: matrix.build == 'linux' || matrix.build == 'macos' || matrix.build == 'arm-macos'
run: |
strip "target/${{ matrix.target }}/release/fwd"
strip "target/${{ matrix.target }}/release/fwd-browse"
- name: Build archive
shell: bash
run: |
staging="fwd-${{ matrix.target }}"
mkdir -p "$staging"
if [ "${{ matrix.os }}" = "windows-2022" ]; then
cp "target/${{ matrix.target }}/release/fwd.exe" "$staging/"
7z a "$staging.zip" "$staging"
echo "ASSET=$staging.zip" >> $GITHUB_ENV
else
cp "target/${{ matrix.target }}/release/fwd" "$staging/"
cp "target/${{ matrix.target }}/release/fwd-browse" "$staging/"
tar czf "$staging.tar.gz" "$staging"
echo "ASSET=$staging.tar.gz" >> $GITHUB_ENV
fi
- name: Upload release archive
uses: actions/upload-release-asset@v1.0.2
env:
RELEASE_TAG: ${{ needs.create_release.outputs.version }}
BUILD: ${{ matrix.build }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.create_release.outputs.upload_url }}
asset_name: ${{ env.ASSET }}
asset_path: ${{ env.ASSET }}
asset_content_type: application/octet-stream
run: python3 release.py

740
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,12 +1,14 @@
[package]
name = "fwd"
version = "0.9.0"
version = "0.9.2"
authors = ["John Doty <john@d0ty.me>"]
edition = "2021"
license = "MIT"
description = "Automatically forward ports to a remote server over ssh"
description = "Automatically forward ports to a remote server"
readme = "README.md"
documentation = "https://github.com/DeCarabas/fwd"
homepage = "https://github.com/DeCarabas/fwd"
repository = "https://github.com/DeCarabas/fwd"
readme = "README.md"
[[bin]]
name = "fwd-browse"
@ -17,27 +19,46 @@ bench = false
anyhow = "1.0"
bytes = "1"
copypasta = "0.10.1"
crossterm = { version = "0.25", features = ["event-stream"] }
crossterm = { version = "0.28.1", features = ["event-stream"] }
directories-next = "2"
env_logger = { version = "0.11.5", default-features = false }
home = "0.5.4"
indoc = "1"
log = { version = "0.4", features = ["std"] }
open = "3"
rand = "0.8.5"
ratatui = "0.28.0"
thiserror = "1.0"
tokio = { version = "1", features = ["io-std", "io-util", "macros", "net", "process", "rt", "rt-multi-thread", "fs"] }
tokio-stream = "0.1"
toml = "0.5"
tui = "0.19"
xdg = "2"
[dev-dependencies]
assert_matches = "1"
pretty_assertions = "1"
tempdir = "0.3"
tempfile = "3"
[target.'cfg(target_os="linux")'.dependencies]
procfs = "0.14.1"
[target.'cfg(target_family="unix")'.dependencies]
users = "0.11"
libc = "0.2.155"
[package.metadata.deb]
section = "net"
depends = [] # No auto deps?
assets = [
["target/release/fwd", "usr/bin/", "755"],
["target/release/fwd-browse", "usr/bin/", "755"],
["LICENSE", "usr/share/doc/fwd/", "644"],
["README.md", "usr/share/doc/fwd/README", "644"],
# The man page is automatically generated by fwd's build process. See
# release.py for details.
["target/release/fwd.1", "usr/share/man/man1/fwd.1", "644"],
]
extended-description = """\
fwd enumerates the listening ports on the remote server and automatically listens
for connections on the same ports on the local machine. When fwd receives a
connection on the local machine, it automatically forwards that connection to
the remote machine.
"""

View file

@ -1,4 +1,4 @@
Copyright 2022 John Doty
Copyright 2024 John Doty
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View file

@ -4,7 +4,7 @@ A port-forwarding utility.
Here's how it works:
1. Get the latest [release](https://github.com/DeCarabas/fwd/releases) of `fwd`
2. You install `fwd` on the server somewhere in your `$PATH` (like `/usr/bin/`)
2. You install `fwd` on the server somewhere in your `$PATH` (like `/usr/bin/`, or `.local/bin`)
3. You install `fwd` on the client (like your laptop)
4. You run `fwd` on the client to connect to the server, like so:
@ -13,6 +13,9 @@ doty@my.laptop$ fwd some.server
```
`fwd` will connect to `some.server` via ssh, and then show you a screen listing all of the ports that the server is listening on locally.
<img width="1337" src="doc/screenshot-01.png" alt="A terminal displaying a list of ports and descriptions. Some are dimmed and one is highlighted." />
Use the up and down arrow keys (or `j`/`k`) to select the port you're interested in and press `e` to toggle forwarding of that port.
Now, connections to that port locally will be forwarded to the remote server.
@ -21,9 +24,3 @@ If the port is something that might be interesting to a web browser, you can pre
If something is going wrong, pressing `l` will toggle logs that might explain it.
Press `q` to quit.
## Future Improvements:
- Clipboard integration: send something from the remote end of the pipe to the host's clipboard. (Sometimes you *really* want to copy some big buffer from the remote side and your terminal just can't make that work.)
- Client heartbeats: I frequently wind up in a situation where the pipe is stalled: not broken but nothing is getting through. (This happens with my coder.com pipes all the time.)

View file

@ -21,14 +21,36 @@ fn file_contents<P: AsRef<Path>>(path: P) -> String {
std::fs::read_to_string(path).expect("Unable to read file")
}
/// Resolve `path` relative to the root of the enclosing git checkout.
///
/// Shells out to `git rev-parse --show-toplevel` to find the repository
/// root, then joins `path` onto it. Panics (failing the build script) if
/// git cannot be launched, reports failure, or emits non-UTF-8 output.
fn git_rel<P: AsRef<Path>>(path: P) -> PathBuf {
    let output = std::process::Command::new("git")
        .arg("rev-parse")
        .arg("--show-toplevel")
        .output()
        .expect("Error launching git rev-parse");
    if !output.status.success() {
        let stderr = std::str::from_utf8(&output.stderr)
            .expect("git failed and stderr was not utf8");
        eprintln!("`git rev-parse --show-toplevel` failed, stderr: {stderr}");
        panic!("`git rev-parse --show-toplevel` failed");
    }
    let toplevel = std::str::from_utf8(&output.stdout)
        .expect("Output was not utf-8")
        .trim();
    PathBuf::from(toplevel).join(path)
}
/// Emit the current git commit.
fn emit_git_commit() {
// Fetch the current commit from the head. We do it this way instead of
// asking `git rev-parse` to do it for us because we want to reliably
// tell cargo which files it should monitor for changes.
let head = file_contents("./.git/HEAD");
let head = file_contents(git_rel(".git/HEAD"));
let rev = if let Some(r) = head.strip_prefix("ref: ") {
let mut ref_path = PathBuf::from("./.git/");
let mut ref_path = git_rel(".git/");
ref_path.push(r.trim());
file_contents(ref_path)
} else {

156
doc/fwd.man.md Normal file
View file

@ -0,0 +1,156 @@
% fwd(1)
% John Doty <john@d0ty.me>
% August 2024
# NAME
fwd - Automatically forward connections to remote machines
# SYNOPSIS
**fwd** [OPTIONS] SERVER
**fwd** [OPTIONS] browse URL
**fwd** [OPTIONS] clip FILE
**fwd-browse** URL
# DESCRIPTION
**fwd** enumerates the listening ports on the remote server and automatically listens for connections on the same ports on the local machine.
When **fwd** receives a connection on the local machine, it automatically forwards that connection to the remote machine.
**-s**, **-\-sudo**
: Run the server side of fwd with `sudo`.
: This allows the client to forward ports that are open by processes being run under other accounts (e.g., docker containers being run as root), but requires sudo access on the server and *might* end up forwarding ports that you do not want forwarded (e.g., port 22 for sshd, or port 53 for systemd.)
**-\-log-filter** **FILTER**
: Set remote server's log level. Default is `warn`.
: Supports all of Rust's env_logger filter syntax, e.g. `--log-filter=fwd::trace`.
**-\-version**
: Print the version of fwd and exit.
# INTERACTIVE COMMANDS
Once **fwd** is connected, it displays an interactive list of the ports available on the remote server.
- Ports that **fwd** is listening on are displayed in the default terminal color.
- Ports that **fwd** is aware of but which are disabled are displayed in dark gray.
- Ports that **fwd** has tried to listen on but which have failed are displayed in red.
Details on the error may be found in the log window.
Disabling and re-enabling the port will cause **fwd** to try again.
The following commands are available while **fwd** is connected:
**Esc, q, Ctrl-C**
: Exit **fwd**.
**?, h**
: Display the help window.
**Up, k**
: Select the previous port in the list.
**Down, j**
: Select the next port in the list.
**Enter**
: Attempt to browse to localhost on the specified port with the default browser.
**a**
: Hide or show anonymous ports.
: (See "identifying ports" below for more information on anonymous ports.)
**e**
: Enable or disable the selected port.
**l**
: Show or hide the log window.
# IDENTIFYING PORTS
**fwd** enumerates all of the ports that the remote server is listening on, and attempts to identify the process that is listening on each port.
It can identify ports in the following ways:
*docker*
: **fwd** will attempt to find and connect to a docker engine on the remote machine.
: If successful, it will list all of the forwarded ports, and identify each port as belonging to that docker container.
*procfs*
: On Linux, the listening ports are found by reading procfs and mapping them back to process command lines.
: **fwd** can only identify processes that the user it is connected as has permissions to read on the remote machine.
(Earlier methods take precedence over later methods.)
If **fwd** cannot identify the process that is listening on a given port, then the port is *anonymous*.
Anonymous ports are not enabled by default, but can be enabled manually, either with the UI or by configuration.
# OPENING BROWSERS
**fwd** can be used to open URLs in the default browser on the local machine.
Run **fwd browse URL** on the remote server to open the `URL` in the default browser on the local machine.
This only works if **fwd** is connected, and if the user running **fwd browse** is the same as the user that connected the **fwd** session.
The **fwd-browse** program acts as a wrapper around **fwd browse**, to be used with configurations that can't handle a browser being a program with an argument.
# CLIPBOARD
**fwd** can be used from the remote machine to place text on the clipboard of the local machine.
Run **fwd clip FILE** to copy the contents of the named file to the clipboard.
If **FILE** is **-**, this reads text from stdin instead.
# CONFIGURATION
**fwd** can be configured with a configuration file.
- On Windows, the config file will be in your roaming AppData folder.
(e.g., *c:\\Users\\Winifred\\AppData\\Roaming\\fwd\\config\\config.toml*)
- On MacOS, the config file will be in *$HOME/Library/Application Support/fwd/config.toml*.
(e.g., /Users/Margarie/Library/Application Support/fwd/config.toml)
- On XDG-ish systems (like Linux), the config file is in *~/.config/fwd/config.toml*.
(e.g., */home/lynette/.config/fwd/config.toml*)
The following is an example of a *config.toml* file:
```
auto=true # whether `fwd` should enable identified ports (default true)
[servers.foo] # Server-specific settings for foo
auto=true # defaults to the global setting
ports=[1080, 1082] # ports that are always present
[servers.bar.ports] # `ports` can also be a table with port numbers as keys
1080=true # the values can be booleans (for enabled)...
1081="My program" # or strings (for descriptions).
[servers.bar.ports.1082] # port values can also be tables
enabled=true
description="A humble python"
```
Ports that are specified in the configuration file will always be present in the list of ports for a given server, even if no process is listening on that port.
# TROUBLESHOOTING
Connections are made via the **ssh** command.
Your **ssh** must:
- Be on your path, so that **fwd** can find it to invoke it
- Be able to authenticate you to the remote server.
(Interactive authentication is fine.)
- Understand the **-D** command line option, to operate as a SOCKS5 server
- Be able to start the **fwd** command on the remote server
A typical ssh invocation from **fwd** looks like:
```bash
ssh -T -D XXXX me@server FWD_LOG=warning FWD_SEND_ANONYMOUS=1 fwd --server
```
**fwd** only enumerates ports that are listening on loopback addresses (e.g., 127.0.0.1) or on all addresses (e.g., 0.0.0.0).
If it cannot find a particular port, check to make sure that the process listening on that port is accessible via localhost.
# SEE ALSO
ssh(1)

BIN
doc/screenshot-01.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 83 KiB

4
fuzz/.gitignore vendored Normal file
View file

@ -0,0 +1,4 @@
target
corpus
artifacts
coverage

1735
fuzz/Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

30
fuzz/Cargo.toml Normal file
View file

@ -0,0 +1,30 @@
[package]
name = "fwd-fuzz"
version = "0.0.0"
publish = false
edition = "2021"
[package.metadata]
cargo-fuzz = true
[dependencies]
arbitrary = { version = "1.3.2", features = ["derive"] }
libfuzzer-sys = "0.4"
serde_json = "1.0.124"
[dependencies.fwd]
path = ".."
[[bin]]
name = "json_raw_input"
path = "fuzz_targets/json_raw_input.rs"
test = false
doc = false
bench = false
[[bin]]
name = "json_only_valid_serde"
path = "fuzz_targets/json_only_valid_serde.rs"
test = false
doc = false
bench = false

View file

@ -0,0 +1,77 @@
#![no_main]
use arbitrary::{Arbitrary, Error, Unstructured};
use libfuzzer_sys::fuzz_target;
use std::collections::HashMap;
extern crate fwd;
use fwd::server::refresh::docker::JsonValue;
/// InputNumber is a JSON number, i.e., a finite 64-bit floating point value
/// that is not NaN. We need to define our own little wrapper here so that we
/// can convince Arbitrary to only make finite f64s.
///
/// Ideally we would actually wrap serde_json::Number but there are rules
/// about mixing 3rd party traits with 3rd party types.
#[derive(Debug, PartialEq)]
struct InputNumber(f64);
impl<'a> Arbitrary<'a> for InputNumber {
fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self, Error> {
let value = f64::arbitrary(u)?;
if value.is_finite() {
Ok(InputNumber(value))
} else {
Err(Error::IncorrectFormat) // REJECT
}
}
#[inline]
fn size_hint(depth: usize) -> (usize, Option<usize>) {
f64::size_hint(depth)
}
}
/// TestInput is basically serde_json::Value, except (a) it has a HashMap and
/// not serde_json's special `Map` structure, and (b) it has `InputNumber`
/// instead of `json_serde::Number` for reasons described above.
#[derive(Debug, PartialEq, Arbitrary)]
enum TestInput {
/// JSON `null`.
Null,
/// JSON `true` / `false`.
Bool(bool),
/// A finite JSON number (see `InputNumber`).
Number(InputNumber),
/// A JSON string.
String(String),
/// A JSON object; keys are arbitrary strings.
Object(HashMap<String, TestInput>),
/// A JSON array.
Array(Vec<TestInput>),
}
/// Convert a `TestInput` tree into the equivalent `serde_json::Value`.
///
/// Used by the fuzz target to turn the `Arbitrary`-generated input into
/// something serde_json can serialize.
///
/// # Panics
/// Panics if a number cannot be represented as a `serde_json::Number`;
/// this should not happen because `InputNumber` only holds finite values.
fn convert(value: &TestInput) -> serde_json::Value {
    match value {
        TestInput::Null => serde_json::Value::Null,
        TestInput::Bool(b) => serde_json::Value::Bool(*b),
        TestInput::Number(n) => serde_json::Value::Number(
            serde_json::Number::from_f64(n.0).expect("Unable to make an f64"),
        ),
        TestInput::String(s) => serde_json::Value::String(s.clone()),
        TestInput::Object(o) => {
            let mut out = serde_json::map::Map::new();
            // `.iter()` rather than `.into_iter()`: we only hold a shared
            // reference, so `into_iter()` was iterating by reference anyway
            // (clippy: into_iter_on_ref).
            for (k, v) in o.iter() {
                out.insert(k.clone(), convert(v));
            }
            serde_json::Value::Object(out)
        }
        TestInput::Array(v) => {
            serde_json::Value::Array(v.iter().map(convert).collect())
        }
    }
}
fuzz_target!(|data: TestInput| {
// Convert the arbitrary TestInput into an arbitrary serde_json::Value,
// then use serde_json to write out arbitrary JSON.
let converted = convert(&data).to_string();
// Parse the JSON that serde_json produced. This fuzz test should ensure
// that we can parse anything that serde_json can produce.
// The parse result is deliberately ignored: the fuzzer only checks that
// the parser does not panic or hang on valid JSON input.
let _ = JsonValue::parse(converted.as_bytes());
});

View file

@ -0,0 +1,10 @@
#![no_main]
// Fuzz target that feeds completely raw bytes to the JSON decoder.
// Unlike `json_only_valid_serde`, the input here is NOT guaranteed to be
// valid JSON (or even valid UTF-8), so this exercises the error paths.
use libfuzzer_sys::fuzz_target;
extern crate fwd;
use fwd::server::refresh::docker::JsonValue;
fuzz_target!(|data: &[u8]| {
// Result ignored on purpose: we only care that parsing arbitrary bytes
// never panics or hangs -- parse errors are expected and fine here.
let _ = JsonValue::parse(data);
});

161
release.py Normal file
View file

@ -0,0 +1,161 @@
"""A script to automate building and uploading a release archive.
This is in python instead of bash because I abhor bash. Even though it's a
little nicer for running commands, it's worse at everything else.
"""
import dataclasses
import enum
import os
import os.path
import pathlib
import shutil
import subprocess
# --- Release configuration -------------------------------------------------
# RELEASE_TAG is optional (no upload happens when it is absent); BUILD picks
# which entry of the build matrix we are producing and is mandatory.
RELEASE_TAG = os.getenv("RELEASE_TAG")
BUILD = os.getenv("BUILD")
if BUILD is None:
    raise Exception("you *must* set the BUILD environment variable")


class Archive(enum.Enum):
    """The kind of archive a build produces."""

    TARBALL = 1
    ZIP = 2
    DEB = 3


@dataclasses.dataclass
class BuildSettings:
    """Per-build knobs for one entry of the release matrix."""

    target: str  # The rust target to build for
    test: bool = True  # Whether or not to run tests
    man_page: bool = True  # Whether or not to generate a man page
    strip: bool = True  # Whether or not to strip binaries
    archive: Archive = Archive.TARBALL  # Archive type
    ext: str = ""  # The file extension of the binary


print(f"doing release: {BUILD}")
build = {
    "linux": BuildSettings(
        target="x86_64-unknown-linux-musl",
    ),
    "debian": BuildSettings(
        target="x86_64-unknown-linux-musl",
        test=False,
        archive=Archive.DEB,
    ),
    "macos": BuildSettings(
        target="x86_64-apple-darwin",
    ),
    "arm-macos": BuildSettings(
        target="aarch64-apple-darwin",
    ),
    "windows": BuildSettings(
        target="x86_64-pc-windows-msvc",
        strip=False,
        man_page=False,
        archive=Archive.ZIP,
        ext=".exe",
    ),
}[BUILD]
print(f"settings: {build}")

# Everything cargo produces for this build lands under target/<triple>/release.
target_dir = pathlib.Path("target") / build.target / "release"
bins = [(target_dir / name).with_suffix(build.ext) for name in ["fwd", "fwd-browse"]]
def build_and_test(staging: pathlib.Path):
    """Compile (and optionally test) the binaries, then copy them into the
    staging directory.

    Relies on the module-level `build` settings and `bins` list.
    """
    # Make sure the (cross-)compilation target is installed.
    subprocess.run(["rustup", "target", "add", build.target], check=True)

    # Run the test suite first unless this build opts out (e.g. debian,
    # which shares a target triple with the plain linux build).
    if build.test:
        subprocess.run(
            ["cargo", "test", "--verbose", "--release", "--target", build.target],
            check=True,
        )

    # Build the release binaries.
    subprocess.run(
        ["cargo", "build", "--verbose", "--release", "--target", build.target],
        check=True,
    )

    # Shrink the binaries where the platform supports it.
    if build.strip:
        for binary in bins:
            subprocess.run(["strip", binary], check=True)

    # Stage the binaries for archiving.
    for binary in bins:
        shutil.copyfile(binary, os.path.join(staging, os.path.basename(binary)))
def build_docs(staging: pathlib.Path):
    """Copy the README into staging and, when this build wants one, render
    the man page from markdown with pandoc."""
    shutil.copyfile("README.md", staging / "README.md")
    if not build.man_page:
        return
    print("Creating man page...")
    proc = subprocess.run(
        ["pandoc", "-s", "-tman", os.path.join("doc", "fwd.man.md")],
        check=True,
        capture_output=True,
        encoding="utf8",
    )
    # pandoc writes the rendered man page to stdout; persist it as fwd.1.
    with open(staging / "fwd.1", "w", encoding="utf-8") as f:
        f.write(proc.stdout)
def build_archive(staging: pathlib.Path) -> pathlib.Path:
    """Package the staging directory into the archive format this build
    calls for, returning the path of the archive produced."""
    print("Creating archive...")
    if build.archive == Archive.ZIP:
        archive = pathlib.Path(f"{staging}.zip")
        subprocess.run(["7z", "a", archive, f"{staging}"], check=True)
        return archive
    if build.archive == Archive.DEB:
        subprocess.run(["cargo", "install", "cargo-deb"], check=True)
        # cargo-deb picks the man page up from the target dir (see the
        # [package.metadata.deb] assets in Cargo.toml).
        shutil.copyfile(staging / "fwd.1", target_dir / "fwd.1")
        subprocess.run(["cargo", "deb", "--target", build.target], check=True)
        # Knowing the deb path means knowing the target version, which we
        # don't reliably have here (the release tag is absent when testing
        # locally), so just find the hopefully singular .deb that we made.
        deb_path = pathlib.Path("target") / build.target / "debian"
        archives = list(deb_path.glob("*.deb"))
        assert len(archives) == 1
        return archives[0]
    assert build.archive == Archive.TARBALL
    archive = pathlib.Path(f"{staging}.tar.gz")
    subprocess.run(["tar", "czf", archive, f"{staging}"], check=True)
    return archive
# --- Drive the release -----------------------------------------------------
staging = pathlib.Path(f"fwd-{build.target}")
os.makedirs(staging, exist_ok=True)
build_and_test(staging)
build_docs(staging)
archive = build_archive(staging)
shutil.rmtree(staging)
assert archive.exists()
if RELEASE_TAG is None:
    # Local / test run: leave the archive on disk for inspection.
    print(f"Not releasing {archive} to github, RELEASE_TAG is none.")
else:
    print(f"Uploading {archive} to github release {RELEASE_TAG}...")
    subprocess.run(
        ["gh", "release", "upload", RELEASE_TAG, archive, "--clobber"],
        check=True,
    )
    # NOTE(review): unlink nesting reconstructed from a whitespace-mangled
    # source -- assumed to run only after a successful upload; confirm.
    os.unlink(archive)

View file

@ -1,15 +1,17 @@
use anyhow::{bail, Result};
use std::collections::hash_map;
use std::collections::HashMap;
use toml::Value;
use toml::value::{Table, Value};
#[derive(Debug, Clone)]
#[cfg_attr(test, derive(PartialEq, Eq))]
pub struct PortConfig {
pub enabled: bool,
pub description: Option<String>,
}
#[derive(Debug, Clone)]
#[cfg_attr(test, derive(PartialEq, Eq))]
pub struct ServerConfig {
auto: bool,
ports: HashMap<u16, PortConfig>,
@ -45,6 +47,7 @@ impl ServerConfig {
}
#[derive(Debug)]
#[cfg_attr(test, derive(PartialEq, Eq))]
pub struct Config {
auto: bool,
servers: HashMap<String, ServerConfig>,
@ -62,13 +65,15 @@ impl Config {
pub fn load_config() -> Result<Config> {
use std::io::ErrorKind;
let mut home = match home::home_dir() {
Some(h) => h,
None => return Ok(default()),
let Some(directories) = directories_next::ProjectDirs::from("", "", "fwd")
else {
return Ok(default());
};
home.push(".fwd");
let contents = match std::fs::read_to_string(home) {
let mut config_path = directories.config_dir().to_path_buf();
config_path.push("config.toml");
let contents = match std::fs::read_to_string(config_path) {
Ok(contents) => contents,
Err(e) => match e.kind() {
ErrorKind::NotFound => return Ok(default()),
@ -83,85 +88,101 @@ fn default() -> Config {
Config { auto: true, servers: HashMap::new() }
}
fn parse_config(value: &Value) -> Result<Config> {
match value {
Value::Table(table) => Ok({
let auto = match table.get("auto") {
None => true,
Some(Value::Boolean(v)) => *v,
Some(v) => bail!("expected a true or false, got {:?}", v),
};
Config { auto, servers: get_servers(table, auto)? }
}),
_ => bail!("top level must be a table"),
fn get_bool(table: &Table, key: &str, default: bool) -> Result<bool> {
match table.get(key) {
None => Ok(default),
Some(Value::Boolean(v)) => Ok(*v),
Some(v) => bail!("expected a true or false, got {v:?}"),
}
}
fn get_servers(
table: &toml::value::Table,
fn parse_config(value: &Value) -> Result<Config> {
let Value::Table(table) = value else {
bail!("top level must be a table")
};
let auto = get_bool(table, "auto", true)?;
let servers = match table.get("servers") {
None => &Table::new(),
Some(Value::Table(t)) => t,
Some(v) => bail!("Expected a table in the servers key, got {v:?}"),
};
Ok(Config {
auto,
servers: parse_servers(servers, auto)?,
})
}
fn parse_servers(
table: &Table,
auto: bool,
) -> Result<HashMap<String, ServerConfig>> {
match table.get("servers") {
None => Ok(HashMap::new()),
Some(Value::Table(table)) => Ok({
let mut servers = HashMap::new();
for (k, v) in table {
servers.insert(k.clone(), get_server(v, auto)?);
}
servers
}),
v => bail!("expected a table in the servers key, got {:?}", v),
let Value::Table(table) = v else {
bail!("expected a table for server {k}, got {v:?}");
};
servers.insert(k.clone(), parse_server(table, auto)?);
}
Ok(servers)
}
fn get_server(value: &Value, auto: bool) -> Result<ServerConfig> {
fn parse_server(table: &Table, auto: bool) -> Result<ServerConfig> {
let auto = get_bool(table, "auto", auto)?;
let ports = match table.get("ports") {
None => HashMap::new(),
Some(v) => parse_ports(v)?,
};
Ok(ServerConfig { auto, ports })
}
fn parse_ports(value: &Value) -> Result<HashMap<u16, PortConfig>> {
match value {
Value::Table(table) => Ok(ServerConfig {
auto: match table.get("auto") {
None => auto, // Default to global default
Some(Value::Boolean(v)) => *v,
Some(v) => bail!("expected true or false, got {:?}", v),
},
ports: get_ports(table)?,
}),
value => bail!("expected a table, got {:?}", value),
Value::Array(array) => {
let mut ports = HashMap::new();
for v in array {
ports.insert(
get_port_number(v)?,
PortConfig { enabled: true, description: None },
);
}
Ok(ports)
}
fn get_ports(table: &toml::value::Table) -> Result<HashMap<u16, PortConfig>> {
match table.get("ports") {
None => Ok(HashMap::new()),
Some(Value::Table(table)) => Ok({
Value::Table(table) => {
let mut ports = HashMap::new();
for (k, v) in table {
let port: u16 = k.parse()?;
let config = match v {
Value::Boolean(enabled) => PortConfig{enabled:*enabled, description:None},
Value::Table(table) => PortConfig{
enabled: match table.get("enabled") {
Some(Value::Boolean(enabled)) => *enabled,
_ => bail!("not implemented"),
},
description: match table.get("description") {
Some(Value::String(desc)) => Some(desc.clone()),
Some(v) => bail!("expect a string description, got {:?}", v),
None => None,
},
},
_ => bail!("expected either a boolean (enabled) or a table for a port config, got {:?}", v),
};
let config = parse_port_config(v)?;
ports.insert(port, config);
}
ports
}),
Some(Value::Array(array)) => Ok({
let mut ports = HashMap::new();
for v in array {
ports.insert(get_port_number(v)?, PortConfig{enabled:true, description:None});
Ok(ports)
}
ports
_ => bail!("ports must be either an array or a table, got {value:?}"),
}
}
/// Parse a single port's configuration value.
///
/// Accepted shapes:
/// - a boolean: shorthand for enabled/disabled with no description;
/// - a string: shorthand for an enabled port with that description;
/// - a table with optional `enabled` (defaults to true) and
///   `description` (must be a string) keys.
///
/// (Fix: a stray `Some(v) => bail!(...)` arm — an `Option` pattern
/// left over from the old revision — sat inside this match over
/// `&Value` and would not compile; it has been removed.)
fn parse_port_config(value: &Value) -> Result<PortConfig> {
    match value {
        Value::Boolean(enabled) => {
            Ok(PortConfig { enabled: *enabled, description: None })
        }
        Value::String(description) => Ok(PortConfig {
            enabled: true,
            description: Some(description.clone()),
        }),
        Value::Table(table) => {
            let enabled = get_bool(table, "enabled", true)?;
            let description = match table.get("description") {
                Some(Value::String(desc)) => Some(desc.clone()),
                Some(v) => bail!("expect a string description, got {v:?}"),
                None => None,
            };
            Ok(PortConfig { enabled, description })
        }
        _ => bail!("expected either a boolean (enabled), a string (description), or a table for a port config, got {value:?}"),
    }
}
@ -172,3 +193,258 @@ fn get_port_number(v: &Value) -> Result<u16> {
};
Ok(port)
}
#[cfg(test)]
mod test {
    use super::*;
    use pretty_assertions::assert_eq;

    /// Shared driver: parse `config` as TOML, run it through
    /// `parse_config`, and require an exact structural match with
    /// `expected`.
    fn config_test(config: &str, expected: Config) {
        let config = config.parse::<Value>().expect("case not toml");
        let config = parse_config(&config).expect("unable to parse config");
        assert_eq!(expected, config);
    }

    /// Shared driver for failure cases: the input must be valid TOML
    /// but must be rejected by `parse_config`.
    fn config_error_test(config: &str) {
        let config = config.parse::<Value>().expect("case not toml");
        assert!(parse_config(&config).is_err());
    }

    // An empty document yields the defaults: auto on, no servers.
    #[test]
    fn empty() {
        config_test("", Config { auto: true, servers: HashMap::new() });
    }

    // The top-level `auto` flag can be turned off.
    #[test]
    fn auto_false() {
        config_test(
            "
auto=false
",
            Config { auto: false, servers: HashMap::new() },
        );
    }

    // `auto` must be a boolean; a string is rejected.
    #[test]
    fn auto_not_boolean() {
        config_error_test(
            "
auto='what is going on'
",
        );
    }

    // `servers` must be a table, not a scalar.
    #[test]
    fn servers_not_table() {
        config_error_test("servers=1234");
    }

    // An empty server table inherits the global defaults.
    #[test]
    fn servers_default() {
        config_test("servers.foo={}", {
            let mut servers = HashMap::new();
            servers.insert(
                "foo".to_string(),
                ServerConfig { auto: true, ports: HashMap::new() },
            );
            Config { auto: true, servers }
        })
    }

    // A server can disable `auto` for itself without touching the
    // global default.
    #[test]
    fn servers_auto_false() {
        config_test(
            "
[servers.foo]
auto=false
",
            {
                let mut servers = HashMap::new();
                servers.insert(
                    "foo".to_string(),
                    ServerConfig { auto: false, ports: HashMap::new() },
                );
                Config { auto: true, servers }
            },
        )
    }

    // A server-level `auto` must also be a boolean.
    #[test]
    fn servers_auto_not_bool() {
        config_error_test(
            "
[servers.foo]
auto=1234
",
        )
    }

    // Array form: every listed port becomes enabled with no
    // description.
    #[test]
    fn servers_ports_list() {
        config_test(
            "
[servers.foo]
ports=[1,2,3]
",
            {
                let mut servers = HashMap::new();
                servers.insert(
                    "foo".to_string(),
                    ServerConfig {
                        auto: true,
                        ports: {
                            let mut ports = HashMap::new();
                            ports.insert(
                                1,
                                PortConfig { enabled: true, description: None },
                            );
                            ports.insert(
                                2,
                                PortConfig { enabled: true, description: None },
                            );
                            ports.insert(
                                3,
                                PortConfig { enabled: true, description: None },
                            );
                            ports
                        },
                    },
                );
                Config { auto: true, servers }
            },
        )
    }

    // Table form: a bare boolean and an `{enabled=...}` table are
    // equivalent ways to toggle a port.
    #[test]
    fn servers_ports_table_variations() {
        config_test(
            "
[servers.foo.ports]
1=true
2={enabled=false}
3=false
",
            {
                let mut servers = HashMap::new();
                servers.insert(
                    "foo".to_string(),
                    ServerConfig {
                        auto: true,
                        ports: {
                            let mut ports = HashMap::new();
                            ports.insert(
                                1,
                                PortConfig { enabled: true, description: None },
                            );
                            ports.insert(
                                2,
                                PortConfig {
                                    enabled: false,
                                    description: None,
                                },
                            );
                            ports.insert(
                                3,
                                PortConfig {
                                    enabled: false,
                                    description: None,
                                },
                            );
                            ports
                        },
                    },
                );
                Config { auto: true, servers }
            },
        )
    }

    // A `description` key comes through, and `enabled` defaults to
    // true when only a description is given.
    #[test]
    fn servers_ports_table_descriptions() {
        config_test(
            "
[servers.foo.ports]
1={enabled=false}
2={description='humble'}
",
            {
                let mut servers = HashMap::new();
                servers.insert(
                    "foo".to_string(),
                    ServerConfig {
                        auto: true,
                        ports: {
                            let mut ports = HashMap::new();
                            ports.insert(
                                1,
                                PortConfig {
                                    enabled: false,
                                    description: None,
                                },
                            );
                            ports.insert(
                                2,
                                PortConfig {
                                    enabled: true,
                                    description: Some("humble".to_string()),
                                },
                            );
                            ports
                        },
                    },
                );
                Config { auto: true, servers }
            },
        )
    }

    // A raw string is shorthand for an enabled port with that
    // description.
    #[test]
    fn servers_ports_raw_desc() {
        config_test(
            "
[servers.foo.ports]
1='humble'
",
            {
                let mut servers = HashMap::new();
                servers.insert(
                    "foo".to_string(),
                    ServerConfig {
                        auto: true,
                        ports: {
                            let mut ports = HashMap::new();
                            ports.insert(
                                1,
                                PortConfig {
                                    enabled: true,
                                    description: Some("humble".to_string()),
                                },
                            );
                            ports
                        },
                    },
                );
                Config { auto: true, servers }
            },
        )
    }

    // A server with no `auto` of its own inherits the top-level value.
    #[test]
    fn servers_inherit_auto() {
        config_test(
            "
auto=false
servers.foo={}
",
            {
                let mut servers = HashMap::new();
                servers.insert(
                    "foo".to_string(),
                    ServerConfig { auto: false, ports: HashMap::new() },
                );
                Config { auto: false, servers }
            },
        )
    }
}

View file

@ -14,15 +14,8 @@ use crossterm::{
},
};
use log::{error, info, warn, Level, Metadata, Record};
use std::collections::vec_deque::VecDeque;
use std::collections::{HashMap, HashSet};
use std::io::stdout;
use std::sync::{Arc, Mutex};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio_stream::StreamExt;
use tui::{
backend::{Backend, CrosstermBackend},
use ratatui::{
backend::CrosstermBackend,
layout::{Constraint, Direction, Layout, Margin, Rect},
style::{Color, Modifier, Style},
widgets::{
@ -30,6 +23,13 @@ use tui::{
},
Frame, Terminal,
};
use std::collections::vec_deque::VecDeque;
use std::collections::{HashMap, HashSet};
use std::io::stdout;
use std::sync::{Arc, Mutex};
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio_stream::StreamExt;
pub enum UIEvent {
Connected(u16),
@ -155,6 +155,16 @@ impl Listener {
""
}
/// True for "anonymous" ports: ones that have no local configuration
/// and that arrived from the server with an empty description.
pub fn is_anonymous(&self) -> bool {
    // A locally configured port is never anonymous.
    if self.config.is_some() {
        return false;
    }
    // Only a port that actually came from the server (a desc is
    // present) with an empty description string counts as anonymous.
    self.desc.as_ref().map_or(false, |d| d.desc.is_empty())
}
/// Snapshot the listener's current state from behind its mutex.
fn state(&self) -> State {
    let guard = self.state.lock().unwrap();
    *guard
}
@ -223,6 +233,7 @@ pub struct UI {
show_help: bool,
alternate_screen: bool,
raw_mode: bool,
show_anonymous: bool,
clipboard: Option<ClipboardContext>,
}
@ -247,6 +258,7 @@ impl UI {
config,
alternate_screen: false,
raw_mode: false,
show_anonymous: true,
clipboard,
}
}
@ -301,7 +313,7 @@ impl UI {
Ok(code)
}
fn render_connected<T: Backend>(&mut self, frame: &mut Frame<T>) {
fn render_connected(&mut self, frame: &mut Frame) {
let constraints = if self.show_logs {
vec![Constraint::Percentage(50), Constraint::Percentage(50)]
} else {
@ -311,7 +323,7 @@ impl UI {
let chunks = Layout::default()
.direction(Direction::Vertical)
.constraints(constraints)
.split(frame.size());
.split(frame.area());
self.render_ports(frame, chunks[0]);
if self.show_logs {
@ -322,11 +334,11 @@ impl UI {
}
}
fn render_ports<B: Backend>(&mut self, frame: &mut Frame<B>, size: Rect) {
let enabled_port_style = Style::default();
let disabled_port_style = Style::default().fg(Color::DarkGray);
fn render_ports(&mut self, frame: &mut Frame, size: Rect) {
let enabled_port_style = Style::reset();
let disabled_port_style = Style::reset().fg(Color::DarkGray);
let broken_port_style =
Style::default().fg(Color::Red).add_modifier(Modifier::DIM);
Style::reset().fg(Color::Red).add_modifier(Modifier::DIM);
let mut rows = Vec::new();
let ports = self.get_ui_ports();
@ -334,6 +346,10 @@ impl UI {
ports.iter().map(|p| format!("{p}")).collect();
for (index, port) in ports.into_iter().enumerate() {
let listener = self.ports.get(&port).unwrap();
if !self.should_render_listener(listener) {
continue;
}
let (symbol, style) = match listener.state() {
State::Enabled => ("", enabled_port_style),
State::Broken => ("", broken_port_style),
@ -358,17 +374,16 @@ impl UI {
Constraint::Length(size.width),
];
let port_list = Table::new(rows)
let port_list = Table::new(rows, &widths)
.header(Row::new(vec!["fwd", "Port", "Description"]))
.block(Block::default().title("Ports").borders(Borders::ALL))
.column_spacing(1)
.widths(&widths)
.highlight_symbol(">> ");
frame.render_stateful_widget(port_list, size, &mut self.selection);
}
fn render_help<B: Backend>(&mut self, frame: &mut Frame<B>) {
fn render_help(&mut self, frame: &mut Frame) {
let keybindings = vec![
Row::new(vec!["↑ / k", "Move cursor up"]),
Row::new(vec!["↓ / j", "Move cursor down"]),
@ -380,6 +395,7 @@ impl UI {
Row::new(vec!["ESC / q", "Quit"]),
Row::new(vec!["? / h", "Show this help text"]),
Row::new(vec!["l", "Show fwd's logs"]),
Row::new(vec!["a", "Hide/show anonymous ports"]),
];
let border_lines = 3;
@ -387,10 +403,10 @@ impl UI {
let help_popup_area = centered_rect(
65,
keybindings.len() as u16 + border_lines,
frame.size(),
frame.area(),
);
let inner_area =
help_popup_area.inner(&Margin { vertical: 1, horizontal: 1 });
help_popup_area.inner(Margin { vertical: 1, horizontal: 1 });
let key_width = 7;
let binding_width = inner_area.width.saturating_sub(key_width);
@ -398,16 +414,16 @@ impl UI {
Constraint::Length(key_width),
Constraint::Length(binding_width),
];
let keybindings = Table::new(keybindings)
.widths(keybindings_widths)
let keybindings = Table::new(keybindings, keybindings_widths)
.column_spacing(1)
.block(Block::default().title("Keys").borders(Borders::ALL));
.block(Block::default().title("Keys").borders(Borders::ALL))
.style(Style::reset());
// keybindings
frame.render_widget(keybindings, inner_area);
}
fn render_logs<B: Backend>(&mut self, frame: &mut Frame<B>, size: Rect) {
fn render_logs(&mut self, frame: &mut Frame, size: Rect) {
let items: Vec<_> =
self.lines.iter().map(|l| ListItem::new(&l[..])).collect();
@ -463,7 +479,7 @@ impl UI {
fn enter_alternate_screen(&mut self) -> Result<()> {
if !self.alternate_screen {
enable_raw_mode()?;
execute!(stdout(), EnterAlternateScreen, DisableLineWrap)?;
execute!(stdout(), EnterAlternateScreen, DisableLineWrap,)?;
self.alternate_screen = true;
}
Ok(())
@ -478,6 +494,19 @@ impl UI {
Ok(())
}
/// Flip whether anonymous ports are displayed in the port list.
fn toggle_show_anonymous(&mut self) {
    // XOR with true inverts the flag in place.
    self.show_anonymous ^= true;
}
/// Decide whether a listener appears in the rendered port table.
///
/// Named/configured ports always render, and everything renders when
/// `show_anonymous` is on. An anonymous port with anonymous display
/// turned off still renders unless it is currently Disabled (enabled
/// or broken ports stay visible).
fn should_render_listener(&self, listener: &Listener) -> bool {
    if !listener.is_anonymous() || self.show_anonymous {
        return true;
    }
    listener.state() != State::Disabled
}
async fn handle_events(&mut self, console_events: &mut EventStream) {
tokio::select! {
ev = console_events.next() => self.handle_console_event(ev),
@ -585,6 +614,10 @@ impl UI {
_ = open::that(format!("http://127.0.0.1:{}/", p));
}
}
KeyEvent { code: KeyCode::Char('a'), .. } => {
self.toggle_show_anonymous()
}
_ => (),
},
Some(Ok(_)) => (), // Don't care about this event...
@ -1251,4 +1284,118 @@ mod tests {
drop(sender);
}
#[test]
fn listener_anonymous() {
    // Port zoo for Listener::is_anonymous():
    //  - 8079: configured locally (so never anonymous),
    //  - 8080: unconfigured but described by the server,
    //  - 8081/8082: unconfigured with empty descriptions.
    let (sender, receiver) = mpsc::channel(64);
    let mut config = ServerConfig::default();
    config.insert(
        8079,
        PortConfig {
            enabled: false,
            description: Some("body once told me".to_string()),
        },
    );
    let mut ui = UI::new(receiver, config);
    ui.handle_internal_event(Some(UIEvent::Ports(vec![
        PortDesc {
            port: 8080,
            desc: "python3 blaster.py".to_string(),
        },
        PortDesc { port: 8081, desc: "".to_string() },
        PortDesc { port: 8082, desc: "".to_string() },
    ])));
    // (Pretend that 8082 broke.)
    ui.ports.get_mut(&8082).unwrap().state = State::Broken.boxed();
    let listener = ui.ports.get(&8079).unwrap();
    assert!(
        !listener.is_anonymous(),
        "Configured ports should not be anonymous"
    );
    let listener = ui.ports.get(&8080).unwrap();
    assert!(
        !listener.is_anonymous(),
        "Ports with descriptions should not be anonymous"
    );
    let listener = ui.ports.get(&8081).unwrap();
    assert!(
        listener.is_anonymous(),
        "Not configured, disabled, no description should be anonymous"
    );
    // Keep the sender alive until here; dropping it earlier would close
    // the UI's event channel mid-test.
    drop(sender);
}
#[test]
fn render_anonymous() {
    // Same port zoo as listener_anonymous (plus 8083), with anonymous
    // display toggled OFF; checks which listeners the UI would render.
    let (sender, receiver) = mpsc::channel(64);
    let mut config = ServerConfig::default();
    config.insert(
        8079,
        PortConfig {
            enabled: false,
            description: Some("body once told me".to_string()),
        },
    );
    let mut ui = UI::new(receiver, config);
    ui.handle_internal_event(Some(UIEvent::Ports(vec![
        PortDesc {
            port: 8080,
            desc: "python3 blaster.py".to_string(),
        },
        PortDesc { port: 8081, desc: "".to_string() },
        PortDesc { port: 8082, desc: "".to_string() },
        PortDesc { port: 8083, desc: "".to_string() },
    ])));
    // (Pretend that 8082 broke.)
    ui.ports.get_mut(&8082).unwrap().state = State::Broken.boxed();
    // No showing anonymous ports!
    ui.show_anonymous = false;
    let listener = ui.ports.get(&8079).unwrap();
    assert!(
        ui.should_render_listener(listener),
        "Configured ports should always be rendered"
    );
    let listener = ui.ports.get(&8080).unwrap();
    assert!(
        ui.should_render_listener(listener),
        "Ports with descriptions should be rendered"
    );
    let listener = ui.ports.get(&8081).unwrap();
    assert!(
        !ui.should_render_listener(listener),
        "Not configured, disabled, no description should be hidden"
    );
    // Enabling a hidden anonymous port must make it visible again.
    ui.enable_disable_port(8081);
    let listener = ui.ports.get(&8081).unwrap();
    assert_eq!(listener.state(), State::Enabled);
    assert!(
        ui.should_render_listener(listener),
        "Enabled ports should be rendered"
    );
    // Broken ports stay visible even when anonymous display is off.
    let listener = ui.ports.get(&8082).unwrap();
    assert_eq!(listener.state(), State::Broken);
    assert!(
        ui.should_render_listener(listener),
        "Broken ports should be rendered"
    );
    drop(sender);
}
}

View file

@ -1,7 +1,7 @@
mod client;
mod message;
mod reverse;
mod server;
pub mod server;
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
pub const REV: &str = env!("REPO_REV");

View file

@ -19,10 +19,10 @@ to send the contents of `file`.
Options:
--version Print the version of fwd and exit
--sudo, -s Run the server side of fwd with `sudo`. This allows the
client to identify the ports that are open by processes
being run under other accounts (e.g., docker containers
being run as root), but requires sudo access on the server
and *might* end up forwarding ports that you do not want
forwarded (e.g., port 22 for sshd, or port 53 for systemd.)
--log-filter FILTER
Set remote server's log level. Default is `warn`. Supports

View file

@ -49,13 +49,14 @@ pub fn socket_path() -> Result<PathBuf> {
}
fn socket_directory() -> Result<std::path::PathBuf> {
let base_directories = xdg::BaseDirectories::new()
.context("Error creating BaseDirectories")?;
match base_directories.place_runtime_file("fwd") {
Ok(path) => Ok(path),
Err(_) => {
match directories_next::ProjectDirs::from("", "", "fwd")
.and_then(|p| p.runtime_dir().map(|p| p.to_path_buf()))
{
Some(p) => Ok(p),
None => {
let mut path = std::env::temp_dir();
path.push(format!("fwd{}", users::get_current_uid()));
let uid = unsafe { libc::getuid() };
path.push(format!("fwd{}", uid));
Ok(path)
}
}
@ -116,7 +117,7 @@ async fn handle_connection(
mod tests {
use super::*;
use crate::message::MessageWriter;
use tempdir::TempDir;
use tempfile::TempDir;
#[test]
fn socket_path_repeats() {
@ -130,8 +131,8 @@ mod tests {
async fn url_to_message() {
let (sender, mut receiver) = mpsc::channel(64);
let tmp_dir =
TempDir::new("url_to_message").expect("Error getting tmpdir");
let tmp_dir = TempDir::with_prefix("url_to_message")
.expect("Error getting tmpdir");
let path = tmp_dir.path().join("socket");
let path_override = path.clone();

View file

@ -5,7 +5,7 @@ use log::{error, warn};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, BufReader, BufWriter};
use tokio::sync::mpsc;
mod refresh;
pub mod refresh;
// We drive writes through an mpsc queue, because we not only handle requests
// and responses from the client (refresh ports and the like) but also need

View file

@ -10,7 +10,7 @@ use crate::message::PortDesc;
mod procfs;
#[cfg(unix)]
mod docker;
pub mod docker;
pub async fn get_entries(_send_anonymous: bool) -> Result<Vec<PortDesc>> {
#[cfg_attr(not(target_os = "linux"), allow(unused_mut))]

View file

@ -1,4 +1,5 @@
use anyhow::{bail, Context, Result};
use log::trace;
use std::collections::HashMap;
use tokio::io::{
AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt,
@ -19,6 +20,7 @@ Host: localhost\r\n\
User-Agent: fwd/1.0\r\n\
Accept: */*\r\n\
\r\n";
let mut stream = tokio::io::BufStream::new(stream);
stream.write_all(DOCKER_LIST_CONTAINERS).await?;
stream.flush().await?;
@ -26,28 +28,84 @@ Accept: */*\r\n\
// Check the HTTP response.
let mut line = String::new();
stream.read_line(&mut line).await?;
trace!("[docker] {}", &line.trim_end());
let parts: Vec<&str> = line.split(" ").collect();
if parts.len() < 2 || parts[1] != "200" {
bail!("Error response from docker: {line}");
bail!("Error response from docker: {line:?}");
}
// Process the headers; all we really care about is content-length.
let mut content_length: usize = 0;
// Process the headers; all we really care about is content-length or content-encoding.
let mut content_length: Option<usize> = None;
let mut chunked = false;
loop {
line.clear();
stream.read_line(&mut line).await?;
trace!("[docker] {}", line.trim_end());
if line.trim().is_empty() {
break;
}
line.make_ascii_lowercase();
if let Some(rest) = line.strip_prefix("content-length: ") {
content_length = rest.trim().parse()?;
content_length = Some(rest.trim().parse()?);
}
if let Some(rest) = line.strip_prefix("transfer-encoding: ") {
chunked = rest.trim() == "chunked";
}
}
// Read the JSON response.
let mut response_buffer = vec![0; content_length];
let mut response_buffer = vec![0; content_length.unwrap_or(0)];
if content_length.is_some() {
stream.read_exact(&mut response_buffer).await?;
} else if chunked {
// Docker will send a chunked encoding if the response seems too big to do
// all at once. I don't know the heuristic it uses but we need to deal with
// it. Fortunately chunked encoding is not too bad?
loop {
line.clear();
stream.read_line(&mut line).await?;
// This is the hex length of the thing.
let Some(chunk_length) = line.split(";").next() else {
bail!("Can't make sense of chunk length line: {line:?}");
};
let Ok(chunk_length) =
usize::from_str_radix(chunk_length.trim(), 16)
else {
bail!("Cannot interpret chunk length '{chunk_length}' as hex (Full line: {line:?})");
};
if chunk_length > 0 {
let old_length = response_buffer.len();
let new_length = old_length + chunk_length;
response_buffer.resize(new_length, 0);
stream
.read_exact(&mut response_buffer[old_length..new_length])
.await?;
}
let mut eol: [u8; 2] = [0, 0];
stream.read_exact(&mut eol).await?;
if eol[0] != b'\r' || eol[1] != b'\n' {
bail!("Mal-formed end-of-chunk marker from server");
}
if chunk_length == 0 {
break; // All done.
}
}
} else {
trace!("Docker did not send a content_length, just reading to the end");
stream.read_to_end(&mut response_buffer).await?;
}
if log::log_enabled!(log::Level::Trace) {
match std::str::from_utf8(&response_buffer) {
Ok(s) => trace!("[docker][{}b] {}", s.len(), s),
Err(_) => trace!(
"[docker][{}b, raw] {:?}",
response_buffer.len(),
&response_buffer
),
}
}
// Done with the stream.
Ok(response_buffer)
@ -56,6 +114,7 @@ Accept: */*\r\n\
async fn list_containers() -> Result<Vec<u8>> {
let host = std::env::var("DOCKER_HOST")
.unwrap_or_else(|_| DEFAULT_DOCKER_HOST.to_string());
trace!("[docker] Connecting to {host}");
match host {
h if h.starts_with("unix://") => {
let socket_path = &h[7..];
@ -77,7 +136,7 @@ async fn list_containers() -> Result<Vec<u8>> {
}
#[derive(Debug, PartialEq)]
enum JsonValue {
pub enum JsonValue {
Null,
True,
False,
@ -105,8 +164,14 @@ impl JsonValue {
/// Parse a JSON blob, attaching the full offending input to any error.
///
/// On failure the input goes into the error context: as text (with its
/// byte length) when it is valid UTF-8, otherwise as raw bytes.
///
/// (Fix: the old revision's `Ok`/`Err` match arms were left interleaved
/// with the new ones, producing duplicate unreachable patterns; the
/// superseded arms have been removed.)
pub fn parse(blob: &[u8]) -> Result<Self> {
    Self::parse_impl(blob).with_context(|| {
        match std::str::from_utf8(blob) {
            Ok(s) => format!("Failed to parse {} bytes: '{}'", s.len(), s),
            Err(_) => {
                format!(
                    "Failed to parse {} bytes (not utf-8): {:?}",
                    blob.len(),
                    blob
                )
            }
        }
    })
}
@ -201,7 +266,7 @@ impl JsonValue {
}
i += 1;
}
if i == blob.len() {
if i >= blob.len() {
bail!("Unterminated string at {i}");
}
assert_eq!(blob[i], b'"');
@ -295,10 +360,11 @@ impl JsonValue {
}
}
match stack.pop().expect("underflow somehow") {
Tok::Val(v) => Ok(v),
Tok::StartObject => bail!("unterminated object"),
Tok::StartArray => bail!("unterminated array"),
match stack.pop() {
Some(Tok::Val(v)) => Ok(v),
Some(Tok::StartObject) => bail!("unterminated object"),
Some(Tok::StartArray) => bail!("unterminated array"),
None => bail!("No JSON found in input"),
}
}
@ -501,6 +567,11 @@ mod test {
}
}
#[test]
pub fn json_decode_empty() {
    // Whitespace-only input contains no JSON value at all and must be
    // rejected rather than silently producing something.
    let result = JsonValue::parse(b" ");
    assert!(result.is_err());
}
#[test]
pub fn json_decode_docker() {
use pretty_assertions::assert_eq;
@ -862,4 +933,97 @@ mod test {
]);
assert_eq!(result, expected);
}
#[test]
pub fn json_decode_unterminated_string_with_escape() {
    // Regression test: input that ends mid-escape inside an
    // unterminated string. The parse result is deliberately ignored —
    // the test passes as long as parse() returns (i.e. does not panic
    // or read past the end of the buffer).
    let input = b"\"\\";
    let _ = JsonValue::parse(input);
}
/// Test helper: accept exactly one connection on `listener`, consume
/// the incoming request up to the blank line that ends the HTTP
/// headers, then write `response` verbatim and flush.
///
/// All failures panic via expect(), which is fine for a test server.
async fn accept_and_send_single_response(
    listener: tokio::net::TcpListener,
    response: &[u8],
) {
    println!("[server] Awaiting connection...");
    let (stream, _) = listener
        .accept()
        .await
        .expect("Unable to accept connection");
    let mut stream = tokio::io::BufStream::new(stream);
    println!("[server] Reading request...");
    let mut line = String::new();
    loop {
        // Discard request lines until the empty line that terminates
        // the header block; any request body is ignored.
        line.clear();
        stream
            .read_line(&mut line)
            .await
            .expect("Unable to read line in server");
        if line.trim().is_empty() {
            break;
        }
    }
    println!("[server] Sending response...");
    stream
        .write_all(response)
        .await
        .expect("Unable to write response");
    stream.flush().await.expect("Unable to flush");
    println!("[server] Done.");
}
#[tokio::test]
pub async fn docker_chunked_transfer_encoding() {
    // Spin up a throwaway local HTTP server that answers with a
    // chunked response (the classic "Wikipedia in chunks" example)
    // and verify that list_containers_with_connection reassembles the
    // chunks into the plain body.
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
        .await
        .expect("Unable to create listener on localhost");
    let port = listener.local_addr().unwrap().port();
    let mut set = tokio::task::JoinSet::new();
    set.spawn(async move {
        const RESPONSE: &[u8] = b"\
HTTP/1.1 200 OK\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
4\r\nWiki\r\n7\r\npedia i\r\nB\r\nn \r\nchunks.\r\n0\r\n\r\n";
        accept_and_send_single_response(listener, RESPONSE).await;
    });
    let addr = format!("127.0.0.1:{port}");
    let stream = tokio::net::TcpStream::connect(&addr)
        .await
        .expect("Unable to connect");
    let response = list_containers_with_connection(stream)
        .await
        .expect("Unable to get response");
    // NOTE(review): the spawned server task in `set` is never joined,
    // so a panic inside it would only surface indirectly here.
    assert_eq!(&response, b"Wikipedia in \r\nchunks.");
}
#[tokio::test]
pub async fn docker_with_no_content_length() {
    // A response with neither Content-Length nor chunked encoding:
    // the client should fall back to reading until EOF and return the
    // body verbatim (including the trailing CRLF).
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
        .await
        .expect("Unable to create listener on localhost");
    let port = listener.local_addr().unwrap().port();
    let mut set = tokio::task::JoinSet::new();
    set.spawn(async move {
        const RESPONSE: &[u8] = b"\
HTTP/1.1 200 OK\r\n\
\r\n\
[\"Booo this is some data\"]\r\n";
        accept_and_send_single_response(listener, RESPONSE).await;
    });
    let addr = format!("127.0.0.1:{port}");
    let stream = tokio::net::TcpStream::connect(&addr)
        .await
        .expect("Unable to connect");
    let response = list_containers_with_connection(stream)
        .await
        .expect("Unable to get response");
    // NOTE(review): as above, the server task is never joined.
    assert_eq!(&response, b"[\"Booo this is some data\"]\r\n");
}
}