Mirror of https://github.com/sshuttle/sshuttle.git, synced 2025-06-20 09:57:42 +02:00

Compare commits: master...sshuttle-0 (17 commits)

SHA1:
29d2e06bf5
bff1610050
cce6a9d96d
5743f29ed6
42bc6d62db
274ee854d4
12f6a52ec6
e737f4b944
d9f761a8a3
bd20841782
4c1a505e37
41d1f73dc2
cbc32ff8d8
6698992f4f
e2c682084c
89e914e9d1
2268e76771
.github/dependabot.yml (vendored): file deleted (13 lines)

@@ -1,13 +0,0 @@
version: 2
enable-beta-ecosystems: true
updates:
  - package-ecosystem: uv
    directory: "/"
    schedule:
      interval: daily
    open-pull-requests-limit: 10
  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: daily
    open-pull-requests-limit: 10
.github/workflows/codeql.yml (vendored): file deleted (70 lines)

@@ -1,70 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ master ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master ]
  schedule:
    - cron: '31 21 * * 3'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://git.io/codeql-language-support

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v3

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      #    and modify them (or add more) to build your code if your project
      #    uses a compiled language

      #- run: |
      #   make bootstrap
      #   make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
.github/workflows/pythonpackage.yml (vendored): file deleted (38 lines)

@@ -1,38 +0,0 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Python package

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
  workflow_dispatch: {}

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12"]
        poetry-version: ["main"]
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.4.30"
          enable-cache: true
          cache-dependency-glob: "uv.lock"
      - name: Install the project
        run: uv sync --all-extras --dev
      - name: Lint with flake8
        run: uv run flake8 sshuttle tests --count --show-source --statistics
      - name: Run the automated tests
        run: uv run pytest -v
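The two checks this deleted workflow ran (flake8 lint and the pytest suite, both through uv) can also be run by hand. Below is a minimal Python sketch of the same sequence, assuming uv is installed locally and `uv sync --all-extras --dev` has already been run; it is an illustration, not part of the repository:

    import subprocess

    # Same commands as the "Lint with flake8" and "Run the automated tests"
    # steps above; check=True makes a failing command raise CalledProcessError.
    for cmd in (
        ["uv", "run", "flake8", "sshuttle", "tests",
         "--count", "--show-source", "--statistics"],
        ["uv", "run", "pytest", "-v"],
    ):
        subprocess.run(cmd, check=True)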
.github/workflows/release-please.yml (vendored): file deleted (66 lines)

@@ -1,66 +0,0 @@
on:
  push:
    branches:
      - master

name: release-please

jobs:

  release-please:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    outputs:
      release_created: ${{ steps.release.outputs.release_created }}
      tag_name: ${{ steps.release.outputs.tag_name }}
    steps:
      - uses: googleapis/release-please-action@v4
        id: release
        with:
          token: ${{ secrets.MY_RELEASE_PLEASE_TOKEN }}
          release-type: python

  build-pypi:
    name: Build for pypi
    needs: [release-please]
    if: ${{ needs.release-please.outputs.release_created == 'true' }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.12
        uses: actions/setup-python@v5
        with:
          python-version: 3.12
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.4.30"
          enable-cache: true
          cache-dependency-glob: "uv.lock"
      - name: Build project
        run: uv build
      - name: Store the distribution packages
        uses: actions/upload-artifact@v4
        with:
          name: python-package-distributions
          path: dist/

  upload-pypi:
    name: Upload to pypi
    needs: [build-pypi]
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/p/sshuttle
    permissions:
      id-token: write
    steps:
      - name: Download all the dists
        uses: actions/download-artifact@v4
        with:
          name: python-package-distributions
          path: dist/
      - name: Publish package distributions to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
.gitignore (vendored): 14 lines changed

@@ -1,20 +1,6 @@
-/tmp/
-/.coverage
-/.cache/
-/.eggs/
-/.tox/
-/build/
-/dist/
-/sshuttle.egg-info/
-/docs/_build/
 *.pyc
 *~
 *.8
 /.do_built
 /.do_built.dir
 /.redo
-/.pytest_cache/
-/.python-version
-/.direnv/
-/result
-/.vscode/
(deleted file, name not shown): 24 lines

@@ -1,24 +0,0 @@
strictness: medium

pylint:
  disable:
    - too-many-statements
    - too-many-locals
    - too-many-function-args
    - too-many-arguments
    - too-many-branches
    - bare-except
    - protected-access
    - no-else-return
    - unused-argument
    - method-hidden
    - arguments-differ
    - wrong-import-position
    - raising-bad-type

pep8:
  options:
    max-line-length: 79

mccabe:
  run: false
(deleted file, name not shown): 13 lines

@@ -1,13 +0,0 @@
version: 2

build:
  os: ubuntu-20.04
  tools:
    python: "3.10"
  jobs:
    post_install:
      - pip install uv
      - UV_PROJECT_ENVIRONMENT=$READTHEDOCS_VIRTUALENV_PATH uv sync --all-extras --group docs --link-mode=copy

sphinx:
  configuration: docs/conf.py
(deleted file, name not shown): 1 line

@@ -1 +0,0 @@
python 3.10.6
CHANGELOG.md: file deleted (54 lines)

@@ -1,54 +0,0 @@
# Changelog

## [1.3.1](https://github.com/sshuttle/sshuttle/compare/v1.3.0...v1.3.1) (2025-03-25)


### Bug Fixes

* add pycodestyle config ([5942376](https://github.com/sshuttle/sshuttle/commit/5942376090395d0a8dfe38fe012a519268199341))
* add python lint tools ([ae3c022](https://github.com/sshuttle/sshuttle/commit/ae3c022d1d67de92f1c4712d06eb8ae76c970624))
* correct bad version number at runtime ([7b66253](https://github.com/sshuttle/sshuttle/commit/7b662536ba92d724ed8f86a32a21282fea66047c))
* Restore "nft" method ([375810a](https://github.com/sshuttle/sshuttle/commit/375810a9a8910a51db22c9fe4c0658c39b16c9e7))

## [1.3.0](https://github.com/sshuttle/sshuttle/compare/v1.2.0...v1.3.0) (2025-02-23)


### Features

* switch to a network namespace on Linux ([8a123d9](https://github.com/sshuttle/sshuttle/commit/8a123d9762b84f168a8ca8c75f73e590954e122d))


### Bug Fixes

* prevent UnicodeDecodeError parsing iptables rule with comments ([cbe3d1e](https://github.com/sshuttle/sshuttle/commit/cbe3d1e402cac9d3fbc818fe0cb8a87be2e94348))
* remove temp build hack ([1f5e6ce](https://github.com/sshuttle/sshuttle/commit/1f5e6cea703db33761fb1c3f999b9624cf3bc7ad))
* support ':' sign in password ([7fa927e](https://github.com/sshuttle/sshuttle/commit/7fa927ef8ceea6b1b2848ca433b8b3e3b63f0509))


### Documentation

* replace nix-env with nix-shell ([340ccc7](https://github.com/sshuttle/sshuttle/commit/340ccc705ebd9499f14f799fcef0b5d2a8055fb4))
* update installation instructions ([a2d405a](https://github.com/sshuttle/sshuttle/commit/a2d405a6a7f9d1a301311a109f8411f2fe8deb37))

## [1.2.0](https://github.com/sshuttle/sshuttle/compare/v1.1.2...v1.2.0) (2025-02-07)


### Features

* Add release-please to build workflow ([d910b64](https://github.com/sshuttle/sshuttle/commit/d910b64be77fd7ef2a5f169b780bfda95e67318d))


### Bug Fixes

* Add support for Python 3.11 and Python 3.11 ([a3396a4](https://github.com/sshuttle/sshuttle/commit/a3396a443df14d3bafc3d25909d9221aa182b8fc))
* bad file descriptor error in windows, fix pytest errors ([d4d0fa9](https://github.com/sshuttle/sshuttle/commit/d4d0fa945d50606360aa7c5f026a0f190b026c68))
* drop Python 3.8 support ([1084c0f](https://github.com/sshuttle/sshuttle/commit/1084c0f2458c1595b00963b3bd54bd667e4cfc9f))
* ensure poetry works for Python 3.9 ([693ee40](https://github.com/sshuttle/sshuttle/commit/693ee40c485c70f353326eb0e8f721f984850f5c))
* fix broken workflow_dispatch CI rule ([4b6f7c6](https://github.com/sshuttle/sshuttle/commit/4b6f7c6a656a752552295863092d3b8af0b42b31))
* Remove more references to legacy Python versions ([339b522](https://github.com/sshuttle/sshuttle/commit/339b5221bc33254329f79f2374f6114be6f30aed))
* replace requirements.txt files with poetry ([85dc319](https://github.com/sshuttle/sshuttle/commit/85dc3199a332f9f9f0e4c6037c883a8f88dc09ca))
* replace requirements.txt files with poetry (2) ([d08f78a](https://github.com/sshuttle/sshuttle/commit/d08f78a2d9777951d7e18f6eaebbcdd279d7683a))
* replace requirements.txt files with poetry (3) ([62da705](https://github.com/sshuttle/sshuttle/commit/62da70510e8a1f93e8b38870fdebdbace965cd8e))
* replace requirements.txt files with poetry (4) ([9bcedf1](https://github.com/sshuttle/sshuttle/commit/9bcedf19049e5b3a8ae26818299cc518ec03a926))
* update nix flake to fix problems ([cda60a5](https://github.com/sshuttle/sshuttle/commit/cda60a52331c7102cff892b9b77c8321e276680a))
* use Python >= 3.10 for docs ([bf29464](https://github.com/sshuttle/sshuttle/commit/bf294643e283cef9fb285d44e307e958686caf46))
CHANGES.rst: file deleted (315 lines)

@@ -1,315 +0,0 @@
==========
Change log
==========
Release notes now moved to https://github.com/sshuttle/sshuttle/releases/

These are the old release notes.


1.0.5 - 2020-12-29
------------------

Added
~~~~~
* IPv6 support in nft method.
* Intercept DNS requests sent by systemd-resolved.
* Set default tmark.
* Fix python2 server compatibility.
* Python 3.9 support.

Fixed
~~~~~
* Change license text to LGPL-2.1
* Fix #494 sshuttle caught in infinite select() loop.
* Include sshuttle version in verbose output.
* Add psutil as dependency in setup.py
* When subnets and excludes are specified with hostnames, use all IPs.
* Update/document client's handling of IPv4 and IPv6.
* Update sdnotify.py documentation.
* Allow no remote to work.
* Make prefixes in verbose output more consistent.
* Make nat and nft rules consistent; improve rule ordering.
* Make server and client handle resolv.conf differently.
* Fix handling OSError in FirewallClient#__init__
* Refactor automatic method selection.

Removed
~~~~~~~
* Drop testing of Python 3.5


1.0.4 - 2020-08-24
------------------

Fixed
~~~~~
* Allow Mux() flush/fill to work with python < 3.5
* Fix parse_hostport to always return string for host.
* Require -r/--remote parameter.
* Add missing package in OpenWRT documentation.
* Fix doc about --listen option.
* README: add Ubuntu.
* Increase IP4 ttl to 63 hops instead of 42.
* Fix formatting in installation.rst


1.0.3 - 2020-07-12
------------------

Fixed
~~~~~
* Ask setuptools to require Python 3.5 and above.
* Add missing import.
* Fix formatting typos in usage docs


1.0.2 - 2020-06-18
------------------

Fixed
~~~~~
* Leave use of default port to ssh command.
* Remove unwanted references to Python 2.7 in docs.
* Replace usage of deprecated imp.
* Fix connection with @ sign in username.


1.0.1 - 2020-06-05
------------------

Fixed
~~~~~
* Errors in python long_documentation.


1.0.0 - 2020-06-05
------------------

Added
~~~~~
* Python 3.8 support.
* sshpass support.
* Auto sudoers file (#269).
* option for latency control buffer size.
* Docs: FreeBSD'.
* Docs: Nix'.
* Docs: openwrt'.
* Docs: install instructions for Fedora'.
* Docs: install instructions for Arch Linux'.
* Docs: 'My VPN broke and need a solution fast'.

Removed
~~~~~~~
* Python 2.6 support.
* Python 2.7 support.

Fixed
~~~~~
* Remove debug message for getpeername failure.
* Fix crash triggered by port scans closing socket.
* Added "Running as a service" to docs.
* Systemd integration.
* Trap UnicodeError to handle cases where hostnames returned by DNS are invalid.
* Formatting error in CHANGES.rst
* Various errors in documentation.
* Nftables based method.
* Make hostwatch locale-independent (#379).
* Add tproxy udp port mark filter that was missed in #144, fixes #367.
* Capturing of local DNS servers.
* Crashing on ECONNABORTED.
* Size of pf_rule, which grew in OpenBSD 6.4.
* Use prompt for sudo, not needed for doas.
* Arch linux installation instructions.
* tests for existing PR-312 (#337).
* Hyphen in hostname.
* Assembler import (#319).


0.78.5 - 2019-01-28
-------------------

Added
~~~~~
* doas support as replacement for sudo on OpenBSD.
* Added ChromeOS section to documentation (#262)
* Add --no-sudo-pythonpath option

Fixed
~~~~~
* Fix forwarding to a single port.
* Various updates to documentation.
* Don't crash if we can't look up peername
* Fix missing string formatting argument
* Moved sshuttle/tests into tests.
* Updated bandit config.
* Replace path /dev/null by os.devnull.
* Added coverage report to tests.
* Fixes support for OpenBSD (6.1+) (#282).
* Close stdin, stdout, and stderr when using syslog or forking to daemon (#283).
* Changes pf exclusion rules precedence.
* Fix deadlock with iptables with large ruleset.
* docs: document --ns-hosts --to-ns and update --dns.
* Use subprocess.check_output instead of run.
* Fix potential deadlock condition in nft_get_handle.
* auto-nets: retrieve routes only if using auto-nets.


0.78.4 - 2018-04-02
-------------------

Added
~~~~~
* Add homebrew instructions.
* Route traffic by linux user.
* Add nat-like method using nftables instead of iptables.

Changed
~~~~~~~
* Talk to custom DNS server on pod, instead of the ones in /etc/resolv.conf.
* Add new option for overriding destination DNS server.
* Changed subnet parsing. Previously 10/8 become 10.0.0.0/8. Now it gets
  parsed as 0.0.0.10/8.
* Make hostwatch find both fqdn and hostname.
* Use versions of python3 greater than 3.5 when available (e.g. 3.6).

Removed
~~~~~~~
* Remove Python 2.6 from automatic tests.

Fixed
~~~~~
* Fix case where there is no --dns.
* [pf] Avoid port forwarding from loopback address.
* Use getaddrinfo to obtain a correct sockaddr.
* Skip empty lines on incoming routes data.
* Just skip empty lines of routes data instead of stopping processing.
* [pf] Load pf kernel module when enabling pf.
* [pf] Test double restore (ipv4, ipv6) disables only once; test kldload.
* Fixes UDP and DNS proxies binding to the same socket address.
* Mock socket bind to avoid depending on local IPs being available in test box.
* Fix no value passed for argument auto_hosts in hw_main call.
* Fixed incorrect license information in setup.py.
* Preserve peer and port properly.
* Make --to-dns and --ns-host work well together.
* Remove test that fails under OSX.
* Specify pip requirements for tests.
* Use flake8 to find Python syntax errors or undefined names.
* Fix compatibility with the sudoers file.
* Stop using SO_REUSEADDR on sockets.
* Declare 'verbosity' as global variable to placate linters.
* Adds 'cd sshuttle' after 'git' to README and docs.
* Documentation for loading options from configuration file.
* Load options from a file.
* Fix firewall.py.
* Move sdnotify after setting up firewall rules.
* Fix tests on Macos.


0.78.3 - 2017-07-09
-------------------
The "I should have done a git pull" first release.

Fixed
~~~~~
* Order first by port range and only then by swidth


0.78.2 - 2017-07-09
-------------------

Added
~~~~~
* Adds support for tunneling specific port ranges (#144).
* Add support for iproute2.
* Allow remote hosts with colons in the username.
* Re-introduce ipfw support for sshuttle on FreeBSD with support for --DNS option as well.
* Add support for PfSense.
* Tests and documentation for systemd integration.
* Allow subnets to be given only by file (-s).

Fixed
~~~~~
* Work around non tabular headers in BSD netstat.
* Fix UDP and DNS support on Python 2.7 with tproxy method.
* Fixed tests after adding support for iproute2.
* Small refactoring of netstat/iproute parsing.
* Set started_by_sshuttle False after disabling pf.
* Fix punctuation and explain Type=notify.
* Move pytest-runner to tests_require.
* Fix warning: closed channel got=STOP_SENDING.
* Support sdnotify for better systemd integration.
* Fix #117 to allow for no subnets via file (-s).
* Fix argument splitting for multi-word arguments.
* requirements.rst: Fix mistakes.
* Fix typo, space not required here.
* Update installation instructions.
* Support using run from different directory.
* Ensure we update sshuttle/version.py in run.
* Don't print python version in run.
* Add CWD to PYTHONPATH in run.


0.78.1 - 2016-08-06
-------------------
* Fix readthedocs versioning.
* Don't crash on ENETUNREACH.
* Various bug fixes.
* Improvements to BSD and OSX support.


0.78.0 - 2016-04-08
-------------------

* Don't force IPv6 if IPv6 nameservers supplied. Fixes #74.
* Call /bin/sh as users shell may not be POSIX compliant. Fixes #77.
* Use argparse for command line processing. Fixes #75.
* Remove useless --server option.
* Support multiple -s (subnet) options. Fixes #86.
* Make server parts work with old versions of Python. Fixes #81.


0.77.2 - 2016-03-07
-------------------

* Accidentally switched LGPL2 license with GPL2 license in 0.77.1 - now fixed.


0.77.1 - 2016-03-07
-------------------

* Use semantic versioning. http://semver.org/
* Update GPL 2 license text.
* New release to fix PyPI.


0.77 - 2016-03-03
-----------------

* Various bug fixes.
* Fix Documentation.
* Add fix for MacOS X issue.
* Add support for OpenBSD.


0.76 - 2016-01-17
-----------------

* Add option to disable IPv6 support.
* Update documentation.
* Move documentation, including man page, to Sphinx.
* Use setuptools-scm for automatic versioning.


0.75 - 2016-01-12
-----------------

* Revert change that broke sshuttle entry point.


0.74 - 2016-01-10
-----------------

* Add CHANGES.rst file.
* Numerous bug fixes.
* Python 3.5 fixes.
* PF fixes, especially for BSD.
Documentation/.gitignore (vendored): new file (3 lines)

@@ -0,0 +1,3 @@
*.8
/md-to-man
/*.md.tmp
Documentation/all.do: new file (5 lines)

@@ -0,0 +1,5 @@
/bin/ls *.md |
sed 's/\.md/.8/' |
xargs redo-ifchange

redo-always
Documentation/clean.do: new file (1 line)

@@ -0,0 +1 @@
rm -f *~ .*~ *.8 t/*.8 md-to-man *.tmp t/*.tmp
Documentation/default.8.do: new file (2 lines)

@@ -0,0 +1,2 @@
redo-ifchange md-to-man $2.md.tmp
. ./md-to-man $1 $2 $3
Documentation/default.md.tmp.do: new file (3 lines)

@@ -0,0 +1,3 @@
redo-ifchange ../version/vars $2.md
. ../version/vars
sed -e "s/%VERSION%/$TAG/" -e "s/%DATE%/$DATE/" $2.md
Documentation/md-to-man.do: new file (8 lines)

@@ -0,0 +1,8 @@
redo-ifchange md2man.py
if ./md2man.py </dev/null >/dev/null; then
    echo './md2man.py $2.md.tmp'
else
    echo "Warning: md2man.py missing modules; can't generate manpages." >&2
    echo "Warning: try this: sudo easy_install markdown BeautifulSoup" >&2
    echo 'echo Skipping: $2.1 >&2'
fi
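md-to-man.do decides whether manpages can be built by running md2man.py once on empty input, which fails when the markdown or BeautifulSoup modules are missing. A rough Python equivalent of that availability check (an illustrative sketch only, not part of the diff):

    import importlib.util

    # md2man.py needs both of these importable to generate manpages.
    missing = [name for name in ("markdown", "BeautifulSoup")
               if importlib.util.find_spec(name) is None]
    if missing:
        print("Warning: can't generate manpages, missing modules: "
              + ", ".join(missing))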
Documentation/md2man.py (executable, new file): 278 lines

@@ -0,0 +1,278 @@
#!/usr/bin/env python
import sys, os, markdown, re
from BeautifulSoup import BeautifulSoup


def _split_lines(s):
    return re.findall(r'([^\n]*\n?)', s)


class Writer:
    def __init__(self):
        self.started = False
        self.indent = 0
        self.last_wrote = '\n'

    def _write(self, s):
        if s:
            self.last_wrote = s
            sys.stdout.write(s)

    def writeln(self, s):
        if s:
            self.linebreak()
            self._write('%s\n' % s)

    def write(self, s):
        if s:
            self.para()
            for line in _split_lines(s):
                if line.startswith('.'):
                    self._write('\\&' + line)
                else:
                    self._write(line)

    def linebreak(self):
        if not self.last_wrote.endswith('\n'):
            self._write('\n')

    def para(self, bullet=None):
        if not self.started:
            if not bullet:
                bullet = ' '
            if not self.indent:
                self.writeln(_macro('.PP'))
            else:
                assert(self.indent >= 2)
                prefix = ' '*(self.indent-2) + bullet + ' '
                self.writeln('.IP "%s" %d' % (prefix, self.indent))
            self.started = True

    def end_para(self):
        self.linebreak()
        self.started = False

    def start_bullet(self):
        self.indent += 3
        self.para(bullet='\\[bu]')

    def end_bullet(self):
        self.indent -= 3
        self.end_para()

w = Writer()


def _macro(name, *args):
    if not name.startswith('.'):
        raise ValueError('macro names must start with "."')
    fixargs = []
    for i in args:
        i = str(i)
        i = i.replace('\\', '')
        i = i.replace('"', "'")
        if (' ' in i) or not i:
            i = '"%s"' % i
        fixargs.append(i)
    return ' '.join([name] + list(fixargs))


def macro(name, *args):
    w.writeln(_macro(name, *args))


def _force_string(owner, tag):
    if tag.string:
        return tag.string
    else:
        out = ''
        for i in tag:
            if not (i.string or i.name in ['a', 'br']):
                raise ValueError('"%s" tags must contain only strings: '
                                 'got %r: %r' % (owner.name, tag.name, tag))
            out += _force_string(owner, i)
        return out


def _clean(s):
    s = s.replace('\\', '\\\\')
    return s


def _bitlist(tag):
    if getattr(tag, 'contents', None) == None:
        for i in _split_lines(str(tag)):
            yield None,_clean(i)
    else:
        for e in tag:
            name = getattr(e, 'name', None)
            if name in ['a', 'br']:
                name = None  # just treat as simple text
            s = _force_string(tag, e)
            if name:
                yield name,_clean(s)
            else:
                for i in _split_lines(s):
                    yield None,_clean(i)


def _bitlist_simple(tag):
    for typ,text in _bitlist(tag):
        if typ and not typ in ['em', 'strong', 'code']:
            raise ValueError('unexpected tag %r inside %r' % (typ, tag.name))
        yield text


def _text(bitlist):
    out = ''
    for typ,text in bitlist:
        if not typ:
            out += text
        elif typ == 'em':
            out += '\\fI%s\\fR' % text
        elif typ in ['strong', 'code']:
            out += '\\fB%s\\fR' % text
        else:
            raise ValueError('unexpected tag %r inside %r' % (typ, tag.name))
    out = out.strip()
    out = re.sub(re.compile(r'^\s+', re.M), '', out)
    return out


def text(tag):
    w.write(_text(_bitlist(tag)))


# This is needed because .BI (and .BR, .RB, etc) are weird little state
# machines that alternate between two fonts.  So if someone says something
# like foo<b>chicken</b><b>wicken</b>dicken we have to convert that to
#   .BI foo chickenwicken dicken
def _boldline(l):
    out = ['']
    last_bold = False
    for typ,text in l:
        nonzero = not not typ
        if nonzero != last_bold:
            last_bold = not last_bold
            out.append('')
        out[-1] += re.sub(r'\s+', ' ', text)
    macro('.BI', *out)


def do_definition(tag):
    w.end_para()
    macro('.TP')
    w.started = True
    split = 0
    pre = []
    post = []
    for typ,text in _bitlist(tag):
        if split:
            post.append((typ,text))
        elif text.lstrip().startswith(': '):
            split = 1
            post.append((typ,text.lstrip()[2:].lstrip()))
        else:
            pre.append((typ,text))
    _boldline(pre)
    w.write(_text(post))


def do_list(tag):
    for i in tag:
        name = getattr(i, 'name', '').lower()
        if not name and not str(i).strip():
            pass
        elif name != 'li':
            raise ValueError('only <li> is allowed inside <ul>: got %r' % i)
        else:
            w.start_bullet()
            for xi in i:
                do(xi)
            w.end_para()
            w.end_bullet()


def do(tag):
    name = getattr(tag, 'name', '').lower()
    if not name:
        text(tag)
    elif name == 'h1':
        macro('.SH', _force_string(tag, tag).upper())
        w.started = True
    elif name == 'h2':
        macro('.SS', _force_string(tag, tag))
        w.started = True
    elif name.startswith('h') and len(name)==2:
        raise ValueError('%r invalid - man page headers must be h1 or h2'
                         % name)
    elif name == 'pre':
        t = _force_string(tag.code, tag.code)
        if t.strip():
            macro('.RS', '+4n')
            macro('.nf')
            w.write(_clean(t).rstrip())
            macro('.fi')
            macro('.RE')
            w.end_para()
    elif name == 'p' or name == 'br':
        g = re.match(re.compile(r'([^\n]*)\n +: +(.*)', re.S), str(tag))
        if g:
            # it's a definition list (which some versions of python-markdown
            # don't support, including the one in Debian-lenny, so we can't
            # enable that markdown extension).  Fake it up.
            do_definition(tag)
        else:
            text(tag)
            w.end_para()
    elif name == 'ul':
        do_list(tag)
    else:
        raise ValueError('non-man-compatible html tag %r' % name)


PROD='Untitled'
VENDOR='Vendor Name'
SECTION='9'
GROUPNAME='User Commands'
DATE=''
AUTHOR=''

lines = []
if len(sys.argv) > 1:
    for n in sys.argv[1:]:
        lines += open(n).read().decode('utf8').split('\n')
else:
    lines += sys.stdin.read().decode('utf8').split('\n')

# parse pandoc-style document headers (not part of markdown)
g = re.match(r'^%\s+(.*?)\((.*?)\)\s+(.*)$', lines[0])
if g:
    PROD = g.group(1)
    SECTION = g.group(2)
    VENDOR = g.group(3)
    lines.pop(0)
    g = re.match(r'^%\s+(.*?)$', lines[0])
    if g:
        AUTHOR = g.group(1)
        lines.pop(0)
    g = re.match(r'^%\s+(.*?)$', lines[0])
    if g:
        DATE = g.group(1)
        lines.pop(0)
    g = re.match(r'^%\s+(.*?)$', lines[0])
    if g:
        GROUPNAME = g.group(1)
        lines.pop(0)

inp = '\n'.join(lines)
if AUTHOR:
    inp += ('\n# AUTHOR\n\n%s\n' % AUTHOR).replace('<', '\\<')

html = markdown.markdown(inp)
soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)

macro('.TH', PROD.upper(), SECTION, DATE, VENDOR, GROUPNAME)
macro('.ad', 'l')  # left justified
macro('.nh')  # disable hyphenation
for e in soup:
    do(e)
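The script reads an optional pandoc-style header block before handing the rest of the document to markdown. A small sketch of that first-line parse, using the same regular expression as md2man.py above; the sample header values are assumed for illustration:

    import re

    # First header line of a manpage source, e.g. Documentation/sshuttle.md
    # after %VERSION% substitution (the version string here is made up).
    line = "% sshuttle(8) Sshuttle 1.0"

    g = re.match(r'^%\s+(.*?)\((.*?)\)\s+(.*)$', line)
    if g:
        prod, section, vendor = g.groups()
        print(prod, section, vendor)  # -> sshuttle 8 Sshuttle 1.0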
Documentation/sshuttle.md: new file (278 lines)

@@ -0,0 +1,278 @@
% sshuttle(8) Sshuttle %VERSION%
% Avery Pennarun <apenwarr@gmail.com>
% %DATE%

# NAME

sshuttle - a transparent proxy-based VPN using ssh

# SYNOPSIS

sshuttle [options...] [-r [username@]sshserver[:port]] \<subnets...\>


# DESCRIPTION

sshuttle allows you to create a VPN connection from your
machine to any remote server that you can connect to via
ssh, as long as that server has python 2.3 or higher.

To work, you must have root access on the local machine,
but you can have a normal account on the server.

It's valid to run sshuttle more than once simultaneously on
a single client machine, connecting to a different server
every time, so you can be on more than one VPN at once.

If run on a router, sshuttle can forward traffic for your
entire subnet to the VPN.


# OPTIONS

\<subnets...\>
: a list of subnets to route over the VPN, in the form
  `a.b.c.d[/width]`.  Valid examples are 1.2.3.4 (a
  single IP address), 1.2.3.4/32 (equivalent to 1.2.3.4),
  1.2.3.0/24 (a 24-bit subnet, ie. with a 255.255.255.0
  netmask), and 0/0 ('just route everything through the
  VPN').

-l, --listen=*[ip:]port*
: use this ip address and port number as the transparent
  proxy port.  By default sshuttle finds an available
  port automatically and listens on IP 127.0.0.1
  (localhost), so you don't need to override it, and
  connections are only proxied from the local machine,
  not from outside machines.  If you want to accept
  connections from other machines on your network (ie. to
  run sshuttle on a router) try enabling IP Forwarding in
  your kernel, then using `--listen 0.0.0.0:0`.

-H, --auto-hosts
: scan for remote hostnames and update the local /etc/hosts
  file with matching entries for as long as the VPN is
  open.  This is nicer than changing your system's DNS
  (/etc/resolv.conf) settings, for several reasons.  First,
  hostnames are added without domain names attached, so
  you can `ssh thatserver` without worrying if your local
  domain matches the remote one.  Second, if you sshuttle
  into more than one VPN at a time, it's impossible to
  use more than one DNS server at once anyway, but
  sshuttle correctly merges /etc/hosts entries between
  all running copies.  Third, if you're only routing a
  few subnets over the VPN, you probably would prefer to
  keep using your local DNS server for everything else.

-N, --auto-nets
: in addition to the subnets provided on the command
  line, ask the server which subnets it thinks we should
  route, and route those automatically.  The suggestions
  are taken automatically from the server's routing
  table.

--dns
: capture local DNS requests and forward to the remote DNS
  server.

--python
: specify the name/path of the remote python interpreter.
  The default is just `python`, which means to use the
  default python interpreter on the remote system's PATH.

-r, --remote=*[username@]sshserver[:port]*
: the remote hostname and optional username and ssh
  port number to use for connecting to the remote server.
  For example, example.com, testuser@example.com,
  testuser@example.com:2222, or example.com:2244.

-x, --exclude=*subnet*
: explicitly exclude this subnet from forwarding.  The
  format of this option is the same as the `<subnets>`
  option.  To exclude more than one subnet, specify the
  `-x` option more than once.  You can say something like
  `0/0 -x 1.2.3.0/24` to forward everything except the
  local subnet over the VPN, for example.

--exclude-from=*file*
: exclude the subnets specified in a file, one subnet per
  line.  Useful when you have lots of subnets to exclude.

-v, --verbose
: print more information about the session.  This option
  can be used more than once for increased verbosity.  By
  default, sshuttle prints only error messages.

-e, --ssh-cmd
: the command to use to connect to the remote server. The
  default is just `ssh`.  Use this if your ssh client is
  in a non-standard location or you want to provide extra
  options to the ssh command, for example, `-e 'ssh -v'`.

--seed-hosts
: a comma-separated list of hostnames to use to
  initialize the `--auto-hosts` scan algorithm.
  `--auto-hosts` does things like poll local SMB servers
  for lists of local hostnames, but can speed things up
  if you use this option to give it a few names to start
  from.

--no-latency-control
: sacrifice latency to improve bandwidth benchmarks.  ssh
  uses really big socket buffers, which can overload the
  connection if you start doing large file transfers,
  thus making all your other sessions inside the same
  tunnel go slowly.  Normally, sshuttle tries to avoid
  this problem using a "fullness check" that allows only
  a certain amount of outstanding data to be buffered at
  a time.  But on high-bandwidth links, this can leave a
  lot of your bandwidth underutilized.  It also makes
  sshuttle seem slow in bandwidth benchmarks (benchmarks
  rarely test ping latency, which is what sshuttle is
  trying to control).  This option disables the latency
  control feature, maximizing bandwidth usage.  Use at
  your own risk.

-D, --daemon
: automatically fork into the background after connecting
  to the remote server.  Implies `--syslog`.

--syslog
: after connecting, send all log messages to the
  `syslog`(3) service instead of stderr.  This is
  implicit if you use `--daemon`.

--pidfile=*pidfilename*
: when using `--daemon`, save sshuttle's pid to
  *pidfilename*.  The default is `sshuttle.pid` in the
  current directory.

--server
: (internal use only) run the sshuttle server on
  stdin/stdout.  This is what the client runs on
  the remote end.

--firewall
: (internal use only) run the firewall manager.  This is
  the only part of sshuttle that must run as root.  If
  you start sshuttle as a non-root user, it will
  automatically run `sudo` or `su` to start the firewall
  manager, but the core of sshuttle still runs as a
  normal user.

--hostwatch
: (internal use only) run the hostwatch daemon.  This
  process runs on the server side and collects hostnames for
  the `--auto-hosts` option.  Using this option by itself
  makes it a lot easier to debug and test the `--auto-hosts`
  feature.


# EXAMPLES

Test locally by proxying all local connections, without using ssh:

    $ sshuttle -v 0/0

    Starting sshuttle proxy.
    Listening on ('0.0.0.0', 12300).
    [local sudo] Password:
    firewall manager ready.
    c : connecting to server...
    s: available routes:
    s:   192.168.42.0/24
    c : connected.
    firewall manager: starting transproxy.
    c : Accept: 192.168.42.106:50035 -> 192.168.42.121:139.
    c : Accept: 192.168.42.121:47523 -> 77.141.99.22:443.
        ...etc...
    ^C
    firewall manager: undoing changes.
    KeyboardInterrupt
    c : Keyboard interrupt: exiting.
    c : SW#8:192.168.42.121:47523: deleting
    c : SW#6:192.168.42.106:50035: deleting

Test connection to a remote server, with automatic hostname
and subnet guessing:

    $ sshuttle -vNHr example.org

    Starting sshuttle proxy.
    Listening on ('0.0.0.0', 12300).
    firewall manager ready.
    c : connecting to server...
    s: available routes:
    s:   77.141.99.0/24
    c : connected.
    c : seed_hosts: []
    firewall manager: starting transproxy.
    hostwatch: Found: testbox1: 1.2.3.4
    hostwatch: Found: mytest2: 5.6.7.8
    hostwatch: Found: domaincontroller: 99.1.2.3
    c : Accept: 192.168.42.121:60554 -> 77.141.99.22:22.
    ^C
    firewall manager: undoing changes.
    c : Keyboard interrupt: exiting.
    c : SW#6:192.168.42.121:60554: deleting


# DISCUSSION

When it starts, sshuttle creates an ssh session to the
server specified by the `-r` option.  If `-r` is omitted,
it will start both its client and server locally, which is
sometimes useful for testing.

After connecting to the remote server, sshuttle uploads its
(python) source code to the remote end and executes it
there.  Thus, you don't need to install sshuttle on the
remote server, and there are never sshuttle version
conflicts between client and server.

Unlike most VPNs, sshuttle forwards sessions, not packets.
That is, it uses kernel transparent proxying (`iptables
REDIRECT` rules on Linux, or `ipfw fwd` rules on BSD) to
capture outgoing TCP sessions, then creates entirely
separate TCP sessions out to the original destination at
the other end of the tunnel.

Packet-level forwarding (eg. using the tun/tap devices on
Linux) seems elegant at first, but it results in
several problems, notably the 'tcp over tcp' problem.  The
tcp protocol depends fundamentally on packets being dropped
in order to implement its congestion control agorithm; if
you pass tcp packets through a tcp-based tunnel (such as
ssh), the inner tcp packets will never be dropped, and so
the inner tcp stream's congestion control will be
completely broken, and performance will be terrible.  Thus,
packet-based VPNs (such as IPsec and openvpn) cannot use
tcp-based encrypted streams like ssh or ssl, and have to
implement their own encryption from scratch, which is very
complex and error prone.

sshuttle's simplicity comes from the fact that it can
safely use the existing ssh encrypted tunnel without
incurring a performance penalty.  It does this by letting
the client-side kernel manage the incoming tcp stream, and
the server-side kernel manage the outgoing tcp stream;
there is no need for congestion control to be shared
between the two separate streams, so a tcp-based tunnel is
fine.


# BUGS

On MacOS 10.6 (at least up to 10.6.6), your network will
stop responding about 10 minutes after the first time you
start sshuttle, because of a MacOS kernel bug relating to
arp and the net.inet.ip.scopedroute sysctl.  To fix it,
just switch your wireless off and on.  Sshuttle makes the
kernel setting it changes permanent, so this won't happen
again, even after a reboot.


# SEE ALSO

`ssh`(1), `python`(1)
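The `a.b.c.d[/width]` subnet syntax described under OPTIONS is ordinary CIDR notation, with a bare address meaning a /32 and short forms such as `0/0` padded out with zero octets (CHANGES.rst above notes that `10/8` is now read as `0.0.0.10/8`). A minimal Python sketch of parsing such a spec; `parse_subnet_spec` is a hypothetical helper for illustration, not sshuttle's actual parser:

    import ipaddress

    def parse_subnet_spec(spec):
        # Hypothetical helper: 'a.b.c.d[/width]' -> an IPv4 network.
        # A bare address counts as /32; short addresses are left-padded
        # with zero octets, so '0/0' becomes 0.0.0.0/0.
        addr, _, width = spec.partition('/')
        octets = addr.split('.')
        octets = ['0'] * (4 - len(octets)) + octets
        return ipaddress.ip_network(
            '.'.join(octets) + '/' + (width or '32'), strict=False)

    print(parse_subnet_spec('1.2.3.4'))     # 1.2.3.4/32
    print(parse_subnet_spec('1.2.3.0/24'))  # 1.2.3.0/24
    print(parse_subnet_spec('0/0'))         # 0.0.0.0/0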
LICENSE: 207 lines changed

@@ -1,125 +1,112 @@
-GNU LESSER GENERAL PUBLIC LICENSE
-Version 2.1, February 1999
+GNU LIBRARY GENERAL PUBLIC LICENSE
+Version 2, June 1991

-Copyright (C) 1991, 1999 Free Software Foundation, Inc.
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+Copyright (C) 1991 Free Software Foundation, Inc.
+675 Mass Ave, Cambridge, MA 02139, USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

-[This is the first released version of the Lesser GPL.  It also counts
-as the successor of the GNU Library Public License, version 2, hence
-the version number 2.1.]
+[This is the first released version of the library GPL.  It is
+numbered 2 because it goes with version 2 of the ordinary GPL.]

 Preamble

 The licenses for most software are designed to take away your
 freedom to share and change it.  By contrast, the GNU General Public
 Licenses are intended to guarantee your freedom to share and change
 free software--to make sure the software is free for all its users.

-This license, the Lesser General Public License, applies to some
-specially designated software packages--typically libraries--of the
-Free Software Foundation and other authors who decide to use it.  You
-can use it too, but we suggest you first think carefully about whether
-this license or the ordinary General Public License is the better
-strategy to use in any particular case, based on the explanations below.
+This license, the Library General Public License, applies to some
+specially designated Free Software Foundation software, and to any
+other libraries whose authors decide to use it.  You can use it for
+your libraries, too.

-When we speak of free software, we are referring to freedom of use,
-not price.  Our General Public Licenses are designed to make sure that
-you have the freedom to distribute copies of free software (and charge
-for this service if you wish); that you receive source code or can get
-it if you want it; that you can change the software and use pieces of
-it in new free programs; and that you are informed that you can do
-these things.
+When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.

 To protect your rights, we need to make restrictions that forbid
-distributors to deny you these rights or to ask you to surrender these
-rights.  These restrictions translate to certain responsibilities for
-you if you distribute copies of the library or if you modify it.
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if
+you distribute copies of the library, or if you modify it.

 For example, if you distribute copies of the library, whether gratis
 or for a fee, you must give the recipients all the rights that we gave
 you.  You must make sure that they, too, receive or can get the source
-code.  If you link other code with the library, you must provide
-complete object files to the recipients, so that they can relink them
-with the library after making changes to the library and recompiling
+code.  If you link a program with the library, you must provide
+complete object files to the recipients so that they can relink them
+with the library, after making changes to the library and recompiling
 it.  And you must show them these terms so they know their rights.

-We protect your rights with a two-step method: (1) we copyright the
-library, and (2) we offer you this license, which gives you legal
+Our method of protecting your rights has two steps: (1) copyright
+the library, and (2) offer you this license which gives you legal
 permission to copy, distribute and/or modify the library.

-To protect each distributor, we want to make it very clear that
-there is no warranty for the free library.  Also, if the library is
-modified by someone else and passed on, the recipients should know
-that what they have is not the original version, so that the original
-author's reputation will not be affected by problems that might be
-introduced by others.
+Also, for each distributor's protection, we want to make certain
+that everyone understands that there is no warranty for this free
+library.  If the library is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original
+version, so that any problems introduced by others will not reflect on
+the original authors' reputations.

-Finally, software patents pose a constant threat to the existence of
-any free program.  We wish to make sure that a company cannot
-effectively restrict the users of a free program by obtaining a
-restrictive license from a patent holder.  Therefore, we insist that
-any patent license obtained for a version of the library must be
-consistent with the full freedom of use specified in this license.
+Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that companies distributing free
+software will individually obtain patent licenses, thus in effect
+transforming the program into proprietary software.  To prevent this,
+we have made it clear that any patent must be licensed for everyone's
+free use or not licensed at all.

-Most GNU software, including some libraries, is covered by the
-ordinary GNU General Public License.  This license, the GNU Lesser
-General Public License, applies to certain designated libraries, and
-is quite different from the ordinary General Public License.  We use
-this license for certain libraries in order to permit linking those
-libraries into non-free programs.
+Most GNU software, including some libraries, is covered by the ordinary
+GNU General Public License, which was designed for utility programs.  This
+license, the GNU Library General Public License, applies to certain
+designated libraries.  This license is quite different from the ordinary
+one; be sure to read it in full, and don't assume that anything in it is
+the same as in the ordinary license.

-When a program is linked with a library, whether statically or using
-a shared library, the combination of the two is legally speaking a
-combined work, a derivative of the original library.  The ordinary
-General Public License therefore permits such linking only if the
-entire combination fits its criteria of freedom.  The Lesser General
-Public License permits more lax criteria for linking other code with
-the library.
+The reason we have a separate public license for some libraries is that
+they blur the distinction we usually make between modifying or adding to a
+program and simply using it.  Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program.  However, in
+a textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.

-We call this license the "Lesser" General Public License because it
-does Less to protect the user's freedom than the ordinary General
-Public License.  It also provides other free software developers Less
-of an advantage over competing non-free programs.  These disadvantages
-are the reason we use the ordinary General Public License for many
-libraries.  However, the Lesser license provides advantages in certain
-special circumstances.
+Because of this blurred distinction, using the ordinary General
+Public License for libraries did not effectively promote software
+sharing, because most developers did not use the libraries.  We
+concluded that weaker conditions might promote sharing better.

-For example, on rare occasions, there may be a special need to
-encourage the widest possible use of a certain library, so that it becomes
-a de-facto standard.  To achieve this, non-free programs must be
-allowed to use the library.  A more frequent case is that a free
-library does the same job as widely used non-free libraries.  In this
-case, there is little to gain by limiting the free library to free
-software only, so we use the Lesser General Public License.
-
-In other cases, permission to use a particular library in non-free
-programs enables a greater number of people to use a large body of
-free software.  For example, permission to use the GNU C Library in
-non-free programs enables many more people to use the whole GNU
-operating system, as well as its variant, the GNU/Linux operating
-system.
-
-Although the Lesser General Public License is Less protective of the
-users' freedom, it does ensure that the user of a program that is
-linked with the Library has the freedom and the wherewithal to run
-that program using a modified version of the Library.
+However, unrestricted linking of non-free programs would deprive the
+users of those programs of all benefit from the free status of the
+libraries themselves.  This Library General Public License is intended to
+permit developers of non-free programs to use free libraries, while
+preserving your freedom as a user of such programs to change the free
+libraries that are incorporated in them.  (We have not seen how to achieve
+this as regards changes in header files, but we have achieved it as regards
+changes in the actual functions of the Library.)  The hope is that this
+will lead to faster development of free libraries.

 The precise terms and conditions for copying, distribution and
 modification follow.  Pay close attention to the difference between a
 "work based on the library" and a "work that uses the library".  The
-former contains code derived from the library, whereas the latter must
-be combined with the library in order to run.
+former contains code derived from the library, while the latter only
+works together with the library.

+Note that it is possible for a library to be covered by the ordinary
|
||||||
|
General Public License rather than by this special one.
|
||||||
|
|
||||||
GNU LESSER GENERAL PUBLIC LICENSE
|
GNU LIBRARY GENERAL PUBLIC LICENSE
|
||||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||||
|
|
||||||
0. This License Agreement applies to any software library or other
|
0. This License Agreement applies to any software library which
|
||||||
program which contains a notice placed by the copyright holder or
|
contains a notice placed by the copyright holder or other authorized
|
||||||
other authorized party saying it may be distributed under the terms of
|
party saying it may be distributed under the terms of this Library
|
||||||
this Lesser General Public License (also called "this License").
|
General Public License (also called "this License"). Each licensee is
|
||||||
Each licensee is addressed as "you".
|
addressed as "you".
|
||||||
|
|
||||||
A "library" means a collection of software functions and/or data
|
A "library" means a collection of software functions and/or data
|
||||||
prepared so as to be conveniently linked with application programs
|
prepared so as to be conveniently linked with application programs
|
||||||
@ -146,7 +133,7 @@ such a program is covered only if its contents constitute a work based
|
|||||||
on the Library (independent of the use of the Library in a tool for
|
on the Library (independent of the use of the Library in a tool for
|
||||||
writing it). Whether that is true depends on what the Library does
|
writing it). Whether that is true depends on what the Library does
|
||||||
and what the program that uses the Library does.
|
and what the program that uses the Library does.
|
||||||
|
|
||||||
1. You may copy and distribute verbatim copies of the Library's
|
1. You may copy and distribute verbatim copies of the Library's
|
||||||
complete source code as you receive it, in any medium, provided that
|
complete source code as you receive it, in any medium, provided that
|
||||||
you conspicuously and appropriately publish on each copy an
|
you conspicuously and appropriately publish on each copy an
|
||||||
@ -268,7 +255,7 @@ distribute the object code for the work under the terms of Section 6.
|
|||||||
Any executables containing that work also fall under Section 6,
|
Any executables containing that work also fall under Section 6,
|
||||||
whether or not they are linked directly with the Library itself.
|
whether or not they are linked directly with the Library itself.
|
||||||
|
|
||||||
6. As an exception to the Sections above, you may also combine or
|
6. As an exception to the Sections above, you may also compile or
|
||||||
link a "work that uses the Library" with the Library to produce a
|
link a "work that uses the Library" with the Library to produce a
|
||||||
work containing portions of the Library, and distribute that work
|
work containing portions of the Library, and distribute that work
|
||||||
under terms of your choice, provided that the terms permit
|
under terms of your choice, provided that the terms permit
|
||||||
@ -295,31 +282,23 @@ of these things:
|
|||||||
Library will not necessarily be able to recompile the application
|
Library will not necessarily be able to recompile the application
|
||||||
to use the modified definitions.)
|
to use the modified definitions.)
|
||||||
|
|
||||||
b) Use a suitable shared library mechanism for linking with the
|
b) Accompany the work with a written offer, valid for at
|
||||||
Library. A suitable mechanism is one that (1) uses at run time a
|
|
||||||
copy of the library already present on the user's computer system,
|
|
||||||
rather than copying library functions into the executable, and (2)
|
|
||||||
will operate properly with a modified version of the library, if
|
|
||||||
the user installs one, as long as the modified version is
|
|
||||||
interface-compatible with the version that the work was made with.
|
|
||||||
|
|
||||||
c) Accompany the work with a written offer, valid for at
|
|
||||||
least three years, to give the same user the materials
|
least three years, to give the same user the materials
|
||||||
specified in Subsection 6a, above, for a charge no more
|
specified in Subsection 6a, above, for a charge no more
|
||||||
than the cost of performing this distribution.
|
than the cost of performing this distribution.
|
||||||
|
|
||||||
d) If distribution of the work is made by offering access to copy
|
c) If distribution of the work is made by offering access to copy
|
||||||
from a designated place, offer equivalent access to copy the above
|
from a designated place, offer equivalent access to copy the above
|
||||||
specified materials from the same place.
|
specified materials from the same place.
|
||||||
|
|
||||||
e) Verify that the user has already received a copy of these
|
d) Verify that the user has already received a copy of these
|
||||||
materials or that you have already sent this user a copy.
|
materials or that you have already sent this user a copy.
|
||||||
|
|
||||||
For an executable, the required form of the "work that uses the
|
For an executable, the required form of the "work that uses the
|
||||||
Library" must include any data and utility programs needed for
|
Library" must include any data and utility programs needed for
|
||||||
reproducing the executable from it. However, as a special exception,
|
reproducing the executable from it. However, as a special exception,
|
||||||
the materials to be distributed need not include anything that is
|
the source code distributed need not include anything that is normally
|
||||||
normally distributed (in either source or binary form) with the major
|
distributed (in either source or binary form) with the major
|
||||||
components (compiler, kernel, and so on) of the operating system on
|
components (compiler, kernel, and so on) of the operating system on
|
||||||
which the executable runs, unless that component itself accompanies
|
which the executable runs, unless that component itself accompanies
|
||||||
the executable.
|
the executable.
|
||||||
@ -368,7 +347,7 @@ Library), the recipient automatically receives a license from the
|
|||||||
original licensor to copy, distribute, link with or modify the Library
|
original licensor to copy, distribute, link with or modify the Library
|
||||||
subject to these terms and conditions. You may not impose any further
|
subject to these terms and conditions. You may not impose any further
|
||||||
restrictions on the recipients' exercise of the rights granted herein.
|
restrictions on the recipients' exercise of the rights granted herein.
|
||||||
You are not responsible for enforcing compliance by third parties with
|
You are not responsible for enforcing compliance by third parties to
|
||||||
this License.
|
this License.
|
||||||
|
|
||||||
11. If, as a consequence of a court judgment or allegation of patent
|
11. If, as a consequence of a court judgment or allegation of patent
|
||||||
@ -411,7 +390,7 @@ excluded. In such case, this License incorporates the limitation as if
|
|||||||
written in the body of this License.
|
written in the body of this License.
|
||||||
|
|
||||||
13. The Free Software Foundation may publish revised and/or new
|
13. The Free Software Foundation may publish revised and/or new
|
||||||
versions of the Lesser General Public License from time to time.
|
versions of the Library General Public License from time to time.
|
||||||
Such new versions will be similar in spirit to the present version,
|
Such new versions will be similar in spirit to the present version,
|
||||||
but may differ in detail to address new problems or concerns.
|
but may differ in detail to address new problems or concerns.
|
||||||
|
|
||||||
@ -432,7 +411,7 @@ decision will be guided by the two goals of preserving the free status
|
|||||||
of all derivatives of our free software and of promoting the sharing
|
of all derivatives of our free software and of promoting the sharing
|
||||||
and reuse of software generally.
|
and reuse of software generally.
|
||||||
|
|
||||||
NO WARRANTY
|
NO WARRANTY
|
||||||
|
|
||||||
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
|
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
|
||||||
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
|
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
|
||||||
@ -455,9 +434,9 @@ FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
|
|||||||
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
|
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
|
||||||
DAMAGES.
|
DAMAGES.
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
How to Apply These Terms to Your New Libraries
|
Appendix: How to Apply These Terms to Your New Libraries
|
||||||
|
|
||||||
If you develop a new library, and you want it to be of the greatest
|
If you develop a new library, and you want it to be of the greatest
|
||||||
possible use to the public, we recommend making it free software that
|
possible use to the public, we recommend making it free software that
|
||||||
@ -474,18 +453,18 @@ convey the exclusion of warranty; and each file should have at least the
|
|||||||
Copyright (C) <year> <name of author>
|
Copyright (C) <year> <name of author>
|
||||||
|
|
||||||
This library is free software; you can redistribute it and/or
|
This library is free software; you can redistribute it and/or
|
||||||
modify it under the terms of the GNU Lesser General Public
|
modify it under the terms of the GNU Library General Public
|
||||||
License as published by the Free Software Foundation; either
|
License as published by the Free Software Foundation; either
|
||||||
version 2.1 of the License, or (at your option) any later version.
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
This library is distributed in the hope that it will be useful,
|
This library is distributed in the hope that it will be useful,
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
Lesser General Public License for more details.
|
Library General Public License for more details.
|
||||||
|
|
||||||
You should have received a copy of the GNU Lesser General Public
|
You should have received a copy of the GNU Library General Public
|
||||||
License along with this library; if not, write to the Free Software
|
License along with this library; if not, write to the Free
|
||||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||||
|
|
||||||
Also add information on how to contact you by electronic and paper mail.
|
Also add information on how to contact you by electronic and paper mail.
|
||||||
|
|
||||||
|
14
MANIFEST.in
14
MANIFEST.in
@ -1,14 +0,0 @@
include *.txt
include *.rst
include *.py
include MANIFEST.in
include LICENSE
include run
include tox.ini
exclude sshuttle/version.py
recursive-include docs *.bat
recursive-include docs *.py
recursive-include docs *.rst
recursive-include docs Makefile
recursive-include sshuttle *.py
recursive-exclude docs/_build *
189
README.md
Normal file
189
README.md
Normal file
@ -0,0 +1,189 @@

WARNING:
    On MacOS 10.6 (at least up to 10.6.6), your network will
    stop responding about 10 minutes after the first time you
    start sshuttle, because of a MacOS kernel bug relating to
    arp and the net.inet.ip.scopedroute sysctl.  To fix it,
    just switch your wireless off and on.  Sshuttle makes the
    kernel setting it changes permanent, so this won't happen
    again, even after a reboot.


sshuttle: where transparent proxy meets VPN meets ssh
=====================================================

As far as I know, sshuttle is the only program that solves the following
common case:

- Your client machine (or router) is Linux, FreeBSD, or MacOS.

- You have access to a remote network via ssh.

- You don't necessarily have admin access on the remote network.

- The remote network has no VPN, or only stupid/complex VPN
  protocols (IPsec, PPTP, etc). Or maybe you <i>are</i> the
  admin and you just got frustrated with the awful state of
  VPN tools.

- You don't want to create an ssh port forward for every
  single host/port on the remote network.

- You hate openssh's port forwarding because it's randomly
  slow and/or stupid.

- You can't use openssh's PermitTunnel feature because
  it's disabled by default on openssh servers; plus it does
  TCP-over-TCP, which has terrible performance (see below).


Prerequisites
-------------

- sudo, su, or logged in as root on your client machine.
  (The server doesn't need admin access.)

- If you use Linux on your client machine:
  iptables installed on the client, including at
  least the iptables DNAT, REDIRECT, and ttl modules.
  These are installed by default on most Linux distributions.
  (The server doesn't need iptables and doesn't need to be
  Linux.)

- If you use MacOS or BSD on your client machine:
  Your kernel needs to be compiled with IPFIREWALL_FORWARD
  (MacOS has this by default) and you need to have ipfw
  available.  (The server doesn't need to be MacOS or BSD.)


This is how you use it:
-----------------------

- <tt>git clone git://github.com/apenwarr/sshuttle</tt>
  on your client machine.  You'll need root or sudo
  access, and python needs to be installed.

- The most basic use of sshuttle looks like:
  <tt>./sshuttle -r username@sshserver 0.0.0.0/0 -vv</tt>

- There is a shortcut for 0.0.0.0/0 for those that value
  their wrists:
  <tt>./sshuttle -r username@sshserver 0/0 -vv</tt>

- If you would also like your DNS queries to be proxied
  through the DNS server of the server you are connected to:
  <tt>./sshuttle --dns -vvr username@sshserver 0/0</tt>

  The above is probably what you want to use to prevent
  local network attacks such as Firesheep and friends.

(You may be prompted for one or more passwords; first, the
local password to become root using either sudo or su, and
then the remote ssh password.  Or you might have sudo and ssh set
up to not require passwords, in which case you won't be
prompted at all.)

That's it!  Now your local machine can access the remote network as if you
were right there.  And if your "client" machine is a router, everyone on
your local network can make connections to your remote network.

You don't need to install sshuttle on the remote server;
the remote server just needs to have python available.
sshuttle will automatically upload and run its source code
to the remote python interpreter.

This creates a transparent proxy server on your local machine for all IP
addresses that match 0.0.0.0/0.  (You can use more specific IP addresses if
you want; use any number of IP addresses or subnets to change which
addresses get proxied.  Using 0.0.0.0/0 proxies <i>everything</i>, which is
interesting if you don't trust the people on your local network.)

Any TCP session you initiate to one of the proxied IP addresses will be
captured by sshuttle and sent over an ssh session to the remote copy of
sshuttle, which will then regenerate the connection on that end, and funnel
the data back and forth through ssh.
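On Linux, that capture works by REDIRECTing matching connections to sshuttle's
local listening port with iptables; the client then asks the kernel for each
connection's original destination so the server end knows where to connect.
A minimal sketch of that lookup in modern Python - illustrative only; the real
code is the original_dst() function in client.py, further down this diff:

    import socket
    import struct

    SO_ORIGINAL_DST = 80  # from linux/netfilter_ipv4.h

    def original_destination(sock):
        # The kernel returns the pre-REDIRECT sockaddr_in for this connection.
        raw = sock.getsockopt(socket.SOL_IP, SO_ORIGINAL_DST, 16)
        port, packed_ip = struct.unpack('!2xH4s8x', raw)
        return socket.inet_ntoa(packed_ip), port

If the socket wasn't actually redirected, the getsockopt call fails with
ENOPROTOOPT and sshuttle falls back to the socket's own local address.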

Fun, right?  A poor man's instant VPN, and you don't even have to have
admin access on the server.


Theory of Operation
-------------------

sshuttle is not exactly a VPN, and not exactly port forwarding.  It's kind
of both, and kind of neither.

It's like a VPN, since it can forward every port on an entire network, not
just ports you specify.  Conveniently, it lets you use the "real" IP
addresses of each host rather than faking port numbers on localhost.

On the other hand, the way it *works* is more like ssh port forwarding than
a VPN.  Normally, a VPN forwards your data one packet at a time, and
doesn't care about individual connections; ie. it's "stateless" with respect
to the traffic.  sshuttle is the opposite of stateless; it tracks every
single connection.

You could compare sshuttle to something like the old <a
href="http://en.wikipedia.org/wiki/Slirp">Slirp</a> program, which was a
userspace TCP/IP implementation that did something similar.  But it
operated on a packet-by-packet basis on the client side, reassembling the
packets on the server side.  That worked okay back in the "real live serial
port" days, because serial ports had predictable latency and buffering.

But you can't safely just forward TCP packets over a TCP session (like ssh),
because TCP's performance depends fundamentally on packet loss; it
<i>must</i> experience packet loss in order to know when to slow down!  At
the same time, the outer TCP session (ssh, in this case) is a reliable
transport, which means that what you forward through the tunnel <i>never</i>
experiences packet loss.  The ssh session itself experiences packet loss, of
course, but TCP fixes it up and ssh (and thus you) never know the
difference.  But neither does your inner TCP session, and extremely screwy
performance ensues.

sshuttle assembles the TCP stream locally, multiplexes it statefully over
an ssh session, and disassembles it back into packets at the other end.  So
it never ends up doing TCP-over-TCP.  It's just data-over-TCP, which is
safe.
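To make "multiplexes it statefully" concrete: every proxied connection gets a
small channel number, and the two ends exchange framed messages (roughly
channel + command + length + payload) over the single ssh pipe, with commands
such as CMD_CONNECT carrying the original destination.  The real wire format
lives in ssnet.py and is not reproduced here; the following is only a sketch
of the framing idea, in modern Python with illustrative field sizes:

    import struct

    FRAME = struct.Struct('!HHI')   # channel, command, payload length

    def pack_frame(channel, command, payload):
        return FRAME.pack(channel, command, len(payload)) + payload

    def unpack_frames(buf):
        # Return the complete (channel, command, payload) frames in buf,
        # plus whatever partial frame is left over.
        frames = []
        while len(buf) >= FRAME.size:
            channel, command, length = FRAME.unpack_from(buf)
            if len(buf) < FRAME.size + length:
                break   # partial frame; wait for more data from the ssh pipe
            frames.append((channel, command, buf[FRAME.size:FRAME.size + length]))
            buf = buf[FRAME.size + length:]
        return frames, buf

Because each side terminates TCP locally and only ships the reassembled byte
stream across, the inner connections never see the tunnel's retransmissions -
which is exactly why the TCP-over-TCP trouble described above doesn't apply.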


Useless Trivia
--------------

Back in 1998 (12 years ago! Yikes!), I released the first version of <a
href="http://alumnit.ca/wiki/?TunnelVisionReadMe">Tunnel Vision</a>, a
semi-intelligent VPN client for Linux.  Unfortunately, I made two big mistakes:
I implemented the key exchange myself (oops), and I ended up doing
TCP-over-TCP (double oops).  The resulting program worked okay - and people
used it for years - but the performance was always a bit funny.  And nobody
ever found any security flaws in my key exchange, either, but that doesn't
mean anything. :)

The same year, dcoombs and I also released Fast Forward, a proxy server
supporting transparent proxying.  Among other things, we used it for
automatically splitting traffic across more than one Internet connection (a
tool we called "Double Vision").

I was still in university at the time.  A couple years after that, one of my
professors was working with some graduate students on the technology that
would eventually become <a href="http://www.slipstream.com/">Slipstream
Internet Acceleration</a>.  He asked me to do a contract for him to build an
initial prototype of a transparent proxy server for mobile networks.  The
idea was similar to sshuttle: if you reassemble and then disassemble the TCP
packets, you can reduce latency and improve performance vs. just forwarding
the packets over a plain VPN or mobile network.  (It's unlikely that any of
my code has persisted in the Slipstream product today, but the concept is
still pretty cool.  I'm still horrified that people use plain TCP on
complex mobile networks with crazily variable latency, for which it was
never really intended.)

That project I did for Slipstream was what first gave me the idea to merge
the concepts of Fast Forward, Double Vision, and Tunnel Vision into a single
program that was the best of all worlds.  And here we are, at last, 10 years
later.  You're welcome.

--
Avery Pennarun <apenwarr@gmail.com>

Mailing list:
Subscribe by sending a message to <sshuttle+subscribe@googlegroups.com>
List archives are at: http://groups.google.com/group/sshuttle
49
README.rst
49
README.rst
@ -1,49 +0,0 @@
sshuttle: where transparent proxy meets VPN meets ssh
=====================================================

As far as I know, sshuttle is the only program that solves the following
common case:

- Your client machine (or router) is Linux, FreeBSD, MacOS or Windows.

- You have access to a remote network via ssh.

- You don't necessarily have admin access on the remote network.

- The remote network has no VPN, or only stupid/complex VPN
  protocols (IPsec, PPTP, etc). Or maybe you *are* the
  admin and you just got frustrated with the awful state of
  VPN tools.

- You don't want to create an ssh port forward for every
  single host/port on the remote network.

- You hate openssh's port forwarding because it's randomly
  slow and/or stupid.

- You can't use openssh's PermitTunnel feature because
  it's disabled by default on openssh servers; plus it does
  TCP-over-TCP, which has `terrible performance`_.

.. _terrible performance: https://sshuttle.readthedocs.io/en/stable/how-it-works.html

Obtaining sshuttle
------------------

Please see the documentation_.

.. _Documentation: https://sshuttle.readthedocs.io/en/stable/installation.html

Documentation
-------------
The documentation for the stable version is available at:
https://sshuttle.readthedocs.org/

The documentation for the latest development version is available at:
https://sshuttle.readthedocs.org/en/latest/


Running as a service
--------------------
Sshuttle can also be run as a service and configured using a config management system:
https://medium.com/@mike.reider/using-sshuttle-as-a-service-bec2684a65fe
11
all.do
Normal file
11
all.do
Normal file
@ -0,0 +1,11 @@
exec >&2
UI=
[ "$(uname)" = "Darwin" ] && UI=ui-macos/all
redo-ifchange Documentation/all version/all $UI

echo
echo "What now?"
[ -z "$UI" ] || echo "- Try the MacOS GUI: open ui-macos/Sshuttle*.app"
echo "- Run sshuttle: ./sshuttle --dns -r HOSTNAME 0/0"
echo "- Read the README: less README.md"
echo "- Read the man page: less Documentation/sshuttle.md"
26
assembler.py
Normal file
26
assembler.py
Normal file
@ -0,0 +1,26 @@
import sys, zlib

z = zlib.decompressobj()
mainmod = sys.modules[__name__]
while 1:
    name = sys.stdin.readline().strip()
    if name:
        nbytes = int(sys.stdin.readline())
        if verbosity >= 2:
            sys.stderr.write('server: assembling %r (%d bytes)\n'
                             % (name, nbytes))
        content = z.decompress(sys.stdin.read(nbytes))
        exec compile(content, name, "exec")

        # FIXME: this crushes everything into a single module namespace,
        # then makes each of the module names point at this one.  Gross.
        assert(name.endswith('.py'))
        modname = name[:-3]
        mainmod.__dict__[modname] = mainmod
    else:
        break

verbose = verbosity
sys.stderr.flush()
sys.stdout.flush()
main()
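assembler.py above is the receiving half of the "upload the source over ssh"
trick: it expects a series of records on stdin - a module name line, a
byte-count line, then that many bytes of zlib-compressed source - terminated
by a blank name line, after which it calls main().  (The undefined verbosity
name is presumably supplied by the stage-1 bootstrap that launches this code.)
A sending side consistent with that framing might look roughly like the
sketch below, in modern Python; this is illustrative only, not the actual
sender used by this release:

    import zlib

    def send_modules(pipe, modules):
        # modules: iterable of (name, source_bytes) pairs, e.g. ('helpers.py', b'...').
        compressor = zlib.compressobj(9)
        for name, source in modules:
            # Z_SYNC_FLUSH lets the receiver's single decompressobj decode
            # each chunk as soon as it arrives.
            blob = compressor.compress(source) + compressor.flush(zlib.Z_SYNC_FLUSH)
            pipe.write(name.encode() + b'\n')
            pipe.write(str(len(blob)).encode() + b'\n')
            pipe.write(blob)
        pipe.write(b'\n')   # blank name line: stop assembling and run main()
        pipe.flush()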
@ -1,9 +0,0 @@
exclude_dirs:
  - tests
skips:
  - B101
  - B104
  - B404
  - B603
  - B606
  - B607
2
clean.do
Normal file
2
clean.do
Normal file
@ -0,0 +1,2 @@
redo ui-macos/clean Documentation/clean version/clean
rm -f *~ */*~ .*~ */.*~ *.8 *.tmp */*.tmp *.pyc */*.pyc
402
client.py
Normal file
402
client.py
Normal file
@ -0,0 +1,402 @@
|
|||||||
|
import struct, socket, select, errno, re, signal, time
|
||||||
|
import compat.ssubprocess as ssubprocess
|
||||||
|
import helpers, ssnet, ssh, ssyslog
|
||||||
|
from ssnet import SockWrapper, Handler, Proxy, Mux, MuxWrapper
|
||||||
|
from helpers import *
|
||||||
|
|
||||||
|
_extra_fd = os.open('/dev/null', os.O_RDONLY)
|
||||||
|
|
||||||
|
def got_signal(signum, frame):
|
||||||
|
log('exiting on signal %d\n' % signum)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
_pidname = None
|
||||||
|
def check_daemon(pidfile):
|
||||||
|
global _pidname
|
||||||
|
_pidname = os.path.abspath(pidfile)
|
||||||
|
try:
|
||||||
|
oldpid = open(_pidname).read(1024)
|
||||||
|
except IOError, e:
|
||||||
|
if e.errno == errno.ENOENT:
|
||||||
|
return # no pidfile, ok
|
||||||
|
else:
|
||||||
|
raise Fatal("can't read %s: %s" % (_pidname, e))
|
||||||
|
if not oldpid:
|
||||||
|
os.unlink(_pidname)
|
||||||
|
return # invalid pidfile, ok
|
||||||
|
oldpid = int(oldpid.strip() or 0)
|
||||||
|
if oldpid <= 0:
|
||||||
|
os.unlink(_pidname)
|
||||||
|
return # invalid pidfile, ok
|
||||||
|
try:
|
||||||
|
os.kill(oldpid, 0)
|
||||||
|
except OSError, e:
|
||||||
|
if e.errno == errno.ESRCH:
|
||||||
|
os.unlink(_pidname)
|
||||||
|
return # outdated pidfile, ok
|
||||||
|
elif e.errno == errno.EPERM:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
raise Fatal("%s: sshuttle is already running (pid=%d)"
|
||||||
|
% (_pidname, oldpid))
|
||||||
|
|
||||||
|
|
||||||
|
def daemonize():
|
||||||
|
if os.fork():
|
||||||
|
os._exit(0)
|
||||||
|
os.setsid()
|
||||||
|
if os.fork():
|
||||||
|
os._exit(0)
|
||||||
|
|
||||||
|
outfd = os.open(_pidname, os.O_WRONLY|os.O_CREAT|os.O_EXCL, 0666)
|
||||||
|
try:
|
||||||
|
os.write(outfd, '%d\n' % os.getpid())
|
||||||
|
finally:
|
||||||
|
os.close(outfd)
|
||||||
|
os.chdir("/")
|
||||||
|
|
||||||
|
# Normal exit when killed, or try/finally won't work and the pidfile won't
|
||||||
|
# be deleted.
|
||||||
|
signal.signal(signal.SIGTERM, got_signal)
|
||||||
|
|
||||||
|
si = open('/dev/null', 'r+')
|
||||||
|
os.dup2(si.fileno(), 0)
|
||||||
|
os.dup2(si.fileno(), 1)
|
||||||
|
si.close()
|
||||||
|
|
||||||
|
ssyslog.stderr_to_syslog()
|
||||||
|
|
||||||
|
|
||||||
|
def daemon_cleanup():
|
||||||
|
try:
|
||||||
|
os.unlink(_pidname)
|
||||||
|
except OSError, e:
|
||||||
|
if e.errno == errno.ENOENT:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
def original_dst(sock):
|
||||||
|
try:
|
||||||
|
SO_ORIGINAL_DST = 80
|
||||||
|
SOCKADDR_MIN = 16
|
||||||
|
sockaddr_in = sock.getsockopt(socket.SOL_IP,
|
||||||
|
SO_ORIGINAL_DST, SOCKADDR_MIN)
|
||||||
|
(proto, port, a,b,c,d) = struct.unpack('!HHBBBB', sockaddr_in[:8])
|
||||||
|
assert(socket.htons(proto) == socket.AF_INET)
|
||||||
|
ip = '%d.%d.%d.%d' % (a,b,c,d)
|
||||||
|
return (ip,port)
|
||||||
|
except socket.error, e:
|
||||||
|
if e.args[0] == errno.ENOPROTOOPT:
|
||||||
|
return sock.getsockname()
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
class FirewallClient:
|
||||||
|
def __init__(self, port, subnets_include, subnets_exclude, dnsport):
|
||||||
|
self.port = port
|
||||||
|
self.auto_nets = []
|
||||||
|
self.subnets_include = subnets_include
|
||||||
|
self.subnets_exclude = subnets_exclude
|
||||||
|
self.dnsport = dnsport
|
||||||
|
argvbase = ([sys.argv[1], sys.argv[0], sys.argv[1]] +
|
||||||
|
['-v'] * (helpers.verbose or 0) +
|
||||||
|
['--firewall', str(port), str(dnsport)])
|
||||||
|
if ssyslog._p:
|
||||||
|
argvbase += ['--syslog']
|
||||||
|
argv_tries = [
|
||||||
|
['sudo', '-p', '[local sudo] Password: '] + argvbase,
|
||||||
|
['su', '-c', ' '.join(argvbase)],
|
||||||
|
argvbase
|
||||||
|
]
|
||||||
|
|
||||||
|
# we can't use stdin/stdout=subprocess.PIPE here, as we normally would,
|
||||||
|
# because stupid Linux 'su' requires that stdin be attached to a tty.
|
||||||
|
# Instead, attach a *bidirectional* socket to its stdout, and use
|
||||||
|
# that for talking in both directions.
|
||||||
|
(s1,s2) = socket.socketpair()
|
||||||
|
def setup():
|
||||||
|
# run in the child process
|
||||||
|
s2.close()
|
||||||
|
e = None
|
||||||
|
if os.getuid() == 0:
|
||||||
|
argv_tries = argv_tries[-1:] # last entry only
|
||||||
|
for argv in argv_tries:
|
||||||
|
try:
|
||||||
|
if argv[0] == 'su':
|
||||||
|
sys.stderr.write('[local su] ')
|
||||||
|
self.p = ssubprocess.Popen(argv, stdout=s1, preexec_fn=setup)
|
||||||
|
e = None
|
||||||
|
break
|
||||||
|
except OSError, e:
|
||||||
|
pass
|
||||||
|
self.argv = argv
|
||||||
|
s1.close()
|
||||||
|
self.pfile = s2.makefile('wb+')
|
||||||
|
if e:
|
||||||
|
log('Spawning firewall manager: %r\n' % self.argv)
|
||||||
|
raise Fatal(e)
|
||||||
|
line = self.pfile.readline()
|
||||||
|
self.check()
|
||||||
|
if line != 'READY\n':
|
||||||
|
raise Fatal('%r expected READY, got %r' % (self.argv, line))
|
||||||
|
|
||||||
|
def check(self):
|
||||||
|
rv = self.p.poll()
|
||||||
|
if rv:
|
||||||
|
raise Fatal('%r returned %d' % (self.argv, rv))
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
self.pfile.write('ROUTES\n')
|
||||||
|
for (ip,width) in self.subnets_include+self.auto_nets:
|
||||||
|
self.pfile.write('%d,0,%s\n' % (width, ip))
|
||||||
|
for (ip,width) in self.subnets_exclude:
|
||||||
|
self.pfile.write('%d,1,%s\n' % (width, ip))
|
||||||
|
self.pfile.write('GO\n')
|
||||||
|
self.pfile.flush()
|
||||||
|
line = self.pfile.readline()
|
||||||
|
self.check()
|
||||||
|
if line != 'STARTED\n':
|
||||||
|
raise Fatal('%r expected STARTED, got %r' % (self.argv, line))
|
||||||
|
|
||||||
|
def sethostip(self, hostname, ip):
|
||||||
|
assert(not re.search(r'[^-\w]', hostname))
|
||||||
|
assert(not re.search(r'[^0-9.]', ip))
|
||||||
|
self.pfile.write('HOST %s,%s\n' % (hostname, ip))
|
||||||
|
self.pfile.flush()
|
||||||
|
|
||||||
|
def done(self):
|
||||||
|
self.pfile.close()
|
||||||
|
rv = self.p.wait()
|
||||||
|
if rv == EXITCODE_NEEDS_REBOOT:
|
||||||
|
raise FatalNeedsReboot()
|
||||||
|
elif rv:
|
||||||
|
raise Fatal('cleanup: %r returned %d' % (self.argv, rv))
|
||||||
|
|
||||||
|
|
||||||
|
def onaccept(listener, mux, handlers):
|
||||||
|
global _extra_fd
|
||||||
|
try:
|
||||||
|
sock,srcip = listener.accept()
|
||||||
|
except socket.error, e:
|
||||||
|
if e.args[0] in [errno.EMFILE, errno.ENFILE]:
|
||||||
|
debug1('Rejected incoming connection: too many open files!\n')
|
||||||
|
# free up an fd so we can eat the connection
|
||||||
|
os.close(_extra_fd)
|
||||||
|
try:
|
||||||
|
sock,srcip = listener.accept()
|
||||||
|
sock.close()
|
||||||
|
finally:
|
||||||
|
_extra_fd = os.open('/dev/null', os.O_RDONLY)
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
dstip = original_dst(sock)
|
||||||
|
debug1('Accept: %s:%r -> %s:%r.\n' % (srcip[0],srcip[1],
|
||||||
|
dstip[0],dstip[1]))
|
||||||
|
if dstip[1] == listener.getsockname()[1] and islocal(dstip[0]):
|
||||||
|
debug1("-- ignored: that's my address!\n")
|
||||||
|
sock.close()
|
||||||
|
return
|
||||||
|
chan = mux.next_channel()
|
||||||
|
if not chan:
|
||||||
|
log('warning: too many open channels. Discarded connection.\n')
|
||||||
|
sock.close()
|
||||||
|
return
|
||||||
|
mux.send(chan, ssnet.CMD_CONNECT, '%s,%s' % dstip)
|
||||||
|
outwrap = MuxWrapper(mux, chan)
|
||||||
|
handlers.append(Proxy(SockWrapper(sock, sock), outwrap))
|
||||||
|
|
||||||
|
|
||||||
|
dnsreqs = {}
|
||||||
|
def dns_done(chan, data):
|
||||||
|
peer,sock,timeout = dnsreqs.get(chan) or (None,None,None)
|
||||||
|
debug3('dns_done: channel=%r peer=%r\n' % (chan, peer))
|
||||||
|
if peer:
|
||||||
|
del dnsreqs[chan]
|
||||||
|
debug3('doing sendto %r\n' % (peer,))
|
||||||
|
sock.sendto(data, peer)
|
||||||
|
|
||||||
|
|
||||||
|
def ondns(listener, mux, handlers):
|
||||||
|
pkt,peer = listener.recvfrom(4096)
|
||||||
|
now = time.time()
|
||||||
|
if pkt:
|
||||||
|
debug1('DNS request from %r: %d bytes\n' % (peer, len(pkt)))
|
||||||
|
chan = mux.next_channel()
|
||||||
|
dnsreqs[chan] = peer,listener,now+30
|
||||||
|
mux.send(chan, ssnet.CMD_DNS_REQ, pkt)
|
||||||
|
mux.channels[chan] = lambda cmd,data: dns_done(chan,data)
|
||||||
|
for chan,(peer,sock,timeout) in dnsreqs.items():
|
||||||
|
if timeout < now:
|
||||||
|
del dnsreqs[chan]
|
||||||
|
debug3('Remaining DNS requests: %d\n' % len(dnsreqs))
|
||||||
|
|
||||||
|
|
||||||
|
def _main(listener, fw, ssh_cmd, remotename, python, latency_control,
|
||||||
|
dnslistener, seed_hosts, auto_nets,
|
||||||
|
syslog, daemon):
|
||||||
|
handlers = []
|
||||||
|
if helpers.verbose >= 1:
|
||||||
|
helpers.logprefix = 'c : '
|
||||||
|
else:
|
||||||
|
helpers.logprefix = 'client: '
|
||||||
|
debug1('connecting to server...\n')
|
||||||
|
|
||||||
|
try:
|
||||||
|
(serverproc, serversock) = ssh.connect(ssh_cmd, remotename, python,
|
||||||
|
stderr=ssyslog._p and ssyslog._p.stdin,
|
||||||
|
options=dict(latency_control=latency_control))
|
||||||
|
except socket.error, e:
|
||||||
|
if e.args[0] == errno.EPIPE:
|
||||||
|
raise Fatal("failed to establish ssh session (1)")
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
mux = Mux(serversock, serversock)
|
||||||
|
handlers.append(mux)
|
||||||
|
|
||||||
|
expected = 'SSHUTTLE0001'
|
||||||
|
|
||||||
|
try:
|
||||||
|
v = 'x'
|
||||||
|
while v and v != '\0':
|
||||||
|
v = serversock.recv(1)
|
||||||
|
v = 'x'
|
||||||
|
while v and v != '\0':
|
||||||
|
v = serversock.recv(1)
|
||||||
|
initstring = serversock.recv(len(expected))
|
||||||
|
except socket.error, e:
|
||||||
|
if e.args[0] == errno.ECONNRESET:
|
||||||
|
raise Fatal("failed to establish ssh session (2)")
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
|
rv = serverproc.poll()
|
||||||
|
if rv:
|
||||||
|
raise Fatal('server died with error code %d' % rv)
|
||||||
|
|
||||||
|
if initstring != expected:
|
||||||
|
raise Fatal('expected server init string %r; got %r'
|
||||||
|
% (expected, initstring))
|
||||||
|
debug1('connected.\n')
|
||||||
|
print 'Connected.'
|
||||||
|
sys.stdout.flush()
|
||||||
|
if daemon:
|
||||||
|
daemonize()
|
||||||
|
log('daemonizing (%s).\n' % _pidname)
|
||||||
|
elif syslog:
|
||||||
|
debug1('switching to syslog.\n')
|
||||||
|
ssyslog.stderr_to_syslog()
|
||||||
|
|
||||||
|
def onroutes(routestr):
|
||||||
|
if auto_nets:
|
||||||
|
for line in routestr.strip().split('\n'):
|
||||||
|
(ip,width) = line.split(',', 1)
|
||||||
|
fw.auto_nets.append((ip,int(width)))
|
||||||
|
|
||||||
|
# we definitely want to do this *after* starting ssh, or we might end
|
||||||
|
# up intercepting the ssh connection!
|
||||||
|
#
|
||||||
|
# Moreover, now that we have the --auto-nets option, we have to wait
|
||||||
|
# for the server to send us that message anyway. Even if we haven't
|
||||||
|
# set --auto-nets, we might as well wait for the message first, then
|
||||||
|
# ignore its contents.
|
||||||
|
mux.got_routes = None
|
||||||
|
fw.start()
|
||||||
|
mux.got_routes = onroutes
|
||||||
|
|
||||||
|
def onhostlist(hostlist):
|
||||||
|
debug2('got host list: %r\n' % hostlist)
|
||||||
|
for line in hostlist.strip().split():
|
||||||
|
if line:
|
||||||
|
name,ip = line.split(',', 1)
|
||||||
|
fw.sethostip(name, ip)
|
||||||
|
mux.got_host_list = onhostlist
|
||||||
|
|
||||||
|
handlers.append(Handler([listener], lambda: onaccept(listener, mux, handlers)))
|
||||||
|
|
||||||
|
if dnslistener:
|
||||||
|
handlers.append(Handler([dnslistener], lambda: ondns(dnslistener, mux, handlers)))
|
||||||
|
|
||||||
|
if seed_hosts != None:
|
||||||
|
debug1('seed_hosts: %r\n' % seed_hosts)
|
||||||
|
mux.send(0, ssnet.CMD_HOST_REQ, '\n'.join(seed_hosts))
|
||||||
|
|
||||||
|
while 1:
|
||||||
|
rv = serverproc.poll()
|
||||||
|
if rv:
|
||||||
|
raise Fatal('server died with error code %d' % rv)
|
||||||
|
|
||||||
|
ssnet.runonce(handlers, mux)
|
||||||
|
if latency_control:
|
||||||
|
mux.check_fullness()
|
||||||
|
mux.callback()
|
||||||
|
|
||||||
|
|
||||||
|
def main(listenip, ssh_cmd, remotename, python, latency_control, dns,
|
||||||
|
seed_hosts, auto_nets,
|
||||||
|
subnets_include, subnets_exclude, syslog, daemon, pidfile):
|
||||||
|
if syslog:
|
||||||
|
ssyslog.start_syslog()
|
||||||
|
if daemon:
|
||||||
|
try:
|
||||||
|
check_daemon(pidfile)
|
||||||
|
except Fatal, e:
|
||||||
|
log("%s\n" % e)
|
||||||
|
return 5
|
||||||
|
debug1('Starting sshuttle proxy.\n')
|
||||||
|
|
||||||
|
if listenip[1]:
|
||||||
|
ports = [listenip[1]]
|
||||||
|
else:
|
||||||
|
ports = xrange(12300,9000,-1)
|
||||||
|
last_e = None
|
||||||
|
bound = False
|
||||||
|
debug2('Binding:')
|
||||||
|
for port in ports:
|
||||||
|
debug2(' %d' % port)
|
||||||
|
listener = socket.socket()
|
||||||
|
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||||
|
dnslistener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||||
|
dnslistener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||||
|
try:
|
||||||
|
listener.bind((listenip[0], port))
|
||||||
|
dnslistener.bind((listenip[0], port))
|
||||||
|
bound = True
|
||||||
|
break
|
||||||
|
except socket.error, e:
|
||||||
|
last_e = e
|
||||||
|
debug2('\n')
|
||||||
|
if not bound:
|
||||||
|
assert(last_e)
|
||||||
|
raise last_e
|
||||||
|
listener.listen(10)
|
||||||
|
listenip = listener.getsockname()
|
||||||
|
debug1('Listening on %r.\n' % (listenip,))
|
||||||
|
|
||||||
|
if dns:
|
||||||
|
dnsip = dnslistener.getsockname()
|
||||||
|
debug1('DNS listening on %r.\n' % (dnsip,))
|
||||||
|
dnsport = dnsip[1]
|
||||||
|
else:
|
||||||
|
dnsport = 0
|
||||||
|
dnslistener = None
|
||||||
|
|
||||||
|
fw = FirewallClient(listenip[1], subnets_include, subnets_exclude, dnsport)
|
||||||
|
|
||||||
|
try:
|
||||||
|
return _main(listener, fw, ssh_cmd, remotename,
|
||||||
|
python, latency_control, dnslistener,
|
||||||
|
seed_hosts, auto_nets, syslog, daemon)
|
||||||
|
finally:
|
||||||
|
try:
|
||||||
|
if daemon:
|
||||||
|
# it's not our child anymore; can't waitpid
|
||||||
|
fw.p.returncode = 0
|
||||||
|
fw.done()
|
||||||
|
finally:
|
||||||
|
if daemon:
|
||||||
|
daemon_cleanup()
|
0
compat/__init__.py
Normal file
0
compat/__init__.py
Normal file
1305
compat/ssubprocess.py
Normal file
1305
compat/ssubprocess.py
Normal file
File diff suppressed because it is too large
7
default.8.do
Normal file
7
default.8.do
Normal file
@ -0,0 +1,7 @@
exec >&2
if pandoc </dev/null 2>/dev/null; then
    pandoc -s -r markdown -w man -o $3 $2.md
else
    echo "Warning: pandoc not installed; can't generate manpages."
    redo-always
fi
175
do
Executable file
175
do
Executable file
@ -0,0 +1,175 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
#
|
||||||
|
# A minimal alternative to djb redo that doesn't support incremental builds.
|
||||||
|
# For the full version, visit http://github.com/apenwarr/redo
|
||||||
|
#
|
||||||
|
# The author disclaims copyright to this source file and hereby places it in
|
||||||
|
# the public domain. (2010 12 14)
|
||||||
|
#
|
||||||
|
|
||||||
|
# By default, no output coloring.
|
||||||
|
green=""
|
||||||
|
bold=""
|
||||||
|
plain=""
|
||||||
|
|
||||||
|
if [ -n "$TERM" -a "$TERM" != "dumb" ] && tty <&2 >/dev/null 2>&1; then
|
||||||
|
green="$(printf '\033[32m')"
|
||||||
|
bold="$(printf '\033[1m')"
|
||||||
|
plain="$(printf '\033[m')"
|
||||||
|
fi
|
||||||
|
|
||||||
|
_dirsplit()
|
||||||
|
{
|
||||||
|
base=${1##*/}
|
||||||
|
dir=${1%$base}
|
||||||
|
}
|
||||||
|
|
||||||
|
dirname()
|
||||||
|
(
|
||||||
|
_dirsplit "$1"
|
||||||
|
dir=${dir%/}
|
||||||
|
echo "${dir:-.}"
|
||||||
|
)
|
||||||
|
|
||||||
|
_dirsplit "$0"
|
||||||
|
export REDO=$(cd "${dir:-.}" && echo "$PWD/$base")
|
||||||
|
|
||||||
|
DO_TOP=
|
||||||
|
if [ -z "$DO_BUILT" ]; then
|
||||||
|
DO_TOP=1
|
||||||
|
[ -n "$*" ] || set all # only toplevel redo has a default target
|
||||||
|
export DO_BUILT=$PWD/.do_built
|
||||||
|
: >>"$DO_BUILT"
|
||||||
|
echo "Removing previously built files..." >&2
|
||||||
|
sort -u "$DO_BUILT" | tee "$DO_BUILT.new" |
|
||||||
|
while read f; do printf "%s\0%s.did\0" "$f" "$f"; done |
|
||||||
|
xargs -0 rm -f 2>/dev/null
|
||||||
|
mv "$DO_BUILT.new" "$DO_BUILT"
|
||||||
|
DO_PATH=$DO_BUILT.dir
|
||||||
|
export PATH=$DO_PATH:$PATH
|
||||||
|
rm -rf "$DO_PATH"
|
||||||
|
mkdir "$DO_PATH"
|
||||||
|
for d in redo redo-ifchange; do
|
||||||
|
ln -s "$REDO" "$DO_PATH/$d";
|
||||||
|
done
|
||||||
|
[ -e /bin/true ] && TRUE=/bin/true || TRUE=/usr/bin/true
|
||||||
|
for d in redo-ifcreate redo-stamp redo-always; do
|
||||||
|
ln -s $TRUE "$DO_PATH/$d";
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
_find_dofile_pwd()
|
||||||
|
{
|
||||||
|
dofile=default.$1.do
|
||||||
|
while :; do
|
||||||
|
dofile=default.${dofile#default.*.}
|
||||||
|
[ -e "$dofile" -o "$dofile" = default.do ] && break
|
||||||
|
done
|
||||||
|
ext=${dofile#default}
|
||||||
|
ext=${ext%.do}
|
||||||
|
base=${1%$ext}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
_find_dofile()
|
||||||
|
{
|
||||||
|
local prefix=
|
||||||
|
while :; do
|
||||||
|
_find_dofile_pwd "$1"
|
||||||
|
[ -e "$dofile" ] && break
|
||||||
|
[ "$PWD" = "/" ] && break
|
||||||
|
target=${PWD##*/}/$target
|
||||||
|
tmp=${PWD##*/}/$tmp
|
||||||
|
prefix=${PWD##*/}/$prefix
|
||||||
|
cd ..
|
||||||
|
done
|
||||||
|
base=$prefix$base
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
_run_dofile()
|
||||||
|
{
|
||||||
|
export DO_DEPTH="$DO_DEPTH "
|
||||||
|
export REDO_TARGET=$PWD/$target
|
||||||
|
local line1
|
||||||
|
set -e
|
||||||
|
read line1 <"$PWD/$dofile"
|
||||||
|
cmd=${line1#"#!/"}
|
||||||
|
if [ "$cmd" != "$line1" ]; then
|
||||||
|
/$cmd "$PWD/$dofile" "$@" >"$tmp.tmp2"
|
||||||
|
else
|
||||||
|
:; . "$PWD/$dofile" >"$tmp.tmp2"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
_do()
|
||||||
|
{
|
||||||
|
local dir=$1 target=$2 tmp=$3
|
||||||
|
if [ ! -e "$target" ] || [ -d "$target" -a ! -e "$target.did" ]; then
|
||||||
|
printf '%sdo %s%s%s%s\n' \
|
||||||
|
"$green" "$DO_DEPTH" "$bold" "$dir$target" "$plain" >&2
|
||||||
|
echo "$PWD/$target" >>"$DO_BUILT"
|
||||||
|
dofile=$target.do
|
||||||
|
base=$target
|
||||||
|
ext=
|
||||||
|
[ -e "$target.do" ] || _find_dofile "$target"
|
||||||
|
if [ ! -e "$dofile" ]; then
|
||||||
|
echo "do: $target: no .do file" >&2
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
[ ! -e "$DO_BUILT" ] || [ ! -d "$(dirname "$target")" ] ||
|
||||||
|
: >>"$target.did"
|
||||||
|
( _run_dofile "$target" "$base" "$tmp.tmp" )
|
||||||
|
rv=$?
|
||||||
|
if [ $rv != 0 ]; then
|
||||||
|
printf "do: %s%s\n" "$DO_DEPTH" \
|
||||||
|
"$dir$target: got exit code $rv" >&2
|
||||||
|
rm -f "$tmp.tmp" "$tmp.tmp2"
|
||||||
|
return $rv
|
||||||
|
fi
|
||||||
|
mv "$tmp.tmp" "$target" 2>/dev/null ||
|
||||||
|
! test -s "$tmp.tmp2" ||
|
||||||
|
mv "$tmp.tmp2" "$target" 2>/dev/null
|
||||||
|
rm -f "$tmp.tmp2"
|
||||||
|
else
|
||||||
|
echo "do $DO_DEPTH$target exists." >&2
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Make corrections for directories that don't actually exist yet.
|
||||||
|
_dir_shovel()
|
||||||
|
{
|
||||||
|
local dir base
|
||||||
|
xdir=$1 xbase=$2 xbasetmp=$2
|
||||||
|
while [ ! -d "$xdir" -a -n "$xdir" ]; do
|
||||||
|
_dirsplit "${xdir%/}"
|
||||||
|
xbasetmp=${base}__$xbase
|
||||||
|
xdir=$dir xbase=$base/$xbase
|
||||||
|
echo "xbasetmp='$xbasetmp'" >&2
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
redo()
|
||||||
|
{
|
||||||
|
for i in "$@"; do
|
||||||
|
_dirsplit "$i"
|
||||||
|
_dir_shovel "$dir" "$base"
|
||||||
|
dir=$xdir base=$xbase basetmp=$xbasetmp
|
||||||
|
( cd "$dir" && _do "$dir" "$base" "$basetmp" ) || return 1
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
set -e
|
||||||
|
redo "$@"
|
||||||
|
|
||||||
|
if [ -n "$DO_TOP" ]; then
|
||||||
|
echo "Removing stamp files..." >&2
|
||||||
|
[ ! -e "$DO_BUILT" ] ||
|
||||||
|
while read f; do printf "%s.did\0" "$f"; done <"$DO_BUILT" |
|
||||||
|
xargs -0 rm -f 2>/dev/null
|
||||||
|
fi
|
177
docs/Makefile
177
docs/Makefile
@ -1,177 +0,0 @@
|
|||||||
# Makefile for Sphinx documentation
|
|
||||||
#
|
|
||||||
|
|
||||||
# You can set these variables from the command line.
|
|
||||||
SPHINXOPTS =
|
|
||||||
SPHINXBUILD = sphinx-build
|
|
||||||
PAPER =
|
|
||||||
BUILDDIR = _build
|
|
||||||
|
|
||||||
# User-friendly check for sphinx-build
|
|
||||||
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
|
|
||||||
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
|
|
||||||
endif
|
|
||||||
|
|
||||||
# Internal variables.
|
|
||||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
|
||||||
PAPEROPT_letter = -D latex_paper_size=letter
|
|
||||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
|
||||||
# the i18n builder cannot share the environment and doctrees with the others
|
|
||||||
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
|
||||||
|
|
||||||
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
|
|
||||||
|
|
||||||
help:
|
|
||||||
@echo "Please use \`make <target>' where <target> is one of"
|
|
||||||
@echo " html to make standalone HTML files"
|
|
||||||
@echo " dirhtml to make HTML files named index.html in directories"
|
|
||||||
@echo " singlehtml to make a single large HTML file"
|
|
||||||
@echo " pickle to make pickle files"
|
|
||||||
@echo " json to make JSON files"
|
|
||||||
@echo " htmlhelp to make HTML files and a HTML help project"
|
|
||||||
@echo " qthelp to make HTML files and a qthelp project"
|
|
||||||
@echo " devhelp to make HTML files and a Devhelp project"
|
|
||||||
@echo " epub to make an epub"
|
|
||||||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
|
||||||
@echo " latexpdf to make LaTeX files and run them through pdflatex"
|
|
||||||
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
|
|
||||||
@echo " text to make text files"
|
|
||||||
@echo " man to make manual pages"
|
|
||||||
@echo " texinfo to make Texinfo files"
|
|
||||||
@echo " info to make Texinfo files and run them through makeinfo"
|
|
||||||
@echo " gettext to make PO message catalogs"
|
|
||||||
@echo " changes to make an overview of all changed/added/deprecated items"
|
|
||||||
@echo " xml to make Docutils-native XML files"
|
|
||||||
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
|
|
||||||
@echo " linkcheck to check all external links for integrity"
|
|
||||||
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
|
||||||
|
|
||||||
clean:
|
|
||||||
rm -rf $(BUILDDIR)/*
|
|
||||||
|
|
||||||
html:
|
|
||||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
|
||||||
|
|
||||||
dirhtml:
|
|
||||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
|
||||||
|
|
||||||
singlehtml:
|
|
||||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
|
||||||
|
|
||||||
pickle:
|
|
||||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
|
||||||
@echo
|
|
||||||
@echo "Build finished; now you can process the pickle files."
|
|
||||||
|
|
||||||
json:
|
|
||||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
|
||||||
@echo
|
|
||||||
@echo "Build finished; now you can process the JSON files."
|
|
||||||
|
|
||||||
htmlhelp:
|
|
||||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
|
||||||
@echo
|
|
||||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
|
||||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
|
||||||
|
|
||||||
qthelp:
|
|
||||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
|
||||||
@echo
|
|
||||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
|
||||||
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
|
|
||||||
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/sshuttle.qhcp"
|
|
||||||
@echo "To view the help file:"
|
|
||||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/sshuttle.qhc"
|
|
||||||
|
|
||||||
devhelp:
|
|
||||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
|
||||||
@echo
|
|
||||||
@echo "Build finished."
|
|
||||||
@echo "To view the help file:"
|
|
||||||
@echo "# mkdir -p $$HOME/.local/share/devhelp/sshuttle"
|
|
||||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/sshuttle"
|
|
||||||
@echo "# devhelp"
|
|
||||||
|
|
||||||
epub:
|
|
||||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
|
||||||
|
|
||||||
latex:
|
|
||||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
|
||||||
@echo
|
|
||||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
|
||||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
|
||||||
"(use \`make latexpdf' here to do that automatically)."
|
|
||||||
|
|
||||||
latexpdf:
|
|
||||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
|
||||||
@echo "Running LaTeX files through pdflatex..."
|
|
||||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf
|
|
||||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
|
||||||
|
|
||||||
latexpdfja:
|
|
||||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
|
||||||
@echo "Running LaTeX files through platex and dvipdfmx..."
|
|
||||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
|
|
||||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
|
||||||
|
|
||||||
text:
|
|
||||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
|
||||||
|
|
||||||
man:
|
|
||||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
|
||||||
|
|
||||||
texinfo:
|
|
||||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
|
|
||||||
@echo "Run \`make' in that directory to run these through makeinfo" \
|
|
||||||
"(use \`make info' here to do that automatically)."
|
|
||||||
|
|
||||||
info:
|
|
||||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
|
||||||
@echo "Running Texinfo files through makeinfo..."
|
|
||||||
make -C $(BUILDDIR)/texinfo info
|
|
||||||
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
|
|
||||||
|
|
||||||
gettext:
|
|
||||||
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
|
|
||||||
|
|
||||||
changes:
|
|
||||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
|
||||||
@echo
|
|
||||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
|
||||||
|
|
||||||
linkcheck:
|
|
||||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
|
||||||
@echo
|
|
||||||
@echo "Link check complete; look for any errors in the above output " \
|
|
||||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
|
||||||
|
|
||||||
doctest:
|
|
||||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
|
||||||
@echo "Testing of doctests in the sources finished, look at the " \
|
|
||||||
"results in $(BUILDDIR)/doctest/output.txt."
|
|
||||||
|
|
||||||
xml:
|
|
||||||
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
|
|
||||||
|
|
||||||
pseudoxml:
|
|
||||||
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
|
|
||||||
@echo
|
|
||||||
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
|
|
@ -1 +0,0 @@
|
|||||||
.. include:: ../CHANGES.rst
|
|
@ -1,11 +0,0 @@
|
|||||||
Google ChromeOS
|
|
||||||
===============
|
|
||||||
|
|
||||||
Currently there is no built-in support for running sshuttle directly on
Google ChromeOS/Chromebooks.

The practical workaround is to create a Linux VM with Crostini. In the
default stretch/Debian 9 VM, you can then install sshuttle as on any other
Linux machine, and it just works, as do xterms, ssvncviewer, and so on.

https://www.reddit.com/r/Crostini/wiki/getstarted/crostini-setup-guide
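A minimal sketch of that workflow inside the Crostini terminal, assuming a
Debian-based VM and a placeholder ssh server name::

    sudo apt update && sudo apt install sshuttle
    sshuttle -r user@example.com -x example.com:22 0/0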
|
|
261
docs/conf.py
@ -1,261 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# sshuttle documentation build configuration file, created by
|
|
||||||
# sphinx-quickstart on Sun Jan 17 12:13:47 2016.
|
|
||||||
#
|
|
||||||
# This file is execfile()d with the current directory set to its
|
|
||||||
# containing dir.
|
|
||||||
#
|
|
||||||
# Note that not all possible configuration values are present in this
|
|
||||||
# autogenerated file.
|
|
||||||
#
|
|
||||||
# All configuration values have a default; values that are commented out
|
|
||||||
# serve to show the default.
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import os
|
|
||||||
sys.path.insert(0, os.path.abspath('..'))
|
|
||||||
import sshuttle # NOQA
|
|
||||||
|
|
||||||
# If extensions (or modules to document with autodoc) are in another directory,
|
|
||||||
# add these directories to sys.path here. If the directory is relative to the
|
|
||||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
|
||||||
# sys.path.insert(0, os.path.abspath('.'))
|
|
||||||
|
|
||||||
# -- General configuration ------------------------------------------------
|
|
||||||
|
|
||||||
# If your documentation needs a minimal Sphinx version, state it here.
|
|
||||||
# needs_sphinx = '1.0'
|
|
||||||
|
|
||||||
# Add any Sphinx extension module names here, as strings. They can be
|
|
||||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
|
||||||
# ones.
|
|
||||||
extensions = [
|
|
||||||
'sphinx.ext.todo',
|
|
||||||
]
|
|
||||||
|
|
||||||
# Add any paths that contain templates here, relative to this directory.
|
|
||||||
templates_path = ['_templates']
|
|
||||||
|
|
||||||
# The suffix of source filenames.
|
|
||||||
source_suffix = '.rst'
|
|
||||||
|
|
||||||
# The encoding of source files.
|
|
||||||
# source_encoding = 'utf-8-sig'
|
|
||||||
|
|
||||||
# The master toctree document.
|
|
||||||
master_doc = 'index'
|
|
||||||
|
|
||||||
# General information about the project.
|
|
||||||
project = 'sshuttle'
|
|
||||||
copyright = '2016, Brian May'
|
|
||||||
|
|
||||||
# The version info for the project you're documenting, acts as replacement for
|
|
||||||
# |version| and |release|, also used in various other places throughout the
|
|
||||||
# built documents.
|
|
||||||
#
|
|
||||||
# The full version, including alpha/beta/rc tags.
|
|
||||||
release = sshuttle.__version__
|
|
||||||
# The short X.Y version.
|
|
||||||
version = '.'.join(release.split('.')[:2])
|
|
||||||
|
|
||||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
|
||||||
# for a list of supported languages.
|
|
||||||
# language = None
|
|
||||||
|
|
||||||
# There are two options for replacing |today|: either, you set today to some
|
|
||||||
# non-false value, then it is used:
|
|
||||||
# today = ''
|
|
||||||
# Else, today_fmt is used as the format for a strftime call.
|
|
||||||
# today_fmt = '%B %d, %Y'
|
|
||||||
|
|
||||||
# List of patterns, relative to source directory, that match files and
|
|
||||||
# directories to ignore when looking for source files.
|
|
||||||
exclude_patterns = ['_build']
|
|
||||||
|
|
||||||
# The reST default role (used for this markup: `text`) to use for all
|
|
||||||
# documents.
|
|
||||||
# default_role = None
|
|
||||||
|
|
||||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
|
||||||
# add_function_parentheses = True
|
|
||||||
|
|
||||||
# If true, the current module name will be prepended to all description
|
|
||||||
# unit titles (such as .. function::).
|
|
||||||
# add_module_names = True
|
|
||||||
|
|
||||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
|
||||||
# output. They are ignored by default.
|
|
||||||
# show_authors = False
|
|
||||||
|
|
||||||
# The name of the Pygments (syntax highlighting) style to use.
|
|
||||||
pygments_style = 'sphinx'
|
|
||||||
|
|
||||||
# A list of ignored prefixes for module index sorting.
|
|
||||||
# modindex_common_prefix = []
|
|
||||||
|
|
||||||
# If true, keep warnings as "system message" paragraphs in the built documents.
|
|
||||||
# keep_warnings = False
|
|
||||||
|
|
||||||
|
|
||||||
# -- Options for HTML output ----------------------------------------------
|
|
||||||
|
|
||||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
|
||||||
# a list of builtin themes.
|
|
||||||
html_theme = 'furo'
|
|
||||||
|
|
||||||
# Theme options are theme-specific and customize the look and feel of a theme
|
|
||||||
# further. For a list of options available for each theme, see the
|
|
||||||
# documentation.
|
|
||||||
# html_theme_options = {}
|
|
||||||
|
|
||||||
# Add any paths that contain custom themes here, relative to this directory.
|
|
||||||
# html_theme_path = []
|
|
||||||
|
|
||||||
# The name for this set of Sphinx documents. If None, it defaults to
|
|
||||||
# "<project> v<release> documentation".
|
|
||||||
# html_title = None
|
|
||||||
|
|
||||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
|
||||||
# html_short_title = None
|
|
||||||
|
|
||||||
# The name of an image file (relative to this directory) to place at the top
|
|
||||||
# of the sidebar.
|
|
||||||
# html_logo = None
|
|
||||||
|
|
||||||
# The name of an image file (within the static path) to use as favicon of the
|
|
||||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
|
||||||
# pixels large.
|
|
||||||
# html_favicon = None
|
|
||||||
|
|
||||||
# Add any paths that contain custom static files (such as style sheets) here,
|
|
||||||
# relative to this directory. They are copied after the builtin static files,
|
|
||||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
|
||||||
html_static_path = ['_static']
|
|
||||||
|
|
||||||
# Add any extra paths that contain custom files (such as robots.txt or
|
|
||||||
# .htaccess) here, relative to this directory. These files are copied
|
|
||||||
# directly to the root of the documentation.
|
|
||||||
# html_extra_path = []
|
|
||||||
|
|
||||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
|
||||||
# using the given strftime format.
|
|
||||||
# html_last_updated_fmt = '%b %d, %Y'
|
|
||||||
|
|
||||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
|
||||||
# typographically correct entities.
|
|
||||||
# html_use_smartypants = True
|
|
||||||
|
|
||||||
# Custom sidebar templates, maps document names to template names.
|
|
||||||
# html_sidebars = {}
|
|
||||||
|
|
||||||
# Additional templates that should be rendered to pages, maps page names to
|
|
||||||
# template names.
|
|
||||||
# html_additional_pages = {}
|
|
||||||
|
|
||||||
# If false, no module index is generated.
|
|
||||||
# html_domain_indices = True
|
|
||||||
|
|
||||||
# If false, no index is generated.
|
|
||||||
# html_use_index = True
|
|
||||||
|
|
||||||
# If true, the index is split into individual pages for each letter.
|
|
||||||
# html_split_index = False
|
|
||||||
|
|
||||||
# If true, links to the reST sources are added to the pages.
|
|
||||||
# html_show_sourcelink = True
|
|
||||||
|
|
||||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
|
||||||
# html_show_sphinx = True
|
|
||||||
|
|
||||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
|
||||||
# html_show_copyright = True
|
|
||||||
|
|
||||||
# If true, an OpenSearch description file will be output, and all pages will
|
|
||||||
# contain a <link> tag referring to it. The value of this option must be the
|
|
||||||
# base URL from which the finished HTML is served.
|
|
||||||
# html_use_opensearch = ''
|
|
||||||
|
|
||||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
|
||||||
# html_file_suffix = None
|
|
||||||
|
|
||||||
# Output file base name for HTML help builder.
|
|
||||||
htmlhelp_basename = 'sshuttledoc'
|
|
||||||
|
|
||||||
|
|
||||||
# -- Options for LaTeX output ---------------------------------------------
|
|
||||||
|
|
||||||
latex_elements = {
|
|
||||||
# The paper size ('letterpaper' or 'a4paper').
|
|
||||||
# 'papersize': 'letterpaper',
|
|
||||||
|
|
||||||
# The font size ('10pt', '11pt' or '12pt').
|
|
||||||
# 'pointsize': '10pt',
|
|
||||||
|
|
||||||
# Additional stuff for the LaTeX preamble.
|
|
||||||
# 'preamble': '',
|
|
||||||
}
|
|
||||||
|
|
||||||
# Grouping the document tree into LaTeX files. List of tuples
|
|
||||||
# (source start file, target name, title,
|
|
||||||
# author, documentclass [howto, manual, or own class]).
|
|
||||||
latex_documents = [
|
|
||||||
('index', 'sshuttle.tex', 'sshuttle documentation', 'Brian May', 'manual'),
|
|
||||||
]
|
|
||||||
|
|
||||||
# The name of an image file (relative to this directory) to place at the top of
|
|
||||||
# the title page.
|
|
||||||
# latex_logo = None
|
|
||||||
|
|
||||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
|
||||||
# not chapters.
|
|
||||||
# latex_use_parts = False
|
|
||||||
|
|
||||||
# If true, show page references after internal links.
|
|
||||||
# latex_show_pagerefs = False
|
|
||||||
|
|
||||||
# If true, show URL addresses after external links.
|
|
||||||
# latex_show_urls = False
|
|
||||||
|
|
||||||
# Documents to append as an appendix to all manuals.
|
|
||||||
# latex_appendices = []
|
|
||||||
|
|
||||||
# If false, no module index is generated.
|
|
||||||
# latex_domain_indices = True
|
|
||||||
|
|
||||||
|
|
||||||
# -- Options for manual page output ---------------------------------------
|
|
||||||
|
|
||||||
# One entry per manual page. List of tuples
|
|
||||||
# (source start file, name, description, authors, manual section).
|
|
||||||
man_pages = [
|
|
||||||
('manpage', 'sshuttle', 'sshuttle documentation', ['Brian May'], 1)
|
|
||||||
]
|
|
||||||
|
|
||||||
# If true, show URL addresses after external links.
|
|
||||||
# man_show_urls = False
|
|
||||||
|
|
||||||
|
|
||||||
# -- Options for Texinfo output -------------------------------------------
|
|
||||||
|
|
||||||
# Grouping the document tree into Texinfo files. List of tuples
|
|
||||||
# (source start file, target name, title, author,
|
|
||||||
# dir menu entry, description, category)
|
|
||||||
texinfo_documents = [
|
|
||||||
('index', 'sshuttle', 'sshuttle documentation',
|
|
||||||
'Brian May', 'sshuttle', 'A transparent proxy-based VPN using ssh',
|
|
||||||
'Miscellaneous'),
|
|
||||||
]
|
|
||||||
|
|
||||||
# Documents to append as an appendix to all manuals.
|
|
||||||
# texinfo_appendices = []
|
|
||||||
|
|
||||||
# If false, no module index is generated.
|
|
||||||
# texinfo_domain_indices = True
|
|
||||||
|
|
||||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
|
||||||
# texinfo_show_urls = 'footnote'
|
|
||||||
|
|
||||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
|
||||||
# texinfo_no_detailmenu = False
|
|
@ -1,36 +0,0 @@
|
|||||||
How it works
|
|
||||||
============
|
|
||||||
sshuttle is not exactly a VPN, and not exactly port forwarding. It's kind
|
|
||||||
of both, and kind of neither.
|
|
||||||
|
|
||||||
It's like a VPN, since it can forward every port on an entire network, not
|
|
||||||
just ports you specify. Conveniently, it lets you use the "real" IP
|
|
||||||
addresses of each host rather than faking port numbers on localhost.
|
|
||||||
|
|
||||||
On the other hand, the way it *works* is more like ssh port forwarding than
|
|
||||||
a VPN. Normally, a VPN forwards your data one packet at a time, and
|
|
||||||
doesn't care about individual connections; ie. it's "stateless" with respect
|
|
||||||
to the traffic. sshuttle is the opposite of stateless; it tracks every
|
|
||||||
single connection.
|
|
||||||
|
|
||||||
You could compare sshuttle to something like the old `Slirp
|
|
||||||
<http://en.wikipedia.org/wiki/Slirp>`_ program, which was a userspace TCP/IP
|
|
||||||
implementation that did something similar. But it operated on a
|
|
||||||
packet-by-packet basis on the client side, reassembling the packets on the
|
|
||||||
server side. That worked okay back in the "real live serial port" days,
|
|
||||||
because serial ports had predictable latency and buffering.
|
|
||||||
|
|
||||||
But you can't safely just forward TCP packets over a TCP session (like ssh),
|
|
||||||
because TCP's performance depends fundamentally on packet loss; it
|
|
||||||
*must* experience packet loss in order to know when to slow down! At
|
|
||||||
the same time, the outer TCP session (ssh, in this case) is a reliable
|
|
||||||
transport, which means that what you forward through the tunnel *never*
|
|
||||||
experiences packet loss. The ssh session itself experiences packet loss, of
|
|
||||||
course, but TCP fixes it up and ssh (and thus you) never know the
|
|
||||||
difference. But neither does your inner TCP session, and extremely screwy
|
|
||||||
performance ensues.
|
|
||||||
|
|
||||||
sshuttle assembles the TCP stream locally, multiplexes it statefully over
|
|
||||||
an ssh session, and disassembles it back into packets at the other end. So
|
|
||||||
it never ends up doing TCP-over-TCP. It's just data-over-TCP, which is
|
|
||||||
safe.
|
|
@ -1,28 +0,0 @@
|
|||||||
sshuttle: where transparent proxy meets VPN meets ssh
|
|
||||||
=====================================================
|
|
||||||
|
|
||||||
:Date: |today|
|
|
||||||
:Version: |version|
|
|
||||||
|
|
||||||
Contents:
|
|
||||||
|
|
||||||
.. toctree::
|
|
||||||
:maxdepth: 2
|
|
||||||
|
|
||||||
overview
|
|
||||||
requirements
|
|
||||||
installation
|
|
||||||
usage
|
|
||||||
platform
|
|
||||||
Man Page <manpage>
|
|
||||||
how-it-works
|
|
||||||
support
|
|
||||||
trivia
|
|
||||||
changes
|
|
||||||
|
|
||||||
|
|
||||||
Indices and tables
|
|
||||||
==================
|
|
||||||
|
|
||||||
* :ref:`genindex`
|
|
||||||
* :ref:`search`
|
|
@ -1,84 +0,0 @@
|
|||||||
Installation
|
|
||||||
============
|
|
||||||
|
|
||||||
- Ubuntu 16.04 or later::
|
|
||||||
|
|
||||||
apt-get install sshuttle
|
|
||||||
|
|
||||||
- Debian stretch or later::
|
|
||||||
|
|
||||||
apt-get install sshuttle
|
|
||||||
|
|
||||||
- Arch Linux::
|
|
||||||
|
|
||||||
pacman -S sshuttle
|
|
||||||
|
|
||||||
- Fedora::
|
|
||||||
|
|
||||||
dnf install sshuttle
|
|
||||||
|
|
||||||
- openSUSE::
|
|
||||||
|
|
||||||
zypper in sshuttle
|
|
||||||
|
|
||||||
- Gentoo::
|
|
||||||
|
|
||||||
emerge -av net-proxy/sshuttle
|
|
||||||
|
|
||||||
- NixOS::
|
|
||||||
|
|
||||||
nix-env -iA nixos.sshuttle
|
|
||||||
|
|
||||||
- From PyPI::
|
|
||||||
|
|
||||||
sudo pip install sshuttle
|
|
||||||
|
|
||||||
- Clone::
|
|
||||||
|
|
||||||
git clone https://github.com/sshuttle/sshuttle.git
|
|
||||||
cd sshuttle
|
|
||||||
sudo ./setup.py install
|
|
||||||
|
|
||||||
- FreeBSD::
|
|
||||||
|
|
||||||
# ports
|
|
||||||
cd /usr/ports/net/py-sshuttle && make install clean
|
|
||||||
# pkg
|
|
||||||
pkg install py39-sshuttle
|
|
||||||
|
|
||||||
- OpenBSD::
|
|
||||||
|
|
||||||
pkg_add sshuttle
|
|
||||||
|
|
||||||
- macOS, via MacPorts::
|
|
||||||
|
|
||||||
sudo port selfupdate
|
|
||||||
sudo port install sshuttle
|
|
||||||
|
|
||||||
It is also possible to install into a virtualenv as a non-root user.
|
|
||||||
|
|
||||||
- From PyPI::
|
|
||||||
|
|
||||||
python3 -m venv /tmp/sshuttle
|
|
||||||
. /tmp/sshuttle/bin/activate
|
|
||||||
pip install sshuttle
|
|
||||||
|
|
||||||
- Clone::
|
|
||||||
|
|
||||||
git clone https://github.com/sshuttle/sshuttle.git
|
|
||||||
cd sshuttle
|
|
||||||
python3 -m venv /tmp/sshuttle
|
|
||||||
. /tmp/sshuttle/bin/activate
|
|
||||||
python -m pip install .
|
|
||||||
|
|
||||||
- Homebrew::
|
|
||||||
|
|
||||||
brew install sshuttle
|
|
||||||
|
|
||||||
- Nix::
|
|
||||||
|
|
||||||
nix-shell -p sshuttle
|
|
||||||
|
|
||||||
- Windows::
|
|
||||||
|
|
||||||
pip install sshuttle
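Whichever method you use, a quick hedged sanity check afterwards (the server
name and subnet are placeholders)::

    sshuttle --version
    sshuttle -r user@example.com 192.168.0.0/16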
|
|
242
docs/make.bat
@ -1,242 +0,0 @@
|
|||||||
@ECHO OFF
|
|
||||||
|
|
||||||
REM Command file for Sphinx documentation
|
|
||||||
|
|
||||||
if "%SPHINXBUILD%" == "" (
|
|
||||||
set SPHINXBUILD=sphinx-build
|
|
||||||
)
|
|
||||||
set BUILDDIR=_build
|
|
||||||
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
|
|
||||||
set I18NSPHINXOPTS=%SPHINXOPTS% .
|
|
||||||
if NOT "%PAPER%" == "" (
|
|
||||||
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
|
|
||||||
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "" goto help
|
|
||||||
|
|
||||||
if "%1" == "help" (
|
|
||||||
:help
|
|
||||||
echo.Please use `make ^<target^>` where ^<target^> is one of
|
|
||||||
echo. html to make standalone HTML files
|
|
||||||
echo. dirhtml to make HTML files named index.html in directories
|
|
||||||
echo. singlehtml to make a single large HTML file
|
|
||||||
echo. pickle to make pickle files
|
|
||||||
echo. json to make JSON files
|
|
||||||
echo. htmlhelp to make HTML files and a HTML help project
|
|
||||||
echo. qthelp to make HTML files and a qthelp project
|
|
||||||
echo. devhelp to make HTML files and a Devhelp project
|
|
||||||
echo. epub to make an epub
|
|
||||||
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
|
|
||||||
echo. text to make text files
|
|
||||||
echo. man to make manual pages
|
|
||||||
echo. texinfo to make Texinfo files
|
|
||||||
echo. gettext to make PO message catalogs
|
|
||||||
echo. changes to make an overview over all changed/added/deprecated items
|
|
||||||
echo. xml to make Docutils-native XML files
|
|
||||||
echo. pseudoxml to make pseudoxml-XML files for display purposes
|
|
||||||
echo. linkcheck to check all external links for integrity
|
|
||||||
echo. doctest to run all doctests embedded in the documentation if enabled
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "clean" (
|
|
||||||
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
|
|
||||||
del /q /s %BUILDDIR%\*
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
%SPHINXBUILD% 2> nul
|
|
||||||
if errorlevel 9009 (
|
|
||||||
echo.
|
|
||||||
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
|
|
||||||
echo.installed, then set the SPHINXBUILD environment variable to point
|
|
||||||
echo.to the full path of the 'sphinx-build' executable. Alternatively you
|
|
||||||
echo.may add the Sphinx directory to PATH.
|
|
||||||
echo.
|
|
||||||
echo.If you don't have Sphinx installed, grab it from
|
|
||||||
echo.http://sphinx-doc.org/
|
|
||||||
exit /b 1
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "html" (
|
|
||||||
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "dirhtml" (
|
|
||||||
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "singlehtml" (
|
|
||||||
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "pickle" (
|
|
||||||
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished; now you can process the pickle files.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "json" (
|
|
||||||
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished; now you can process the JSON files.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "htmlhelp" (
|
|
||||||
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished; now you can run HTML Help Workshop with the ^
|
|
||||||
.hhp project file in %BUILDDIR%/htmlhelp.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "qthelp" (
|
|
||||||
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished; now you can run "qcollectiongenerator" with the ^
|
|
||||||
.qhcp project file in %BUILDDIR%/qthelp, like this:
|
|
||||||
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\sshuttle.qhcp
|
|
||||||
echo.To view the help file:
|
|
||||||
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\sshuttle.qhc
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "devhelp" (
|
|
||||||
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "epub" (
|
|
||||||
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished. The epub file is in %BUILDDIR%/epub.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "latex" (
|
|
||||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "latexpdf" (
|
|
||||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
|
||||||
cd %BUILDDIR%/latex
|
|
||||||
make all-pdf
|
|
||||||
cd %BUILDDIR%/..
|
|
||||||
echo.
|
|
||||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "latexpdfja" (
|
|
||||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
|
||||||
cd %BUILDDIR%/latex
|
|
||||||
make all-pdf-ja
|
|
||||||
cd %BUILDDIR%/..
|
|
||||||
echo.
|
|
||||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "text" (
|
|
||||||
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished. The text files are in %BUILDDIR%/text.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "man" (
|
|
||||||
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished. The manual pages are in %BUILDDIR%/man.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "texinfo" (
|
|
||||||
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "gettext" (
|
|
||||||
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "changes" (
|
|
||||||
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.The overview file is in %BUILDDIR%/changes.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "linkcheck" (
|
|
||||||
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Link check complete; look for any errors in the above output ^
|
|
||||||
or in %BUILDDIR%/linkcheck/output.txt.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "doctest" (
|
|
||||||
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Testing of doctests in the sources finished, look at the ^
|
|
||||||
results in %BUILDDIR%/doctest/output.txt.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "xml" (
|
|
||||||
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished. The XML files are in %BUILDDIR%/xml.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%1" == "pseudoxml" (
|
|
||||||
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
|
|
||||||
if errorlevel 1 exit /b 1
|
|
||||||
echo.
|
|
||||||
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
|
|
||||||
goto end
|
|
||||||
)
|
|
||||||
|
|
||||||
:end
|
|
503
docs/manpage.rst
@ -1,503 +0,0 @@
|
|||||||
sshuttle
|
|
||||||
========
|
|
||||||
|
|
||||||
|
|
||||||
Synopsis
|
|
||||||
--------
|
|
||||||
**sshuttle** [*options*] **-r** *[username@]sshserver[:port]* \<*subnets* ...\>
|
|
||||||
|
|
||||||
|
|
||||||
Description
|
|
||||||
-----------
|
|
||||||
:program:`sshuttle` allows you to create a VPN connection from your
|
|
||||||
machine to any remote server that you can connect to via ssh, as long
|
|
||||||
as that server has a sufficiently new Python installation.
|
|
||||||
|
|
||||||
To work, you must have root access on the local machine,
|
|
||||||
but you can have a normal account on the server.
|
|
||||||
|
|
||||||
It's valid to run :program:`sshuttle` more than once simultaneously on
|
|
||||||
a single client machine, connecting to a different server
|
|
||||||
every time, so you can be on more than one VPN at once.
|
|
||||||
|
|
||||||
If run on a router, :program:`sshuttle` can forward traffic for your
|
|
||||||
entire subnet to the VPN.
|
|
||||||
|
|
||||||
|
|
||||||
Options
|
|
||||||
-------
|
|
||||||
.. program:: sshuttle
|
|
||||||
|
|
||||||
.. option:: <subnets>
|
|
||||||
|
|
||||||
A list of subnets to route over the VPN, in the form
``a.b.c.d[/width][port[-port]]``. Valid examples are 1.2.3.4 (a
single IP address), 1.2.3.4/32 (equivalent to 1.2.3.4), and
1.2.3.0/24 (a 24-bit subnet, ie. with a 255.255.255.0 netmask).
Specify subnets 0/0 to match all IPv4 addresses and ::/0 to match
all IPv6 addresses. Any of the previous examples are also valid if
you append a port or a port range: 1.2.3.4:8000 will only tunnel
traffic whose destination is port 8000 of 1.2.3.4, and
1.2.3.0/24:8000-9000 will tunnel traffic going to any port
between 8000 and 9000 (inclusive) for all IPs in the 1.2.3.0/24
subnet. A hostname can be provided instead of an IP address. If
the hostname resolves to multiple IPs, all of the IPs are
included. If a width is provided with a hostname, the width is
applied to all of the hostname's IPs (if they are all either IPv4
or IPv6). Widths cannot be supplied to hostnames that resolve to
both IPv4 and IPv6. Valid examples are example.com,
example.com:8000, example.com/24, example.com/24:8000 and
example.com:8000-9000.
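For illustration, a hedged example combining several of the forms above
(the server and subnets are placeholders)::

    sshuttle -r user@example.com 10.11.12.13 192.168.0.0/16 10.0.0.0/8:8000-9000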
|
|
||||||
|
|
||||||
.. option:: --method <auto|nat|nft|tproxy|pf|ipfw>
|
|
||||||
|
|
||||||
Which firewall method should sshuttle use? For auto, sshuttle attempts to
|
|
||||||
guess the appropriate method depending on what it can find in PATH. The
|
|
||||||
default value is auto.
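For example, a sketch that forces a specific method rather than relying on
auto-detection (assuming nftables is available on the client)::

    sshuttle --method=nft -r user@example.com 0/0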
|
|
||||||
|
|
||||||
.. option:: -l <[ip:]port>, --listen=<[ip:]port>
|
|
||||||
|
|
||||||
Use this ip address and port number as the transparent
|
|
||||||
proxy port. By default :program:`sshuttle` finds an available
|
|
||||||
port automatically and listens on IP 127.0.0.1
|
|
||||||
(localhost), so you don't need to override it, and
|
|
||||||
connections are only proxied from the local machine,
|
|
||||||
not from outside machines. If you want to accept
|
|
||||||
connections from other machines on your network (ie. to
|
|
||||||
run :program:`sshuttle` on a router) try enabling IP Forwarding in
|
|
||||||
your kernel, then using ``--listen 0.0.0.0:0``.
|
|
||||||
You can use any name resolving to an IP address of the machine running
|
|
||||||
:program:`sshuttle`, e.g. ``--listen localhost``.
|
|
||||||
|
|
||||||
For the nft, tproxy and pf methods this can be an IPv6 address. Use
|
|
||||||
this option with comma separated values if required, to provide both
|
|
||||||
IPv4 and IPv6 addresses, e.g. ``--listen 127.0.0.1:0,[::1]:0``.
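Two hedged examples: accepting connections from other machines (e.g. when
running on a router with IP forwarding enabled), and listening on both an
IPv4 and an IPv6 address::

    sshuttle -r user@example.com --listen 0.0.0.0:0 0/0
    sshuttle -r user@example.com --listen 127.0.0.1:0,[::1]:0 0/0 ::/0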
|
|
||||||
|
|
||||||
.. option:: -H, --auto-hosts
|
|
||||||
|
|
||||||
Scan for remote hostnames and update the local /etc/hosts
|
|
||||||
file with matching entries for as long as the VPN is
|
|
||||||
open. This is nicer than changing your system's DNS
|
|
||||||
(/etc/resolv.conf) settings, for several reasons. First,
|
|
||||||
hostnames are added without domain names attached, so
|
|
||||||
you can ``ssh thatserver`` without worrying if your local
|
|
||||||
domain matches the remote one. Second, if you :program:`sshuttle`
|
|
||||||
into more than one VPN at a time, it's impossible to
|
|
||||||
use more than one DNS server at once anyway, but
|
|
||||||
:program:`sshuttle` correctly merges /etc/hosts entries between
|
|
||||||
all running copies. Third, if you're only routing a
|
|
||||||
few subnets over the VPN, you probably would prefer to
|
|
||||||
keep using your local DNS server for everything else.
|
|
||||||
|
|
||||||
:program:`sshuttle` tries to store a cache of the hostnames in
|
|
||||||
~/.sshuttle.hosts on the remote host. Similarly, it tries to read
|
|
||||||
the file when you later reconnect to the host with --auto-hosts
|
|
||||||
enabled to quickly populate the host list. When troubleshooting
|
|
||||||
this feature, try removing this file on the remote host when
|
|
||||||
sshuttle is not running.
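For example, a hedged sketch of enabling host discovery, and of clearing the
remote cache while troubleshooting (the server name is a placeholder, and
sshuttle should not be running when the cache is removed)::

    sshuttle -H -r user@example.com 0/0
    ssh user@example.com "rm -f ~/.sshuttle.hosts"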
|
|
||||||
|
|
||||||
.. option:: -N, --auto-nets
|
|
||||||
|
|
||||||
In addition to the subnets provided on the command
|
|
||||||
line, ask the server which subnets it thinks we should
|
|
||||||
route, and route those automatically. The suggestions
|
|
||||||
are taken automatically from the server's routing
|
|
||||||
table.
|
|
||||||
|
|
||||||
This feature does not detect IPv6 routes. Specify IPv6 subnets
|
|
||||||
manually. For example, specify the ``::/0`` subnet on the command
|
|
||||||
line to route all IPv6 traffic.
|
|
||||||
|
|
||||||
.. option:: --dns
|
|
||||||
|
|
||||||
Capture local DNS requests and forward to the remote DNS
server. All queries to any of the local system's DNS
servers (/etc/resolv.conf and, if it exists,
/run/systemd/resolve/resolv.conf) will be intercepted and
resolved on the remote side of the tunnel instead, using
the server given with the :option:`--to-ns` option if one
is specified. Only plain DNS traffic sent to these servers
on port 53 is captured.
|
|
||||||
|
|
||||||
.. option:: --ns-hosts=<server1[,server2[,server3[...]]]>
|
|
||||||
|
|
||||||
Capture local DNS requests to the specified server(s)
and forward to the remote DNS server. Unlike the
:option:`--dns` option, this flag lets you specify which
DNS server(s) to intercept queries to, instead of
intercepting all DNS traffic on the local machine. This
can be useful when only certain DNS requests should be
resolved on the remote side of the tunnel, e.g. in
combination with dnsmasq.
|
|
||||||
|
|
||||||
.. option:: --to-ns=<server>
|
|
||||||
|
|
||||||
The DNS to forward requests to when remote DNS
|
|
||||||
resolution is enabled. If not given, sshuttle will
|
|
||||||
simply resolve using the system configured resolver on
|
|
||||||
the remote side (via /etc/resolv.conf on the remote
|
|
||||||
side).
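A hedged illustration of the two approaches (server names and resolver
addresses are placeholders)::

    # forward every local DNS query and resolve it on the remote side
    sshuttle --dns -r user@example.com 0/0

    # only intercept queries sent to 192.168.1.1, resolving them remotely via 10.0.0.53
    sshuttle --ns-hosts=192.168.1.1 --to-ns=10.0.0.53 -r user@example.com 0/0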
|
|
||||||
|
|
||||||
.. option:: --python
|
|
||||||
|
|
||||||
Specify the name/path of the remote python interpreter. The
|
|
||||||
default is to use ``python3`` (or ``python``, if ``python3``
|
|
||||||
fails) in the remote system's PATH.
|
|
||||||
|
|
||||||
.. option:: -r <[username@]sshserver[:port]>, --remote=<[username@]sshserver[:port]>
|
|
||||||
|
|
||||||
The remote hostname and optional username and ssh
|
|
||||||
port number to use for connecting to the remote server.
|
|
||||||
For example, example.com, testuser@example.com,
|
|
||||||
testuser@example.com:2222, or example.com:2244. This
|
|
||||||
hostname is passed to ssh, so it will recognize any
|
|
||||||
aliases and settings you may have configured in
|
|
||||||
~/.ssh/config.
|
|
||||||
|
|
||||||
.. option:: -x <subnet>, --exclude=<subnet>
|
|
||||||
|
|
||||||
Explicitly exclude this subnet from forwarding. The
|
|
||||||
format of this option is the same as the ``<subnets>``
|
|
||||||
option. To exclude more than one subnet, specify the
|
|
||||||
``-x`` option more than once. You can say something like
|
|
||||||
``0/0 -x 1.2.3.0/24`` to forward everything except the
|
|
||||||
local subnet over the VPN, for example.
|
|
||||||
|
|
||||||
.. option:: -X <file>, --exclude-from=<file>
|
|
||||||
|
|
||||||
Exclude the subnets specified in a file, one subnet per
|
|
||||||
line. Useful when you have lots of subnets to exclude.
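For example, a sketch combining both exclusion forms, with a hypothetical
``excludes.txt`` listing one subnet per line::

    printf '10.10.0.0/16\n10.20.0.0/16\n' > excludes.txt
    sshuttle -r user@example.com -x 192.168.1.0/24 -X excludes.txt 0/0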
|
|
||||||
|
|
||||||
.. option:: -v, --verbose
|
|
||||||
|
|
||||||
Print more information about the session. This option
|
|
||||||
can be used more than once for increased verbosity. By
|
|
||||||
default, :program:`sshuttle` prints only error messages.
|
|
||||||
|
|
||||||
.. option:: -e, --ssh-cmd
|
|
||||||
|
|
||||||
The command to use to connect to the remote server. The
|
|
||||||
default is just ``ssh``. Use this if your ssh client is
|
|
||||||
in a non-standard location or you want to provide extra
|
|
||||||
options to the ssh command, for example, ``-e 'ssh -v'``.
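For instance, a hedged example passing an alternative identity file and extra
verbosity to ssh (the key path is purely illustrative)::

    sshuttle -e 'ssh -v -i ~/.ssh/id_ed25519' -r user@example.com 0/0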
|
|
||||||
|
|
||||||
.. option:: --remote-shell
|
|
||||||
|
|
||||||
For Windows targets, specify the remote shell program to use instead of the de facto POSIX shell.
This would typically be either ``cmd`` or ``powershell``, unless something like git-bash is in use.
|
|
||||||
|
|
||||||
.. option:: --no-cmd-delimiter
|
|
||||||
|
|
||||||
Do not add a double dash (--) delimiter before invoking Python on
|
|
||||||
the remote host. This option is useful when the ssh command used
|
|
||||||
to connect is a custom command that does not interpret this
|
|
||||||
delimiter correctly.
|
|
||||||
|
|
||||||
.. option:: --seed-hosts
|
|
||||||
|
|
||||||
A comma-separated list of hostnames to use to
|
|
||||||
initialize the :option:`--auto-hosts` scan algorithm.
|
|
||||||
:option:`--auto-hosts` does things like poll netstat output
for lists of local hostnames, but you can speed things up
by using this option to give it a few names to start
from.
|
|
||||||
|
|
||||||
If this option is used *without* :option:`--auto-hosts`,
|
|
||||||
then the listed hostnames will be scanned and added, but
|
|
||||||
no further hostnames will be added.
|
|
||||||
|
|
||||||
.. option:: --no-latency-control
|
|
||||||
|
|
||||||
Sacrifice latency to improve bandwidth benchmarks. ssh
|
|
||||||
uses really big socket buffers, which can overload the
|
|
||||||
connection if you start doing large file transfers,
|
|
||||||
thus making all your other sessions inside the same
|
|
||||||
tunnel go slowly. Normally, :program:`sshuttle` tries to avoid
|
|
||||||
this problem using a "fullness check" that allows only
|
|
||||||
a certain amount of outstanding data to be buffered at
|
|
||||||
a time. But on high-bandwidth links, this can leave a
|
|
||||||
lot of your bandwidth underutilized. It also makes
|
|
||||||
:program:`sshuttle` seem slow in bandwidth benchmarks (benchmarks
|
|
||||||
rarely test ping latency, which is what :program:`sshuttle` is
|
|
||||||
trying to control). This option disables the latency
|
|
||||||
control feature, maximizing bandwidth usage. Use at
|
|
||||||
your own risk.
|
|
||||||
|
|
||||||
.. option:: --latency-buffer-size
|
|
||||||
|
|
||||||
Set the size of the buffer used in latency control. The
|
|
||||||
default is ``32768``. Changing this option allows a compromise
|
|
||||||
to be made between latency and bandwidth without completely
|
|
||||||
disabling latency control (with :option:`--no-latency-control`).
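A small sketch of that compromise, doubling the default buffer instead of
disabling latency control entirely::

    sshuttle --latency-buffer-size 65536 -r user@example.com 0/0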
|
|
||||||
|
|
||||||
.. option:: -D, --daemon
|
|
||||||
|
|
||||||
Automatically fork into the background after connecting
|
|
||||||
to the remote server. Implies :option:`--syslog`.
|
|
||||||
|
|
||||||
.. option:: -s <file>, --subnets=<file>
|
|
||||||
|
|
||||||
Include the subnets specified in a file instead of on the
|
|
||||||
command line. One subnet per line.
|
|
||||||
|
|
||||||
.. option:: --syslog
|
|
||||||
|
|
||||||
After connecting, send all log messages to the
|
|
||||||
:manpage:`syslog(3)` service instead of stderr. This is
|
|
||||||
implicit if you use :option:`--daemon`.
|
|
||||||
|
|
||||||
.. option:: --pidfile=<pidfilename>
|
|
||||||
|
|
||||||
When using :option:`--daemon`, save :program:`sshuttle`'s pid to
|
|
||||||
*pidfilename*. The default is ``sshuttle.pid`` in the
|
|
||||||
current directory.
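For example, a hedged sketch of running in the background with an explicit
pid file and later stopping that instance (the pid-file path is only an
assumption)::

    sshuttle -D --pidfile=/tmp/sshuttle.pid -r user@example.com 0/0
    kill "$(cat /tmp/sshuttle.pid)"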
|
|
||||||
|
|
||||||
.. option:: --disable-ipv6
|
|
||||||
|
|
||||||
Disable IPv6 support for methods that support it (nat, nft,
|
|
||||||
tproxy, and pf).
|
|
||||||
|
|
||||||
.. option:: --firewall
|
|
||||||
|
|
||||||
(internal use only) run the firewall manager. This is
|
|
||||||
the only part of :program:`sshuttle` that must run as root. If
|
|
||||||
you start :program:`sshuttle` as a non-root user, it will
|
|
||||||
automatically run ``sudo`` or ``su`` to start the firewall
|
|
||||||
manager, but the core of :program:`sshuttle` still runs as a
|
|
||||||
normal user.
|
|
||||||
|
|
||||||
.. option:: --hostwatch
|
|
||||||
|
|
||||||
(internal use only) run the hostwatch daemon. This
|
|
||||||
process runs on the server side and collects hostnames for
|
|
||||||
the :option:`--auto-hosts` option. Using this option by itself
|
|
||||||
makes it a lot easier to debug and test the :option:`--auto-hosts`
|
|
||||||
feature.
|
|
||||||
|
|
||||||
.. option:: --sudoers-no-modify
|
|
||||||
|
|
||||||
sshuttle prints a configuration to stdout which allows a user to
|
|
||||||
run sshuttle without a password. This option is INSECURE because,
|
|
||||||
with some cleverness, it also allows the user to run any command
|
|
||||||
as root without a password. The output also includes a suggested
|
|
||||||
method for you to install the configuration.
|
|
||||||
|
|
||||||
Use --sudoers-user to modify the user that it applies to.
|
|
||||||
|
|
||||||
.. option:: --sudoers-user
|
|
||||||
|
|
||||||
Set the user name, or a group using the %group_name syntax, for passwordless
operation. Default is the current user. Set to ALL for all users
|
|
||||||
(NOT RECOMMENDED: See note about security in --sudoers-no-modify
|
|
||||||
documentation above). Only works with the --sudoers-no-modify
|
|
||||||
option.
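A hedged sketch of generating and syntax-checking such a configuration for
the current user; where to install it should follow the suggestion that
sshuttle itself prints (the file name here is arbitrary)::

    sshuttle --sudoers-no-modify > sshuttle-sudoers.conf
    visudo -cf sshuttle-sudoers.conf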
|
|
||||||
|
|
||||||
.. option:: -t <mark>, --tmark=<mark>
|
|
||||||
|
|
||||||
An option used by the tproxy method: Use the specified traffic
|
|
||||||
mark. The mark must be a hexadecimal value. Defaults to 0x01.
|
|
||||||
|
|
||||||
.. option:: --version
|
|
||||||
|
|
||||||
Print program version.
|
|
||||||
|
|
||||||
|
|
||||||
Configuration File
|
|
||||||
------------------
|
|
||||||
All the options described above can optionally be specified in a configuration
|
|
||||||
file.
|
|
||||||
|
|
||||||
To run :program:`sshuttle` with options defined in, e.g., `/etc/sshuttle.conf`
|
|
||||||
just pass the path to the file preceded by the `@` character, e.g.
|
|
||||||
`@/etc/sshuttle.conf`.
|
|
||||||
|
|
||||||
When running :program:`sshuttle` with options defined in a configuration file,
|
|
||||||
options can still be passed via the command line in addition to what is
|
|
||||||
defined in the file. If a given option is defined both in the file and in
|
|
||||||
the command line, the value in the command line will take precedence.
|
|
||||||
|
|
||||||
Arguments read from a file must be one per line, as shown below::
|
|
||||||
|
|
||||||
value
|
|
||||||
--option1
|
|
||||||
value1
|
|
||||||
--option2
|
|
||||||
value2
|
|
||||||
|
|
||||||
The configuration file supports comments for human-readable
|
|
||||||
annotations. For example::
|
|
||||||
|
|
||||||
# company-internal API
|
|
||||||
8.8.8.8/32
|
|
||||||
# home IoT
|
|
||||||
192.168.63.0/24
|
|
||||||
|
|
||||||
|
|
||||||
Environment Variable
|
|
||||||
--------------------
|
|
||||||
|
|
||||||
You can specify command line options with the `SSHUTTLE_ARGS` environment
|
|
||||||
variable. If a given option is defined in both the environment variable and
|
|
||||||
command line, the value on the command line will take precedence.
|
|
||||||
|
|
||||||
For example::
|
|
||||||
|
|
||||||
SSHUTTLE_ARGS="-e 'ssh -v' --dns" sshuttle -r example.com 0/0
|
|
||||||
|
|
||||||
|
|
||||||
Examples
|
|
||||||
--------
|
|
||||||
|
|
||||||
Use the following command to route all IPv4 TCP traffic through remote
|
|
||||||
(-r) host example.com (and possibly other traffic too, depending on
|
|
||||||
the selected --method). The 0/0 subnet, short for 0.0.0.0/0, matches
|
|
||||||
all IPv4 addresses. The ::/0 subnet, matching all IPv6 addresses, could
be added to the example. We also exclude (-x) example.com:22 so that
|
|
||||||
we can establish ssh connections from our local machine to the remote
|
|
||||||
host without them being routed through sshuttle. Excluding the remote
|
|
||||||
host may be necessary on some machines for sshuttle to work properly.
|
|
||||||
Press Ctrl+C to exit. To also route DNS queries through sshuttle, try
|
|
||||||
adding --dns. Add or remove -v options to see more or less
|
|
||||||
information::
|
|
||||||
|
|
||||||
$ sshuttle -r example.com -x example.com:22 0/0
|
|
||||||
|
|
||||||
Starting sshuttle proxy (version ...).
|
|
||||||
[local sudo] Password:
|
|
||||||
fw: Starting firewall with Python version 3.9.5
|
|
||||||
fw: ready method name nat.
|
|
||||||
c : IPv6 disabled since it isn't supported by method nat.
|
|
||||||
c : Method: nat
|
|
||||||
c : IPv4: on
|
|
||||||
c : IPv6: off (not available with nat method)
|
|
||||||
c : UDP : off (not available with nat method)
|
|
||||||
c : DNS : off (available)
|
|
||||||
c : User: off (available)
|
|
||||||
c : Subnets to forward through remote host (type, IP, cidr mask width, startPort, endPort):
|
|
||||||
c : (<AddressFamily.AF_INET: 2>, '0.0.0.0', 0, 0, 0)
|
|
||||||
c : Subnets to exclude from forwarding:
|
|
||||||
c : (<AddressFamily.AF_INET: 2>, '...', 32, 22, 22)
|
|
||||||
c : (<AddressFamily.AF_INET: 2>, '127.0.0.1', 32, 0, 0)
|
|
||||||
c : TCP redirector listening on ('127.0.0.1', 12299).
|
|
||||||
c : Starting client with Python version 3.9.5
|
|
||||||
c : Connecting to server...
|
|
||||||
user@example.com's password:
|
|
||||||
s: Starting server with Python version 3.6.8
|
|
||||||
s: latency control setting = True
|
|
||||||
s: auto-nets:False
|
|
||||||
c : Connected to server.
|
|
||||||
fw: setting up.
|
|
||||||
fw: iptables -w -t nat -N sshuttle-12299
|
|
||||||
fw: iptables -w -t nat -F sshuttle-12299
|
|
||||||
...
|
|
||||||
Accept: 192.168.42.121:60554 -> 77.141.99.22:22.
|
|
||||||
^C
|
|
||||||
c : Keyboard interrupt: exiting.
|
|
||||||
c : SW'unknown':Mux#1: deleting (1 remain)
|
|
||||||
c : SW#7:192.168.42.121:60554: deleting (0 remain)
|
|
||||||
|
|
||||||
|
|
||||||
Connect to a remote server, with automatic hostname
|
|
||||||
and subnet guessing::
|
|
||||||
|
|
||||||
$ sshuttle -vNHr example.com -x example.com:22
|
|
||||||
Starting sshuttle proxy (version ...).
|
|
||||||
[local sudo] Password:
|
|
||||||
fw: Starting firewall with Python version 3.9.5
|
|
||||||
fw: ready method name nat.
|
|
||||||
c : IPv6 disabled since it isn't supported by method nat.
|
|
||||||
c : Method: nat
|
|
||||||
c : IPv4: on
|
|
||||||
c : IPv6: off (not available with nat method)
|
|
||||||
c : UDP : off (not available with nat method)
|
|
||||||
c : DNS : off (available)
|
|
||||||
c : User: off (available)
|
|
||||||
c : Subnets to forward through remote host (type, IP, cidr mask width, startPort, endPort):
|
|
||||||
c : NOTE: Additional subnets to forward may be added below by --auto-nets.
|
|
||||||
c : Subnets to exclude from forwarding:
|
|
||||||
c : (<AddressFamily.AF_INET: 2>, '...', 32, 22, 22)
|
|
||||||
c : (<AddressFamily.AF_INET: 2>, '127.0.0.1', 32, 0, 0)
|
|
||||||
c : TCP redirector listening on ('127.0.0.1', 12300).
|
|
||||||
c : Starting client with Python version 3.9.5
|
|
||||||
c : Connecting to server...
|
|
||||||
user@example.com's password:
|
|
||||||
s: Starting server with Python version 3.6.8
|
|
||||||
s: latency control setting = True
|
|
||||||
s: auto-nets:True
|
|
||||||
c : Connected to server.
|
|
||||||
c : seed_hosts: []
|
|
||||||
s: available routes:
|
|
||||||
s: 77.141.99.0/24
|
|
||||||
fw: setting up.
|
|
||||||
fw: iptables -w -t nat -N sshuttle-12300
|
|
||||||
fw: iptables -w -t nat -F sshuttle-12300
|
|
||||||
...
|
|
||||||
c : Accept: 192.168.42.121:60554 -> 77.141.99.22:22.
|
|
||||||
^C
|
|
||||||
c : Keyboard interrupt: exiting.
|
|
||||||
c : SW'unknown':Mux#1: deleting (1 remain)
|
|
||||||
c : SW#7:192.168.42.121:60554: deleting (0 remain)
|
|
||||||
|
|
||||||
Run :program:`sshuttle` with a `/etc/sshuttle.conf` configuration file::
|
|
||||||
|
|
||||||
$ sshuttle @/etc/sshuttle.conf
|
|
||||||
|
|
||||||
Use the options defined in `/etc/sshuttle.conf` but be more verbose::
|
|
||||||
|
|
||||||
$ sshuttle @/etc/sshuttle.conf -vvv
|
|
||||||
|
|
||||||
Override the remote server defined in `/etc/sshuttle.conf`::
|
|
||||||
|
|
||||||
$ sshuttle @/etc/sshuttle.conf -r otheruser@test.example.com
|
|
||||||
|
|
||||||
Example configuration file::
|
|
||||||
|
|
||||||
192.168.0.0/16
|
|
||||||
--remote
|
|
||||||
user@example.com
|
|
||||||
|
|
||||||
|
|
||||||
Discussion
|
|
||||||
----------
|
|
||||||
When it starts, :program:`sshuttle` creates an ssh session to the
|
|
||||||
server specified by the ``-r`` option.
|
|
||||||
|
|
||||||
After connecting to the remote server, :program:`sshuttle` uploads its
|
|
||||||
(python) source code to the remote end and executes it
|
|
||||||
there. Thus, you don't need to install :program:`sshuttle` on the
|
|
||||||
remote server, and there are never :program:`sshuttle` version
|
|
||||||
conflicts between client and server.
|
|
||||||
|
|
||||||
Unlike most VPNs, :program:`sshuttle` forwards sessions, not packets.
|
|
||||||
That is, it uses kernel transparent proxying (`iptables
|
|
||||||
REDIRECT` rules on Linux) to
|
|
||||||
capture outgoing TCP sessions, then creates entirely
|
|
||||||
separate TCP sessions out to the original destination at
|
|
||||||
the other end of the tunnel.
|
|
||||||
|
|
||||||
Packet-level forwarding (eg. using the tun/tap devices on
|
|
||||||
Linux) seems elegant at first, but it results in
|
|
||||||
several problems, notably the 'tcp over tcp' problem. The
|
|
||||||
tcp protocol depends fundamentally on packets being dropped
|
|
||||||
in order to implement its congestion control algorithm; if
|
|
||||||
you pass tcp packets through a tcp-based tunnel (such as
|
|
||||||
ssh), the inner tcp packets will never be dropped, and so
|
|
||||||
the inner tcp stream's congestion control will be
|
|
||||||
completely broken, and performance will be terrible. Thus,
|
|
||||||
packet-based VPNs (such as IPsec and openvpn) cannot use
|
|
||||||
tcp-based encrypted streams like ssh or ssl, and have to
|
|
||||||
implement their own encryption from scratch, which is very
|
|
||||||
complex and error prone.
|
|
||||||
|
|
||||||
:program:`sshuttle`'s simplicity comes from the fact that it can
|
|
||||||
safely use the existing ssh encrypted tunnel without
|
|
||||||
incurring a performance penalty. It does this by letting
|
|
||||||
the client-side kernel manage the incoming tcp stream, and
|
|
||||||
the server-side kernel manage the outgoing tcp stream;
|
|
||||||
there is no need for congestion control to be shared
|
|
||||||
between the two separate streams, so a tcp-based tunnel is
|
|
||||||
fine.
|
|
||||||
|
|
||||||
.. seealso::
|
|
||||||
|
|
||||||
:manpage:`ssh(1)`, :manpage:`python(1)`
|
|
@ -1,8 +0,0 @@
|
|||||||
OpenWRT
|
|
||||||
========
|
|
||||||
|
|
||||||
Run::
|
|
||||||
|
|
||||||
opkg install python3 python3-pip iptables-mod-extra iptables-mod-nat-extra iptables-mod-ipopt
|
|
||||||
python3 /usr/bin/pip3 install sshuttle
|
|
||||||
sshuttle -l 0.0.0.0 -r <IP> -x 192.168.1.1 0/0
|
|
@ -1,26 +0,0 @@
|
|||||||
Overview
|
|
||||||
========
|
|
||||||
|
|
||||||
As far as I know, sshuttle is the only program that solves the following
|
|
||||||
common case:
|
|
||||||
|
|
||||||
- Your client machine (or router) is Linux, MacOS, FreeBSD, OpenBSD or pfSense.
|
|
||||||
|
|
||||||
- You have access to a remote network via ssh.
|
|
||||||
|
|
||||||
- You don't necessarily have admin access on the remote network.
|
|
||||||
|
|
||||||
- The remote network has no VPN, or only stupid/complex VPN
|
|
||||||
protocols (IPsec, PPTP, etc). Or maybe you *are* the
|
|
||||||
admin and you just got frustrated with the awful state of
|
|
||||||
VPN tools.
|
|
||||||
|
|
||||||
- You don't want to create an ssh port forward for every
|
|
||||||
single host/port on the remote network.
|
|
||||||
|
|
||||||
- You hate openssh's port forwarding because it's randomly
|
|
||||||
slow and/or stupid.
|
|
||||||
|
|
||||||
- You can't use openssh's PermitTunnel feature because
|
|
||||||
it's disabled by default on openssh servers; plus it does
|
|
||||||
TCP-over-TCP, which has terrible performance (see below).
|
|
@ -1,12 +0,0 @@
|
|||||||
Platform Specific Notes
|
|
||||||
=======================
|
|
||||||
|
|
||||||
Contents:
|
|
||||||
|
|
||||||
.. toctree::
|
|
||||||
:maxdepth: 2
|
|
||||||
|
|
||||||
chromeos
|
|
||||||
tproxy
|
|
||||||
windows
|
|
||||||
openwrt
|
|
@ -1,97 +0,0 @@
|
|||||||
Requirements
|
|
||||||
============
|
|
||||||
|
|
||||||
Client side Requirements
|
|
||||||
------------------------
|
|
||||||
|
|
||||||
- sudo, or root access on your client machine.
|
|
||||||
(The server doesn't need admin access.)
|
|
||||||
- Python 3.9 or greater.
|
|
||||||
|
|
||||||
|
|
||||||
Linux with NAT method
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
Supports:
|
|
||||||
|
|
||||||
* IPv4 TCP
|
|
||||||
* IPv4 DNS
|
|
||||||
* IPv6 TCP
|
|
||||||
* IPv6 DNS
|
|
||||||
|
|
||||||
Requires:
|
|
||||||
|
|
||||||
* iptables DNAT and REDIRECT modules. ip6tables for IPv6.
|
|
||||||
|
|
||||||
Linux with nft method
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
Supports:
|
|
||||||
|
|
||||||
* IPv4 TCP
|
|
||||||
* IPv4 DNS
|
|
||||||
* IPv6 TCP
|
|
||||||
* IPv6 DNS
|
|
||||||
|
|
||||||
Requires:
|
|
||||||
|
|
||||||
* nftables
|
|
||||||
|
|
||||||
Linux with TPROXY method
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
Supports:
|
|
||||||
|
|
||||||
* IPv4 TCP
|
|
||||||
* IPv4 UDP
|
|
||||||
* IPv4 DNS
|
|
||||||
* IPv6 TCP
|
|
||||||
* IPv6 UDP
|
|
||||||
* IPv6 DNS
|
|
||||||
|
|
||||||
|
|
||||||
MacOS / FreeBSD / OpenBSD / pfSense
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
Method: pf
|
|
||||||
|
|
||||||
Supports:
|
|
||||||
|
|
||||||
* IPv4 TCP
|
|
||||||
* IPv4 DNS
|
|
||||||
* IPv6 TCP
|
|
||||||
* IPv6 DNS
|
|
||||||
|
|
||||||
Requires:
|
|
||||||
|
|
||||||
* You need to have the pfctl command.
|
|
||||||
|
|
||||||
Windows
|
|
||||||
~~~~~~~
|
|
||||||
|
|
||||||
Experimental built-in support available. See :doc:`windows` for more information.
|
|
||||||
|
|
||||||
|
|
||||||
Server side Requirements
|
|
||||||
------------------------
|
|
||||||
|
|
||||||
- Python 3.9 or greater.
|
|
||||||
|
|
||||||
|
|
||||||
Additional Suggested Software
|
|
||||||
-----------------------------
|
|
||||||
|
|
||||||
- If you are using systemd, sshuttle can notify it when the connection to
|
|
||||||
the remote end is established and the firewall rules are installed. For
|
|
||||||
this feature to work you must configure the process start-up type for the
|
|
||||||
sshuttle service unit to notify, as shown in the example below.
|
|
||||||
|
|
||||||
.. code-block:: ini
|
|
||||||
:emphasize-lines: 6
|
|
||||||
|
|
||||||
[Unit]
|
|
||||||
Description=sshuttle
|
|
||||||
After=network.target
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=notify
|
|
||||||
ExecStart=/usr/bin/sshuttle --dns --remote <user>@<server> <subnets...>
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
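With a unit like the one above installed, enabling it might look like this
(the unit file name is an assumption)::

    sudo systemctl daemon-reload
    sudo systemctl enable --now sshuttle.service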
|
|
@ -1,11 +0,0 @@
|
|||||||
Support
|
|
||||||
=======
|
|
||||||
|
|
||||||
Mailing list:
|
|
||||||
|
|
||||||
* Subscribe by sending a message to <sshuttle+subscribe@googlegroups.com>
|
|
||||||
* List archives are at: http://groups.google.com/group/sshuttle
|
|
||||||
|
|
||||||
Issue tracker and pull requests at github:
|
|
||||||
|
|
||||||
* https://github.com/sshuttle/sshuttle
|
|
@ -1,40 +0,0 @@
|
|||||||
TPROXY
|
|
||||||
======
|
|
||||||
TPROXY is the only method that supports UDP.
|
|
||||||
|
|
||||||
There are some things you need to consider for TPROXY to work:
|
|
||||||
|
|
||||||
- The following commands need to be run first as root. This only needs to be
|
|
||||||
done once after booting up::
|
|
||||||
|
|
||||||
ip route add local default dev lo table 100
|
|
||||||
ip rule add fwmark {TMARK} lookup 100
|
|
||||||
ip -6 route add local default dev lo table 100
|
|
||||||
ip -6 rule add fwmark {TMARK} lookup 100
|
|
||||||
|
|
||||||
where {TMARK} is the identifier mark passed with -t or --tmark flag
|
|
||||||
as a hexadecimal string (default value is '0x01').
|
|
||||||
|
|
||||||
- The ``--auto-nets`` feature does not detect IPv6 routes automatically. Add IPv6
  routes manually, e.g. by adding ``'::/0'`` to the end of the command line.

- The client needs to be run as root, e.g.::

      sudo SSH_AUTH_SOCK="$SSH_AUTH_SOCK" $HOME/tree/sshuttle.tproxy/sshuttle --method=tproxy ...

- You may need to exclude the IP address of the server you are connecting to.
  Otherwise sshuttle may attempt to intercept the ssh packets, which will not
  work. Use the ``--exclude`` parameter for this.

- You need the ``--method=tproxy`` parameter, as above.

- The routes for the outgoing packets must already exist. For example, if your
  connection does not have IPv6 support, no IPv6 routes will exist, no IPv6
  packets will be generated, and sshuttle cannot intercept them::

      telnet -6 www.google.com 80
      Trying 2404:6800:4001:805::1010...
      telnet: Unable to connect to remote host: Network is unreachable

  Add some dummy routes to external interfaces, and make sure they get
  removed again after sshuttle exits; a sketch follows below.
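
  A minimal sketch, assuming the external interface is ``eth0`` (a
  placeholder; use your real interface name): add a temporary IPv6 default
  route so the packets get generated, then delete it again afterwards::

      sudo ip -6 route add default dev eth0    # before starting sshuttle
      # ... run sshuttle --method=tproxy ... '::/0' ...
      sudo ip -6 route del default dev eth0    # after sshuttle exits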
@ -1,35 +0,0 @@
Useless Trivia
==============

This section was written by the original author, Avery Pennarun
<apenwarr@gmail.com>.

Back in 1998, I released the first version of `Tunnel
Vision <http://alumnit.ca/wiki/?TunnelVisionReadMe>`_, a semi-intelligent VPN
client for Linux. Unfortunately, I made two big mistakes: I implemented the
key exchange myself (oops), and I ended up doing TCP-over-TCP (double oops).
The resulting program worked okay - and people used it for years - but the
performance was always a bit funny. And nobody ever found any security flaws
in my key exchange, either, but that doesn't mean anything. :)

The same year, dcoombs and I also released Fast Forward, a proxy server
supporting transparent proxying. Among other things, we used it for
automatically splitting traffic across more than one Internet connection (a
tool we called "Double Vision").

I was still in university at the time. A couple years after that, one of my
professors was working with some graduate students on the technology that would
eventually become `Slipstream Internet Acceleration
<http://www.slipstream.com/>`_. He asked me to do a contract for him to build
an initial prototype of a transparent proxy server for mobile networks. The
idea was similar to sshuttle: if you reassemble and then disassemble the TCP
packets, you can reduce latency and improve performance vs. just forwarding
the packets over a plain VPN or mobile network. (It's unlikely that any of my
code has persisted in the Slipstream product today, but the concept is still
pretty cool. I'm still horrified that people use plain TCP on complex mobile
networks with crazily variable latency, for which it was never really
intended.)

That project I did for Slipstream was what first gave me the idea to merge
the concepts of Fast Forward, Double Vision, and Tunnel Vision into a single
program that was the best of all worlds. And here we are, at last.
You're welcome.
@ -1,93 +0,0 @@
Usage
=====

.. note::

    For information on usage with Windows, see the :doc:`windows` section.
    For information on using the TProxy method, see the :doc:`tproxy` section.

Forward all traffic::

    sshuttle -r username@sshserver 0.0.0.0/0

- Use the :option:`sshuttle -r` parameter to specify a remote server.
  On some systems, you may also need to use the :option:`sshuttle -x`
  parameter to exclude sshserver or sshserver:22 so that your local
  machine can communicate directly with sshserver without it being
  redirected by sshuttle (an exclusion example appears below).

- By default sshuttle will automatically choose a method to use. Override with
  the :option:`sshuttle --method` parameter.

- There is a shortcut for 0.0.0.0/0 for those that value
  their wrists::

      sshuttle -r username@sshserver 0/0

- For 'My VPN broke and I need a temporary solution FAST to access local IPv4 addresses'::

      sshuttle --dns -NHr username@sshserver 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16

If you would also like your DNS queries to be proxied
through the DNS server of the server you are connected to::

    sshuttle --dns -r username@sshserver 0/0

The above is probably what you want to use to prevent
local network attacks such as Firesheep and friends.
See the documentation for the :option:`sshuttle --dns` parameter.

(You may be prompted for one or more passwords; first, the local password to
become root using sudo, and then the remote ssh password. Or you might have
sudo and ssh set up to not require passwords, in which case you won't be
prompted at all.)
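
For illustration only (the hostnames are placeholders), excluding the ssh
server itself while forwarding everything else looks like::

    sshuttle -r username@sshserver -x sshserver 0/0
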
Usage Notes
-----------

That's it! Now your local machine can access the remote network as if you
were right there. And if your "client" machine is a router, everyone on
your local network can make connections to your remote network.

You don't need to install sshuttle on the remote server;
the remote server just needs to have python available.
sshuttle will automatically upload and run its source code
to the remote python interpreter.

This creates a transparent proxy server on your local machine for all IP
addresses that match 0.0.0.0/0. (You can use more specific IP addresses if
you want; use any number of IP addresses or subnets to change which
addresses get proxied. Using 0.0.0.0/0 proxies *everything*, which is
interesting if you don't trust the people on your local network.)

Any TCP session you initiate to one of the proxied IP addresses will be
captured by sshuttle and sent over an ssh session to the remote copy of
sshuttle, which will then regenerate the connection on that end, and funnel
the data back and forth through ssh.

Fun, right? A poor man's instant VPN, and you don't even have to have
admin access on the server.

Sudoers File
------------

sshuttle can generate a sudoers.d file for Linux and MacOS. This
allows one or more users to run sshuttle without entering the
local sudo password. **WARNING:** This option is *insecure*
because, with some cleverness, it also allows these users to run any
command (via the --ssh-cmd option) as root without a password.

To print a sudo configuration file and see a suggested way to install it, run::

    sshuttle --sudoers-no-modify

A custom user or group can be set with the
:option:`sshuttle --sudoers-no-modify --sudoers-user {user_descriptor}`
option. Valid values for this vary based on how your system is configured.
Values such as usernames, groups prepended with `%` and sudoers user
aliases will work. See the sudoers manual for more information on valid
user specifications. The option must be used with `--sudoers-no-modify`::

    sshuttle --sudoers-no-modify --sudoers-user mike
    sshuttle --sudoers-no-modify --sudoers-user %sudo
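
One possible way to install the generated file (the file names and paths here
are only an illustration; prefer whatever the command's own output suggests)::

    sshuttle --sudoers-no-modify > /tmp/sshuttle-sudoers
    visudo -c -f /tmp/sshuttle-sudoers
    sudo install -m 0440 /tmp/sshuttle-sudoers /etc/sudoers.d/sshuttle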
@ -1,28 +0,0 @@
Microsoft Windows
=================

Experimental native support:

Experimental built-in support for Windows is available through the `windivert`
method. You have to install the https://pypi.org/project/pydivert package.
You need Administrator privileges to use the windivert method.

Notes:

- sshuttle should be executed from an admin shell (automatic firewall process admin elevation is not available)
- TCP/IPv4 is supported (IPv6/UDP/DNS are not available)
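
For illustration (the exact commands depend on your Python setup; run them
from an elevated prompt), installing the package and starting sshuttle with
the windivert method might look like::

    pip install pydivert
    sshuttle -r username@sshserver --method=windivert 0.0.0.0/0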
Use Linux VM on Windows:

What we can do instead is create a Linux VM with Vagrant (or simply
VirtualBox if you like). In the Vagrant settings, remember to turn on the
bridged NIC. Then, run sshuttle inside the VM like below::

    sshuttle -l 0.0.0.0 -x 10.0.0.0/8 -x 192.168.0.0/16 0/0

10.0.0.0/8 excludes Vagrant's NAT traffic and 192.168.0.0/16 excludes
traffic to the local area network (assuming that we're using a 192.168.0.0 subnet).

Assuming the VM has the IP 192.168.1.200 obtained on the bridge NIC (we can
configure that in Vagrant), we can then ask Windows to route all its traffic
via the VM by running the following in cmd.exe with admin rights::

    route add 0.0.0.0 mask 0.0.0.0 192.168.1.200
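
When finished, the route can be removed again (using the same placeholder
gateway address as above)::

    route delete 0.0.0.0 mask 0.0.0.0 192.168.1.200
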
536
firewall.py
Normal file
@ -0,0 +1,536 @@
|
|||||||
|
import re, errno, socket, select, signal, struct
|
||||||
|
import compat.ssubprocess as ssubprocess
|
||||||
|
import helpers, ssyslog
|
||||||
|
from helpers import *
|
||||||
|
|
||||||
|
# python doesn't have a definition for this
|
||||||
|
IPPROTO_DIVERT = 254
|
||||||
|
|
||||||
|
# return values from sysctl_set
|
||||||
|
SUCCESS = 0
|
||||||
|
SAME = 1
|
||||||
|
FAILED = -1
|
||||||
|
NONEXIST = -2
|
||||||
|
|
||||||
|
|
||||||
|
def nonfatal(func, *args):
|
||||||
|
try:
|
||||||
|
func(*args)
|
||||||
|
except Fatal, e:
|
||||||
|
log('error: %s\n' % e)
|
||||||
|
|
||||||
|
|
||||||
|
def _call(argv):
|
||||||
|
debug1('>> %s\n' % ' '.join(argv))
|
||||||
|
rv = ssubprocess.call(argv)
|
||||||
|
if rv:
|
||||||
|
raise Fatal('%r returned %d' % (argv, rv))
|
||||||
|
return rv
|
||||||
|
|
||||||
|
|
||||||
|
def ipt_chain_exists(name):
|
||||||
|
argv = ['iptables', '-t', 'nat', '-nL']
|
||||||
|
p = ssubprocess.Popen(argv, stdout = ssubprocess.PIPE)
|
||||||
|
for line in p.stdout:
|
||||||
|
if line.startswith('Chain %s ' % name):
|
||||||
|
return True
|
||||||
|
rv = p.wait()
|
||||||
|
if rv:
|
||||||
|
raise Fatal('%r returned %d' % (argv, rv))
|
||||||
|
|
||||||
|
|
||||||
|
def ipt(*args):
|
||||||
|
argv = ['iptables', '-t', 'nat'] + list(args)
|
||||||
|
_call(argv)
|
||||||
|
|
||||||
|
|
||||||
|
_no_ttl_module = False
|
||||||
|
def ipt_ttl(*args):
|
||||||
|
global _no_ttl_module
|
||||||
|
if not _no_ttl_module:
|
||||||
|
# we avoid infinite loops by generating server-side connections
|
||||||
|
# with ttl 42. This makes the client side not recapture those
|
||||||
|
# connections, in case client == server.
|
||||||
|
try:
|
||||||
|
argsplus = list(args) + ['-m', 'ttl', '!', '--ttl', '42']
|
||||||
|
ipt(*argsplus)
|
||||||
|
except Fatal:
|
||||||
|
ipt(*args)
|
||||||
|
# we only get here if the non-ttl attempt succeeds
|
||||||
|
log('sshuttle: warning: your iptables is missing '
|
||||||
|
'the ttl module.\n')
|
||||||
|
_no_ttl_module = True
|
||||||
|
else:
|
||||||
|
ipt(*args)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# We name the chain based on the transproxy port number so that it's possible
|
||||||
|
# to run multiple copies of sshuttle at the same time. Of course, the
|
||||||
|
# multiple copies shouldn't have overlapping subnets, or only the most-
|
||||||
|
# recently-started one will win (because we use "-I OUTPUT 1" instead of
|
||||||
|
# "-A OUTPUT").
|
||||||
|
def do_iptables(port, dnsport, subnets):
|
||||||
|
chain = 'sshuttle-%s' % port
|
||||||
|
|
||||||
|
# basic cleanup/setup of chains
|
||||||
|
if ipt_chain_exists(chain):
|
||||||
|
nonfatal(ipt, '-D', 'OUTPUT', '-j', chain)
|
||||||
|
nonfatal(ipt, '-D', 'PREROUTING', '-j', chain)
|
||||||
|
nonfatal(ipt, '-F', chain)
|
||||||
|
ipt('-X', chain)
|
||||||
|
|
||||||
|
if subnets or dnsport:
|
||||||
|
ipt('-N', chain)
|
||||||
|
ipt('-F', chain)
|
||||||
|
ipt('-I', 'OUTPUT', '1', '-j', chain)
|
||||||
|
ipt('-I', 'PREROUTING', '1', '-j', chain)
|
||||||
|
|
||||||
|
if subnets:
|
||||||
|
# create new subnet entries. Note that we're sorting in a very
|
||||||
|
# particular order: we need to go from most-specific (largest swidth)
|
||||||
|
# to least-specific, and at any given level of specificity, we want
|
||||||
|
# excludes to come first. That's why the columns are in such a non-
|
||||||
|
# intuitive order.
|
||||||
|
for swidth,sexclude,snet in sorted(subnets, reverse=True):
|
||||||
|
if sexclude:
|
||||||
|
ipt('-A', chain, '-j', 'RETURN',
|
||||||
|
'--dest', '%s/%s' % (snet,swidth),
|
||||||
|
'-p', 'tcp')
|
||||||
|
else:
|
||||||
|
ipt_ttl('-A', chain, '-j', 'REDIRECT',
|
||||||
|
'--dest', '%s/%s' % (snet,swidth),
|
||||||
|
'-p', 'tcp',
|
||||||
|
'--to-ports', str(port))
|
||||||
|
|
||||||
|
if dnsport:
|
||||||
|
nslist = resolvconf_nameservers()
|
||||||
|
for ip in nslist:
|
||||||
|
ipt_ttl('-A', chain, '-j', 'REDIRECT',
|
||||||
|
'--dest', '%s/32' % ip,
|
||||||
|
'-p', 'udp',
|
||||||
|
'--dport', '53',
|
||||||
|
'--to-ports', str(dnsport))
|
||||||
|
|
||||||
|
|
||||||
|
def ipfw_rule_exists(n):
|
||||||
|
argv = ['ipfw', 'list']
|
||||||
|
p = ssubprocess.Popen(argv, stdout = ssubprocess.PIPE)
|
||||||
|
found = False
|
||||||
|
for line in p.stdout:
|
||||||
|
if line.startswith('%05d ' % n):
|
||||||
|
if not ('ipttl 42' in line
|
||||||
|
or ('skipto %d' % (n+1)) in line
|
||||||
|
or 'check-state' in line):
|
||||||
|
log('non-sshuttle ipfw rule: %r\n' % line.strip())
|
||||||
|
raise Fatal('non-sshuttle ipfw rule #%d already exists!' % n)
|
||||||
|
found = True
|
||||||
|
rv = p.wait()
|
||||||
|
if rv:
|
||||||
|
raise Fatal('%r returned %d' % (argv, rv))
|
||||||
|
return found
|
||||||
|
|
||||||
|
|
||||||
|
_oldctls = {}
|
||||||
|
def _fill_oldctls(prefix):
|
||||||
|
argv = ['sysctl', prefix]
|
||||||
|
p = ssubprocess.Popen(argv, stdout = ssubprocess.PIPE)
|
||||||
|
for line in p.stdout:
|
||||||
|
assert(line[-1] == '\n')
|
||||||
|
(k,v) = line[:-1].split(': ', 1)
|
||||||
|
_oldctls[k] = v
|
||||||
|
rv = p.wait()
|
||||||
|
if rv:
|
||||||
|
raise Fatal('%r returned %d' % (argv, rv))
|
||||||
|
if not line:
|
||||||
|
raise Fatal('%r returned no data' % (argv,))
|
||||||
|
|
||||||
|
|
||||||
|
KERNEL_FLAGS_PATH = '/Library/Preferences/SystemConfiguration/com.apple.Boot'
|
||||||
|
KERNEL_FLAGS_NAME = 'Kernel Flags'
|
||||||
|
def _defaults_read_kernel_flags():
|
||||||
|
argv = ['defaults', 'read', KERNEL_FLAGS_PATH, KERNEL_FLAGS_NAME]
|
||||||
|
debug1('>> %s\n' % ' '.join(argv))
|
||||||
|
p = ssubprocess.Popen(argv, stdout = ssubprocess.PIPE)
|
||||||
|
flagstr = p.stdout.read().strip()
|
||||||
|
rv = p.wait()
|
||||||
|
if rv:
|
||||||
|
raise Fatal('%r returned %d' % (argv, rv))
|
||||||
|
flags = flagstr and flagstr.split(' ') or []
|
||||||
|
return flags
|
||||||
|
|
||||||
|
|
||||||
|
def _defaults_write_kernel_flags(flags):
|
||||||
|
flagstr = ' '.join(flags)
|
||||||
|
argv = ['defaults', 'write', KERNEL_FLAGS_PATH, KERNEL_FLAGS_NAME,
|
||||||
|
flagstr]
|
||||||
|
_call(argv)
|
||||||
|
argv = ['plutil', '-convert', 'xml1', KERNEL_FLAGS_PATH + '.plist']
|
||||||
|
_call(argv)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def defaults_write_kernel_flag(name, val):
|
||||||
|
flags = _defaults_read_kernel_flags()
|
||||||
|
found = 0
|
||||||
|
for i in range(len(flags)):
|
||||||
|
if flags[i].startswith('%s=' % name):
|
||||||
|
found += 1
|
||||||
|
flags[i] = '%s=%s' % (name, val)
|
||||||
|
if not found:
|
||||||
|
flags.insert(0, '%s=%s' % (name, val))
|
||||||
|
_defaults_write_kernel_flags(flags)
|
||||||
|
|
||||||
|
|
||||||
|
def _sysctl_set(name, val):
|
||||||
|
argv = ['sysctl', '-w', '%s=%s' % (name, val)]
|
||||||
|
debug1('>> %s\n' % ' '.join(argv))
|
||||||
|
return ssubprocess.call(argv, stdout = open('/dev/null', 'w'))
|
||||||
|
|
||||||
|
|
||||||
|
_changedctls = []
|
||||||
|
def sysctl_set(name, val, permanent=False):
|
||||||
|
PREFIX = 'net.inet.ip'
|
||||||
|
assert(name.startswith(PREFIX + '.'))
|
||||||
|
val = str(val)
|
||||||
|
if not _oldctls:
|
||||||
|
_fill_oldctls(PREFIX)
|
||||||
|
if not (name in _oldctls):
|
||||||
|
debug1('>> No such sysctl: %r\n' % name)
|
||||||
|
return NONEXIST
|
||||||
|
oldval = _oldctls[name]
|
||||||
|
if val == oldval:
|
||||||
|
return SAME
|
||||||
|
|
||||||
|
rv = _sysctl_set(name, val)
|
||||||
|
if rv != 0:
|
||||||
|
return FAILED
|
||||||
|
if permanent:
|
||||||
|
debug1('>> ...saving permanently in /etc/sysctl.conf\n')
|
||||||
|
f = open('/etc/sysctl.conf', 'a')
|
||||||
|
f.write('\n'
|
||||||
|
'# Added by sshuttle\n'
|
||||||
|
'%s=%s\n' % (name, val))
|
||||||
|
f.close()
|
||||||
|
else:
|
||||||
|
_changedctls.append(name)
|
||||||
|
return SUCCESS
|
||||||
|
|
||||||
|
|
||||||
|
def _udp_unpack(p):
|
||||||
|
src = (socket.inet_ntoa(p[12:16]), struct.unpack('!H', p[20:22])[0])
|
||||||
|
dst = (socket.inet_ntoa(p[16:20]), struct.unpack('!H', p[22:24])[0])
|
||||||
|
return src, dst
|
||||||
|
|
||||||
|
|
||||||
|
def _udp_repack(p, src, dst):
|
||||||
|
addrs = socket.inet_aton(src[0]) + socket.inet_aton(dst[0])
|
||||||
|
ports = struct.pack('!HH', src[1], dst[1])
|
||||||
|
return p[:12] + addrs + ports + p[24:]
|
||||||
|
|
||||||
|
|
||||||
|
_real_dns_server = [None]
|
||||||
|
def _handle_diversion(divertsock, dnsport):
|
||||||
|
p,tag = divertsock.recvfrom(4096)
|
||||||
|
src,dst = _udp_unpack(p)
|
||||||
|
debug3('got diverted packet from %r to %r\n' % (src, dst))
|
||||||
|
if dst[1] == 53:
|
||||||
|
# outgoing DNS
|
||||||
|
debug3('...packet is a DNS request.\n')
|
||||||
|
_real_dns_server[0] = dst
|
||||||
|
dst = ('127.0.0.1', dnsport)
|
||||||
|
elif src[1] == dnsport:
|
||||||
|
if islocal(src[0]):
|
||||||
|
debug3('...packet is a DNS response.\n')
|
||||||
|
src = _real_dns_server[0]
|
||||||
|
else:
|
||||||
|
log('weird?! unexpected divert from %r to %r\n' % (src, dst))
|
||||||
|
assert(0)
|
||||||
|
newp = _udp_repack(p, src, dst)
|
||||||
|
divertsock.sendto(newp, tag)
|
||||||
|
|
||||||
|
|
||||||
|
def ipfw(*args):
|
||||||
|
argv = ['ipfw', '-q'] + list(args)
|
||||||
|
_call(argv)
|
||||||
|
|
||||||
|
|
||||||
|
def do_ipfw(port, dnsport, subnets):
|
||||||
|
sport = str(port)
|
||||||
|
xsport = str(port+1)
|
||||||
|
|
||||||
|
# cleanup any existing rules
|
||||||
|
if ipfw_rule_exists(port):
|
||||||
|
ipfw('delete', sport)
|
||||||
|
|
||||||
|
while _changedctls:
|
||||||
|
name = _changedctls.pop()
|
||||||
|
oldval = _oldctls[name]
|
||||||
|
_sysctl_set(name, oldval)
|
||||||
|
|
||||||
|
if subnets or dnsport:
|
||||||
|
sysctl_set('net.inet.ip.fw.enable', 1)
|
||||||
|
|
||||||
|
# This seems to be needed on MacOS 10.6 and 10.7. For more
|
||||||
|
# information, see:
|
||||||
|
# http://groups.google.com/group/sshuttle/browse_thread/thread/bc32562e17987b25/6d3aa2bb30a1edab
|
||||||
|
# and
|
||||||
|
# http://serverfault.com/questions/138622/transparent-proxying-leaves-sockets-with-syn-rcvd-in-macos-x-10-6-snow-leopard
|
||||||
|
changeflag = sysctl_set('net.inet.ip.scopedroute', 0, permanent=True)
|
||||||
|
if changeflag == SUCCESS:
|
||||||
|
log("\n"
|
||||||
|
" WARNING: ONE-TIME NETWORK DISRUPTION:\n"
|
||||||
|
" =====================================\n"
|
||||||
|
"sshuttle has changed a MacOS kernel setting to work around\n"
|
||||||
|
"a bug in MacOS 10.6. This will cause your network to drop\n"
|
||||||
|
"within 5-10 minutes unless you restart your network\n"
|
||||||
|
"interface (change wireless networks or unplug/plug the\n"
|
||||||
|
"ethernet port) NOW, then restart sshuttle. The fix is\n"
|
||||||
|
"permanent; you only have to do this once.\n\n")
|
||||||
|
sys.exit(1)
|
||||||
|
elif changeflag == FAILED:
|
||||||
|
# On MacOS 10.7, the scopedroute sysctl became read-only, so
|
||||||
|
# we have to fix it using a kernel boot parameter instead,
|
||||||
|
# which requires rebooting. For more, see:
|
||||||
|
# http://groups.google.com/group/sshuttle/browse_thread/thread/a42505ca33e1de80/e5e8f3e5a92d25f7
|
||||||
|
log('Updating kernel boot flags.\n')
|
||||||
|
defaults_write_kernel_flag('net.inet.ip.scopedroute', 0)
|
||||||
|
log("\n"
|
||||||
|
" YOU MUST REBOOT TO USE SSHUTTLE\n"
|
||||||
|
" ===============================\n"
|
||||||
|
"sshuttle has changed a MacOS kernel boot-time setting\n"
|
||||||
|
"to work around a bug in MacOS 10.7 Lion. You will need\n"
|
||||||
|
"to reboot before it takes effect. You only have to\n"
|
||||||
|
"do this once.\n\n")
|
||||||
|
sys.exit(EXITCODE_NEEDS_REBOOT)
|
||||||
|
|
||||||
|
ipfw('add', sport, 'check-state', 'ip',
|
||||||
|
'from', 'any', 'to', 'any')
|
||||||
|
|
||||||
|
if subnets:
|
||||||
|
# create new subnet entries
|
||||||
|
for swidth,sexclude,snet in sorted(subnets, reverse=True):
|
||||||
|
if sexclude:
|
||||||
|
ipfw('add', sport, 'skipto', xsport,
|
||||||
|
'tcp',
|
||||||
|
'from', 'any', 'to', '%s/%s' % (snet,swidth))
|
||||||
|
else:
|
||||||
|
ipfw('add', sport, 'fwd', '127.0.0.1,%d' % port,
|
||||||
|
'tcp',
|
||||||
|
'from', 'any', 'to', '%s/%s' % (snet,swidth),
|
||||||
|
'not', 'ipttl', '42', 'keep-state', 'setup')
|
||||||
|
|
||||||
|
# This part is much crazier than it is on Linux, because MacOS (at least
|
||||||
|
# 10.6, and probably other versions, and maybe FreeBSD too) doesn't
|
||||||
|
# correctly fixup the dstip/dstport for UDP packets when it puts them
|
||||||
|
# through a 'fwd' rule. It also doesn't fixup the srcip/srcport in the
|
||||||
|
# response packet. In Linux iptables, all that happens magically for us,
|
||||||
|
# so we just redirect the packets and relax.
|
||||||
|
#
|
||||||
|
# On MacOS, we have to fix the ports ourselves. For that, we use a
|
||||||
|
# 'divert' socket, which receives raw packets and lets us mangle them.
|
||||||
|
#
|
||||||
|
# Here's how it works. Let's say the local DNS server is 1.1.1.1:53,
|
||||||
|
# and the remote DNS server is 2.2.2.2:53, and the local transproxy port
|
||||||
|
# is 10.0.0.1:12300, and a client machine is making a request from
|
||||||
|
# 10.0.0.5:9999. We see a packet like this:
|
||||||
|
# 10.0.0.5:9999 -> 1.1.1.1:53
|
||||||
|
# Since the destip:port matches one of our local nameservers, it will
|
||||||
|
# match a 'fwd' rule, thus grabbing it on the local machine. However,
|
||||||
|
# the local kernel will then see a packet addressed to *:53 and
|
||||||
|
# not know what to do with it; there's nobody listening on port 53. Thus,
|
||||||
|
# we divert it, rewriting it into this:
|
||||||
|
# 10.0.0.5:9999 -> 10.0.0.1:12300
|
||||||
|
# This gets proxied out to the server, which sends it to 2.2.2.2:53,
|
||||||
|
# and the answer comes back, and the proxy sends it back out like this:
|
||||||
|
# 10.0.0.1:12300 -> 10.0.0.5:9999
|
||||||
|
# But that's wrong! The original machine expected an answer from
|
||||||
|
# 1.1.1.1:53, so we have to divert the *answer* and rewrite it:
|
||||||
|
# 1.1.1.1:53 -> 10.0.0.5:9999
|
||||||
|
#
|
||||||
|
# See? Easy stuff.
|
||||||
|
if dnsport:
|
||||||
|
divertsock = socket.socket(socket.AF_INET, socket.SOCK_RAW,
|
||||||
|
IPPROTO_DIVERT)
|
||||||
|
divertsock.bind(('0.0.0.0', port)) # IP field is ignored
|
||||||
|
|
||||||
|
nslist = resolvconf_nameservers()
|
||||||
|
for ip in nslist:
|
||||||
|
# relabel and then catch outgoing DNS requests
|
||||||
|
ipfw('add', sport, 'divert', sport,
|
||||||
|
'udp',
|
||||||
|
'from', 'any', 'to', '%s/32' % ip, '53',
|
||||||
|
'not', 'ipttl', '42')
|
||||||
|
# relabel DNS responses
|
||||||
|
ipfw('add', sport, 'divert', sport,
|
||||||
|
'udp',
|
||||||
|
'from', 'any', str(dnsport), 'to', 'any',
|
||||||
|
'not', 'ipttl', '42')
|
||||||
|
|
||||||
|
def do_wait():
|
||||||
|
while 1:
|
||||||
|
r,w,x = select.select([sys.stdin, divertsock], [], [])
|
||||||
|
if divertsock in r:
|
||||||
|
_handle_diversion(divertsock, dnsport)
|
||||||
|
if sys.stdin in r:
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
do_wait = None
|
||||||
|
|
||||||
|
return do_wait
|
||||||
|
|
||||||
|
|
||||||
|
def program_exists(name):
|
||||||
|
paths = (os.getenv('PATH') or os.defpath).split(os.pathsep)
|
||||||
|
for p in paths:
|
||||||
|
fn = '%s/%s' % (p, name)
|
||||||
|
if os.path.exists(fn):
|
||||||
|
return not os.path.isdir(fn) and os.access(fn, os.X_OK)
|
||||||
|
|
||||||
|
|
||||||
|
hostmap = {}
|
||||||
|
def rewrite_etc_hosts(port):
|
||||||
|
HOSTSFILE='/etc/hosts'
|
||||||
|
BAKFILE='%s.sbak' % HOSTSFILE
|
||||||
|
APPEND='# sshuttle-firewall-%d AUTOCREATED' % port
|
||||||
|
old_content = ''
|
||||||
|
st = None
|
||||||
|
try:
|
||||||
|
old_content = open(HOSTSFILE).read()
|
||||||
|
st = os.stat(HOSTSFILE)
|
||||||
|
except IOError, e:
|
||||||
|
if e.errno == errno.ENOENT:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
if old_content.strip() and not os.path.exists(BAKFILE):
|
||||||
|
os.link(HOSTSFILE, BAKFILE)
|
||||||
|
tmpname = "%s.%d.tmp" % (HOSTSFILE, port)
|
||||||
|
f = open(tmpname, 'w')
|
||||||
|
for line in old_content.rstrip().split('\n'):
|
||||||
|
if line.find(APPEND) >= 0:
|
||||||
|
continue
|
||||||
|
f.write('%s\n' % line)
|
||||||
|
for (name,ip) in sorted(hostmap.items()):
|
||||||
|
f.write('%-30s %s\n' % ('%s %s' % (ip,name), APPEND))
|
||||||
|
f.close()
|
||||||
|
|
||||||
|
if st:
|
||||||
|
os.chown(tmpname, st.st_uid, st.st_gid)
|
||||||
|
os.chmod(tmpname, st.st_mode)
|
||||||
|
else:
|
||||||
|
os.chown(tmpname, 0, 0)
|
||||||
|
os.chmod(tmpname, 0644)
|
||||||
|
os.rename(tmpname, HOSTSFILE)
|
||||||
|
|
||||||
|
|
||||||
|
def restore_etc_hosts(port):
|
||||||
|
global hostmap
|
||||||
|
hostmap = {}
|
||||||
|
rewrite_etc_hosts(port)
|
||||||
|
|
||||||
|
|
||||||
|
# This is some voodoo for setting up the kernel's transparent
|
||||||
|
# proxying stuff. If subnets is empty, we just delete our sshuttle rules;
|
||||||
|
# otherwise we delete it, then make them from scratch.
|
||||||
|
#
|
||||||
|
# This code is supposed to clean up after itself by deleting its rules on
|
||||||
|
# exit. In case that fails, it's not the end of the world; future runs will
|
||||||
|
# supercede it in the transproxy list, at least, so the leftover rules
|
||||||
|
# are hopefully harmless.
|
||||||
|
def main(port, dnsport, syslog):
|
||||||
|
assert(port > 0)
|
||||||
|
assert(port <= 65535)
|
||||||
|
assert(dnsport >= 0)
|
||||||
|
assert(dnsport <= 65535)
|
||||||
|
|
||||||
|
if os.getuid() != 0:
|
||||||
|
raise Fatal('you must be root (or enable su/sudo) to set the firewall')
|
||||||
|
|
||||||
|
if program_exists('ipfw'):
|
||||||
|
do_it = do_ipfw
|
||||||
|
elif program_exists('iptables'):
|
||||||
|
do_it = do_iptables
|
||||||
|
else:
|
||||||
|
raise Fatal("can't find either ipfw or iptables; check your PATH")
|
||||||
|
|
||||||
|
# because of limitations of the 'su' command, the *real* stdin/stdout
|
||||||
|
# are both attached to stdout initially. Clone stdout into stdin so we
|
||||||
|
# can read from it.
|
||||||
|
os.dup2(1, 0)
|
||||||
|
|
||||||
|
if syslog:
|
||||||
|
ssyslog.start_syslog()
|
||||||
|
ssyslog.stderr_to_syslog()
|
||||||
|
|
||||||
|
debug1('firewall manager ready.\n')
|
||||||
|
sys.stdout.write('READY\n')
|
||||||
|
sys.stdout.flush()
|
||||||
|
|
||||||
|
# don't disappear if our controlling terminal or stdout/stderr
|
||||||
|
# disappears; we still have to clean up.
|
||||||
|
signal.signal(signal.SIGHUP, signal.SIG_IGN)
|
||||||
|
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
|
||||||
|
signal.signal(signal.SIGTERM, signal.SIG_IGN)
|
||||||
|
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||||
|
|
||||||
|
# ctrl-c shouldn't be passed along to me. When the main sshuttle dies,
|
||||||
|
# I'll die automatically.
|
||||||
|
os.setsid()
|
||||||
|
|
||||||
|
# we wait until we get some input before creating the rules. That way,
|
||||||
|
# sshuttle can launch us as early as possible (and get sudo password
|
||||||
|
# authentication as early in the startup process as possible).
|
||||||
|
line = sys.stdin.readline(128)
|
||||||
|
if not line:
|
||||||
|
return # parent died; nothing to do
|
||||||
|
|
||||||
|
subnets = []
|
||||||
|
if line != 'ROUTES\n':
|
||||||
|
raise Fatal('firewall: expected ROUTES but got %r' % line)
|
||||||
|
while 1:
|
||||||
|
line = sys.stdin.readline(128)
|
||||||
|
if not line:
|
||||||
|
raise Fatal('firewall: expected route but got %r' % line)
|
||||||
|
elif line == 'GO\n':
|
||||||
|
break
|
||||||
|
try:
|
||||||
|
(width,exclude,ip) = line.strip().split(',', 2)
|
||||||
|
except:
|
||||||
|
raise Fatal('firewall: expected route or GO but got %r' % line)
|
||||||
|
subnets.append((int(width), bool(int(exclude)), ip))
|
||||||
|
|
||||||
|
try:
|
||||||
|
if line:
|
||||||
|
debug1('firewall manager: starting transproxy.\n')
|
||||||
|
do_wait = do_it(port, dnsport, subnets)
|
||||||
|
sys.stdout.write('STARTED\n')
|
||||||
|
|
||||||
|
try:
|
||||||
|
sys.stdout.flush()
|
||||||
|
except IOError:
|
||||||
|
# the parent process died for some reason; he's surely been loud
|
||||||
|
# enough, so no reason to report another error
|
||||||
|
return
|
||||||
|
|
||||||
|
# Now we wait until EOF or any other kind of exception. We need
|
||||||
|
# to stay running so that we don't need a *second* password
|
||||||
|
# authentication at shutdown time - that cleanup is important!
|
||||||
|
while 1:
|
||||||
|
if do_wait: do_wait()
|
||||||
|
line = sys.stdin.readline(128)
|
||||||
|
if line.startswith('HOST '):
|
||||||
|
(name,ip) = line[5:].strip().split(',', 1)
|
||||||
|
hostmap[name] = ip
|
||||||
|
rewrite_etc_hosts(port)
|
||||||
|
elif line:
|
||||||
|
raise Fatal('expected EOF, got %r' % line)
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
finally:
|
||||||
|
try:
|
||||||
|
debug1('firewall manager: undoing changes.\n')
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
do_it(port, 0, [])
|
||||||
|
restore_etc_hosts(port)
|
133
flake.lock
generated
@ -1,133 +0,0 @@
|
|||||||
{
|
|
||||||
"nodes": {
|
|
||||||
"flake-utils": {
|
|
||||||
"inputs": {
|
|
||||||
"systems": "systems"
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1731533236,
|
|
||||||
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "numtide",
|
|
||||||
"repo": "flake-utils",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nixpkgs": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1740743217,
|
|
||||||
"narHash": "sha256-brsCRzLqimpyhORma84c3W2xPbIidZlIc3JGIuQVSNI=",
|
|
||||||
"owner": "NixOS",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"rev": "b27ba4eb322d9d2bf2dc9ada9fd59442f50c8d7c",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "NixOS",
|
|
||||||
"ref": "nixos-24.11",
|
|
||||||
"repo": "nixpkgs",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"pyproject-build-systems": {
|
|
||||||
"inputs": {
|
|
||||||
"nixpkgs": [
|
|
||||||
"nixpkgs"
|
|
||||||
],
|
|
||||||
"pyproject-nix": [
|
|
||||||
"pyproject-nix"
|
|
||||||
],
|
|
||||||
"uv2nix": [
|
|
||||||
"uv2nix"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1740362541,
|
|
||||||
"narHash": "sha256-S8Mno07MspggOv/xIz5g8hB2b/C5HPiX8E+rXzKY+5U=",
|
|
||||||
"owner": "pyproject-nix",
|
|
||||||
"repo": "build-system-pkgs",
|
|
||||||
"rev": "e151741c848ba92331af91f4e47640a1fb82be19",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "pyproject-nix",
|
|
||||||
"repo": "build-system-pkgs",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"pyproject-nix": {
|
|
||||||
"inputs": {
|
|
||||||
"nixpkgs": [
|
|
||||||
"nixpkgs"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1739758351,
|
|
||||||
"narHash": "sha256-Aoa4dEoC7Hf6+gFVk/SDquZTMFlmlfsgdTWuqQxzePs=",
|
|
||||||
"owner": "pyproject-nix",
|
|
||||||
"repo": "pyproject.nix",
|
|
||||||
"rev": "1329712f7f9af3a8b270764ba338a455b7323811",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "pyproject-nix",
|
|
||||||
"repo": "pyproject.nix",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"root": {
|
|
||||||
"inputs": {
|
|
||||||
"flake-utils": "flake-utils",
|
|
||||||
"nixpkgs": "nixpkgs",
|
|
||||||
"pyproject-build-systems": "pyproject-build-systems",
|
|
||||||
"pyproject-nix": "pyproject-nix",
|
|
||||||
"uv2nix": "uv2nix"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"systems": {
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1681028828,
|
|
||||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
|
||||||
"owner": "nix-systems",
|
|
||||||
"repo": "default",
|
|
||||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "nix-systems",
|
|
||||||
"repo": "default",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"uv2nix": {
|
|
||||||
"inputs": {
|
|
||||||
"nixpkgs": [
|
|
||||||
"nixpkgs"
|
|
||||||
],
|
|
||||||
"pyproject-nix": [
|
|
||||||
"pyproject-nix"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1740497536,
|
|
||||||
"narHash": "sha256-K+8wsVooqhaqyxuvew3+62mgOfRLJ7whv7woqPU3Ypo=",
|
|
||||||
"owner": "pyproject-nix",
|
|
||||||
"repo": "uv2nix",
|
|
||||||
"rev": "d01fd3a141755ad5d5b93dd9fcbd76d6401f5bac",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "pyproject-nix",
|
|
||||||
"repo": "uv2nix",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"root": "root",
|
|
||||||
"version": 7
|
|
||||||
}
|
|
117
flake.nix
@ -1,117 +0,0 @@
|
|||||||
{
|
|
||||||
description = "Transparent proxy server that works as a poor man's VPN. Forwards over ssh. Doesn't require admin. Works with Linux and MacOS. Supports DNS tunneling.";
|
|
||||||
|
|
||||||
inputs = {
|
|
||||||
flake-utils.url = "github:numtide/flake-utils";
|
|
||||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
|
|
||||||
pyproject-nix = {
|
|
||||||
url = "github:pyproject-nix/pyproject.nix";
|
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
uv2nix = {
|
|
||||||
url = "github:pyproject-nix/uv2nix";
|
|
||||||
inputs.pyproject-nix.follows = "pyproject-nix";
|
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
pyproject-build-systems = {
|
|
||||||
url = "github:pyproject-nix/build-system-pkgs";
|
|
||||||
inputs.pyproject-nix.follows = "pyproject-nix";
|
|
||||||
inputs.uv2nix.follows = "uv2nix";
|
|
||||||
inputs.nixpkgs.follows = "nixpkgs";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
outputs =
|
|
||||||
{
|
|
||||||
self,
|
|
||||||
nixpkgs,
|
|
||||||
flake-utils,
|
|
||||||
pyproject-nix,
|
|
||||||
uv2nix,
|
|
||||||
pyproject-build-systems,
|
|
||||||
}:
|
|
||||||
flake-utils.lib.eachDefaultSystem (
|
|
||||||
system:
|
|
||||||
let
|
|
||||||
inherit (nixpkgs) lib;
|
|
||||||
|
|
||||||
pkgs = nixpkgs.legacyPackages.${system};
|
|
||||||
|
|
||||||
python = pkgs.python312;
|
|
||||||
|
|
||||||
workspace = uv2nix.lib.workspace.loadWorkspace { workspaceRoot = ./.; };
|
|
||||||
|
|
||||||
# Create package overlay from workspace.
|
|
||||||
overlay = workspace.mkPyprojectOverlay {
|
|
||||||
sourcePreference = "sdist";
|
|
||||||
};
|
|
||||||
|
|
||||||
# Extend generated overlay with build fixups
|
|
||||||
#
|
|
||||||
# Uv2nix can only work with what it has, and uv.lock is missing essential metadata to perform some builds.
|
|
||||||
# This is an additional overlay implementing build fixups.
|
|
||||||
# See:
|
|
||||||
# - https://pyproject-nix.github.io/uv2nix/FAQ.html
|
|
||||||
pyprojectOverrides =
|
|
||||||
final: prev:
|
|
||||||
# Implement build fixups here.
|
|
||||||
# Note that uv2nix is _not_ using Nixpkgs buildPythonPackage.
|
|
||||||
# It's using https://pyproject-nix.github.io/pyproject.nix/build.html
|
|
||||||
let
|
|
||||||
inherit (final) resolveBuildSystem;
|
|
||||||
inherit (builtins) mapAttrs;
|
|
||||||
|
|
||||||
# Build system dependencies specified in the shape expected by resolveBuildSystem
|
|
||||||
# The empty lists below are lists of optional dependencies.
|
|
||||||
#
|
|
||||||
# A package `foo` with specification written as:
|
|
||||||
# `setuptools-scm[toml]` in pyproject.toml would be written as
|
|
||||||
# `foo.setuptools-scm = [ "toml" ]` in Nix
|
|
||||||
buildSystemOverrides = {
|
|
||||||
chardet.setuptools = [ ];
|
|
||||||
colorlog.setuptools = [ ];
|
|
||||||
python-debian.setuptools = [ ];
|
|
||||||
pluggy.setuptools = [ ];
|
|
||||||
pathspec.flit-core = [ ];
|
|
||||||
packaging.flit-core = [ ];
|
|
||||||
};
|
|
||||||
|
|
||||||
in
|
|
||||||
mapAttrs (
|
|
||||||
name: spec:
|
|
||||||
prev.${name}.overrideAttrs (old: {
|
|
||||||
nativeBuildInputs = old.nativeBuildInputs ++ resolveBuildSystem spec;
|
|
||||||
})
|
|
||||||
) buildSystemOverrides;
|
|
||||||
|
|
||||||
pythonSet =
|
|
||||||
(pkgs.callPackage pyproject-nix.build.packages {
|
|
||||||
inherit python;
|
|
||||||
}).overrideScope
|
|
||||||
(
|
|
||||||
lib.composeManyExtensions [
|
|
||||||
pyproject-build-systems.overlays.default
|
|
||||||
overlay
|
|
||||||
pyprojectOverrides
|
|
||||||
]
|
|
||||||
);
|
|
||||||
|
|
||||||
inherit (pkgs.callPackages pyproject-nix.build.util { }) mkApplication;
|
|
||||||
package = mkApplication {
|
|
||||||
venv = pythonSet.mkVirtualEnv "sshuttle" workspace.deps.default;
|
|
||||||
package = pythonSet.sshuttle;
|
|
||||||
};
|
|
||||||
in
|
|
||||||
{
|
|
||||||
packages = {
|
|
||||||
sshuttle = package;
|
|
||||||
default = package;
|
|
||||||
};
|
|
||||||
devShells.default = pkgs.mkShell {
|
|
||||||
packages = [
|
|
||||||
pkgs.uv
|
|
||||||
];
|
|
||||||
};
|
|
||||||
}
|
|
||||||
);
|
|
||||||
}
|
|
80
helpers.py
Normal file
@ -0,0 +1,80 @@
|
|||||||
|
import sys, os, socket, errno
|
||||||
|
|
||||||
|
logprefix = ''
|
||||||
|
verbose = 0
|
||||||
|
|
||||||
|
def log(s):
|
||||||
|
try:
|
||||||
|
sys.stdout.flush()
|
||||||
|
sys.stderr.write(logprefix + s)
|
||||||
|
sys.stderr.flush()
|
||||||
|
except IOError:
|
||||||
|
# this could happen if stderr gets forcibly disconnected, eg. because
|
||||||
|
# our tty closes. That sucks, but it's no reason to abort the program.
|
||||||
|
pass
|
||||||
|
|
||||||
|
def debug1(s):
|
||||||
|
if verbose >= 1:
|
||||||
|
log(s)
|
||||||
|
|
||||||
|
def debug2(s):
|
||||||
|
if verbose >= 2:
|
||||||
|
log(s)
|
||||||
|
|
||||||
|
def debug3(s):
|
||||||
|
if verbose >= 3:
|
||||||
|
log(s)
|
||||||
|
|
||||||
|
|
||||||
|
class Fatal(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
EXITCODE_NEEDS_REBOOT = 111
|
||||||
|
class FatalNeedsReboot(Fatal):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def list_contains_any(l, sub):
|
||||||
|
for i in sub:
|
||||||
|
if i in l:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def resolvconf_nameservers():
|
||||||
|
l = []
|
||||||
|
for line in open('/etc/resolv.conf'):
|
||||||
|
words = line.lower().split()
|
||||||
|
if len(words) >= 2 and words[0] == 'nameserver':
|
||||||
|
l.append(words[1])
|
||||||
|
return l
|
||||||
|
|
||||||
|
|
||||||
|
def resolvconf_random_nameserver():
|
||||||
|
l = resolvconf_nameservers()
|
||||||
|
if l:
|
||||||
|
if len(l) > 1:
|
||||||
|
# don't import this unless we really need it
|
||||||
|
import random
|
||||||
|
random.shuffle(l)
|
||||||
|
return l[0]
|
||||||
|
else:
|
||||||
|
return '127.0.0.1'
|
||||||
|
|
||||||
|
|
||||||
|
def islocal(ip):
|
||||||
|
sock = socket.socket()
|
||||||
|
try:
|
||||||
|
try:
|
||||||
|
sock.bind((ip, 0))
|
||||||
|
except socket.error, e:
|
||||||
|
if e.args[0] == errno.EADDRNOTAVAIL:
|
||||||
|
return False # not a local IP
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
finally:
|
||||||
|
sock.close()
|
||||||
|
return True # it's a local IP, or there would have been an error
|
||||||
|
|
||||||
|
|
281
hostwatch.py
Normal file
@ -0,0 +1,281 @@
|
|||||||
|
import time, socket, re, select, errno
|
||||||
|
if not globals().get('skip_imports'):
|
||||||
|
import compat.ssubprocess as ssubprocess
|
||||||
|
import helpers
|
||||||
|
from helpers import *
|
||||||
|
|
||||||
|
POLL_TIME = 60*15
|
||||||
|
NETSTAT_POLL_TIME = 30
|
||||||
|
CACHEFILE=os.path.expanduser('~/.sshuttle.hosts')
|
||||||
|
|
||||||
|
|
||||||
|
_nmb_ok = True
|
||||||
|
_smb_ok = True
|
||||||
|
hostnames = {}
|
||||||
|
queue = {}
|
||||||
|
try:
|
||||||
|
null = open('/dev/null', 'wb')
|
||||||
|
except IOError, e:
|
||||||
|
log('warning: %s\n' % e)
|
||||||
|
null = os.popen("sh -c 'while read x; do :; done'", 'wb', 4096)
|
||||||
|
|
||||||
|
|
||||||
|
def _is_ip(s):
|
||||||
|
return re.match(r'\d+\.\d+\.\d+\.\d+$', s)
|
||||||
|
|
||||||
|
|
||||||
|
def write_host_cache():
|
||||||
|
tmpname = '%s.%d.tmp' % (CACHEFILE, os.getpid())
|
||||||
|
try:
|
||||||
|
f = open(tmpname, 'wb')
|
||||||
|
for name,ip in sorted(hostnames.items()):
|
||||||
|
f.write('%s,%s\n' % (name, ip))
|
||||||
|
f.close()
|
||||||
|
os.rename(tmpname, CACHEFILE)
|
||||||
|
finally:
|
||||||
|
try:
|
||||||
|
os.unlink(tmpname)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def read_host_cache():
|
||||||
|
try:
|
||||||
|
f = open(CACHEFILE)
|
||||||
|
except IOError, e:
|
||||||
|
if e.errno == errno.ENOENT:
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
for line in f:
|
||||||
|
words = line.strip().split(',')
|
||||||
|
if len(words) == 2:
|
||||||
|
(name,ip) = words
|
||||||
|
name = re.sub(r'[^-\w]', '-', name).strip()
|
||||||
|
ip = re.sub(r'[^0-9.]', '', ip).strip()
|
||||||
|
if name and ip:
|
||||||
|
found_host(name, ip)
|
||||||
|
|
||||||
|
|
||||||
|
def found_host(hostname, ip):
|
||||||
|
hostname = re.sub(r'\..*', '', hostname)
|
||||||
|
hostname = re.sub(r'[^-\w]', '_', hostname)
|
||||||
|
if (ip.startswith('127.') or ip.startswith('255.')
|
||||||
|
or hostname == 'localhost'):
|
||||||
|
return
|
||||||
|
oldip = hostnames.get(hostname)
|
||||||
|
if oldip != ip:
|
||||||
|
hostnames[hostname] = ip
|
||||||
|
debug1('Found: %s: %s\n' % (hostname, ip))
|
||||||
|
sys.stdout.write('%s,%s\n' % (hostname, ip))
|
||||||
|
write_host_cache()
|
||||||
|
|
||||||
|
|
||||||
|
def _check_etc_hosts():
|
||||||
|
debug2(' > hosts\n')
|
||||||
|
for line in open('/etc/hosts'):
|
||||||
|
line = re.sub(r'#.*', '', line)
|
||||||
|
words = line.strip().split()
|
||||||
|
if not words:
|
||||||
|
continue
|
||||||
|
ip = words[0]
|
||||||
|
names = words[1:]
|
||||||
|
if _is_ip(ip):
|
||||||
|
debug3('< %s %r\n' % (ip, names))
|
||||||
|
for n in names:
|
||||||
|
check_host(n)
|
||||||
|
found_host(n, ip)
|
||||||
|
|
||||||
|
|
||||||
|
def _check_revdns(ip):
|
||||||
|
debug2(' > rev: %s\n' % ip)
|
||||||
|
try:
|
||||||
|
r = socket.gethostbyaddr(ip)
|
||||||
|
debug3('< %s\n' % r[0])
|
||||||
|
check_host(r[0])
|
||||||
|
found_host(r[0], ip)
|
||||||
|
except socket.herror, e:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def _check_dns(hostname):
|
||||||
|
debug2(' > dns: %s\n' % hostname)
|
||||||
|
try:
|
||||||
|
ip = socket.gethostbyname(hostname)
|
||||||
|
debug3('< %s\n' % ip)
|
||||||
|
check_host(ip)
|
||||||
|
found_host(hostname, ip)
|
||||||
|
except socket.gaierror, e:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def _check_netstat():
|
||||||
|
debug2(' > netstat\n')
|
||||||
|
argv = ['netstat', '-n']
|
||||||
|
try:
|
||||||
|
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
|
||||||
|
content = p.stdout.read()
|
||||||
|
p.wait()
|
||||||
|
except OSError, e:
|
||||||
|
log('%r failed: %r\n' % (argv, e))
|
||||||
|
return
|
||||||
|
|
||||||
|
for ip in re.findall(r'\d+\.\d+\.\d+\.\d+', content):
|
||||||
|
debug3('< %s\n' % ip)
|
||||||
|
check_host(ip)
|
||||||
|
|
||||||
|
|
||||||
|
def _check_smb(hostname):
|
||||||
|
return
|
||||||
|
global _smb_ok
|
||||||
|
if not _smb_ok:
|
||||||
|
return
|
||||||
|
argv = ['smbclient', '-U', '%', '-L', hostname]
|
||||||
|
debug2(' > smb: %s\n' % hostname)
|
||||||
|
try:
|
||||||
|
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
|
||||||
|
lines = p.stdout.readlines()
|
||||||
|
p.wait()
|
||||||
|
except OSError, e:
|
||||||
|
log('%r failed: %r\n' % (argv, e))
|
||||||
|
_smb_ok = False
|
||||||
|
return
|
||||||
|
|
||||||
|
lines.reverse()
|
||||||
|
|
||||||
|
# junk at top
|
||||||
|
while lines:
|
||||||
|
line = lines.pop().strip()
|
||||||
|
if re.match(r'Server\s+', line):
|
||||||
|
break
|
||||||
|
|
||||||
|
# server list section:
|
||||||
|
# Server Comment
|
||||||
|
# ------ -------
|
||||||
|
while lines:
|
||||||
|
line = lines.pop().strip()
|
||||||
|
if not line or re.match(r'-+\s+-+', line):
|
||||||
|
continue
|
||||||
|
if re.match(r'Workgroup\s+Master', line):
|
||||||
|
break
|
||||||
|
words = line.split()
|
||||||
|
hostname = words[0].lower()
|
||||||
|
debug3('< %s\n' % hostname)
|
||||||
|
check_host(hostname)
|
||||||
|
|
||||||
|
# workgroup list section:
|
||||||
|
# Workgroup Master
|
||||||
|
# --------- ------
|
||||||
|
while lines:
|
||||||
|
line = lines.pop().strip()
|
||||||
|
if re.match(r'-+\s+', line):
|
||||||
|
continue
|
||||||
|
if not line:
|
||||||
|
break
|
||||||
|
words = line.split()
|
||||||
|
(workgroup, hostname) = (words[0].lower(), words[1].lower())
|
||||||
|
debug3('< group(%s) -> %s\n' % (workgroup, hostname))
|
||||||
|
check_host(hostname)
|
||||||
|
check_workgroup(workgroup)
|
||||||
|
|
||||||
|
if lines:
|
||||||
|
assert(0)
|
||||||
|
|
||||||
|
|
||||||
|
def _check_nmb(hostname, is_workgroup, is_master):
|
||||||
|
return
|
||||||
|
global _nmb_ok
|
||||||
|
if not _nmb_ok:
|
||||||
|
return
|
||||||
|
argv = ['nmblookup'] + ['-M']*is_master + ['--', hostname]
|
||||||
|
debug2(' > n%d%d: %s\n' % (is_workgroup, is_master, hostname))
|
||||||
|
try:
|
||||||
|
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
|
||||||
|
lines = p.stdout.readlines()
|
||||||
|
rv = p.wait()
|
||||||
|
except OSError, e:
|
||||||
|
log('%r failed: %r\n' % (argv, e))
|
||||||
|
_nmb_ok = False
|
||||||
|
return
|
||||||
|
if rv:
|
||||||
|
log('%r returned %d\n' % (argv, rv))
|
||||||
|
return
|
||||||
|
for line in lines:
|
||||||
|
m = re.match(r'(\d+\.\d+\.\d+\.\d+) (\w+)<\w\w>\n', line)
|
||||||
|
if m:
|
||||||
|
g = m.groups()
|
||||||
|
(ip, name) = (g[0], g[1].lower())
|
||||||
|
debug3('< %s -> %s\n' % (name, ip))
|
||||||
|
if is_workgroup:
|
||||||
|
_enqueue(_check_smb, ip)
|
||||||
|
else:
|
||||||
|
found_host(name, ip)
|
||||||
|
check_host(name)
|
||||||
|
|
||||||
|
|
||||||
|
def check_host(hostname):
|
||||||
|
if _is_ip(hostname):
|
||||||
|
_enqueue(_check_revdns, hostname)
|
||||||
|
else:
|
||||||
|
_enqueue(_check_dns, hostname)
|
||||||
|
_enqueue(_check_smb, hostname)
|
||||||
|
_enqueue(_check_nmb, hostname, False, False)
|
||||||
|
|
||||||
|
|
||||||
|
def check_workgroup(hostname):
|
||||||
|
_enqueue(_check_nmb, hostname, True, False)
|
||||||
|
_enqueue(_check_nmb, hostname, True, True)
|
||||||
|
|
||||||
|
|
||||||
|
def _enqueue(op, *args):
|
||||||
|
t = (op,args)
|
||||||
|
if queue.get(t) == None:
|
||||||
|
queue[t] = 0
|
||||||
|
|
||||||
|
|
||||||
|
def _stdin_still_ok(timeout):
|
||||||
|
r,w,x = select.select([sys.stdin.fileno()], [], [], timeout)
|
||||||
|
if r:
|
||||||
|
b = os.read(sys.stdin.fileno(), 4096)
|
||||||
|
if not b:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def hw_main(seed_hosts):
|
||||||
|
if helpers.verbose >= 2:
|
||||||
|
helpers.logprefix = 'HH: '
|
||||||
|
else:
|
||||||
|
helpers.logprefix = 'hostwatch: '
|
||||||
|
|
||||||
|
read_host_cache()
|
||||||
|
|
||||||
|
_enqueue(_check_etc_hosts)
|
||||||
|
_enqueue(_check_netstat)
|
||||||
|
check_host('localhost')
|
||||||
|
check_host(socket.gethostname())
|
||||||
|
check_workgroup('workgroup')
|
||||||
|
check_workgroup('-')
|
||||||
|
for h in seed_hosts:
|
||||||
|
check_host(h)
|
||||||
|
|
||||||
|
while 1:
|
||||||
|
now = time.time()
|
||||||
|
for t,last_polled in queue.items():
|
||||||
|
(op,args) = t
|
||||||
|
if not _stdin_still_ok(0):
|
||||||
|
break
|
||||||
|
maxtime = POLL_TIME
|
||||||
|
if op == _check_netstat:
|
||||||
|
maxtime = NETSTAT_POLL_TIME
|
||||||
|
if now - last_polled > maxtime:
|
||||||
|
queue[t] = time.time()
|
||||||
|
op(*args)
|
||||||
|
try:
|
||||||
|
sys.stdout.flush()
|
||||||
|
except IOError:
|
||||||
|
break
|
||||||
|
|
||||||
|
# FIXME: use a smarter timeout based on oldest last_polled
|
||||||
|
if not _stdin_still_ok(1):
|
||||||
|
break
|
141
main.py
Executable file
@ -0,0 +1,141 @@
|
|||||||
|
import sys, os, re
|
||||||
|
import helpers, options, client, server, firewall, hostwatch
|
||||||
|
import compat.ssubprocess as ssubprocess
|
||||||
|
from helpers import *
|
||||||
|
|
||||||
|
|
||||||
|
# list of:
|
||||||
|
# 1.2.3.4/5 or just 1.2.3.4
|
||||||
|
def parse_subnets(subnets_str):
|
||||||
|
subnets = []
|
||||||
|
for s in subnets_str:
|
||||||
|
m = re.match(r'(\d+)(?:\.(\d+)\.(\d+)\.(\d+))?(?:/(\d+))?$', s)
|
||||||
|
if not m:
|
||||||
|
raise Fatal('%r is not a valid IP subnet format' % s)
|
||||||
|
(a,b,c,d,width) = m.groups()
|
||||||
|
(a,b,c,d) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0))
|
||||||
|
if width == None:
|
||||||
|
width = 32
|
||||||
|
else:
|
||||||
|
width = int(width)
|
||||||
|
if a > 255 or b > 255 or c > 255 or d > 255:
|
||||||
|
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a,b,c,d))
|
||||||
|
if width > 32:
|
||||||
|
raise Fatal('*/%d is greater than the maximum of 32' % width)
|
||||||
|
subnets.append(('%d.%d.%d.%d' % (a,b,c,d), width))
|
||||||
|
return subnets
|
||||||
|
|
||||||
|
|
||||||
|
# 1.2.3.4:567 or just 1.2.3.4 or just 567
|
||||||
|
def parse_ipport(s):
|
||||||
|
s = str(s)
|
||||||
|
m = re.match(r'(?:(\d+)\.(\d+)\.(\d+)\.(\d+))?(?::)?(?:(\d+))?$', s)
|
||||||
|
if not m:
|
||||||
|
raise Fatal('%r is not a valid IP:port format' % s)
|
||||||
|
(a,b,c,d,port) = m.groups()
|
||||||
|
(a,b,c,d,port) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0),
|
||||||
|
int(port or 0))
|
||||||
|
if a > 255 or b > 255 or c > 255 or d > 255:
|
||||||
|
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a,b,c,d))
|
||||||
|
if port > 65535:
|
||||||
|
raise Fatal('*:%d is greater than the maximum of 65535' % port)
|
||||||
|
if a == None:
|
||||||
|
a = b = c = d = 0
|
||||||
|
return ('%d.%d.%d.%d' % (a,b,c,d), port)
|
||||||
|
|
||||||
|
|
||||||
|
optspec = """
|
||||||
|
sshuttle [-l [ip:]port] [-r [username@]sshserver[:port]] <subnets...>
|
||||||
|
sshuttle --server
|
||||||
|
sshuttle --firewall <port> <subnets...>
|
||||||
|
sshuttle --hostwatch
|
||||||
|
--
|
||||||
|
l,listen= transproxy to this ip address and port number [127.0.0.1:0]
|
||||||
|
H,auto-hosts scan for remote hostnames and update local /etc/hosts
|
||||||
|
N,auto-nets automatically determine subnets to route
|
||||||
|
dns capture local DNS requests and forward to the remote DNS server
|
||||||
|
python= path to python interpreter on the remote server
|
||||||
|
r,remote= ssh hostname (and optional username) of remote sshuttle server
|
||||||
|
x,exclude= exclude this subnet (can be used more than once)
|
||||||
|
exclude-from= exclude the subnets in a file (whitespace separated)
|
||||||
|
v,verbose increase debug message verbosity
|
||||||
|
e,ssh-cmd= the command to use to connect to the remote [ssh]
|
||||||
|
seed-hosts= with -H, use these hostnames for initial scan (comma-separated)
|
||||||
|
no-latency-control sacrifice latency to improve bandwidth benchmarks
|
||||||
|
wrap= restart counting channel numbers after this number (for testing)
|
||||||
|
D,daemon run in the background as a daemon
|
||||||
|
V,version print sshuttle's version number
|
||||||
|
syslog send log messages to syslog (default if you use --daemon)
|
||||||
|
pidfile= pidfile name (only if using --daemon) [./sshuttle.pid]
|
||||||
|
server (internal use only)
|
||||||
|
firewall (internal use only)
|
||||||
|
hostwatch (internal use only)
|
||||||
|
"""
|
||||||
|
o = options.Options(optspec)
|
||||||
|
(opt, flags, extra) = o.parse(sys.argv[2:])
|
||||||
|
|
||||||
|
if opt.version:
|
||||||
|
import version
|
||||||
|
print version.TAG
|
||||||
|
sys.exit(0)
|
||||||
|
if opt.daemon:
|
||||||
|
opt.syslog = 1
|
||||||
|
if opt.wrap:
|
||||||
|
import ssnet
|
||||||
|
ssnet.MAX_CHANNEL = int(opt.wrap)
|
||||||
|
helpers.verbose = opt.verbose
|
||||||
|
|
||||||
|
try:
|
||||||
|
if opt.server:
|
||||||
|
if len(extra) != 0:
|
||||||
|
o.fatal('no arguments expected')
|
||||||
|
server.latency_control = opt.latency_control
|
||||||
|
sys.exit(server.main())
|
||||||
|
elif opt.firewall:
|
||||||
|
if len(extra) != 2:
|
||||||
|
o.fatal('exactly two arguments expected')
|
||||||
|
sys.exit(firewall.main(int(extra[0]), int(extra[1]), opt.syslog))
|
||||||
|
elif opt.hostwatch:
|
||||||
|
sys.exit(hostwatch.hw_main(extra))
|
||||||
|
else:
|
||||||
|
if len(extra) < 1 and not opt.auto_nets:
|
||||||
|
o.fatal('at least one subnet (or -N) expected')
|
||||||
|
includes = extra
|
||||||
|
excludes = ['127.0.0.0/8']
|
||||||
|
for k,v in flags:
|
||||||
|
if k in ('-x','--exclude'):
|
||||||
|
excludes.append(v)
|
||||||
|
if k in ('-X', '--exclude-from'):
|
||||||
|
excludes += open(v).read().split()
|
||||||
|
remotename = opt.remote
|
||||||
|
if remotename == '' or remotename == '-':
|
||||||
|
remotename = None
|
||||||
|
if opt.seed_hosts and not opt.auto_hosts:
|
||||||
|
o.fatal('--seed-hosts only works if you also use -H')
|
||||||
|
if opt.seed_hosts:
|
||||||
|
sh = re.split(r'[\s,]+', (opt.seed_hosts or "").strip())
|
||||||
|
elif opt.auto_hosts:
|
||||||
|
sh = []
|
||||||
|
else:
|
||||||
|
sh = None
|
||||||
|
sys.exit(client.main(parse_ipport(opt.listen or '0.0.0.0:0'),
|
||||||
|
opt.ssh_cmd,
|
||||||
|
remotename,
|
||||||
|
opt.python,
|
||||||
|
opt.latency_control,
|
||||||
|
opt.dns,
|
||||||
|
sh,
|
||||||
|
opt.auto_nets,
|
||||||
|
parse_subnets(includes),
|
||||||
|
parse_subnets(excludes),
|
||||||
|
opt.syslog, opt.daemon, opt.pidfile))
|
||||||
|
except FatalNeedsReboot, e:
|
||||||
|
log('You must reboot before using sshuttle.\n')
|
||||||
|
sys.exit(EXITCODE_NEEDS_REBOOT)
|
||||||
|
except Fatal, e:
|
||||||
|
log('fatal: %s\n' % e)
|
||||||
|
sys.exit(99)
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
log('\n')
|
||||||
|
log('Keyboard interrupt: exiting.\n')
|
||||||
|
sys.exit(1)
|
200
options.py
Normal file
@ -0,0 +1,200 @@
|
|||||||
|
"""Command-line options parser.
|
||||||
|
With the help of an options spec string, easily parse command-line options.
|
||||||
|
"""
|
||||||
|
import sys, os, textwrap, getopt, re, struct
|
||||||
|
|
||||||
|
class OptDict:
|
||||||
|
def __init__(self):
|
||||||
|
self._opts = {}
|
||||||
|
|
||||||
|
def __setitem__(self, k, v):
|
||||||
|
if k.startswith('no-') or k.startswith('no_'):
|
||||||
|
k = k[3:]
|
||||||
|
v = not v
|
||||||
|
self._opts[k] = v
|
||||||
|
|
||||||
|
def __getitem__(self, k):
|
||||||
|
if k.startswith('no-') or k.startswith('no_'):
|
||||||
|
return not self._opts[k[3:]]
|
||||||
|
return self._opts[k]
|
||||||
|
|
||||||
|
def __getattr__(self, k):
|
||||||
|
return self[k]
|
||||||
|
|
||||||
|
|
||||||
|
def _default_onabort(msg):
|
||||||
|
sys.exit(97)
|
||||||
|
|
||||||
|
|
||||||
|
def _intify(v):
|
||||||
|
try:
|
||||||
|
vv = int(v or '')
|
||||||
|
if str(vv) == v:
|
||||||
|
return vv
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
return v
|
||||||
|
|
||||||
|
|
||||||
|
def _atoi(v):
|
||||||
|
try:
|
||||||
|
return int(v or 0)
|
||||||
|
except ValueError:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def _remove_negative_kv(k, v):
|
||||||
|
if k.startswith('no-') or k.startswith('no_'):
|
||||||
|
return k[3:], not v
|
||||||
|
return k,v
|
||||||
|
|
||||||
|
def _remove_negative_k(k):
|
||||||
|
return _remove_negative_kv(k, None)[0]
|
||||||
|
|
||||||
|
|
||||||
|
def _tty_width():
|
||||||
|
s = struct.pack("HHHH", 0, 0, 0, 0)
|
||||||
|
try:
|
||||||
|
import fcntl, termios
|
||||||
|
s = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, s)
|
||||||
|
except (IOError, ImportError):
|
||||||
|
return _atoi(os.environ.get('WIDTH')) or 70
|
||||||
|
(ysize,xsize,ypix,xpix) = struct.unpack('HHHH', s)
|
||||||
|
return xsize or 70
|
||||||
|
|
||||||
|
|
||||||
|
class Options:
|
||||||
|
"""Option parser.
|
||||||
|
When constructed, two strings are mandatory. The first one is the command
|
||||||
|
name showed before error messages. The second one is a string called an
|
||||||
|
optspec that specifies the synopsis and option flags and their description.
|
||||||
|
For more information about optspecs, consult the bup-options(1) man page.
|
||||||
|
|
||||||
|
Two optional arguments specify an alternative parsing function and an
|
||||||
|
alternative behaviour on abort (after having output the usage string).
|
||||||
|
|
||||||
|
By default, the parser function is getopt.gnu_getopt, and the abort
|
||||||
|
behaviour is to exit the program.
|
||||||
|
"""
|
||||||
|
def __init__(self, optspec, optfunc=getopt.gnu_getopt,
|
||||||
|
onabort=_default_onabort):
|
||||||
|
self.optspec = optspec
|
||||||
|
self._onabort = onabort
|
||||||
|
self.optfunc = optfunc
|
||||||
|
self._aliases = {}
|
||||||
|
self._shortopts = 'h?'
|
||||||
|
self._longopts = ['help']
|
||||||
|
self._hasparms = {}
|
||||||
|
self._defaults = {}
|
||||||
|
self._usagestr = self._gen_usage()
|
||||||
|
|
||||||
|
def _gen_usage(self):
|
||||||
|
out = []
|
||||||
|
lines = self.optspec.strip().split('\n')
|
||||||
|
lines.reverse()
|
||||||
|
first_syn = True
|
||||||
|
while lines:
|
||||||
|
l = lines.pop()
|
||||||
|
if l == '--': break
|
||||||
|
out.append('%s: %s\n' % (first_syn and 'usage' or ' or', l))
|
||||||
|
first_syn = False
|
||||||
|
out.append('\n')
|
||||||
|
last_was_option = False
|
||||||
|
while lines:
|
||||||
|
l = lines.pop()
|
||||||
|
if l.startswith(' '):
|
||||||
|
out.append('%s%s\n' % (last_was_option and '\n' or '',
|
||||||
|
l.lstrip()))
|
||||||
|
last_was_option = False
|
||||||
|
elif l:
|
||||||
|
(flags, extra) = l.split(' ', 1)
|
||||||
|
extra = extra.strip()
|
||||||
|
if flags.endswith('='):
|
||||||
|
flags = flags[:-1]
|
||||||
|
has_parm = 1
|
||||||
|
else:
|
||||||
|
has_parm = 0
|
||||||
|
g = re.search(r'\[([^\]]*)\]$', extra)
|
||||||
|
if g:
|
||||||
|
defval = g.group(1)
|
||||||
|
else:
|
||||||
|
defval = None
|
||||||
|
flagl = flags.split(',')
|
||||||
|
flagl_nice = []
|
||||||
|
for _f in flagl:
|
||||||
|
f,dvi = _remove_negative_kv(_f, _intify(defval))
|
||||||
|
self._aliases[f] = _remove_negative_k(flagl[0])
|
||||||
|
self._hasparms[f] = has_parm
|
||||||
|
self._defaults[f] = dvi
|
||||||
|
if len(f) == 1:
|
||||||
|
self._shortopts += f + (has_parm and ':' or '')
|
||||||
|
flagl_nice.append('-' + f)
|
||||||
|
else:
|
||||||
|
f_nice = re.sub(r'\W', '_', f)
|
||||||
|
self._aliases[f_nice] = _remove_negative_k(flagl[0])
|
||||||
|
self._longopts.append(f + (has_parm and '=' or ''))
|
||||||
|
self._longopts.append('no-' + f)
|
||||||
|
flagl_nice.append('--' + _f)
|
||||||
|
flags_nice = ', '.join(flagl_nice)
|
||||||
|
if has_parm:
|
||||||
|
flags_nice += ' ...'
|
||||||
|
prefix = ' %-20s ' % flags_nice
|
||||||
|
argtext = '\n'.join(textwrap.wrap(extra, width=_tty_width(),
|
||||||
|
initial_indent=prefix,
|
||||||
|
subsequent_indent=' '*28))
|
||||||
|
out.append(argtext + '\n')
|
||||||
|
last_was_option = True
|
||||||
|
else:
|
||||||
|
out.append('\n')
|
||||||
|
last_was_option = False
|
||||||
|
return ''.join(out).rstrip() + '\n'
|
||||||
|
|
||||||
|
def usage(self, msg=""):
|
||||||
|
"""Print usage string to stderr and abort."""
|
||||||
|
sys.stderr.write(self._usagestr)
|
||||||
|
e = self._onabort and self._onabort(msg) or None
|
||||||
|
if e:
|
||||||
|
raise e
|
||||||
|
|
||||||
|
def fatal(self, s):
|
||||||
|
"""Print an error message to stderr and abort with usage string."""
|
||||||
|
msg = 'error: %s\n' % s
|
||||||
|
sys.stderr.write(msg)
|
||||||
|
return self.usage(msg)
|
||||||
|
|
||||||
|
def parse(self, args):
|
||||||
|
"""Parse a list of arguments and return (options, flags, extra).
|
||||||
|
|
||||||
|
In the returned tuple, "options" is an OptDict with known options,
|
||||||
|
"flags" is a list of option flags that were used on the command-line,
|
||||||
|
and "extra" is a list of positional arguments.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
(flags,extra) = self.optfunc(args, self._shortopts, self._longopts)
|
||||||
|
except getopt.GetoptError, e:
|
||||||
|
self.fatal(e)
|
||||||
|
|
||||||
|
opt = OptDict()
|
||||||
|
|
||||||
|
for k,v in self._defaults.iteritems():
|
||||||
|
k = self._aliases[k]
|
||||||
|
opt[k] = v
|
||||||
|
|
||||||
|
for (k,v) in flags:
|
||||||
|
k = k.lstrip('-')
|
||||||
|
if k in ('h', '?', 'help'):
|
||||||
|
self.usage()
|
||||||
|
if k.startswith('no-'):
|
||||||
|
k = self._aliases[k[3:]]
|
||||||
|
v = 0
|
||||||
|
else:
|
||||||
|
k = self._aliases[k]
|
||||||
|
if not self._hasparms[k]:
|
||||||
|
assert(v == '')
|
||||||
|
v = (opt._opts.get(k) or 0) + 1
|
||||||
|
else:
|
||||||
|
v = _intify(v)
|
||||||
|
opt[k] = v
|
||||||
|
for (f1,f2) in self._aliases.iteritems():
|
||||||
|
opt[f1] = opt._opts.get(f2)
|
||||||
|
return (opt,flags,extra)
|
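Editor's note: since the optspec format is only described indirectly by the docstring and the `_gen_usage()` code above, here is a minimal usage sketch of this parser. It is illustrative only: the program name, option names, and argument values below are made up for the example (sshuttle's real optspec lives in main.py), and the snippet assumes this `options.py` is importable and runs under the Python 2 interpreter this tree targets.

```python
# Illustrative only: 'prog', 'listen' and 'verbose' are invented for this
# example; they are not sshuttle's actual options.
import options

optspec = """
prog [options...] <subnets...>
--
l,listen=  ip:port to listen on [127.0.0.1:0]
v,verbose  increase log output (can be used more than once)
"""

o = options.Options(optspec)
(opt, flags, extra) = o.parse(['-v', '-v', '--listen', '1.2.3.4:0', '10.0.0.0/8'])

print(opt.verbose)   # flags without '=' are counted -> 2
print(opt.listen)    # parameter flags keep their value -> '1.2.3.4:0'
print(extra)         # positional arguments -> ['10.0.0.0/8']
```

Note how both spellings of a flag (`-l` and `--listen`) resolve to the same stored value through the `_aliases` table, and how the `[127.0.0.1:0]` suffix in the optspec becomes the default when the flag is absent.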
@ -1,57 +0,0 @@
[project]
authors = [
    {name = "Brian May", email = "brian@linuxpenguins.xyz"},
]
license = {text = "LGPL-2.1"}
requires-python = "<4.0,>=3.9"
dependencies = []
name = "sshuttle"
version = "1.3.1"
description = "Transparent proxy server that works as a poor man's VPN. Forwards over ssh. Doesn't require admin. Works with Linux and MacOS. Supports DNS tunneling."
readme = "README.rst"
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Developers",
    "Intended Audience :: End Users/Desktop",
    "License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: System :: Networking",
]

[project.scripts]
sshuttle = "sshuttle.cmdline:main"

[dependency-groups]
dev = [
    "pytest<9.0.0,>=8.0.1",
    "pytest-cov<7.0,>=4.1",
    "flake8<8.0.0,>=7.0.0",
    "pyflakes<4.0.0,>=3.2.0",
    "bump2version<2.0.0,>=1.0.1",
    "twine<7,>=5",
    "black>=25.1.0",
    "jedi-language-server>=0.44.0",
    "pylsp-mypy>=0.7.0",
    "python-lsp-server>=1.12.2",
    "ruff>=0.11.2",
]
docs = [
    "sphinx==8.1.3; python_version ~= \"3.10\"",
    "furo==2024.8.6",
]

[tool.uv]
default-groups = []

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.sdist]
exclude = [
    "/.jj"
]
15
run
@ -1,15 +0,0 @@
#!/usr/bin/env sh
set -e
export PYTHONPATH="$(dirname "$0"):$PYTHONPATH"
export PATH="$(dirname "$0")/bin:$PATH"

python_best_version() {
    if [ -x "$(command -v python3)" ] &&
            python3 -c "import sys; sys.exit(not sys.version_info > (3, 5))"; then
        exec python3 "$@"
    else
        exec python "$@"
    fi
}

python_best_version -m "sshuttle" "$@"
@ -1,39 +0,0 @@
# https://hub.docker.com/r/linuxserver/openssh-server/
ARG BASE_IMAGE=docker.io/linuxserver/openssh-server:version-9.3_p2-r1

FROM ${BASE_IMAGE} as pyenv

# https://github.com/pyenv/pyenv/wiki#suggested-build-environment
RUN apk add --no-cache build-base git libffi-dev openssl-dev bzip2-dev zlib-dev readline-dev sqlite-dev
ENV PYENV_ROOT=/pyenv
RUN curl https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash
RUN /pyenv/bin/pyenv install 3.10
RUN /pyenv/bin/pyenv install 3.11
RUN /pyenv/bin/pyenv install 3.12
RUN bash -xc 'rm -rf /pyenv/{.git,plugins} /pyenv/versions/*/lib/*/{test,config,config-*linux-gnu}' && \
    find /pyenv -type d -name __pycache__ -exec rm -rf {} + && \
    find /pyenv -type f -name '*.py[co]' -delete

FROM ${BASE_IMAGE}

RUN apk add --no-cache bash nginx iperf3

# pyenv setup
ENV PYENV_ROOT=/pyenv
ENV PATH=/pyenv/shims:/pyenv/bin:$PATH
COPY --from=pyenv /pyenv /pyenv

# OpenSSH Server variables
ENV PUID=1000
ENV PGID=1000
ENV PASSWORD_ACCESS=true
ENV USER_NAME=test
ENV USER_PASSWORD=test
ENV LOG_STDOUT=true

# suppress linuxserver.io logo printing, change sshd config
RUN sed -i '1 a exec &>/dev/null' /etc/s6-overlay/s6-rc.d/init-adduser/run

# https://www.linuxserver.io/blog/2019-09-14-customizing-our-containers
# To customize the container and start other components
COPY container.setup.sh /custom-cont-init.d/setup.sh
@ -1,21 +0,0 @@
# Container based test bed for sshuttle

```bash
test-bed up -d  # start containers

exec-sshuttle <node-id> [--copy-id] [--server-py=2.7|3.10] [--client-py=2.7|3.10] [--sshuttle-bin=/path/to/sshuttle] [sshuttle-args...]
#   --copy-id      -> optionally do ssh-copy-id to make it passwordless for future runs
#   --sshuttle-bin -> use another sshuttle binary instead of one from dev setup
#   --server-py    -> Python version to use in server (managed by pyenv)
#   --client-py    -> Python version to use in client (managed by pyenv)

exec-sshuttle node-1    # start sshuttle to connect to node-1

exec-tool curl node-1   # curl to nginx instance running on node-1 via IP that is only reachable via sshuttle
exec-tool iperf3 node-1 # measure throughput to node-1

run-benchmark node-1 --client-py=3.10

```

<https://learn.microsoft.com/en-us/windows-server/administration/openssh/openssh_server_configuration#configuring-the-default-shell-for-openssh-in-windows>
@ -1,34 +0,0 @@
name: sshuttle-testbed

services:
  node-1:
    image: ghcr.io/sshuttle/sshuttle-testbed
    container_name: sshuttle-testbed-node-1
    hostname: node-1
    cap_add:
      - "NET_ADMIN"
    environment:
      - ADD_IP_ADDRESSES=10.55.1.77/24
    networks:
      default:
        ipv6_address: 2001:0DB8::551
  node-2:
    image: ghcr.io/sshuttle/sshuttle-testbed
    container_name: sshuttle-testbed-node-2
    hostname: node-2
    cap_add:
      - "NET_ADMIN"
    environment:
      - ADD_IP_ADDRESSES=10.55.2.77/32
    networks:
      default:
        ipv6_address: 2001:0DB8::552

networks:
  default:
    driver: bridge
    enable_ipv6: true
    ipam:
      config:
        - subnet: 2001:0DB8::/112
          # internal: true
@ -1,65 +0,0 @@
|
|||||||
#!/usr/bin/with-contenv bash
|
|
||||||
# shellcheck shell=bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
function with_set_x() {
|
|
||||||
set -x
|
|
||||||
"$@"
|
|
||||||
{
|
|
||||||
ec=$?
|
|
||||||
set +x
|
|
||||||
return $ec
|
|
||||||
} 2>/dev/null
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
function log() {
|
|
||||||
echo "$*" >&2
|
|
||||||
}
|
|
||||||
|
|
||||||
log ">>> Setting up $(hostname) | id: $(id)\nIP:\n$(ip a)\nRoutes:\n$(ip r)\npyenv:\n$(pyenv versions)"
|
|
||||||
|
|
||||||
echo "
|
|
||||||
AcceptEnv PYENV_VERSION
|
|
||||||
" >> /etc/ssh/sshd_config
|
|
||||||
|
|
||||||
iface="$(ip route | awk '/default/ { print $5 }')"
|
|
||||||
default_gw="$(ip route | awk '/default/ { print $3 }')"
|
|
||||||
for addr in ${ADD_IP_ADDRESSES//,/ }; do
|
|
||||||
log ">>> Adding $addr to interface $iface"
|
|
||||||
net_addr=$(ipcalc -n "$addr" | awk -F= '{print $2}')
|
|
||||||
with_set_x ip addr add "$addr" dev "$iface"
|
|
||||||
with_set_x ip route add "$net_addr" via "$default_gw" dev "$iface" # so that sshuttle -N can discover routes
|
|
||||||
done
|
|
||||||
|
|
||||||
log ">>> Starting iperf3 server"
|
|
||||||
iperf3 --server --port 5001 &
|
|
||||||
|
|
||||||
mkdir -p /www
|
|
||||||
echo "<h5>Hello from $(hostname)</h5>
|
|
||||||
<pre>
|
|
||||||
<u>ip address</u>
|
|
||||||
$(ip address)
|
|
||||||
<u>ip route</u>
|
|
||||||
$(ip route)
|
|
||||||
</pre>" >/www/index.html
|
|
||||||
echo "
|
|
||||||
daemon off;
|
|
||||||
worker_processes 1;
|
|
||||||
error_log /dev/stdout info;
|
|
||||||
events {
|
|
||||||
worker_connections 1024;
|
|
||||||
}
|
|
||||||
http {
|
|
||||||
include /etc/nginx/mime.types;
|
|
||||||
server {
|
|
||||||
access_log /dev/stdout;
|
|
||||||
listen 8080 default_server;
|
|
||||||
listen [::]:8080 default_server;
|
|
||||||
root /www;
|
|
||||||
}
|
|
||||||
}" >/etc/nginx/nginx.conf
|
|
||||||
|
|
||||||
log ">>> Starting nginx"
|
|
||||||
nginx &
|
|
@ -1,159 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
export MSYS_NO_PATHCONV=1
|
|
||||||
|
|
||||||
function with_set_x() {
|
|
||||||
set -x
|
|
||||||
"$@"
|
|
||||||
{
|
|
||||||
ec=$?
|
|
||||||
set +x
|
|
||||||
return $ec
|
|
||||||
} 2>/dev/null
|
|
||||||
}
|
|
||||||
|
|
||||||
function log() {
|
|
||||||
echo "$*" >&2
|
|
||||||
}
|
|
||||||
|
|
||||||
ssh_cmd='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
|
|
||||||
ssh_copy_id=false
|
|
||||||
args=()
|
|
||||||
subnet_args=()
|
|
||||||
while [[ $# -gt 0 ]]; do
|
|
||||||
arg=$1
|
|
||||||
shift
|
|
||||||
case "$arg" in
|
|
||||||
-v|-vv*)
|
|
||||||
ssh_cmd+=" -v"
|
|
||||||
args+=("$arg")
|
|
||||||
;;
|
|
||||||
-r)
|
|
||||||
args+=("-r" "$1")
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
--copy-id)
|
|
||||||
ssh_copy_id=true
|
|
||||||
;;
|
|
||||||
--server-py=*)
|
|
||||||
server_pyenv_ver="${arg#*=}"
|
|
||||||
;;
|
|
||||||
--client-py=*)
|
|
||||||
client_pyenv_ver="${arg#*=}"
|
|
||||||
;;
|
|
||||||
-6)
|
|
||||||
ipv6_only=true
|
|
||||||
;;
|
|
||||||
--sshuttle-bin=*)
|
|
||||||
sshuttle_bin="${arg#*=}"
|
|
||||||
;;
|
|
||||||
-N|*/*)
|
|
||||||
subnet_args+=("$arg")
|
|
||||||
;;
|
|
||||||
-*)
|
|
||||||
args+=("$arg")
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
if [[ -z "$target" ]]; then
|
|
||||||
target=$arg
|
|
||||||
else
|
|
||||||
args+=("$arg")
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
if [[ ${#subnet_args[@]} -eq 0 ]]; then
|
|
||||||
subnet_args=("-N")
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ $target == node-* ]]; then
|
|
||||||
log "Target is a a test-bed node"
|
|
||||||
port="2222"
|
|
||||||
user_part="test:test"
|
|
||||||
host=$("$(dirname "$0")/test-bed" get-ip "$target")
|
|
||||||
index=${target#node-}
|
|
||||||
if [[ $ipv6_only == true ]]; then
|
|
||||||
args+=("2001:0DB8::/112")
|
|
||||||
else
|
|
||||||
args+=("10.55.$index.0/24")
|
|
||||||
fi
|
|
||||||
target="$user_part@$host:$port"
|
|
||||||
if ! command -v sshpass >/dev/null; then
|
|
||||||
log "sshpass is not found. You might have to manually enter ssh password: 'test'"
|
|
||||||
fi
|
|
||||||
if [[ -z $server_pyenv_ver ]]; then
|
|
||||||
log "server-py argumwnt is not specified. Setting it to 3.8"
|
|
||||||
server_pyenv_ver="3.8"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -n $server_pyenv_ver ]]; then
|
|
||||||
log "Would pass PYENV_VERRSION=$server_pyenv_ver to server. pyenv is required on server to make it work"
|
|
||||||
pycmd="/pyenv/shims/python"
|
|
||||||
ssh_cmd+=" -o SetEnv=PYENV_VERSION=${server_pyenv_ver:-'3'}"
|
|
||||||
args=("--python=$pycmd" "${args[@]}")
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ $ssh_copy_id == true ]]; then
|
|
||||||
log "Trying to make it passwordless"
|
|
||||||
if [[ $target == *@* ]]; then
|
|
||||||
user_part="${target%%@*}"
|
|
||||||
host_part="${target#*@}"
|
|
||||||
else
|
|
||||||
user_part="$(whoami)"
|
|
||||||
host_part="$target"
|
|
||||||
fi
|
|
||||||
if [[ $host_part == *:* ]]; then
|
|
||||||
host="${host_part%:*}"
|
|
||||||
port="${host_part#*:}"
|
|
||||||
else
|
|
||||||
host="$host_part"
|
|
||||||
port="22"
|
|
||||||
fi
|
|
||||||
if [[ $user_part == *:* ]]; then
|
|
||||||
user="${user_part%:*}"
|
|
||||||
password="${user_part#*:}"
|
|
||||||
else
|
|
||||||
user="$user_part"
|
|
||||||
password=""
|
|
||||||
fi
|
|
||||||
cmd=(ssh-copy-id -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p "$port" "$user@$host")
|
|
||||||
if [[ -n $password ]] && command -v sshpass >/dev/null; then
|
|
||||||
cmd=(sshpass -p "$password" "${cmd[@]}")
|
|
||||||
fi
|
|
||||||
with_set_x "${cmd[@]}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -z $sshuttle_bin || "$sshuttle_bin" == dev ]]; then
|
|
||||||
cd "$(dirname "$0")/.."
|
|
||||||
export PYTHONPATH="."
|
|
||||||
if [[ -n $client_pyenv_ver ]]; then
|
|
||||||
log "Using pyenv version: $client_pyenv_ver"
|
|
||||||
command -v pyenv &>/dev/null || log "You have to install pyenv to use --client-py" && exit 1
|
|
||||||
sshuttle_cmd=(/usr/bin/env PYENV_VERSION="$client_pyenv_ver" pyenv exec python -m sshuttle)
|
|
||||||
else
|
|
||||||
log "Using best python version availble"
|
|
||||||
if [ -x "$(command -v python3)" ] &&
|
|
||||||
python3 -c "import sys; sys.exit(not sys.version_info > (3, 5))"; then
|
|
||||||
sshuttle_cmd=(python3 -m sshuttle)
|
|
||||||
else
|
|
||||||
sshuttle_cmd=(python -m sshuttle)
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
[[ -n $client_pyenv_ver ]] && log "Can't specify --client-py when --sshuttle-bin is specified" && exit 1
|
|
||||||
sshuttle_cmd=("$sshuttle_bin")
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ " ${args[*]} " != *" --ssh-cmd "* ]]; then
|
|
||||||
args=("--ssh-cmd" "$ssh_cmd" "${args[@]}")
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ " ${args[*]} " != *" -r "* ]]; then
|
|
||||||
args=("-r" "$target" "${args[@]}")
|
|
||||||
fi
|
|
||||||
|
|
||||||
set -x
|
|
||||||
"${sshuttle_cmd[@]}" --version
|
|
||||||
exec "${sshuttle_cmd[@]}" "${args[@]}" "${subnet_args[@]}"
|
|
@ -1,86 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
|
|
||||||
function with_set_x() {
|
|
||||||
set -x
|
|
||||||
"$@"
|
|
||||||
{
|
|
||||||
ec=$?
|
|
||||||
set +x
|
|
||||||
return $ec
|
|
||||||
} 2>/dev/null
|
|
||||||
}
|
|
||||||
|
|
||||||
function log() {
|
|
||||||
echo "$*" >&2
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
args=()
|
|
||||||
while [[ $# -gt 0 ]]; do
|
|
||||||
arg=$1
|
|
||||||
shift
|
|
||||||
case "$arg" in
|
|
||||||
-6)
|
|
||||||
ipv6_only=true
|
|
||||||
continue
|
|
||||||
;;
|
|
||||||
-*) ;;
|
|
||||||
*)
|
|
||||||
if [[ -z $tool ]]; then
|
|
||||||
tool=$arg
|
|
||||||
continue
|
|
||||||
elif [[ -z $node ]]; then
|
|
||||||
node=$arg
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
args+=("$arg")
|
|
||||||
done
|
|
||||||
|
|
||||||
tool=${tool?:"tool argument missing. should be one of iperf3,ping,curl,ab"}
|
|
||||||
node=${node?:"node argument missing. should be 'node-1' , 'node-2' etc"}
|
|
||||||
|
|
||||||
if [[ $node == node-* ]]; then
|
|
||||||
index=${node#node-}
|
|
||||||
if [[ $ipv6_only == true ]]; then
|
|
||||||
host="2001:0DB8::55$index"
|
|
||||||
else
|
|
||||||
host="10.55.$index.77"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
host=$node
|
|
||||||
fi
|
|
||||||
|
|
||||||
connect_timeout_sec=3
|
|
||||||
|
|
||||||
case "$tool" in
|
|
||||||
ping)
|
|
||||||
with_set_x exec ping -W $connect_timeout_sec "${args[@]}" "$host"
|
|
||||||
;;
|
|
||||||
iperf3)
|
|
||||||
port=5001
|
|
||||||
with_set_x exec iperf3 --client "$host" --port=$port --connect-timeout=$((connect_timeout_sec * 1000)) "${args[@]}"
|
|
||||||
;;
|
|
||||||
curl)
|
|
||||||
port=8080
|
|
||||||
if [[ $host = *:* ]]; then
|
|
||||||
host="[$host]"
|
|
||||||
args+=(--ipv6)
|
|
||||||
fi
|
|
||||||
with_set_x exec curl "http://$host:$port/" -v --connect-timeout $connect_timeout_sec "${args[@]}"
|
|
||||||
;;
|
|
||||||
ab)
|
|
||||||
port=8080
|
|
||||||
if [[ " ${args[*]}" != *" -n "* && " ${args[*]}" != *" -c "* ]]; then
|
|
||||||
args+=(-n 500 -c 50 "${args[@]}")
|
|
||||||
fi
|
|
||||||
with_set_x exec ab -s $connect_timeout_sec "${args[@]}" "http://$host:$port/"
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
log "Unknown tool: $tool"
|
|
||||||
exit 2
|
|
||||||
;;
|
|
||||||
esac
|
|
@ -1,40 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -e
|
|
||||||
cd "$(dirname "$0")"
|
|
||||||
|
|
||||||
function with_set_x() {
|
|
||||||
set -x
|
|
||||||
"$@"
|
|
||||||
{
|
|
||||||
ec=$?
|
|
||||||
set +x
|
|
||||||
return $ec
|
|
||||||
} 2>/dev/null
|
|
||||||
}
|
|
||||||
|
|
||||||
function log() {
|
|
||||||
echo "$*" >&2
|
|
||||||
}
|
|
||||||
|
|
||||||
./test-bed up -d
|
|
||||||
|
|
||||||
benchmark() {
|
|
||||||
log -e "\n======== Benchmarking sshuttle | Args: [$*] ========"
|
|
||||||
local node=$1
|
|
||||||
shift
|
|
||||||
with_set_x ./exec-sshuttle "$node" --listen 55771 "$@" &
|
|
||||||
sshuttle_pid=$!
|
|
||||||
trap 'kill -0 $sshuttle_pid &>/dev/null && kill -15 $sshuttle_pid' EXIT
|
|
||||||
while ! nc -z localhost 55771; do sleep 0.1; done
|
|
||||||
sleep 1
|
|
||||||
./exec-tool iperf3 "$node" --time=4
|
|
||||||
with_set_x kill -15 $sshuttle_pid
|
|
||||||
wait $sshuttle_pid || true
|
|
||||||
}
|
|
||||||
|
|
||||||
if [[ $# -gt 0 ]]; then
|
|
||||||
benchmark "${@}"
|
|
||||||
else
|
|
||||||
benchmark node-1 --sshuttle-bin="${SSHUTTLE_BIN:-sshuttle}"
|
|
||||||
benchmark node-1 --sshuttle-bin=dev
|
|
||||||
fi
|
|
@ -1,9 +0,0 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."

export PYTHONPATH=.

set -x
python -m flake8 sshuttle tests
python -m pytest .
@ -1,42 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -e
|
|
||||||
cd "$(dirname "$0")"
|
|
||||||
|
|
||||||
if [[ -z $1 || $1 = -* ]]; then
|
|
||||||
set -- up "$@"
|
|
||||||
fi
|
|
||||||
|
|
||||||
function with_set_x() {
|
|
||||||
set -x
|
|
||||||
"$@"
|
|
||||||
{
|
|
||||||
ec=$?
|
|
||||||
set +x
|
|
||||||
return $ec
|
|
||||||
} 2>/dev/null
|
|
||||||
}
|
|
||||||
|
|
||||||
function build() {
|
|
||||||
# podman build -t ghcr.io/sshuttle/sshuttle-testbed .
|
|
||||||
with_set_x docker build --progress=plain -t ghcr.io/sshuttle/sshuttle-testbed -f Containerfile .
|
|
||||||
}
|
|
||||||
|
|
||||||
function compose() {
|
|
||||||
# podman-compose "$@"
|
|
||||||
with_set_x docker compose "$@"
|
|
||||||
}
|
|
||||||
|
|
||||||
function get-ip() {
|
|
||||||
local container_name=sshuttle-testbed-"$1"
|
|
||||||
docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$container_name"
|
|
||||||
}
|
|
||||||
|
|
||||||
if [[ $1 == get-ip ]]; then
|
|
||||||
shift
|
|
||||||
get-ip "$@"
|
|
||||||
else
|
|
||||||
if [[ $* = *--build* ]]; then
|
|
||||||
build
|
|
||||||
fi
|
|
||||||
compose "$@"
|
|
||||||
fi
|
|
254
server.py
Normal file
@ -0,0 +1,254 @@
|
|||||||
|
import re, struct, socket, select, traceback, time
|
||||||
|
if not globals().get('skip_imports'):
|
||||||
|
import ssnet, helpers, hostwatch
|
||||||
|
import compat.ssubprocess as ssubprocess
|
||||||
|
from ssnet import SockWrapper, Handler, Proxy, Mux, MuxWrapper
|
||||||
|
from helpers import *
|
||||||
|
|
||||||
|
|
||||||
|
def _ipmatch(ipstr):
|
||||||
|
if ipstr == 'default':
|
||||||
|
ipstr = '0.0.0.0/0'
|
||||||
|
m = re.match(r'^(\d+(\.\d+(\.\d+(\.\d+)?)?)?)(?:/(\d+))?$', ipstr)
|
||||||
|
if m:
|
||||||
|
g = m.groups()
|
||||||
|
ips = g[0]
|
||||||
|
width = int(g[4] or 32)
|
||||||
|
if g[1] == None:
|
||||||
|
ips += '.0.0.0'
|
||||||
|
width = min(width, 8)
|
||||||
|
elif g[2] == None:
|
||||||
|
ips += '.0.0'
|
||||||
|
width = min(width, 16)
|
||||||
|
elif g[3] == None:
|
||||||
|
ips += '.0'
|
||||||
|
width = min(width, 24)
|
||||||
|
return (struct.unpack('!I', socket.inet_aton(ips))[0], width)
|
||||||
|
|
||||||
|
|
||||||
|
def _ipstr(ip, width):
|
||||||
|
if width >= 32:
|
||||||
|
return ip
|
||||||
|
else:
|
||||||
|
return "%s/%d" % (ip, width)
|
||||||
|
|
||||||
|
|
||||||
|
def _maskbits(netmask):
|
||||||
|
if not netmask:
|
||||||
|
return 32
|
||||||
|
for i in range(32):
|
||||||
|
if netmask[0] & _shl(1, i):
|
||||||
|
return 32-i
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def _shl(n, bits):
|
||||||
|
# we use our own implementation of left-shift because
|
||||||
|
# results may be different between older and newer versions
|
||||||
|
# of python for numbers like 1<<32. We use long() because
|
||||||
|
# int(2**32) doesn't work in older python, which has limited
|
||||||
|
# int sizes.
|
||||||
|
return n * long(2**bits)
|
||||||
|
|
||||||
|
|
||||||
|
def _list_routes():
|
||||||
|
argv = ['netstat', '-rn']
|
||||||
|
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE)
|
||||||
|
routes = []
|
||||||
|
for line in p.stdout:
|
||||||
|
cols = re.split(r'\s+', line)
|
||||||
|
ipw = _ipmatch(cols[0])
|
||||||
|
if not ipw:
|
||||||
|
continue # some lines won't be parseable; never mind
|
||||||
|
maskw = _ipmatch(cols[2]) # linux only
|
||||||
|
mask = _maskbits(maskw) # returns 32 if maskw is null
|
||||||
|
width = min(ipw[1], mask)
|
||||||
|
ip = ipw[0] & _shl(_shl(1, width) - 1, 32-width)
|
||||||
|
routes.append((socket.inet_ntoa(struct.pack('!I', ip)), width))
|
||||||
|
rv = p.wait()
|
||||||
|
if rv != 0:
|
||||||
|
log('WARNING: %r returned %d\n' % (argv, rv))
|
||||||
|
log('WARNING: That prevents --auto-nets from working.\n')
|
||||||
|
return routes
|
||||||
|
|
||||||
|
|
||||||
|
def list_routes():
|
||||||
|
l = []
|
||||||
|
for (ip,width) in _list_routes():
|
||||||
|
if not ip.startswith('0.') and not ip.startswith('127.'):
|
||||||
|
l.append((ip,width))
|
||||||
|
return l
|
||||||
|
|
||||||
|
|
||||||
|
def _exc_dump():
|
||||||
|
exc_info = sys.exc_info()
|
||||||
|
return ''.join(traceback.format_exception(*exc_info))
|
||||||
|
|
||||||
|
|
||||||
|
def start_hostwatch(seed_hosts):
|
||||||
|
s1,s2 = socket.socketpair()
|
||||||
|
pid = os.fork()
|
||||||
|
if not pid:
|
||||||
|
# child
|
||||||
|
rv = 99
|
||||||
|
try:
|
||||||
|
try:
|
||||||
|
s2.close()
|
||||||
|
os.dup2(s1.fileno(), 1)
|
||||||
|
os.dup2(s1.fileno(), 0)
|
||||||
|
s1.close()
|
||||||
|
rv = hostwatch.hw_main(seed_hosts) or 0
|
||||||
|
except Exception, e:
|
||||||
|
log('%s\n' % _exc_dump())
|
||||||
|
rv = 98
|
||||||
|
finally:
|
||||||
|
os._exit(rv)
|
||||||
|
s1.close()
|
||||||
|
return pid,s2
|
||||||
|
|
||||||
|
|
||||||
|
class Hostwatch:
|
||||||
|
def __init__(self):
|
||||||
|
self.pid = 0
|
||||||
|
self.sock = None
|
||||||
|
|
||||||
|
|
||||||
|
class DnsProxy(Handler):
|
||||||
|
def __init__(self, mux, chan, request):
|
||||||
|
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||||
|
Handler.__init__(self, [sock])
|
||||||
|
self.timeout = time.time()+30
|
||||||
|
self.mux = mux
|
||||||
|
self.chan = chan
|
||||||
|
self.tries = 0
|
||||||
|
self.peer = None
|
||||||
|
self.request = request
|
||||||
|
self.sock = sock
|
||||||
|
self.sock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
|
||||||
|
self.try_send()
|
||||||
|
|
||||||
|
def try_send(self):
|
||||||
|
if self.tries >= 3:
|
||||||
|
return
|
||||||
|
self.tries += 1
|
||||||
|
self.peer = resolvconf_random_nameserver()
|
||||||
|
self.sock.connect((self.peer, 53))
|
||||||
|
debug2('DNS: sending to %r\n' % self.peer)
|
||||||
|
try:
|
||||||
|
self.sock.send(self.request)
|
||||||
|
except socket.error, e:
|
||||||
|
if e.args[0] in ssnet.NET_ERRS:
|
||||||
|
# might have been spurious; try again.
|
||||||
|
# Note: these errors sometimes are reported by recv(),
|
||||||
|
# and sometimes by send(). We have to catch both.
|
||||||
|
debug2('DNS send to %r: %s\n' % (self.peer, e))
|
||||||
|
self.try_send()
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
log('DNS send to %r: %s\n' % (self.peer, e))
|
||||||
|
return
|
||||||
|
|
||||||
|
def callback(self):
|
||||||
|
try:
|
||||||
|
data = self.sock.recv(4096)
|
||||||
|
except socket.error, e:
|
||||||
|
if e.args[0] in ssnet.NET_ERRS:
|
||||||
|
# might have been spurious; try again.
|
||||||
|
# Note: these errors sometimes are reported by recv(),
|
||||||
|
# and sometimes by send(). We have to catch both.
|
||||||
|
debug2('DNS recv from %r: %s\n' % (self.peer, e))
|
||||||
|
self.try_send()
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
log('DNS recv from %r: %s\n' % (self.peer, e))
|
||||||
|
return
|
||||||
|
debug2('DNS response: %d bytes\n' % len(data))
|
||||||
|
self.mux.send(self.chan, ssnet.CMD_DNS_RESPONSE, data)
|
||||||
|
self.ok = False
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
if helpers.verbose >= 1:
|
||||||
|
helpers.logprefix = ' s: '
|
||||||
|
else:
|
||||||
|
helpers.logprefix = 'server: '
|
||||||
|
debug1('latency control setting = %r\n' % latency_control)
|
||||||
|
|
||||||
|
routes = list(list_routes())
|
||||||
|
debug1('available routes:\n')
|
||||||
|
for r in routes:
|
||||||
|
debug1(' %s/%d\n' % r)
|
||||||
|
|
||||||
|
# synchronization header
|
||||||
|
sys.stdout.write('\0\0SSHUTTLE0001')
|
||||||
|
sys.stdout.flush()
|
||||||
|
|
||||||
|
handlers = []
|
||||||
|
mux = Mux(socket.fromfd(sys.stdin.fileno(),
|
||||||
|
socket.AF_INET, socket.SOCK_STREAM),
|
||||||
|
socket.fromfd(sys.stdout.fileno(),
|
||||||
|
socket.AF_INET, socket.SOCK_STREAM))
|
||||||
|
handlers.append(mux)
|
||||||
|
routepkt = ''
|
||||||
|
for r in routes:
|
||||||
|
routepkt += '%s,%d\n' % r
|
||||||
|
mux.send(0, ssnet.CMD_ROUTES, routepkt)
|
||||||
|
|
||||||
|
hw = Hostwatch()
|
||||||
|
hw.leftover = ''
|
||||||
|
|
||||||
|
def hostwatch_ready():
|
||||||
|
assert(hw.pid)
|
||||||
|
content = hw.sock.recv(4096)
|
||||||
|
if content:
|
||||||
|
lines = (hw.leftover + content).split('\n')
|
||||||
|
if lines[-1]:
|
||||||
|
# no terminating newline: entry isn't complete yet!
|
||||||
|
hw.leftover = lines.pop()
|
||||||
|
lines.append('')
|
||||||
|
else:
|
||||||
|
hw.leftover = ''
|
||||||
|
mux.send(0, ssnet.CMD_HOST_LIST, '\n'.join(lines))
|
||||||
|
else:
|
||||||
|
raise Fatal('hostwatch process died')
|
||||||
|
|
||||||
|
def got_host_req(data):
|
||||||
|
if not hw.pid:
|
||||||
|
(hw.pid,hw.sock) = start_hostwatch(data.strip().split())
|
||||||
|
handlers.append(Handler(socks = [hw.sock],
|
||||||
|
callback = hostwatch_ready))
|
||||||
|
mux.got_host_req = got_host_req
|
||||||
|
|
||||||
|
def new_channel(channel, data):
|
||||||
|
(dstip,dstport) = data.split(',', 1)
|
||||||
|
dstport = int(dstport)
|
||||||
|
outwrap = ssnet.connect_dst(dstip,dstport)
|
||||||
|
handlers.append(Proxy(MuxWrapper(mux, channel), outwrap))
|
||||||
|
mux.new_channel = new_channel
|
||||||
|
|
||||||
|
dnshandlers = {}
|
||||||
|
def dns_req(channel, data):
|
||||||
|
debug2('Incoming DNS request.\n')
|
||||||
|
h = DnsProxy(mux, channel, data)
|
||||||
|
handlers.append(h)
|
||||||
|
dnshandlers[channel] = h
|
||||||
|
mux.got_dns_req = dns_req
|
||||||
|
|
||||||
|
while mux.ok:
|
||||||
|
if hw.pid:
|
||||||
|
assert(hw.pid > 0)
|
||||||
|
(rpid, rv) = os.waitpid(hw.pid, os.WNOHANG)
|
||||||
|
if rpid:
|
||||||
|
raise Fatal('hostwatch exited unexpectedly: code 0x%04x\n' % rv)
|
||||||
|
|
||||||
|
ssnet.runonce(handlers, mux)
|
||||||
|
if latency_control:
|
||||||
|
mux.check_fullness()
|
||||||
|
mux.callback()
|
||||||
|
|
||||||
|
if dnshandlers:
|
||||||
|
now = time.time()
|
||||||
|
for channel,h in dnshandlers.items():
|
||||||
|
if h.timeout < now or not h.ok:
|
||||||
|
del dnshandlers[channel]
|
||||||
|
h.ok = False
|
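Editor's note on the control protocol visible in server.main() above: the server advertises its routes to the client as a CMD_ROUTES payload on mux channel 0, encoded as newline-separated "ip,width" pairs (see the `routepkt += '%s,%d\n' % r` loop). The round trip below is only an illustration of that encoding; `encode_routes`/`decode_routes` are names invented here, and the real client-side parsing lives in client.py, which is not part of this diff.

```python
# Sketch of the CMD_ROUTES payload format used by server.main() above.
def encode_routes(routes):
    # routes: list of (ip_string, width) tuples, e.g. [('10.0.0.0', 8)]
    return ''.join('%s,%d\n' % r for r in routes)

def decode_routes(routepkt):
    routes = []
    for line in routepkt.strip().split('\n'):
        if not line:
            continue
        ip, width = line.split(',', 1)
        routes.append((ip, int(width)))
    return routes

sample = [('10.0.0.0', 8), ('192.168.1.0', 24)]
assert decode_routes(encode_routes(sample)) == sample
```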
30
setup.cfg
@ -1,30 +0,0 @@
[bumpversion]
current_version = 1.3.1

[bumpversion:file:setup.py]

[bumpversion:file:pyproject.toml]

[bumpversion:file:sshuttle/version.py]

[aliases]
test = pytest

[bdist_wheel]
universal = 1

[upload]
sign = true
identity = 0x1784577F811F6EAC

[flake8]
count = true
show-source = true
statistics = true
max-line-length = 128

[pycodestyle]
max-line-length = 128

[tool:pytest]
addopts = --cov=sshuttle --cov-branch --cov-report=term-missing
106
ssh.py
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
import sys, os, re, socket, zlib
|
||||||
|
import compat.ssubprocess as ssubprocess
|
||||||
|
import helpers
|
||||||
|
from helpers import *
|
||||||
|
|
||||||
|
|
||||||
|
def readfile(name):
|
||||||
|
basedir = os.path.dirname(os.path.abspath(sys.argv[0]))
|
||||||
|
path = [basedir] + sys.path
|
||||||
|
for d in path:
|
||||||
|
fullname = os.path.join(d, name)
|
||||||
|
if os.path.exists(fullname):
|
||||||
|
return open(fullname, 'rb').read()
|
||||||
|
raise Exception("can't find file %r in any of %r" % (name, path))
|
||||||
|
|
||||||
|
|
||||||
|
def empackage(z, filename, data=None):
|
||||||
|
(path,basename) = os.path.split(filename)
|
||||||
|
if not data:
|
||||||
|
data = readfile(filename)
|
||||||
|
content = z.compress(data)
|
||||||
|
content += z.flush(zlib.Z_SYNC_FLUSH)
|
||||||
|
return '%s\n%d\n%s' % (basename, len(content), content)
|
||||||
|
|
||||||
|
|
||||||
|
def connect(ssh_cmd, rhostport, python, stderr, options):
|
||||||
|
main_exe = sys.argv[0]
|
||||||
|
portl = []
|
||||||
|
|
||||||
|
rhostIsIPv6 = False
|
||||||
|
if (rhostport or '').count(':') > 1:
|
||||||
|
rhostIsIPv6 = True
|
||||||
|
if rhostport.count(']') or rhostport.count('['):
|
||||||
|
result = rhostport.split(']')
|
||||||
|
rhost = result[0].strip('[')
|
||||||
|
if len(result) > 1:
|
||||||
|
result[1] = result[1].strip(':')
|
||||||
|
if result[1] is not '':
|
||||||
|
portl = ['-p', str(int(result[1]))]
|
||||||
|
else: # can't disambiguate IPv6 colons and a port number. pass the hostname through.
|
||||||
|
rhost = rhostport
|
||||||
|
else: # IPv4
|
||||||
|
l = (rhostport or '').split(':', 1)
|
||||||
|
rhost = l[0]
|
||||||
|
if len(l) > 1:
|
||||||
|
portl = ['-p', str(int(l[1]))]
|
||||||
|
|
||||||
|
if rhost == '-':
|
||||||
|
rhost = None
|
||||||
|
|
||||||
|
ipv6flag = []
|
||||||
|
if rhostIsIPv6:
|
||||||
|
ipv6flag = ['-6']
|
||||||
|
|
||||||
|
z = zlib.compressobj(1)
|
||||||
|
content = readfile('assembler.py')
|
||||||
|
optdata = ''.join("%s=%r\n" % (k,v) for (k,v) in options.items())
|
||||||
|
content2 = (empackage(z, 'cmdline_options.py', optdata) +
|
||||||
|
empackage(z, 'helpers.py') +
|
||||||
|
empackage(z, 'compat/ssubprocess.py') +
|
||||||
|
empackage(z, 'ssnet.py') +
|
||||||
|
empackage(z, 'hostwatch.py') +
|
||||||
|
empackage(z, 'server.py') +
|
||||||
|
"\n")
|
||||||
|
|
||||||
|
pyscript = r"""
|
||||||
|
import sys;
|
||||||
|
skip_imports=1;
|
||||||
|
verbosity=%d;
|
||||||
|
exec compile(sys.stdin.read(%d), "assembler.py", "exec")
|
||||||
|
""" % (helpers.verbose or 0, len(content))
|
||||||
|
pyscript = re.sub(r'\s+', ' ', pyscript.strip())
|
||||||
|
|
||||||
|
|
||||||
|
if not rhost:
|
||||||
|
# ignore the --python argument when running locally; we already know
|
||||||
|
# which python version works.
|
||||||
|
argv = [sys.argv[1], '-c', pyscript]
|
||||||
|
else:
|
||||||
|
if ssh_cmd:
|
||||||
|
sshl = ssh_cmd.split(' ')
|
||||||
|
else:
|
||||||
|
sshl = ['ssh']
|
||||||
|
if python:
|
||||||
|
pycmd = "'%s' -c '%s'" % (python, pyscript)
|
||||||
|
else:
|
||||||
|
pycmd = ("P=python2; $P -V 2>/dev/null || P=python; "
|
||||||
|
"exec \"$P\" -c '%s'") % pyscript
|
||||||
|
argv = (sshl +
|
||||||
|
portl +
|
||||||
|
ipv6flag +
|
||||||
|
[rhost, '--', pycmd])
|
||||||
|
(s1,s2) = socket.socketpair()
|
||||||
|
def setup():
|
||||||
|
# runs in the child process
|
||||||
|
s2.close()
|
||||||
|
s1a,s1b = os.dup(s1.fileno()), os.dup(s1.fileno())
|
||||||
|
s1.close()
|
||||||
|
debug2('executing: %r\n' % argv)
|
||||||
|
p = ssubprocess.Popen(argv, stdin=s1a, stdout=s1b, preexec_fn=setup,
|
||||||
|
close_fds=True, stderr=stderr)
|
||||||
|
os.close(s1a)
|
||||||
|
os.close(s1b)
|
||||||
|
s2.sendall(content)
|
||||||
|
s2.sendall(content2)
|
||||||
|
return p, s2
|
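Editor's note: the rhostport handling at the top of connect() above packs three cases into one block (bracketed IPv6 with an optional port, bare IPv6 where a port cannot be disambiguated, and plain IPv4/hostname with an optional port). The standalone restatement below follows the same decision tree purely for readability; `split_rhostport` is a name invented for this illustration and is not a function sshuttle defines.

```python
# Illustrative restatement of the rhostport parsing in connect() above.
def split_rhostport(rhostport):
    portl = []
    if (rhostport or '').count(':') > 1:            # IPv6 address present
        if '[' in rhostport or ']' in rhostport:    # bracketed form may carry a port
            host, _, rest = rhostport.partition(']')
            host = host.strip('[')
            rest = rest.strip(':')
            if rest:
                portl = ['-p', str(int(rest))]
        else:                                       # bare IPv6: port is ambiguous
            host = rhostport
    else:                                           # IPv4 or hostname
        host, _, port = (rhostport or '').partition(':')
        if port:
            portl = ['-p', str(int(port))]
    return host, portl

print(split_rhostport('[2001:db8::1]:2222'))  # ('2001:db8::1', ['-p', '2222'])
print(split_rhostport('example.com:22'))      # ('example.com', ['-p', '22'])
print(split_rhostport('2001:db8::1'))         # ('2001:db8::1', [])
```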
12
sshuttle
Executable file
@ -0,0 +1,12 @@
#!/bin/sh
EXE=$0
for i in 1 2 3 4 5 6 7 8 9 10; do
    [ -L "$EXE" ] || break
    EXE=$(readlink "$EXE")
done
DIR=$(dirname "$EXE")
if python2 -V 2>/dev/null; then
    exec python2 "$DIR/main.py" python2 "$@"
else
    exec python "$DIR/main.py" python "$@"
fi
@ -1 +0,0 @@
__version__ = "1.3.1"
@ -1,10 +0,0 @@
"""Coverage.py's main entry point."""
import sys
import os
from sshuttle.cmdline import main
from sshuttle.helpers import debug3

debug3("Start: (pid=%s, ppid=%s) %r" % (os.getpid(), os.getppid(), sys.argv))
exit_code = main()
debug3("Exit: (pid=%s, ppid=%s, code=%s) cmd %r" % (os.getpid(), os.getppid(), exit_code, sys.argv))
sys.exit(exit_code)
@ -1,53 +0,0 @@
import sys
import zlib
import types
import platform

stdin = stdin  # type: typing.BinaryIO  # noqa: F821 must be a previously defined global
verbosity = verbosity  # type: int  # noqa: F821 must be a previously defined global
if verbosity > 0:
    sys.stderr.write(' s: Running server on remote host with %s (version %s)\n'
                     % (sys.executable, platform.python_version()))

z = zlib.decompressobj()

while 1:
    name = stdin.readline().strip()
    if name:
        # python2 compat: in python2 stdin.readline().strip() -> str
        # in python3 stdin.readline().strip() -> bytes
        # (see #481)
        if sys.version_info >= (3, 0):
            name = name.decode("ASCII")
        nbytes = int(stdin.readline())
        if verbosity >= 2:
            sys.stderr.write(' s: assembling %r (%d bytes)\n'
                             % (name, nbytes))
        content = z.decompress(stdin.read(nbytes))

        module = types.ModuleType(name)
        parents = name.rsplit(".", 1)
        if len(parents) == 2:
            parent, parent_name = parents
            setattr(sys.modules[parent], parent_name, module)

        code = compile(content, name, "exec")
        exec(code, module.__dict__)  # nosec
        sys.modules[name] = module
    else:
        break

sys.stderr.flush()
sys.stdout.flush()

# import can only happen once the code has been transferred to
# the server. 'noqa: E402' excludes these lines from QA checks.
import sshuttle.helpers  # noqa: E402
sshuttle.helpers.verbose = verbosity

import sshuttle.cmdline_options as options  # noqa: E402
from sshuttle.server import main  # noqa: E402

main(options.latency_control, options.latency_buffer_size,
     options.auto_hosts, options.to_nameserver,
     options.auto_nets)
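Editor's note: taken together, empackage() in ssh.py and the read loop in assembler.py above define a simple streaming format for shipping modules to the server: a module-name line, a byte-count line, then that many bytes of zlib-compressed source, all drawn from one shared compression stream, terminated by a blank name line. The snippet below is a standalone sketch of that round trip using in-memory buffers instead of the real ssh stdin/stdout; `pack_modules`/`unpack_modules` are names invented for this illustration.

```python
import io
import zlib

# Sender side: mirrors empackage() (shared compressobj, Z_SYNC_FLUSH framing).
def pack_modules(modules):
    z = zlib.compressobj(1)
    out = b''
    for name, source in modules:
        chunk = z.compress(source) + z.flush(zlib.Z_SYNC_FLUSH)
        out += ('%s\n%d\n' % (name, len(chunk))).encode('ASCII') + chunk
    return out + b'\n'          # blank name line terminates the stream

# Receiver side: mirrors the read loop in assembler.py (shared decompressobj).
def unpack_modules(stream):
    z = zlib.decompressobj()
    f = io.BytesIO(stream)
    result = {}
    while True:
        name = f.readline().strip()
        if not name:
            break
        nbytes = int(f.readline())
        result[name.decode('ASCII')] = z.decompress(f.read(nbytes))
    return result

mods = [('helpers.py', b'def log(s):\n    pass\n')]
assert unpack_modules(pack_modules(mods)) == {'helpers.py': mods[0][1]}
```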
1173
sshuttle/client.py
File diff suppressed because it is too large
@ -1,145 +0,0 @@
|
|||||||
import os
|
|
||||||
import re
|
|
||||||
import shlex
|
|
||||||
import socket
|
|
||||||
import sys
|
|
||||||
import sshuttle.helpers as helpers
|
|
||||||
import sshuttle.client as client
|
|
||||||
import sshuttle.firewall as firewall
|
|
||||||
import sshuttle.hostwatch as hostwatch
|
|
||||||
import sshuttle.ssyslog as ssyslog
|
|
||||||
from sshuttle.options import parser, parse_ipport
|
|
||||||
from sshuttle.helpers import family_ip_tuple, log, Fatal
|
|
||||||
from sshuttle.sudoers import sudoers
|
|
||||||
from sshuttle.namespace import enter_namespace
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
if 'SSHUTTLE_ARGS' in os.environ:
|
|
||||||
env_args = shlex.split(os.environ['SSHUTTLE_ARGS'])
|
|
||||||
else:
|
|
||||||
env_args = []
|
|
||||||
args = [*env_args, *sys.argv[1:]]
|
|
||||||
|
|
||||||
opt = parser.parse_args(args)
|
|
||||||
|
|
||||||
if opt.sudoers_no_modify:
|
|
||||||
# sudoers() calls exit() when it completes
|
|
||||||
sudoers(user_name=opt.sudoers_user)
|
|
||||||
|
|
||||||
if opt.daemon:
|
|
||||||
opt.syslog = 1
|
|
||||||
if opt.wrap:
|
|
||||||
import sshuttle.ssnet as ssnet
|
|
||||||
ssnet.MAX_CHANNEL = opt.wrap
|
|
||||||
if opt.latency_buffer_size:
|
|
||||||
import sshuttle.ssnet as ssnet
|
|
||||||
ssnet.LATENCY_BUFFER_SIZE = opt.latency_buffer_size
|
|
||||||
helpers.verbose = opt.verbose
|
|
||||||
|
|
||||||
try:
|
|
||||||
# Since namespace and namespace-pid options are only available
|
|
||||||
# in linux, we must check if it exists with getattr
|
|
||||||
namespace = getattr(opt, 'namespace', None)
|
|
||||||
namespace_pid = getattr(opt, 'namespace_pid', None)
|
|
||||||
if namespace or namespace_pid:
|
|
||||||
prefix = helpers.logprefix
|
|
||||||
helpers.logprefix = 'ns: '
|
|
||||||
enter_namespace(namespace, namespace_pid)
|
|
||||||
helpers.logprefix = prefix
|
|
||||||
|
|
||||||
if opt.firewall:
|
|
||||||
if opt.subnets or opt.subnets_file:
|
|
||||||
parser.error('exactly zero arguments expected')
|
|
||||||
return firewall.main(opt.method, opt.syslog)
|
|
||||||
elif opt.hostwatch:
|
|
||||||
hostwatch.hw_main(opt.subnets, opt.auto_hosts)
|
|
||||||
return 0
|
|
||||||
else:
|
|
||||||
# parse_subnetports() is used to create a list of includes
|
|
||||||
# and excludes. It is called once for each parameter and
|
|
||||||
# returns a list of one or more items for each subnet (it
|
|
||||||
# can return more than one item when a hostname in the
|
|
||||||
# parameter resolves to multiple IP addresses). Here, we
|
|
||||||
# flatten these lists.
|
|
||||||
includes = [item for sublist in opt.subnets+opt.subnets_file
|
|
||||||
for item in sublist]
|
|
||||||
excludes = [item for sublist in opt.exclude for item in sublist]
|
|
||||||
|
|
||||||
if not includes and not opt.auto_nets:
|
|
||||||
parser.error('at least one subnet, subnet file, '
|
|
||||||
'or -N expected')
|
|
||||||
remotename = opt.remote
|
|
||||||
if remotename == '' or remotename == '-':
|
|
||||||
remotename = None
|
|
||||||
nslist = [family_ip_tuple(ns) for ns in opt.ns_hosts]
|
|
||||||
if opt.seed_hosts:
|
|
||||||
sh = re.split(r'[\s,]+', (opt.seed_hosts or "").strip())
|
|
||||||
elif opt.auto_hosts:
|
|
||||||
sh = []
|
|
||||||
else:
|
|
||||||
sh = None
|
|
||||||
if opt.listen:
|
|
||||||
ipport_v6 = None
|
|
||||||
ipport_v4 = None
|
|
||||||
lst = opt.listen.split(",")
|
|
||||||
for ip in lst:
|
|
||||||
family, ip, port = parse_ipport(ip)
|
|
||||||
if family == socket.AF_INET6:
|
|
||||||
ipport_v6 = (ip, port)
|
|
||||||
else:
|
|
||||||
ipport_v4 = (ip, port)
|
|
||||||
else:
|
|
||||||
# parse_ipport4('127.0.0.1:0')
|
|
||||||
ipport_v4 = "auto"
|
|
||||||
# parse_ipport6('[::1]:0')
|
|
||||||
ipport_v6 = "auto" if not opt.disable_ipv6 else None
|
|
||||||
try:
|
|
||||||
int(opt.tmark, 16)
|
|
||||||
except ValueError:
|
|
||||||
parser.error("--tmark must be a hexadecimal value")
|
|
||||||
opt.tmark = opt.tmark.lower() # make 'x' in 0x lowercase
|
|
||||||
if not opt.tmark.startswith("0x"): # accept without 0x prefix
|
|
||||||
opt.tmark = "0x%s" % opt.tmark
|
|
||||||
if opt.syslog:
|
|
||||||
ssyslog.start_syslog()
|
|
||||||
ssyslog.close_stdin()
|
|
||||||
ssyslog.stdout_to_syslog()
|
|
||||||
ssyslog.stderr_to_syslog()
|
|
||||||
return_code = client.main(ipport_v6, ipport_v4,
|
|
||||||
opt.ssh_cmd,
|
|
||||||
remotename,
|
|
||||||
opt.python,
|
|
||||||
opt.latency_control,
|
|
||||||
opt.latency_buffer_size,
|
|
||||||
opt.dns,
|
|
||||||
nslist,
|
|
||||||
opt.method,
|
|
||||||
sh,
|
|
||||||
opt.auto_hosts,
|
|
||||||
opt.auto_nets,
|
|
||||||
includes,
|
|
||||||
excludes,
|
|
||||||
opt.daemon,
|
|
||||||
opt.to_ns,
|
|
||||||
opt.pidfile,
|
|
||||||
opt.user,
|
|
||||||
opt.group,
|
|
||||||
opt.sudo_pythonpath,
|
|
||||||
opt.add_cmd_delimiter,
|
|
||||||
opt.remote_shell,
|
|
||||||
opt.tmark)
|
|
||||||
|
|
||||||
if return_code == 0:
|
|
||||||
log('Normal exit code, exiting...')
|
|
||||||
else:
|
|
||||||
log('Abnormal exit code %d detected, failing...' % return_code)
|
|
||||||
return return_code
|
|
||||||
|
|
||||||
except Fatal as e:
|
|
||||||
log('fatal: %s' % e)
|
|
||||||
return 99
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
log('\n')
|
|
||||||
log('Keyboard interrupt: exiting.')
|
|
||||||
return 1
|
|
@ -1,428 +0,0 @@
|
|||||||
import errno
|
|
||||||
import shutil
|
|
||||||
import socket
|
|
||||||
import signal
|
|
||||||
import sys
|
|
||||||
import os
|
|
||||||
import platform
|
|
||||||
import traceback
|
|
||||||
import subprocess as ssubprocess
|
|
||||||
import base64
|
|
||||||
import io
|
|
||||||
|
|
||||||
import sshuttle.ssyslog as ssyslog
|
|
||||||
import sshuttle.helpers as helpers
|
|
||||||
from sshuttle.helpers import is_admin_user, log, debug1, debug2, debug3, Fatal
|
|
||||||
from sshuttle.methods import get_auto_method, get_method
|
|
||||||
|
|
||||||
if sys.platform == 'win32':
|
|
||||||
HOSTSFILE = r"C:\Windows\System32\drivers\etc\hosts"
|
|
||||||
else:
|
|
||||||
HOSTSFILE = '/etc/hosts'
|
|
||||||
sshuttle_pid = None
|
|
||||||
|
|
||||||
|
|
||||||
def rewrite_etc_hosts(hostmap, port):
|
|
||||||
BAKFILE = '%s.sbak' % HOSTSFILE
|
|
||||||
APPEND = '# sshuttle-firewall-%d AUTOCREATED' % port
|
|
||||||
old_content = ''
|
|
||||||
st = None
|
|
||||||
try:
|
|
||||||
old_content = open(HOSTSFILE).read()
|
|
||||||
st = os.stat(HOSTSFILE)
|
|
||||||
except IOError as e:
|
|
||||||
if e.errno == errno.ENOENT:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
if old_content.strip() and not os.path.exists(BAKFILE):
|
|
||||||
try:
|
|
||||||
os.link(HOSTSFILE, BAKFILE)
|
|
||||||
except OSError:
|
|
||||||
# file is locked - performing non-atomic copy
|
|
||||||
shutil.copyfile(HOSTSFILE, BAKFILE)
|
|
||||||
tmpname = "%s.%d.tmp" % (HOSTSFILE, port)
|
|
||||||
f = open(tmpname, 'w')
|
|
||||||
for line in old_content.rstrip().split('\n'):
|
|
||||||
if line.find(APPEND) >= 0:
|
|
||||||
continue
|
|
||||||
f.write('%s\n' % line)
|
|
||||||
for (name, ip) in sorted(hostmap.items()):
|
|
||||||
f.write('%-30s %s\n' % ('%s %s' % (ip, name), APPEND))
|
|
||||||
f.close()
|
|
||||||
|
|
||||||
if sys.platform != 'win32':
|
|
||||||
if st is not None:
|
|
||||||
os.chown(tmpname, st.st_uid, st.st_gid)
|
|
||||||
os.chmod(tmpname, st.st_mode)
|
|
||||||
else:
|
|
||||||
os.chown(tmpname, 0, 0)
|
|
||||||
os.chmod(tmpname, 0o644)
|
|
||||||
try:
|
|
||||||
os.rename(tmpname, HOSTSFILE)
|
|
||||||
except OSError:
|
|
||||||
# file is locked - performing non-atomic copy
|
|
||||||
log('Warning: Using a non-atomic way to overwrite %s that can corrupt the file if '
|
|
||||||
'multiple processes write to it simultaneously.' % HOSTSFILE)
|
|
||||||
shutil.move(tmpname, HOSTSFILE)
|
|
||||||
|
|
||||||
|
|
||||||
def restore_etc_hosts(hostmap, port):
|
|
||||||
# Only restore if we added hosts to /etc/hosts previously.
|
|
||||||
if len(hostmap) > 0:
|
|
||||||
debug2('undoing /etc/hosts changes.')
|
|
||||||
rewrite_etc_hosts({}, port)
|
|
||||||
|
|
||||||
|
|
||||||
def firewall_exit(signum, frame):
|
|
||||||
# The typical sshuttle exit is that the main sshuttle process
|
|
||||||
# exits, closes file descriptors it uses, and the firewall process
|
|
||||||
# notices that it can't read from stdin anymore and exits
|
|
||||||
# (cleaning up firewall rules).
|
|
||||||
#
|
|
||||||
# However, in some cases, Ctrl+C might get sent to the firewall
|
|
||||||
# process. This might be caused if someone manually tries to kill the
|
|
||||||
# firewall process, or if sshuttle was started using sudo's use_pty option
|
|
||||||
# and they try to exit by pressing Ctrl+C. Here, we forward the
|
|
||||||
# Ctrl+C/SIGINT to the main sshuttle process which should trigger
|
|
||||||
# the typical exit process as described above.
|
|
||||||
global sshuttle_pid
|
|
||||||
if sshuttle_pid:
|
|
||||||
debug1("Relaying interupt signal to sshuttle process %d" % sshuttle_pid)
|
|
||||||
if sys.platform == 'win32':
|
|
||||||
sig = signal.CTRL_C_EVENT
|
|
||||||
else:
|
|
||||||
sig = signal.SIGINT
|
|
||||||
os.kill(sshuttle_pid, sig)
|
|
||||||
|
|
||||||
|
|
||||||
def _setup_daemon_for_unix_like():
|
|
||||||
if not is_admin_user():
|
|
||||||
raise Fatal('You must have root privileges (or enable su/sudo) to set the firewall')
|
|
||||||
|
|
||||||
# don't disappear if our controlling terminal or stdout/stderr
|
|
||||||
# disappears; we still have to clean up.
|
|
||||||
signal.signal(signal.SIGHUP, signal.SIG_IGN)
|
|
||||||
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
|
|
||||||
signal.signal(signal.SIGTERM, firewall_exit)
|
|
||||||
signal.signal(signal.SIGINT, firewall_exit)
|
|
||||||
|
|
||||||
# Calling setsid() here isn't strictly necessary. However, it forces
|
|
||||||
# Ctrl+C to get sent to the main sshuttle process instead of to
|
|
||||||
# the firewall process---which is our preferred way to shutdown.
|
|
||||||
# Nonetheless, if the firewall process receives a SIGTERM/SIGINT
|
|
||||||
# signal, it will relay a SIGINT to the main sshuttle process
|
|
||||||
# automatically.
|
|
||||||
try:
|
|
||||||
os.setsid()
|
|
||||||
except OSError:
|
|
||||||
# setsid() fails if sudo is configured with the use_pty option.
|
|
||||||
pass
|
|
||||||
|
|
||||||
return sys.stdin.buffer, sys.stdout.buffer
|
|
||||||
|
|
||||||
|
|
||||||
def _setup_daemon_for_windows():
|
|
||||||
if not is_admin_user():
|
|
||||||
raise Fatal('You must be administrator to set the firewall')
|
|
||||||
|
|
||||||
signal.signal(signal.SIGTERM, firewall_exit)
|
|
||||||
signal.signal(signal.SIGINT, firewall_exit)
|
|
||||||
|
|
||||||
com_chan = os.environ.get('SSHUTTLE_FW_COM_CHANNEL')
|
|
||||||
if com_chan == 'stdio':
|
|
||||||
debug3('Using inherited stdio for communicating with sshuttle client process')
|
|
||||||
else:
|
|
||||||
debug3('Using shared socket for communicating with sshuttle client process')
|
|
||||||
socket_share_data = base64.b64decode(com_chan)
|
|
||||||
sock = socket.fromshare(socket_share_data) # type: socket.socket
|
|
||||||
sys.stdin = io.TextIOWrapper(sock.makefile('rb', buffering=0))
|
|
||||||
sys.stdout = io.TextIOWrapper(sock.makefile('wb', buffering=0), write_through=True)
|
|
||||||
sock.close()
|
|
||||||
return sys.stdin.buffer, sys.stdout.buffer
|
|
||||||
|
|
||||||
|
|
||||||
# Isolate function that needs to be replaced for tests
|
|
||||||
if sys.platform == 'win32':
|
|
||||||
setup_daemon = _setup_daemon_for_windows
|
|
||||||
else:
|
|
||||||
setup_daemon = _setup_daemon_for_unix_like
|
|
||||||
|
|
||||||
|
|
||||||
# Note that we're sorting in a very particular order:
|
|
||||||
# we need to go from smaller, more specific, port ranges, to larger,
|
|
||||||
# less-specific, port ranges. At each level, we order by subnet
|
|
||||||
# width, from most-specific subnets (largest swidth) to
|
|
||||||
# least-specific. On ties, excludes come first.
|
|
||||||
# s:(inet, subnet width, exclude flag, subnet, first port, last port)
|
|
||||||
def subnet_weight(s):
|
|
||||||
return (-s[-1] + (s[-2] or -65535), s[1], s[2])
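Editor's note: the comment block above states the intended ordering but can be hard to follow from the key tuple alone. The worked example below shows the order subnet_weight() produces; the `reverse=True` sort matches how the call sites elsewhere in sshuttle consume it, but those call sites are not part of this diff, so treat that detail as an assumption.

```python
import socket

def subnet_weight(s):           # copied from the definition above
    return (-s[-1] + (s[-2] or -65535), s[1], s[2])

subnets = [
    # (family, swidth, exclude, subnet, first port, last port)
    (socket.AF_INET, 0,  False, '0.0.0.0',  0,   0),    # catch-all include
    (socket.AF_INET, 8,  True,  '10.0.0.0', 0,   0),    # broad exclude
    (socket.AF_INET, 32, False, '10.1.2.3', 0,   0),    # single host
    (socket.AF_INET, 32, False, '10.1.2.3', 443, 443),  # single host, one port
]

for s in sorted(subnets, key=subnet_weight, reverse=True):
    print(s)
# Expected order: the port-restricted /32 first, then the plain /32,
# then the /8 exclude, then the 0.0.0.0/0 catch-all.
```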
|
|
||||||
|
|
||||||
|
|
||||||
def flush_systemd_dns_cache():
|
|
||||||
# If the user is using systemd-resolve for DNS resolution, it is
|
|
||||||
# possible for the request to go through systemd-resolve before we
|
|
||||||
# see it...and it may use a cached result instead of sending a
|
|
||||||
# request that we can intercept. When sshuttle starts and stops,
|
|
||||||
# this means that we should clear the cache!
|
|
||||||
#
|
|
||||||
# The command to do this was named systemd-resolve, but changed to
|
|
||||||
# resolvectl in systemd 239.
|
|
||||||
# https://github.com/systemd/systemd/blob/f8eb41003df1a4eab59ff9bec67b2787c9368dbd/NEWS#L3816
|
|
||||||
|
|
||||||
p = None
|
|
||||||
if helpers.which("resolvectl"):
|
|
||||||
debug2("Flushing systemd's DNS resolver cache: "
|
|
||||||
"resolvectl flush-caches")
|
|
||||||
p = ssubprocess.Popen(["resolvectl", "flush-caches"],
|
|
||||||
stdout=ssubprocess.PIPE, env=helpers.get_env())
|
|
||||||
elif helpers.which("systemd-resolve"):
|
|
||||||
debug2("Flushing systemd's DNS resolver cache: "
|
|
||||||
"systemd-resolve --flush-caches")
|
|
||||||
p = ssubprocess.Popen(["systemd-resolve", "--flush-caches"],
|
|
||||||
stdout=ssubprocess.PIPE, env=helpers.get_env())
|
|
||||||
|
|
||||||
if p:
|
|
||||||
# Wait so flush is finished and process doesn't show up as defunct.
|
|
||||||
rv = p.wait()
|
|
||||||
if rv != 0:
|
|
||||||
log("Received non-zero return code %d when flushing DNS resolver "
|
|
||||||
"cache." % rv)
|
|
||||||
|
|
||||||
|
|
||||||
# This is some voodoo for setting up the kernel's transparent
|
|
||||||
# proxying stuff. If subnets is empty, we just delete our sshuttle rules;
|
|
||||||
# otherwise we delete it, then make them from scratch.
|
|
||||||
#
|
|
||||||
# This code is supposed to clean up after itself by deleting its rules on
|
|
||||||
# exit. In case that fails, it's not the end of the world; future runs will
|
|
||||||
# supersede it in the transproxy list, at least, so the leftover rules
|
|
||||||
# are hopefully harmless.
|
|
||||||
def main(method_name, syslog):
|
|
||||||
helpers.logprefix = 'fw: '
|
|
||||||
stdin, stdout = setup_daemon()
|
|
||||||
hostmap = {}
|
|
||||||
debug1('Starting firewall with Python version %s'
|
|
||||||
% platform.python_version())
|
|
||||||
|
|
||||||
if method_name == "auto":
|
|
||||||
method = get_auto_method()
|
|
||||||
else:
|
|
||||||
method = get_method(method_name)
|
|
||||||
|
|
||||||
if syslog:
|
|
||||||
ssyslog.start_syslog()
|
|
||||||
ssyslog.stderr_to_syslog()
|
|
||||||
|
|
||||||
if not method.is_supported():
|
|
||||||
raise Fatal("The %s method is not supported on this machine. "
|
|
||||||
"Check that the appropriate programs are in your "
|
|
||||||
"PATH." % method_name)
|
|
||||||
|
|
||||||
debug1('ready method name %s.' % method.name)
|
|
||||||
stdout.write(('READY %s\n' % method.name).encode('ASCII'))
|
|
||||||
stdout.flush()
|
|
||||||
|
|
||||||
def _read_next_string_line():
|
|
||||||
try:
|
|
||||||
line = stdin.readline(128)
|
|
||||||
if not line:
|
|
||||||
return # parent probably exited
|
|
||||||
return line.decode('ASCII').strip()
|
|
||||||
except IOError as e:
|
|
||||||
# On windows, ConnectionResetError is thrown when parent process closes its socket pair end
|
|
||||||
debug3('read from stdin failed: %s' % (e,))
|
|
||||||
return
|
|
||||||
# we wait until we get some input before creating the rules. That way,
|
|
||||||
# sshuttle can launch us as early as possible (and get sudo password
|
|
||||||
# authentication as early in the startup process as possible).
|
|
||||||
try:
|
|
||||||
line = _read_next_string_line()
|
|
||||||
if not line:
|
|
||||||
return # parent probably exited
|
|
||||||
except IOError as e:
|
|
||||||
# On windows, ConnectionResetError is thrown when parent process closes its socket pair end
|
|
||||||
debug3('read from stdin failed: %s' % (e,))
|
|
||||||
return
|
|
||||||
|
|
||||||
subnets = []
|
|
||||||
if line != 'ROUTES':
|
|
||||||
raise Fatal('expected ROUTES but got %r' % line)
|
|
||||||
while 1:
|
|
||||||
line = _read_next_string_line()
|
|
||||||
if not line:
|
|
||||||
raise Fatal('expected route but got %r' % line)
|
|
||||||
elif line.startswith("NSLIST"):
|
|
||||||
break
|
|
||||||
try:
|
|
||||||
(family, width, exclude, ip, fport, lport) = line.split(',', 5)
|
|
||||||
except Exception:
|
|
||||||
raise Fatal('expected route or NSLIST but got %r' % line)
|
|
||||||
subnets.append((
|
|
||||||
int(family),
|
|
||||||
int(width),
|
|
||||||
bool(int(exclude)),
|
|
||||||
ip,
|
|
||||||
int(fport),
|
|
||||||
int(lport)))
|
|
||||||
debug2('Got subnets: %r' % subnets)
|
|
||||||
|
|
||||||
nslist = []
|
|
||||||
if line != 'NSLIST':
|
|
||||||
raise Fatal('expected NSLIST but got %r' % line)
|
|
||||||
while 1:
|
|
||||||
line = _read_next_string_line()
|
|
||||||
if not line:
|
|
||||||
raise Fatal('expected nslist but got %r' % line)
|
|
||||||
elif line.startswith("PORTS "):
|
|
||||||
break
|
|
||||||
try:
|
|
||||||
(family, ip) = line.split(',', 1)
|
|
||||||
except Exception:
|
|
||||||
raise Fatal('expected nslist or PORTS but got %r' % line)
|
|
||||||
nslist.append((int(family), ip))
|
|
||||||
debug2('Got partial nslist: %r' % nslist)
|
|
||||||
debug2('Got nslist: %r' % nslist)
|
|
||||||
|
|
||||||
if not line.startswith('PORTS '):
|
|
||||||
raise Fatal('expected PORTS but got %r' % line)
|
|
||||||
_, _, ports = line.partition(" ")
|
|
||||||
ports = ports.split(",")
|
|
||||||
if len(ports) != 4:
|
|
||||||
raise Fatal('expected 4 ports but got %d' % len(ports))
|
|
||||||
port_v6 = int(ports[0])
|
|
||||||
port_v4 = int(ports[1])
|
|
||||||
dnsport_v6 = int(ports[2])
|
|
||||||
dnsport_v4 = int(ports[3])
|
|
||||||
|
|
||||||
assert port_v6 >= 0
|
|
||||||
assert port_v6 <= 65535
|
|
||||||
assert port_v4 >= 0
|
|
||||||
assert port_v4 <= 65535
|
|
||||||
assert dnsport_v6 >= 0
|
|
||||||
assert dnsport_v6 <= 65535
|
|
||||||
assert dnsport_v4 >= 0
|
|
||||||
assert dnsport_v4 <= 65535
|
|
||||||
|
|
||||||
debug2('Got ports: %d,%d,%d,%d'
|
|
||||||
% (port_v6, port_v4, dnsport_v6, dnsport_v4))
|
|
||||||
|
|
||||||
line = _read_next_string_line()
|
|
||||||
if not line or not line.startswith("GO "):
|
|
||||||
raise Fatal('expected GO but got %r' % line)
|
|
||||||
|
|
||||||
_, _, args = line.partition(" ")
|
|
||||||
global sshuttle_pid
|
|
||||||
udp, user, group, tmark, sshuttle_pid = args.split(" ", 4)
|
|
||||||
udp = bool(int(udp))
|
|
||||||
sshuttle_pid = int(sshuttle_pid)
|
|
||||||
if user == '-':
|
|
||||||
user = None
|
|
||||||
if group == '-':
|
|
||||||
group = None
|
|
||||||
debug2('Got udp: %r, user: %r, group: %r, tmark: %s, sshuttle_pid: %d' %
|
|
||||||
(udp, user, group, tmark, sshuttle_pid))
|
|
||||||
|
|
||||||
subnets_v6 = [i for i in subnets if i[0] == socket.AF_INET6]
|
|
||||||
nslist_v6 = [i for i in nslist if i[0] == socket.AF_INET6]
|
|
||||||
subnets_v4 = [i for i in subnets if i[0] == socket.AF_INET]
|
|
||||||
nslist_v4 = [i for i in nslist if i[0] == socket.AF_INET]
|
|
||||||
|
|
||||||
try:
|
|
||||||
debug1('setting up.')
|
|
||||||
|
|
||||||
if subnets_v6 or nslist_v6:
|
|
||||||
debug2('setting up IPv6.')
|
|
||||||
method.setup_firewall(
|
|
||||||
port_v6, dnsport_v6, nslist_v6,
|
|
||||||
socket.AF_INET6, subnets_v6, udp,
|
|
||||||
user, group, tmark)
|
|
||||||
|
|
||||||
if subnets_v4 or nslist_v4:
|
|
||||||
debug2('setting up IPv4.')
|
|
||||||
method.setup_firewall(
|
|
||||||
port_v4, dnsport_v4, nslist_v4,
|
|
||||||
socket.AF_INET, subnets_v4, udp,
|
|
||||||
user, group, tmark)
|
|
||||||
|
|
||||||
try:
|
|
||||||
# For some methods (e.g. windivert) firewall setup will be deferred / will run asynchronously.
# Such methods implement wait_for_firewall_ready() to wait until the firewall is up and running.
|
|
||||||
method.wait_for_firewall_ready(sshuttle_pid)
|
|
||||||
except NotImplementedError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if sys.platform == 'linux':
|
|
||||||
flush_systemd_dns_cache()
|
|
||||||
|
|
||||||
try:
|
|
||||||
stdout.write(b'STARTED\n')
|
|
||||||
stdout.flush()
|
|
||||||
except IOError as e: # the parent process probably died
|
|
||||||
debug3('write to stdout failed: %s' % (e,))
|
|
||||||
return
|
|
||||||
|
|
||||||
# Now we wait until EOF or any other kind of exception. We need
|
|
||||||
# to stay running so that we don't need a *second* password
|
|
||||||
# authentication at shutdown time - that cleanup is important!
|
|
||||||
while 1:
|
|
||||||
line = _read_next_string_line()
|
|
||||||
if not line:
|
|
||||||
return
|
|
||||||
if line.startswith('HOST '):
|
|
||||||
(name, ip) = line[5:].split(',', 1)
|
|
||||||
hostmap[name] = ip
|
|
||||||
debug2('setting up /etc/hosts.')
|
|
||||||
rewrite_etc_hosts(hostmap, port_v6 or port_v4)
|
|
||||||
elif line:
|
|
||||||
if not method.firewall_command(line):
|
|
||||||
raise Fatal('expected command, got %r' % line)
|
|
||||||
else:
|
|
||||||
break
|
|
||||||
finally:
|
|
||||||
try:
|
|
||||||
debug1('undoing changes.')
|
|
||||||
except Exception:
|
|
||||||
debug2('An error occurred, ignoring it.')
|
|
||||||
|
|
||||||
try:
|
|
||||||
if subnets_v6 or nslist_v6:
|
|
||||||
debug2('undoing IPv6 changes.')
|
|
||||||
method.restore_firewall(port_v6, socket.AF_INET6, udp, user, group)
|
|
||||||
except Exception:
|
|
||||||
try:
|
|
||||||
debug1("Error trying to undo IPv6 firewall.")
|
|
||||||
debug1(traceback.format_exc())
|
|
||||||
except Exception:
|
|
||||||
debug2('An error occurred, ignoring it.')
|
|
||||||
|
|
||||||
try:
|
|
||||||
if subnets_v4 or nslist_v4:
|
|
||||||
debug2('undoing IPv4 changes.')
|
|
||||||
method.restore_firewall(port_v4, socket.AF_INET, udp, user, group)
|
|
||||||
except Exception:
|
|
||||||
try:
|
|
||||||
debug1("Error trying to undo IPv4 firewall.")
|
|
||||||
debug1(traceback.format_exc())
|
|
||||||
except Exception:
|
|
||||||
debug2('An error occurred, ignoring it.')
|
|
||||||
|
|
||||||
try:
|
|
||||||
# debug2() message printed in restore_etc_hosts() function.
|
|
||||||
restore_etc_hosts(hostmap, port_v6 or port_v4)
|
|
||||||
except Exception:
|
|
||||||
try:
|
|
||||||
debug1("Error trying to undo /etc/hosts changes.")
|
|
||||||
debug1(traceback.format_exc())
|
|
||||||
except Exception:
|
|
||||||
debug2('An error occurred, ignoring it.')
|
|
||||||
|
|
||||||
if sys.platform == 'linux':
|
|
||||||
try:
|
|
||||||
flush_systemd_dns_cache()
|
|
||||||
except Exception:
|
|
||||||
try:
|
|
||||||
debug1("Error trying to flush systemd dns cache.")
|
|
||||||
debug1(traceback.format_exc())
|
|
||||||
except Exception:
|
|
||||||
debug2("An error occurred, ignoring it.")
|
|
@ -1,349 +0,0 @@
|
|||||||
import sys
|
|
||||||
import socket
|
|
||||||
import errno
|
|
||||||
import os
|
|
||||||
import threading
|
|
||||||
import subprocess
|
|
||||||
import traceback
|
|
||||||
import re
|
|
||||||
|
|
||||||
if sys.platform != "win32":
|
|
||||||
import fcntl
|
|
||||||
|
|
||||||
logprefix = ''
|
|
||||||
verbose = 0
|
|
||||||
|
|
||||||
|
|
||||||
def b(s):
|
|
||||||
return s.encode("ASCII")
|
|
||||||
|
|
||||||
|
|
||||||
def get_verbose_level():
|
|
||||||
return verbose
|
|
||||||
|
|
||||||
|
|
||||||
def log(s):
|
|
||||||
global logprefix
|
|
||||||
try:
|
|
||||||
sys.stdout.flush()
|
|
||||||
except (IOError, ValueError): # ValueError ~ I/O operation on closed file
|
|
||||||
pass
|
|
||||||
try:
|
|
||||||
# Put newline at end of string if line doesn't have one.
|
|
||||||
if not s.endswith("\n"):
|
|
||||||
s = s+"\n"
|
|
||||||
|
|
||||||
prefix = logprefix
|
|
||||||
s = s.rstrip("\n")
|
|
||||||
for line in s.split("\n"):
|
|
||||||
sys.stderr.write(prefix + line + "\n")
|
|
||||||
prefix = " "
|
|
||||||
sys.stderr.flush()
|
|
||||||
except (IOError, ValueError): # ValueError ~ I/O operation on closed file
|
|
||||||
# this could happen if stderr gets forcibly disconnected, eg. because
|
|
||||||
# our tty closes. That sucks, but it's no reason to abort the program.
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def debug1(s):
|
|
||||||
if verbose >= 1:
|
|
||||||
log(s)
|
|
||||||
|
|
||||||
|
|
||||||
def debug2(s):
|
|
||||||
if verbose >= 2:
|
|
||||||
log(s)
|
|
||||||
|
|
||||||
|
|
||||||
def debug3(s):
|
|
||||||
if verbose >= 3:
|
|
||||||
log(s)
|
|
||||||
|
|
||||||
|
|
||||||
class Fatal(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def resolvconf_nameservers(systemd_resolved):
|
|
||||||
"""Retrieves a list of tuples (address type, address as a string) of
|
|
||||||
the DNS servers used by the system to resolve hostnames.
|
|
||||||
|
|
||||||
If systemd_resolved is False, DNS servers are retrieved only from
|
|
||||||
/etc/resolv.conf. This behavior makes sense for the sshuttle
|
|
||||||
server.
|
|
||||||
|
|
||||||
If systemd_resolved is True, we retrieve information from both
|
|
||||||
/etc/resolv.conf and /run/systemd/resolve/resolv.conf (if it
|
|
||||||
exists). This behavior makes sense for the sshuttle client.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Historically, we just needed to read /etc/resolv.conf.
|
|
||||||
#
|
|
||||||
# If systemd-resolved is active, /etc/resolv.conf will point to
|
|
||||||
# localhost and the actual DNS servers that systemd-resolved uses
|
|
||||||
# are stored in /run/systemd/resolve/resolv.conf. For programs
|
|
||||||
# that use the localhost DNS server, having sshuttle read
|
|
||||||
# /etc/resolv.conf is sufficient. However, resolved provides other
|
|
||||||
# ways of resolving hostnames (such as via dbus) that may not
|
|
||||||
# route requests through localhost. So, we retrieve a list of DNS
|
|
||||||
# servers that resolved uses so we can intercept those as well.
|
|
||||||
#
|
|
||||||
# For more information about systemd-resolved, see:
|
|
||||||
# https://www.freedesktop.org/software/systemd/man/systemd-resolved.service.html
|
|
||||||
#
|
|
||||||
# On machines without systemd-resolved, we expect opening the
|
|
||||||
# second file will fail.
|
|
||||||
files = ['/etc/resolv.conf']
|
|
||||||
if systemd_resolved:
|
|
||||||
files += ['/run/systemd/resolve/resolv.conf']
|
|
||||||
|
|
||||||
nsservers = []
|
|
||||||
for f in files:
|
|
||||||
this_file_nsservers = []
|
|
||||||
try:
|
|
||||||
for line in open(f):
|
|
||||||
words = line.lower().split()
|
|
||||||
if len(words) >= 2 and words[0] == 'nameserver':
|
|
||||||
this_file_nsservers.append(family_ip_tuple(words[1]))
|
|
||||||
debug2("Found DNS servers in %s: %s" %
|
|
||||||
(f, [n[1] for n in this_file_nsservers]))
|
|
||||||
nsservers += this_file_nsservers
|
|
||||||
except OSError as e:
|
|
||||||
debug3("Failed to read %s when looking for DNS servers: %s" %
|
|
||||||
(f, e.strerror))
|
|
||||||
|
|
||||||
return nsservers
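# Illustrative example: with a resolv.conf containing
#     nameserver 8.8.8.8
#     nameserver 2001:4860:4860::8888
# this returns [(socket.AF_INET, '8.8.8.8'), (socket.AF_INET6, '2001:4860:4860::8888')].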
|
|
||||||
|
|
||||||
|
|
||||||
def windows_nameservers():
|
|
||||||
out = subprocess.check_output(["powershell", "-NonInteractive", "-NoProfile", "-Command", "Get-DnsClientServerAddress"],
|
|
||||||
encoding="utf-8")
|
|
||||||
servers = set()
|
|
||||||
for line in out.splitlines():
|
|
||||||
if line.startswith("Loopback "):
|
|
||||||
continue
|
|
||||||
m = re.search(r'{.+}', line)
|
|
||||||
if not m:
|
|
||||||
continue
|
|
||||||
for s in m.group().strip('{}').split(','):
|
|
||||||
s = s.strip()
|
|
||||||
if s.startswith('fec0:0:0:ffff'):
|
|
||||||
continue
|
|
||||||
servers.add(s)
|
|
||||||
debug2("Found DNS servers: %s" % servers)
|
|
||||||
return [(socket.AF_INET6 if ':' in s else socket.AF_INET, s) for s in servers]
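# Illustrative example of the PowerShell output parsed above; a typical line looks like
#     Ethernet  12  IPv4  {10.0.0.2, 10.0.0.3}
# which yields [(socket.AF_INET, '10.0.0.2'), (socket.AF_INET, '10.0.0.3')]
# (the site-local fec0:0:0:ffff:: fallback resolvers are skipped).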
|
|
||||||
|
|
||||||
|
|
||||||
def get_random_nameserver():
|
|
||||||
"""Return a random nameserver selected from servers produced by
|
|
||||||
resolvconf_nameservers()/windows_nameservers()
|
|
||||||
"""
|
|
||||||
if sys.platform == "win32":
|
|
||||||
if globals().get('_nameservers') is None:
|
|
||||||
ns_list = windows_nameservers()
|
|
||||||
globals()['_nameservers'] = ns_list
|
|
||||||
else:
|
|
||||||
ns_list = globals()['_nameservers']
|
|
||||||
else:
|
|
||||||
ns_list = resolvconf_nameservers(systemd_resolved=False)
|
|
||||||
if ns_list:
|
|
||||||
if len(ns_list) > 1:
|
|
||||||
# don't import this unless we really need it
|
|
||||||
import random
|
|
||||||
random.shuffle(ns_list)
|
|
||||||
return ns_list[0]
|
|
||||||
else:
|
|
||||||
return (socket.AF_INET, '127.0.0.1')
|
|
||||||
|
|
||||||
|
|
||||||
def islocal(ip, family):
|
|
||||||
sock = socket.socket(family)
|
|
||||||
try:
|
|
||||||
try:
|
|
||||||
sock.bind((ip, 0))
|
|
||||||
except socket.error:
|
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
if e.args[0] == errno.EADDRNOTAVAIL:
|
|
||||||
return False # not a local IP
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
finally:
|
|
||||||
sock.close()
|
|
||||||
return True # it's a local IP, or there would have been an error
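# Illustrative usage: islocal('127.0.0.1', socket.AF_INET) is normally True,
# while islocal('192.0.2.1', socket.AF_INET) is False unless that address is
# actually assigned to a local interface (bind() fails with EADDRNOTAVAIL).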
|
|
||||||
|
|
||||||
|
|
||||||
def family_ip_tuple(ip):
|
|
||||||
if ':' in ip:
|
|
||||||
return (socket.AF_INET6, ip)
|
|
||||||
else:
|
|
||||||
return (socket.AF_INET, ip)
|
|
||||||
|
|
||||||
|
|
||||||
def family_to_string(family):
|
|
||||||
if family == socket.AF_INET6:
|
|
||||||
return "AF_INET6"
|
|
||||||
elif family == socket.AF_INET:
|
|
||||||
return "AF_INET"
|
|
||||||
else:
|
|
||||||
return str(family)
|
|
||||||
|
|
||||||
|
|
||||||
def get_env():
|
|
||||||
"""An environment for sshuttle subprocesses. See get_path()."""
|
|
||||||
env = {
|
|
||||||
'PATH': get_path(),
|
|
||||||
'LC_ALL': "C",
|
|
||||||
}
|
|
||||||
return env
|
|
||||||
|
|
||||||
|
|
||||||
def get_path():
|
|
||||||
"""Returns a string of paths separated by os.pathsep.
|
|
||||||
|
|
||||||
Users might not have all of the programs sshuttle needs in their
|
|
||||||
PATH variable (i.e., some programs might be in /sbin). Use PATH
|
|
||||||
and a hardcoded set of paths to search through. This function is
|
|
||||||
used by our which() and get_env() functions. If which() and the
|
|
||||||
subprocess environments differ, programs that which() finds might
|
|
||||||
not be found at run time (or vice versa).
|
|
||||||
"""
|
|
||||||
path = []
|
|
||||||
if "PATH" in os.environ:
|
|
||||||
path += os.environ["PATH"].split(os.pathsep)
|
|
||||||
# Python default paths.
|
|
||||||
path += os.defpath.split(os.pathsep)
|
|
||||||
# /sbin, etc are not in os.defpath and may not be in PATH either.
|
|
||||||
# /bin/ and /usr/bin below are probably redundant.
|
|
||||||
path += ['/bin', '/usr/bin', '/sbin', '/usr/sbin']
|
|
||||||
|
|
||||||
# Remove duplicates. Not strictly necessary.
|
|
||||||
path_dedup = []
|
|
||||||
for i in path:
|
|
||||||
if i not in path_dedup:
|
|
||||||
path_dedup.append(i)
|
|
||||||
|
|
||||||
return os.pathsep.join(path_dedup)
|
|
||||||
|
|
||||||
|
|
||||||
if sys.version_info >= (3, 3):
|
|
||||||
from shutil import which as _which
|
|
||||||
else:
|
|
||||||
# Although sshuttle does not officially support older versions of
|
|
||||||
# Python, some still run the sshuttle server on remote machines
|
|
||||||
# with old versions of python.
|
|
||||||
def _which(file, mode=os.F_OK | os.X_OK, path=None):
|
|
||||||
if path is not None:
|
|
||||||
search_paths = path.split(os.pathsep)
|
|
||||||
elif "PATH" in os.environ:
|
|
||||||
search_paths = os.environ["PATH"].split(os.pathsep)
|
|
||||||
else:
|
|
||||||
search_paths = os.defpath.split(os.pathsep)
|
|
||||||
|
|
||||||
for p in search_paths:
|
|
||||||
filepath = os.path.join(p, file)
|
|
||||||
if os.path.exists(filepath) and os.access(filepath, mode):
|
|
||||||
return filepath
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def which(file, mode=os.F_OK | os.X_OK):
|
|
||||||
"""A wrapper around shutil.which() that searches a predictable set of
|
|
||||||
paths and is more verbose about what is happening. See get_path()
|
|
||||||
for more information.
|
|
||||||
"""
|
|
||||||
path = get_path()
|
|
||||||
rv = _which(file, mode, path)
|
|
||||||
if rv:
|
|
||||||
debug2("which() found '%s' at %s" % (file, rv))
|
|
||||||
else:
|
|
||||||
debug2("which() could not find '%s' in %s" % (file, path))
|
|
||||||
return rv
|
|
||||||
|
|
||||||
|
|
||||||
def is_admin_user():
|
|
||||||
if sys.platform == 'win32':
|
|
||||||
# https://stackoverflow.com/questions/130763/request-uac-elevation-from-within-a-python-script/41930586#41930586
|
|
||||||
import ctypes
|
|
||||||
try:
|
|
||||||
return ctypes.windll.shell32.IsUserAnAdmin()
|
|
||||||
except Exception:
|
|
||||||
return False
|
|
||||||
|
|
||||||
# TODO(nom3ad): for sys.platform == 'linux', check capabilities for non-root users. (CAP_NET_ADMIN might be enough?)
|
|
||||||
return os.getuid() == 0
|
|
||||||
|
|
||||||
|
|
||||||
def set_non_blocking_io(fd):
|
|
||||||
if sys.platform != "win32":
|
|
||||||
try:
|
|
||||||
os.set_blocking(fd, False)
|
|
||||||
except AttributeError:
|
|
||||||
# python < 3.5
|
|
||||||
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
|
|
||||||
flags |= os.O_NONBLOCK
|
|
||||||
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
|
|
||||||
else:
|
|
||||||
_sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
|
|
||||||
_sock.setblocking(False)
|
|
||||||
|
|
||||||
|
|
||||||
class RWPair:
|
|
||||||
def __init__(self, r, w):
|
|
||||||
self.r = r
|
|
||||||
self.w = w
|
|
||||||
self.read = r.read
|
|
||||||
self.readline = r.readline
|
|
||||||
self.write = w.write
|
|
||||||
self.flush = w.flush
|
|
||||||
|
|
||||||
def close(self):
|
|
||||||
for f in self.r, self.w:
|
|
||||||
try:
|
|
||||||
f.close()
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class SocketRWShim:
|
|
||||||
__slots__ = ('_r', '_w', '_on_end', '_s1', '_s2', '_t1', '_t2')
|
|
||||||
|
|
||||||
def __init__(self, r, w, on_end=None):
|
|
||||||
self._r = r
|
|
||||||
self._w = w
|
|
||||||
self._on_end = on_end
|
|
||||||
|
|
||||||
self._s1, self._s2 = socket.socketpair()
|
|
||||||
debug3("[SocketShim] r=%r w=%r | s1=%r s2=%r" % (self._r, self._w, self._s1, self._s2))
|
|
||||||
|
|
||||||
def stream_reader_to_sock():
|
|
||||||
try:
|
|
||||||
for data in iter(lambda: self._r.read(16384), b''):
|
|
||||||
self._s1.sendall(data)
|
|
||||||
# debug3("[SocketRWShim] <<<<< r.read() %d %r..." % (len(data), data[:min(32, len(data))]))
|
|
||||||
except Exception:
|
|
||||||
traceback.print_exc(file=sys.stderr)
|
|
||||||
finally:
|
|
||||||
debug2("[SocketRWShim] Thread 'stream_reader_to_sock' exiting")
|
|
||||||
self._s1.close()
|
|
||||||
self._on_end and self._on_end()
|
|
||||||
|
|
||||||
def stream_sock_to_writer():
|
|
||||||
try:
|
|
||||||
for data in iter(lambda: self._s1.recv(16384), b''):
|
|
||||||
while data:
|
|
||||||
n = self._w.write(data)
|
|
||||||
data = data[n:]
|
|
||||||
# debug3("[SocketRWShim] <<<<< w.write() %d %r..." % (len(data), data[:min(32, len(data))]))
|
|
||||||
except Exception:
|
|
||||||
traceback.print_exc(file=sys.stderr)
|
|
||||||
finally:
|
|
||||||
debug2("[SocketRWShim] Thread 'stream_sock_to_writer' exiting")
|
|
||||||
self._s1.close()
|
|
||||||
self._on_end and self._on_end()
|
|
||||||
|
|
||||||
self._t1 = threading.Thread(target=stream_reader_to_sock, name='stream_reader_to_sock', daemon=True)
self._t1.start()
self._t2 = threading.Thread(target=stream_sock_to_writer, name='stream_sock_to_writer', daemon=True)
self._t2.start()
|
|
||||||
|
|
||||||
def makefiles(self):
|
|
||||||
return self._s2.makefile("rb", buffering=0), self._s2.makefile("wb", buffering=0)
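# Illustrative usage (names are hypothetical): given a subprocess's pipes,
#     shim = SocketRWShim(proc.stdout, proc.stdin, on_end=cleanup)
#     rfile, wfile = shim.makefiles()
# rfile/wfile are unbuffered file objects backed by the socketpair, while the
# two daemon threads shuttle bytes to and from the original reader/writer.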
|
|
@ -1,250 +0,0 @@
|
|||||||
import time
|
|
||||||
import socket
|
|
||||||
import re
|
|
||||||
import select
|
|
||||||
import errno
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import platform
|
|
||||||
|
|
||||||
import subprocess as ssubprocess
|
|
||||||
import sshuttle.helpers as helpers
|
|
||||||
from sshuttle.helpers import log, debug1, debug2, debug3, get_env
|
|
||||||
|
|
||||||
POLL_TIME = 60 * 15
|
|
||||||
NETSTAT_POLL_TIME = 30
|
|
||||||
CACHEFILE = os.path.expanduser('~/.sshuttle.hosts')
|
|
||||||
|
|
||||||
# Have we already failed to write CACHEFILE?
|
|
||||||
CACHE_WRITE_FAILED = False
|
|
||||||
|
|
||||||
SHOULD_WRITE_CACHE = False
|
|
||||||
|
|
||||||
hostnames = {}
|
|
||||||
queue = {}
|
|
||||||
try:
|
|
||||||
null = open(os.devnull, 'wb')
|
|
||||||
except IOError:
|
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
log('warning: %s' % e)
|
|
||||||
null = os.popen("sh -c 'while read x; do :; done'", 'wb', 4096)
|
|
||||||
|
|
||||||
|
|
||||||
def _is_ip(s):
|
|
||||||
return re.match(r'\d+\.\d+\.\d+\.\d+$', s)
|
|
||||||
|
|
||||||
|
|
||||||
def write_host_cache():
|
|
||||||
"""If possible, write our hosts file to disk so future connections
|
|
||||||
can reuse the hosts that we already found."""
|
|
||||||
tmpname = '%s.%d.tmp' % (CACHEFILE, os.getpid())
|
|
||||||
global CACHE_WRITE_FAILED
|
|
||||||
try:
|
|
||||||
f = open(tmpname, 'wb')
|
|
||||||
for name, ip in sorted(hostnames.items()):
|
|
||||||
f.write(('%s,%s\n' % (name, ip)).encode("ASCII"))
|
|
||||||
f.close()
|
|
||||||
os.chmod(tmpname, 384) # 600 in octal, 'rw-------'
|
|
||||||
os.rename(tmpname, CACHEFILE)
|
|
||||||
CACHE_WRITE_FAILED = False
|
|
||||||
except (OSError, IOError):
|
|
||||||
# Write message if we haven't yet or if we get a failure after
|
|
||||||
# a previous success.
|
|
||||||
if not CACHE_WRITE_FAILED:
|
|
||||||
log("Failed to write host cache to temporary file "
|
|
||||||
"%s and rename it to %s" % (tmpname, CACHEFILE))
|
|
||||||
CACHE_WRITE_FAILED = True
|
|
||||||
|
|
||||||
try:
|
|
||||||
os.unlink(tmpname)
|
|
||||||
except Exception:
|
|
||||||
pass
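# Illustrative cache file contents: one "name,ip" line per host, e.g.
#     fileserver,192.168.1.10
#     fileserver.example.com,192.168.1.10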
|
|
||||||
|
|
||||||
|
|
||||||
def read_host_cache():
|
|
||||||
"""If possible, read the cache file from disk to populate hosts that
|
|
||||||
were found in a previous sshuttle run."""
|
|
||||||
try:
|
|
||||||
f = open(CACHEFILE)
|
|
||||||
except (OSError, IOError):
|
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
if e.errno == errno.ENOENT:
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
log("Failed to read existing host cache file %s on remote host"
|
|
||||||
% CACHEFILE)
|
|
||||||
return
|
|
||||||
for line in f:
|
|
||||||
words = line.strip().split(',')
|
|
||||||
if len(words) == 2:
|
|
||||||
(name, ip) = words
|
|
||||||
name = re.sub(r'[^-\w\.]', '-', name).strip()
|
|
||||||
# Remove characters that shouldn't be in IP
|
|
||||||
ip = re.sub(r'[^0-9.]', '', ip).strip()
|
|
||||||
if name and ip:
|
|
||||||
found_host(name, ip)
|
|
||||||
f.close()
|
|
||||||
global SHOULD_WRITE_CACHE
|
|
||||||
if SHOULD_WRITE_CACHE:
|
|
||||||
write_host_cache()
|
|
||||||
SHOULD_WRITE_CACHE = False
|
|
||||||
|
|
||||||
|
|
||||||
def found_host(name, ip):
|
|
||||||
"""The provided name maps to the given IP. Add the host to the
|
|
||||||
hostnames list, send the host to the sshuttle client via
|
|
||||||
stdout, and write the host to the cache file.
|
|
||||||
"""
|
|
||||||
hostname = re.sub(r'\..*', '', name)
|
|
||||||
hostname = re.sub(r'[^-\w\.]', '_', hostname)
|
|
||||||
if (ip.startswith('127.') or ip.startswith('255.') or
|
|
||||||
hostname == 'localhost'):
|
|
||||||
return
|
|
||||||
|
|
||||||
if hostname != name:
|
|
||||||
found_host(hostname, ip)
|
|
||||||
|
|
||||||
global SHOULD_WRITE_CACHE
|
|
||||||
oldip = hostnames.get(name)
|
|
||||||
if oldip != ip:
|
|
||||||
hostnames[name] = ip
|
|
||||||
debug1('Found: %s: %s' % (name, ip))
|
|
||||||
sys.stdout.write('%s,%s\n' % (name, ip))
|
|
||||||
SHOULD_WRITE_CACHE = True
|
|
||||||
|
|
||||||
|
|
||||||
def _check_etc_hosts():
|
|
||||||
"""If possible, read /etc/hosts to find hosts."""
|
|
||||||
filename = '/etc/hosts'
|
|
||||||
debug2(' > Reading %s on remote host' % filename)
|
|
||||||
try:
|
|
||||||
for line in open(filename):
|
|
||||||
line = re.sub(r'#.*', '', line) # remove comments
|
|
||||||
words = line.strip().split()
|
|
||||||
if not words:
|
|
||||||
continue
|
|
||||||
ip = words[0]
|
|
||||||
if _is_ip(ip):
|
|
||||||
names = words[1:]
|
|
||||||
debug3('< %s %r' % (ip, names))
|
|
||||||
for n in names:
|
|
||||||
check_host(n)
|
|
||||||
found_host(n, ip)
|
|
||||||
except (OSError, IOError):
|
|
||||||
debug1("Failed to read %s on remote host" % filename)
|
|
||||||
|
|
||||||
|
|
||||||
def _check_revdns(ip):
|
|
||||||
"""Use reverse DNS to try to get hostnames from an IP addresses."""
|
|
||||||
debug2(' > rev: %s' % ip)
|
|
||||||
try:
|
|
||||||
r = socket.gethostbyaddr(ip)
|
|
||||||
debug3('< %s' % r[0])
|
|
||||||
check_host(r[0])
|
|
||||||
found_host(r[0], ip)
|
|
||||||
except (OSError, socket.error, UnicodeError):
|
|
||||||
# This case is expected to occur regularly.
|
|
||||||
# debug3('< %s gethostbyaddr failed on remote host' % ip)
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def _check_dns(hostname):
|
|
||||||
debug2(' > dns: %s' % hostname)
|
|
||||||
try:
|
|
||||||
ip = socket.gethostbyname(hostname)
|
|
||||||
debug3('< %s' % ip)
|
|
||||||
check_host(ip)
|
|
||||||
found_host(hostname, ip)
|
|
||||||
except (socket.gaierror, UnicodeError):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def _check_netstat():
|
|
||||||
debug2(' > netstat')
|
|
||||||
argv = ['netstat', '-n']
|
|
||||||
try:
|
|
||||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null,
|
|
||||||
env=get_env())
|
|
||||||
content = p.stdout.read().decode("ASCII")
|
|
||||||
p.wait()
|
|
||||||
except OSError:
|
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
log('%r failed: %r' % (argv, e))
|
|
||||||
return
|
|
||||||
|
|
||||||
# The same IPs may appear multiple times. Consolidate them so the
|
|
||||||
# debug message doesn't print the same IP repeatedly.
|
|
||||||
ip_list = []
|
|
||||||
for ip in re.findall(r'\d+\.\d+\.\d+\.\d+', content):
|
|
||||||
if ip not in ip_list:
|
|
||||||
ip_list.append(ip)
|
|
||||||
|
|
||||||
for ip in sorted(ip_list):
|
|
||||||
debug3('< %s' % ip)
|
|
||||||
check_host(ip)
|
|
||||||
|
|
||||||
|
|
||||||
def check_host(hostname):
|
|
||||||
if _is_ip(hostname):
|
|
||||||
_enqueue(_check_revdns, hostname)
|
|
||||||
else:
|
|
||||||
_enqueue(_check_dns, hostname)
|
|
||||||
|
|
||||||
|
|
||||||
def _enqueue(op, *args):
|
|
||||||
t = (op, args)
|
|
||||||
if queue.get(t) is None:
|
|
||||||
queue[t] = 0
|
|
||||||
|
|
||||||
|
|
||||||
def _stdin_still_ok(timeout):
|
|
||||||
r, _, _ = select.select([sys.stdin.fileno()], [], [], timeout)
|
|
||||||
if r:
|
|
||||||
b = os.read(sys.stdin.fileno(), 4096)
|
|
||||||
if not b:
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def hw_main(seed_hosts, auto_hosts):
|
|
||||||
helpers.logprefix = 'HH: '
|
|
||||||
|
|
||||||
debug1('Starting hostwatch with Python version %s'
|
|
||||||
% platform.python_version())
|
|
||||||
|
|
||||||
for h in seed_hosts:
|
|
||||||
check_host(h)
|
|
||||||
|
|
||||||
if auto_hosts:
|
|
||||||
read_host_cache()
|
|
||||||
_enqueue(_check_etc_hosts)
|
|
||||||
_enqueue(_check_netstat)
|
|
||||||
check_host('localhost')
|
|
||||||
check_host(socket.gethostname())
|
|
||||||
|
|
||||||
while 1:
|
|
||||||
now = time.time()
|
|
||||||
# For each item in the queue
|
|
||||||
for t, last_polled in list(queue.items()):
|
|
||||||
(op, args) = t
|
|
||||||
if not _stdin_still_ok(0):
|
|
||||||
break
|
|
||||||
|
|
||||||
# Determine if we need to run.
|
|
||||||
maxtime = POLL_TIME
|
|
||||||
# netstat runs more often than other jobs
|
|
||||||
if op == _check_netstat:
|
|
||||||
maxtime = NETSTAT_POLL_TIME
|
|
||||||
|
|
||||||
# Check if this job needs to run.
|
|
||||||
if now - last_polled > maxtime:
|
|
||||||
queue[t] = time.time()
|
|
||||||
op(*args)
|
|
||||||
try:
|
|
||||||
sys.stdout.flush()
|
|
||||||
except IOError:
|
|
||||||
break
|
|
||||||
|
|
||||||
# FIXME: use a smarter timeout based on oldest last_polled
|
|
||||||
if not _stdin_still_ok(1): # sleeps for up to 1 second
|
|
||||||
break
|
|
@ -1,51 +0,0 @@
|
|||||||
import socket
|
|
||||||
import subprocess as ssubprocess
|
|
||||||
from sshuttle.helpers import log, debug1, Fatal, family_to_string, get_env
|
|
||||||
|
|
||||||
|
|
||||||
def nonfatal(func, *args):
|
|
||||||
try:
|
|
||||||
func(*args)
|
|
||||||
except Fatal as e:
|
|
||||||
log('error: %s' % e)
|
|
||||||
|
|
||||||
|
|
||||||
def ipt_chain_exists(family, table, name):
|
|
||||||
if family == socket.AF_INET6:
|
|
||||||
cmd = 'ip6tables'
|
|
||||||
elif family == socket.AF_INET:
|
|
||||||
cmd = 'iptables'
|
|
||||||
else:
|
|
||||||
raise Exception('Unsupported family "%s"' % family_to_string(family))
|
|
||||||
argv = [cmd, '-w', '-t', table, '-nL']
|
|
||||||
try:
|
|
||||||
output = ssubprocess.check_output(argv, env=get_env())
|
|
||||||
for line in output.decode('ASCII', errors='replace').split('\n'):
|
|
||||||
if line.startswith('Chain %s ' % name):
|
|
||||||
return True
|
|
||||||
except ssubprocess.CalledProcessError as e:
|
|
||||||
raise Fatal('%r returned %d' % (argv, e.returncode))
|
|
||||||
|
|
||||||
|
|
||||||
def ipt(family, table, *args):
|
|
||||||
if family == socket.AF_INET6:
|
|
||||||
argv = ['ip6tables', '-w', '-t', table] + list(args)
|
|
||||||
elif family == socket.AF_INET:
|
|
||||||
argv = ['iptables', '-w', '-t', table] + list(args)
|
|
||||||
else:
|
|
||||||
raise Exception('Unsupported family "%s"' % family_to_string(family))
|
|
||||||
debug1('%s' % ' '.join(argv))
|
|
||||||
rv = ssubprocess.call(argv, env=get_env())
|
|
||||||
if rv:
|
|
||||||
raise Fatal('%r returned %d' % (argv, rv))
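# Illustrative example: ipt(socket.AF_INET, 'nat', '-N', 'sshuttle-12300')
# runs "iptables -w -t nat -N sshuttle-12300" and raises Fatal on a
# non-zero exit status.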
|
|
||||||
|
|
||||||
|
|
||||||
def nft(family, table, action, *args):
|
|
||||||
if family in (socket.AF_INET, socket.AF_INET6):
|
|
||||||
argv = ['nft', action, 'inet', table] + list(args)
|
|
||||||
else:
|
|
||||||
raise Exception('Unsupported family "%s"' % family_to_string(family))
|
|
||||||
debug1('%s' % ' '.join(argv))
|
|
||||||
rv = ssubprocess.call(argv, env=get_env())
|
|
||||||
if rv:
|
|
||||||
raise Fatal('%r returned %d' % (argv, rv))
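# Illustrative example: nft(socket.AF_INET, 'sshuttle-ipv4-12300', 'add table', '')
# logs and runs the equivalent of "nft add table inet sshuttle-ipv4-12300";
# a non-zero exit status raises Fatal.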
|
|
@ -1,126 +0,0 @@
|
|||||||
import importlib
|
|
||||||
import socket
|
|
||||||
import struct
|
|
||||||
import sys
|
|
||||||
import errno
|
|
||||||
import ipaddress
|
|
||||||
from sshuttle.helpers import Fatal, debug3
|
|
||||||
|
|
||||||
|
|
||||||
def original_dst(sock):
|
|
||||||
try:
|
|
||||||
family = sock.family
|
|
||||||
SO_ORIGINAL_DST = 80
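# SO_ORIGINAL_DST is the netfilter getsockopt that returns the destination
# address a connection had before it was REDIRECTed to us; the struct
# unpacking below pulls the port and raw IP out of the returned sockaddr.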
|
|
||||||
|
|
||||||
if family == socket.AF_INET:
|
|
||||||
SOCKADDR_MIN = 16
|
|
||||||
sockaddr_in = sock.getsockopt(socket.SOL_IP,
|
|
||||||
SO_ORIGINAL_DST, SOCKADDR_MIN)
|
|
||||||
port, raw_ip = struct.unpack_from('!2xH4s', sockaddr_in[:8])
|
|
||||||
ip = str(ipaddress.IPv4Address(raw_ip))
|
|
||||||
elif family == socket.AF_INET6:
|
|
||||||
sockaddr_in = sock.getsockopt(41, SO_ORIGINAL_DST, 64)
|
|
||||||
port, raw_ip = struct.unpack_from("!2xH4x16s", sockaddr_in)
|
|
||||||
ip = str(ipaddress.IPv6Address(raw_ip))
|
|
||||||
else:
|
|
||||||
raise Fatal("fw: Unknown family type.")
|
|
||||||
except socket.error as e:
|
|
||||||
if e.args[0] == errno.ENOPROTOOPT:
|
|
||||||
return sock.getsockname()
|
|
||||||
raise
|
|
||||||
return (ip, port)
|
|
||||||
|
|
||||||
|
|
||||||
class Features(object):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class BaseMethod(object):
|
|
||||||
def __init__(self, name):
|
|
||||||
self.firewall = None
|
|
||||||
self.name = name
|
|
||||||
|
|
||||||
def set_firewall(self, firewall):
|
|
||||||
self.firewall = firewall
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def get_supported_features():
|
|
||||||
result = Features()
|
|
||||||
result.loopback_proxy_port = True
|
|
||||||
result.ipv4 = True
|
|
||||||
result.ipv6 = False
|
|
||||||
result.udp = False
|
|
||||||
result.dns = True
|
|
||||||
result.user = False
|
|
||||||
result.group = False
|
|
||||||
return result
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def is_supported():
|
|
||||||
"""Returns true if it appears that this method will work on this
|
|
||||||
machine."""
|
|
||||||
return False
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def get_tcp_dstip(sock):
|
|
||||||
return original_dst(sock)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def recv_udp(udp_listener, bufsize):
|
|
||||||
debug3('Accept UDP using recvfrom.')
|
|
||||||
data, srcip = udp_listener.recvfrom(bufsize)
|
|
||||||
return (srcip, None, data)
|
|
||||||
|
|
||||||
def send_udp(self, sock, srcip, dstip, data):
|
|
||||||
if srcip is not None:
|
|
||||||
raise Fatal("Method %s send_udp does not support setting srcip to %r"
|
|
||||||
% (self.name, srcip))
|
|
||||||
sock.sendto(data, dstip)
|
|
||||||
|
|
||||||
def setup_tcp_listener(self, tcp_listener):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def setup_udp_listener(self, udp_listener):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def assert_features(self, features):
|
|
||||||
avail = self.get_supported_features()
|
|
||||||
for key in ["udp", "dns", "ipv6", "ipv4", "user"]:
|
|
||||||
if getattr(features, key) and not getattr(avail, key):
|
|
||||||
raise Fatal(
|
|
||||||
"Feature %s not supported with method %s." %
|
|
||||||
(key, self.name))
|
|
||||||
|
|
||||||
def setup_firewall(self, port, dnsport, nslist, family, subnets, udp,
|
|
||||||
user, group, tmark):
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
def restore_firewall(self, port, family, udp, user, group):
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
def wait_for_firewall_ready(self, sshuttle_pid):
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def firewall_command(line):
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def get_method(method_name):
|
|
||||||
module = importlib.import_module("sshuttle.methods.%s" % method_name)
|
|
||||||
return module.Method(method_name)
|
|
||||||
|
|
||||||
|
|
||||||
def get_auto_method():
|
|
||||||
debug3("Selecting a method automatically...")
|
|
||||||
# Try these methods, in order:
|
|
||||||
methods_to_try = ["nat", "nft", "pf", "ipfw"] if sys.platform != "win32" else ["windivert"]
|
|
||||||
for m in methods_to_try:
|
|
||||||
method = get_method(m)
|
|
||||||
if method.is_supported():
|
|
||||||
debug3("Method '%s' was automatically selected." % m)
|
|
||||||
return method
|
|
||||||
|
|
||||||
raise Fatal("Unable to automatically find a supported method. Check that "
|
|
||||||
"the appropriate programs are in your PATH. We tried "
|
|
||||||
"methods: %s" % str(methods_to_try))
|
|
@ -1,226 +0,0 @@
|
|||||||
import os
|
|
||||||
import subprocess as ssubprocess
|
|
||||||
from sshuttle.methods import BaseMethod
|
|
||||||
from sshuttle.helpers import log, debug1, debug2, debug3, \
|
|
||||||
Fatal, family_to_string, get_env, which
|
|
||||||
|
|
||||||
import socket
|
|
||||||
|
|
||||||
IP_BINDANY = 24
|
|
||||||
IP_RECVDSTADDR = 7
|
|
||||||
SOL_IPV6 = 41
|
|
||||||
IPV6_RECVDSTADDR = 74
|
|
||||||
|
|
||||||
|
|
||||||
def recv_udp(listener, bufsize):
|
|
||||||
debug3('Accept UDP python using recvmsg.')
|
|
||||||
data, ancdata, _, srcip = listener.recvmsg(4096,
|
|
||||||
socket.CMSG_SPACE(4))
|
|
||||||
dstip = None
|
|
||||||
for cmsg_level, cmsg_type, cmsg_data in ancdata:
|
|
||||||
if cmsg_level == socket.SOL_IP and cmsg_type == IP_RECVDSTADDR:
|
|
||||||
port = 53
|
|
||||||
ip = socket.inet_ntop(socket.AF_INET, cmsg_data[0:4])
|
|
||||||
dstip = (ip, port)
|
|
||||||
break
|
|
||||||
return (srcip, dstip, data)
|
|
||||||
|
|
||||||
|
|
||||||
def ipfw_rule_exists(n):
|
|
||||||
argv = ['ipfw', 'list', '%d' % n]
|
|
||||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, env=get_env())
|
|
||||||
|
|
||||||
found = False
|
|
||||||
for line in p.stdout:
|
|
||||||
if line.startswith(b'%05d ' % n):
|
|
||||||
if b'check-state :sshuttle' not in line:
|
|
||||||
log('non-sshuttle ipfw rule: %r' % line.strip())
|
|
||||||
raise Fatal('non-sshuttle ipfw rule #%d already exists!' % n)
|
|
||||||
found = True
|
|
||||||
break
|
|
||||||
rv = p.wait()
|
|
||||||
if rv:
|
|
||||||
raise Fatal('%r returned %d' % (argv, rv))
|
|
||||||
return found
|
|
||||||
|
|
||||||
|
|
||||||
_oldctls = {}
|
|
||||||
|
|
||||||
|
|
||||||
def _fill_oldctls(prefix):
|
|
||||||
argv = ['sysctl', prefix]
|
|
||||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, env=get_env())
|
|
||||||
for line in p.stdout:
|
|
||||||
line = line.decode()
|
|
||||||
assert line[-1] == '\n'
|
|
||||||
(k, v) = line[:-1].split(': ', 1)
|
|
||||||
_oldctls[k] = v.strip()
|
|
||||||
rv = p.wait()
|
|
||||||
if rv:
|
|
||||||
raise Fatal('%r returned %d' % (argv, rv))
|
|
||||||
if not line:
|
|
||||||
raise Fatal('%r returned no data' % (argv,))
|
|
||||||
|
|
||||||
|
|
||||||
def _sysctl_set(name, val):
|
|
||||||
argv = ['sysctl', '-w', '%s=%s' % (name, val)]
|
|
||||||
debug1('>> %s' % ' '.join(argv))
|
|
||||||
return ssubprocess.call(argv, stdout=open(os.devnull, 'w'), env=get_env())
|
|
||||||
# No env: No output. (Or error that won't be parsed.)
|
|
||||||
|
|
||||||
|
|
||||||
_changedctls = []
|
|
||||||
|
|
||||||
|
|
||||||
def sysctl_set(name, val, permanent=False):
|
|
||||||
PREFIX = 'net.inet.ip'
|
|
||||||
assert name.startswith(PREFIX + '.')
|
|
||||||
val = str(val)
|
|
||||||
if not _oldctls:
|
|
||||||
_fill_oldctls(PREFIX)
|
|
||||||
if not (name in _oldctls):
|
|
||||||
debug1('>> No such sysctl: %r' % name)
|
|
||||||
return False
|
|
||||||
oldval = _oldctls[name]
|
|
||||||
if val != oldval:
|
|
||||||
rv = _sysctl_set(name, val)
|
|
||||||
if rv == 0 and permanent:
|
|
||||||
debug1('>> ...saving permanently in /etc/sysctl.conf')
|
|
||||||
f = open('/etc/sysctl.conf', 'a')
|
|
||||||
f.write('\n'
|
|
||||||
'# Added by sshuttle\n'
|
|
||||||
'%s=%s\n' % (name, val))
|
|
||||||
f.close()
|
|
||||||
else:
|
|
||||||
_changedctls.append(name)
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def ipfw(*args):
|
|
||||||
argv = ['ipfw', '-q'] + list(args)
|
|
||||||
debug1('>> %s' % ' '.join(argv))
|
|
||||||
rv = ssubprocess.call(argv, env=get_env())
|
|
||||||
# No env: No output. (Or error that won't be parsed.)
|
|
||||||
if rv:
|
|
||||||
raise Fatal('%r returned %d' % (argv, rv))
|
|
||||||
|
|
||||||
|
|
||||||
def ipfw_noexit(*args):
|
|
||||||
argv = ['ipfw', '-q'] + list(args)
|
|
||||||
debug1('>> %s' % ' '.join(argv))
|
|
||||||
ssubprocess.call(argv, env=get_env())
|
|
||||||
# No env: No output. (Or error that won't be parsed.)
|
|
||||||
|
|
||||||
|
|
||||||
class Method(BaseMethod):
|
|
||||||
|
|
||||||
def get_supported_features(self):
|
|
||||||
result = super(Method, self).get_supported_features()
|
|
||||||
result.ipv6 = False
|
|
||||||
result.udp = False # NOTE: Almost there, kernel patch needed
|
|
||||||
result.dns = True
|
|
||||||
return result
|
|
||||||
|
|
||||||
def get_tcp_dstip(self, sock):
|
|
||||||
return sock.getsockname()
|
|
||||||
|
|
||||||
def recv_udp(self, udp_listener, bufsize):
|
|
||||||
srcip, dstip, data = recv_udp(udp_listener, bufsize)
|
|
||||||
if not dstip:
|
|
||||||
debug1(
|
|
||||||
"-- ignored UDP from %r: "
|
|
||||||
"couldn't determine destination IP address" % (srcip,))
|
|
||||||
return None
|
|
||||||
return srcip, dstip, data
|
|
||||||
|
|
||||||
def send_udp(self, sock, srcip, dstip, data):
|
|
||||||
if not srcip:
|
|
||||||
debug1(
|
|
||||||
"-- ignored UDP to %r: "
|
|
||||||
"couldn't determine source IP address" % (dstip,))
|
|
||||||
return
|
|
||||||
|
|
||||||
# debug3('Sending SRC: %r DST: %r' % (srcip, dstip))
|
|
||||||
sender = socket.socket(sock.family, socket.SOCK_DGRAM)
|
|
||||||
sender.setsockopt(socket.SOL_IP, IP_BINDANY, 1)
|
|
||||||
sender.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
|
||||||
sender.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
|
|
||||||
sender.bind(srcip)
|
|
||||||
sender.sendto(data, dstip)
|
|
||||||
sender.close()
|
|
||||||
|
|
||||||
def setup_udp_listener(self, udp_listener):
|
|
||||||
if udp_listener.v4 is not None:
|
|
||||||
udp_listener.v4.setsockopt(socket.SOL_IP, IP_RECVDSTADDR, 1)
|
|
||||||
# if udp_listener.v6 is not None:
|
|
||||||
# udp_listener.v6.setsockopt(SOL_IPV6, IPV6_RECVDSTADDR, 1)
|
|
||||||
|
|
||||||
def setup_firewall(self, port, dnsport, nslist, family, subnets, udp,
|
|
||||||
user, group, tmark):
|
|
||||||
# IPv6 not supported
|
|
||||||
if family not in [socket.AF_INET]:
|
|
||||||
raise Exception(
|
|
||||||
'Address family "%s" unsupported by ipfw method'
|
|
||||||
% family_to_string(family))
|
|
||||||
|
|
||||||
# XXX: Any risk from this?
|
|
||||||
ipfw_noexit('delete', '1')
|
|
||||||
|
|
||||||
while _changedctls:
|
|
||||||
name = _changedctls.pop()
|
|
||||||
oldval = _oldctls[name]
|
|
||||||
_sysctl_set(name, oldval)
|
|
||||||
|
|
||||||
if subnets or dnsport:
|
|
||||||
sysctl_set('net.inet.ip.fw.enable', 1)
|
|
||||||
|
|
||||||
ipfw('add', '1', 'check-state', ':sshuttle')
|
|
||||||
|
|
||||||
ipfw('add', '1', 'skipto', '2',
|
|
||||||
'tcp',
|
|
||||||
'from', 'any', 'to', 'table(125)')
|
|
||||||
ipfw('add', '1', 'fwd', '127.0.0.1,%d' % port,
|
|
||||||
'tcp',
|
|
||||||
'from', 'any', 'to', 'table(126)',
|
|
||||||
'setup', 'keep-state', ':sshuttle')
|
|
||||||
|
|
||||||
ipfw_noexit('table', '124', 'flush')
|
|
||||||
dnscount = 0
|
|
||||||
for _, ip in [i for i in nslist if i[0] == family]:
|
|
||||||
ipfw('table', '124', 'add', '%s' % (ip))
|
|
||||||
dnscount += 1
|
|
||||||
if dnscount > 0:
|
|
||||||
ipfw('add', '1', 'fwd', '127.0.0.1,%d' % dnsport,
|
|
||||||
'udp',
|
|
||||||
'from', 'any', 'to', 'table(124)',
|
|
||||||
'keep-state', ':sshuttle')
|
|
||||||
ipfw('add', '1', 'allow',
|
|
||||||
'udp',
|
|
||||||
'from', 'any', 'to', 'any')
|
|
||||||
|
|
||||||
if subnets:
|
|
||||||
# create new subnet entries
|
|
||||||
for _, swidth, sexclude, snet, fport, lport \
|
|
||||||
in sorted(subnets, key=lambda s: s[1], reverse=True):
|
|
||||||
if sexclude:
|
|
||||||
ipfw('table', '125', 'add', '%s/%s' % (snet, swidth))
|
|
||||||
else:
|
|
||||||
ipfw('table', '126', 'add', '%s/%s' % (snet, swidth))
|
|
||||||
|
|
||||||
def restore_firewall(self, port, family, udp, user, group):
|
|
||||||
if family not in [socket.AF_INET]:
|
|
||||||
raise Exception(
|
|
||||||
'Address family "%s" unsupported by ipfw method'
|
|
||||||
% family_to_string(family))
|
|
||||||
|
|
||||||
ipfw_noexit('delete', '1')
|
|
||||||
ipfw_noexit('table', '124', 'flush')
|
|
||||||
ipfw_noexit('table', '125', 'flush')
|
|
||||||
ipfw_noexit('table', '126', 'flush')
|
|
||||||
|
|
||||||
def is_supported(self):
|
|
||||||
if which("ipfw"):
|
|
||||||
return True
|
|
||||||
debug2("ipfw method not supported because 'ipfw' command is "
|
|
||||||
"missing.")
|
|
||||||
return False
|
|
@ -1,133 +0,0 @@
|
|||||||
import socket
|
|
||||||
from sshuttle.firewall import subnet_weight
|
|
||||||
from sshuttle.helpers import family_to_string, which, debug2
|
|
||||||
from sshuttle.linux import ipt, ipt_chain_exists, nonfatal
|
|
||||||
from sshuttle.methods import BaseMethod
|
|
||||||
|
|
||||||
|
|
||||||
class Method(BaseMethod):
|
|
||||||
|
|
||||||
# We name the chain based on the transproxy port number so that it's
|
|
||||||
# possible to run multiple copies of sshuttle at the same time. Of course,
|
|
||||||
# the multiple copies shouldn't have overlapping subnets, or only the most-
|
|
||||||
# recently-started one will win (because we use "-I OUTPUT 1" instead of
|
|
||||||
# "-A OUTPUT").
|
|
||||||
def setup_firewall(self, port, dnsport, nslist, family, subnets, udp,
|
|
||||||
user, group, tmark):
|
|
||||||
if family != socket.AF_INET and family != socket.AF_INET6:
|
|
||||||
raise Exception(
|
|
||||||
'Address family "%s" unsupported by nat method'
|
|
||||||
% family_to_string(family))
|
|
||||||
if udp:
|
|
||||||
raise Exception("UDP not supported by nat method")
|
|
||||||
table = "nat"
|
|
||||||
|
|
||||||
def _ipt(*args):
|
|
||||||
return ipt(family, table, *args)
|
|
||||||
|
|
||||||
def _ipm(*args):
|
|
||||||
return ipt(family, "mangle", *args)
|
|
||||||
|
|
||||||
chain = 'sshuttle-%s' % port
|
|
||||||
|
|
||||||
# basic cleanup/setup of chains
|
|
||||||
self.restore_firewall(port, family, udp, user, group)
|
|
||||||
|
|
||||||
_ipt('-N', chain)
|
|
||||||
_ipt('-F', chain)
|
|
||||||
if user is not None or group is not None:
|
|
||||||
margs = ['-I', 'OUTPUT', '1', '-m', 'owner']
|
|
||||||
if user is not None:
|
|
||||||
margs += ['--uid-owner', str(user)]
|
|
||||||
if group is not None:
|
|
||||||
margs += ['--gid-owner', str(group)]
|
|
||||||
margs += ['-j', 'MARK', '--set-mark', str(port)]
|
|
||||||
nonfatal(_ipm, *margs)
|
|
||||||
args = '-m', 'mark', '--mark', str(port), '-j', chain
|
|
||||||
else:
|
|
||||||
args = '-j', chain
|
|
||||||
|
|
||||||
_ipt('-I', 'OUTPUT', '1', *args)
|
|
||||||
_ipt('-I', 'PREROUTING', '1', *args)
|
|
||||||
|
|
||||||
# Redirect DNS traffic as requested. This includes routing traffic
|
|
||||||
# to localhost DNS servers through sshuttle.
|
|
||||||
for _, ip in [i for i in nslist if i[0] == family]:
|
|
||||||
_ipt('-A', chain, '-j', 'REDIRECT',
|
|
||||||
'--dest', '%s' % ip,
|
|
||||||
'-p', 'udp',
|
|
||||||
'--dport', '53',
|
|
||||||
'--to-ports', str(dnsport))
|
|
||||||
|
|
||||||
# create new subnet entries.
|
|
||||||
for _, swidth, sexclude, snet, fport, lport \
|
|
||||||
in sorted(subnets, key=subnet_weight, reverse=True):
|
|
||||||
tcp_ports = ('-p', 'tcp')
|
|
||||||
if fport:
|
|
||||||
tcp_ports = tcp_ports + ('--dport', '%d:%d' % (fport, lport))
|
|
||||||
|
|
||||||
if sexclude:
|
|
||||||
_ipt('-A', chain, '-j', 'RETURN',
|
|
||||||
'--dest', '%s/%s' % (snet, swidth),
|
|
||||||
*tcp_ports)
|
|
||||||
else:
|
|
||||||
_ipt('-A', chain, '-j', 'REDIRECT',
|
|
||||||
'--dest', '%s/%s' % (snet, swidth),
|
|
||||||
*(tcp_ports + ('--to-ports', str(port))))
|
|
||||||
|
|
||||||
# Don't route any remaining local traffic through sshuttle.
|
|
||||||
_ipt('-A', chain, '-j', 'RETURN',
|
|
||||||
'-m', 'addrtype',
|
|
||||||
'--dst-type', 'LOCAL')
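# Illustrative result (after the initial cleanup pass) for port=12300,
# dnsport=12299, a DNS server 192.168.63.1 and an included subnet 10.0.0.0/8:
#   iptables -w -t nat -N sshuttle-12300
#   iptables -w -t nat -F sshuttle-12300
#   iptables -w -t nat -I OUTPUT 1 -j sshuttle-12300
#   iptables -w -t nat -I PREROUTING 1 -j sshuttle-12300
#   iptables -w -t nat -A sshuttle-12300 -j REDIRECT --dest 192.168.63.1 -p udp --dport 53 --to-ports 12299
#   iptables -w -t nat -A sshuttle-12300 -j REDIRECT --dest 10.0.0.0/8 -p tcp --to-ports 12300
#   iptables -w -t nat -A sshuttle-12300 -j RETURN -m addrtype --dst-type LOCAL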
|
|
||||||
|
|
||||||
def restore_firewall(self, port, family, udp, user, group):
|
|
||||||
# only IPv4 and IPv6 are supported by the nat method
|
|
||||||
if family != socket.AF_INET and family != socket.AF_INET6:
|
|
||||||
raise Exception(
|
|
||||||
'Address family "%s" unsupported by nat method'
|
|
||||||
% family_to_string(family))
|
|
||||||
if udp:
|
|
||||||
raise Exception("UDP not supported by nat method")
|
|
||||||
|
|
||||||
table = "nat"
|
|
||||||
|
|
||||||
def _ipt(*args):
|
|
||||||
return ipt(family, table, *args)
|
|
||||||
|
|
||||||
def _ipm(*args):
|
|
||||||
return ipt(family, "mangle", *args)
|
|
||||||
|
|
||||||
chain = 'sshuttle-%s' % port
|
|
||||||
|
|
||||||
# basic cleanup/setup of chains
|
|
||||||
if ipt_chain_exists(family, table, chain):
|
|
||||||
if user is not None or group is not None:
|
|
||||||
margs = ['-D', 'OUTPUT', '-m', 'owner']
|
|
||||||
if user is not None:
|
|
||||||
margs += ['--uid-owner', str(user)]
|
|
||||||
if group is not None:
|
|
||||||
margs += ['--gid-owner', str(group)]
|
|
||||||
margs += ['-j', 'MARK', '--set-mark', str(port)]
|
|
||||||
nonfatal(_ipm, *margs)
|
|
||||||
|
|
||||||
args = '-m', 'mark', '--mark', str(port), '-j', chain
|
|
||||||
else:
|
|
||||||
args = '-j', chain
|
|
||||||
nonfatal(_ipt, '-D', 'OUTPUT', *args)
|
|
||||||
nonfatal(_ipt, '-D', 'PREROUTING', *args)
|
|
||||||
nonfatal(_ipt, '-F', chain)
|
|
||||||
_ipt('-X', chain)
|
|
||||||
|
|
||||||
def get_supported_features(self):
|
|
||||||
result = super(Method, self).get_supported_features()
|
|
||||||
result.user = True
|
|
||||||
result.ipv6 = True
|
|
||||||
result.group = True
|
|
||||||
return result
|
|
||||||
|
|
||||||
def is_supported(self):
|
|
||||||
if which("iptables"):
|
|
||||||
return True
|
|
||||||
debug2("nat method not supported because 'iptables' command "
|
|
||||||
"is missing.")
|
|
||||||
return False
|
|
@ -1,114 +0,0 @@
|
|||||||
import socket
|
|
||||||
from sshuttle.firewall import subnet_weight
|
|
||||||
from sshuttle.linux import nft, nonfatal
|
|
||||||
from sshuttle.methods import BaseMethod
|
|
||||||
from sshuttle.helpers import debug2, which
|
|
||||||
|
|
||||||
|
|
||||||
class Method(BaseMethod):
|
|
||||||
|
|
||||||
# We name the chain based on the transproxy port number so that it's
|
|
||||||
# possible to run multiple copies of sshuttle at the same time. Of course,
|
|
||||||
# the multiple copies shouldn't have overlapping subnets, or only the most-
|
|
||||||
# recently-started one will win (because we use "-I OUTPUT 1" instead of
|
|
||||||
# "-A OUTPUT").
|
|
||||||
def setup_firewall(self, port, dnsport, nslist, family, subnets, udp,
|
|
||||||
user, group, tmark):
|
|
||||||
if udp:
|
|
||||||
raise Exception("UDP not supported by nft")
|
|
||||||
|
|
||||||
if family == socket.AF_INET:
|
|
||||||
table = 'sshuttle-ipv4-%s' % port
|
|
||||||
if family == socket.AF_INET6:
|
|
||||||
table = 'sshuttle-ipv6-%s' % port
|
|
||||||
|
|
||||||
def _nft(action, *args):
|
|
||||||
return nft(family, table, action, *args)
|
|
||||||
|
|
||||||
chain = table
|
|
||||||
|
|
||||||
# basic cleanup/setup of chains
|
|
||||||
_nft('add table', '')
|
|
||||||
_nft('add chain', 'prerouting',
|
|
||||||
'{ type nat hook prerouting priority -100; policy accept; }')
|
|
||||||
_nft('add chain', 'output',
|
|
||||||
'{ type nat hook output priority -100; policy accept; }')
|
|
||||||
_nft('add chain', chain)
|
|
||||||
_nft('flush chain', chain)
|
|
||||||
_nft('add rule', 'output jump %s' % chain)
|
|
||||||
_nft('add rule', 'prerouting jump %s' % chain)
|
|
||||||
|
|
||||||
# setup_firewall() gets called separately for ipv4 and ipv6. Make sure
|
|
||||||
# we only handle the version that we expect to.
|
|
||||||
if family == socket.AF_INET:
|
|
||||||
_nft('add rule', chain, 'meta', 'nfproto', '!=', 'ipv4', 'return')
|
|
||||||
else:
|
|
||||||
_nft('add rule', chain, 'meta', 'nfproto', '!=', 'ipv6', 'return')
|
|
||||||
|
|
||||||
# Strings to use below to simplify our code
|
|
||||||
if family == socket.AF_INET:
|
|
||||||
ip_version_l = 'ipv4'
|
|
||||||
ip_version = 'ip'
|
|
||||||
elif family == socket.AF_INET6:
|
|
||||||
ip_version_l = 'ipv6'
|
|
||||||
ip_version = 'ip6'
|
|
||||||
|
|
||||||
# Redirect DNS traffic as requested. This includes routing traffic
|
|
||||||
# to localhost DNS servers through sshuttle.
|
|
||||||
for _, ip in [i for i in nslist if i[0] == family]:
|
|
||||||
_nft('add rule', chain, ip_version,
|
|
||||||
'daddr %s' % ip, 'udp dport 53',
|
|
||||||
('redirect to :' + str(dnsport)))
|
|
||||||
|
|
||||||
# Don't route any remaining local traffic through sshuttle
|
|
||||||
_nft('add rule', chain, 'fib daddr type local return')
|
|
||||||
|
|
||||||
# create new subnet entries.
|
|
||||||
for _, swidth, sexclude, snet, fport, lport \
|
|
||||||
in sorted(subnets, key=subnet_weight, reverse=True):
|
|
||||||
|
|
||||||
# match using nfproto as described at
|
|
||||||
# https://superuser.com/questions/1560376/match-ipv6-protocol-using-nftables
|
|
||||||
if fport and fport != lport:
|
|
||||||
tcp_ports = ('meta', 'nfproto', ip_version_l, 'tcp',
|
|
||||||
'dport', '{ %d-%d }' % (fport, lport))
|
|
||||||
elif fport and fport == lport:
|
|
||||||
tcp_ports = ('meta', 'nfproto', ip_version_l, 'tcp',
|
|
||||||
'dport', '%d' % (fport))
|
|
||||||
else:
|
|
||||||
tcp_ports = ('meta', 'nfproto', ip_version_l,
|
|
||||||
'meta', 'l4proto', 'tcp')
|
|
||||||
|
|
||||||
if sexclude:
|
|
||||||
_nft('add rule', chain, *(tcp_ports + (
|
|
||||||
ip_version, 'daddr %s/%s' % (snet, swidth), 'return')))
|
|
||||||
else:
|
|
||||||
_nft('add rule', chain, *(tcp_ports + (
|
|
||||||
ip_version, 'daddr %s/%s' % (snet, swidth),
|
|
||||||
('redirect to :' + str(port)))))
|
|
||||||
|
|
||||||
def restore_firewall(self, port, family, udp, user, group):
|
|
||||||
if udp:
|
|
||||||
raise Exception("UDP not supported by nft method")
|
|
||||||
|
|
||||||
if family == socket.AF_INET:
|
|
||||||
table = 'sshuttle-ipv4-%s' % port
|
|
||||||
if family == socket.AF_INET6:
|
|
||||||
table = 'sshuttle-ipv6-%s' % port
|
|
||||||
|
|
||||||
def _nft(action, *args):
|
|
||||||
return nft(family, table, action, *args)
|
|
||||||
|
|
||||||
# basic cleanup/setup of chains
|
|
||||||
nonfatal(_nft, 'delete table', '')
|
|
||||||
|
|
||||||
def get_supported_features(self):
|
|
||||||
result = super(Method, self).get_supported_features()
|
|
||||||
result.ipv6 = True
|
|
||||||
return result
|
|
||||||
|
|
||||||
def is_supported(self):
|
|
||||||
if which("nft"):
|
|
||||||
return True
|
|
||||||
debug2("nft method not supported because 'nft' command is missing.")
|
|
||||||
return False
|
|
@ -1,503 +0,0 @@
|
|||||||
import os
|
|
||||||
import sys
|
|
||||||
import platform
|
|
||||||
import re
|
|
||||||
import socket
|
|
||||||
import errno
|
|
||||||
import struct
|
|
||||||
import subprocess as ssubprocess
|
|
||||||
import shlex
|
|
||||||
from fcntl import ioctl
|
|
||||||
from ctypes import c_char, c_uint8, c_uint16, c_uint32, Union, Structure, \
|
|
||||||
sizeof, addressof, memmove
|
|
||||||
from sshuttle.firewall import subnet_weight
|
|
||||||
from sshuttle.helpers import log, debug1, debug2, debug3, Fatal, \
|
|
||||||
family_to_string, get_env, which
|
|
||||||
from sshuttle.methods import BaseMethod
|
|
||||||
|
|
||||||
|
|
||||||
_pf_context = {
|
|
||||||
'started_by_sshuttle': 0,
|
|
||||||
'loaded_by_sshuttle': True,
|
|
||||||
'Xtoken': []
|
|
||||||
}
|
|
||||||
_pf_fd = None
|
|
||||||
|
|
||||||
|
|
||||||
class Generic(object):
|
|
||||||
MAXPATHLEN = 1024
|
|
||||||
PF_CHANGE_ADD_TAIL = 2
|
|
||||||
PF_CHANGE_GET_TICKET = 6
|
|
||||||
PF_PASS = 0
|
|
||||||
PF_RDR = 8
|
|
||||||
PF_OUT = 2
|
|
||||||
ACTION_OFFSET = 0
|
|
||||||
POOL_TICKET_OFFSET = 8
|
|
||||||
ANCHOR_CALL_OFFSET = 1040
|
|
||||||
|
|
||||||
class pf_addr(Structure):
|
|
||||||
class _pfa(Union):
|
|
||||||
_fields_ = [("v4", c_uint32), # struct in_addr
|
|
||||||
("v6", c_uint32 * 4), # struct in6_addr
|
|
||||||
("addr8", c_uint8 * 16),
|
|
||||||
("addr16", c_uint16 * 8),
|
|
||||||
("addr32", c_uint32 * 4)]
|
|
||||||
|
|
||||||
_fields_ = [("pfa", _pfa)]
|
|
||||||
_anonymous_ = ("pfa",)
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.status = b''
|
|
||||||
self.pfioc_pooladdr = c_char * 1136
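# The DIOC* request numbers below re-create the BSD _IOWR() ioctl encoding:
# (IOC_IN | IOC_OUT) | (sizeof(argument struct) masked to 13 bits) << 16
# | ord('D') << 8 | command number.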
|
|
||||||
|
|
||||||
self.DIOCNATLOOK = (
|
|
||||||
(0x40000000 | 0x80000000) |
|
|
||||||
((sizeof(self.pfioc_natlook) & 0x1fff) << 16) |
|
|
||||||
((ord('D')) << 8) | (23))
|
|
||||||
self.DIOCCHANGERULE = (
|
|
||||||
(0x40000000 | 0x80000000) |
|
|
||||||
((sizeof(self.pfioc_rule) & 0x1fff) << 16) |
|
|
||||||
((ord('D')) << 8) | (26))
|
|
||||||
self.DIOCBEGINADDRS = (
|
|
||||||
(0x40000000 | 0x80000000) |
|
|
||||||
((sizeof(self.pfioc_pooladdr) & 0x1fff) << 16) |
|
|
||||||
((ord('D')) << 8) | (51))
|
|
||||||
|
|
||||||
def enable(self):
|
|
||||||
if b'INFO:\nStatus: Disabled' in self.status:
|
|
||||||
pfctl('-e')
|
|
||||||
_pf_context['started_by_sshuttle'] += 1
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def disable(anchor):
|
|
||||||
pfctl('-a %s -F all' % anchor)
|
|
||||||
if _pf_context['started_by_sshuttle'] == 1:
|
|
||||||
pfctl('-d')
|
|
||||||
_pf_context['started_by_sshuttle'] -= 1
|
|
||||||
|
|
||||||
def query_nat(self, family, proto, src_ip, src_port, dst_ip, dst_port):
|
|
||||||
[proto, family, src_port, dst_port] = [
|
|
||||||
int(v) for v in [proto, family, src_port, dst_port]]
|
|
||||||
|
|
||||||
packed_src_ip = socket.inet_pton(family, src_ip)
|
|
||||||
packed_dst_ip = socket.inet_pton(family, dst_ip)
|
|
||||||
|
|
||||||
assert len(packed_src_ip) == len(packed_dst_ip)
|
|
||||||
length = len(packed_src_ip)
|
|
||||||
|
|
||||||
pnl = self.pfioc_natlook()
|
|
||||||
pnl.proto = proto
|
|
||||||
pnl.direction = self.PF_OUT
|
|
||||||
pnl.af = family
|
|
||||||
memmove(addressof(pnl.saddr), packed_src_ip, length)
|
|
||||||
memmove(addressof(pnl.daddr), packed_dst_ip, length)
|
|
||||||
self._add_natlook_ports(pnl, src_port, dst_port)
|
|
||||||
|
|
||||||
ioctl(pf_get_dev(), self.DIOCNATLOOK,
|
|
||||||
(c_char * sizeof(pnl)).from_address(addressof(pnl)))
|
|
||||||
|
|
||||||
ip = socket.inet_ntop(
|
|
||||||
pnl.af, (c_char * length).from_address(addressof(pnl.rdaddr)).raw)
|
|
||||||
port = socket.ntohs(self._get_natlook_port(pnl.rdxport))
|
|
||||||
return (ip, port)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _add_natlook_ports(pnl, src_port, dst_port):
|
|
||||||
pnl.sxport = socket.htons(src_port)
|
|
||||||
pnl.dxport = socket.htons(dst_port)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _get_natlook_port(xport):
|
|
||||||
return xport
|
|
||||||
|
|
||||||
def add_anchors(self, anchor, status=None):
|
|
||||||
if status is None:
|
|
||||||
status = pfctl('-s all')[0]
|
|
||||||
self.status = status
|
|
||||||
if ('\nanchor "%s"' % anchor).encode('ASCII') not in status:
|
|
||||||
self._add_anchor_rule(self.PF_PASS, anchor.encode('ASCII'))
|
|
||||||
|
|
||||||
def _add_anchor_rule(self, kind, name, pr=None):
|
|
||||||
if pr is None:
|
|
||||||
pr = self.pfioc_rule()
|
|
||||||
|
|
||||||
memmove(addressof(pr) + self.ANCHOR_CALL_OFFSET, name,
|
|
||||||
min(self.MAXPATHLEN, len(name))) # anchor_call = name
|
|
||||||
memmove(addressof(pr) + self.RULE_ACTION_OFFSET,
|
|
||||||
struct.pack('I', kind), 4) # rule.action = kind
|
|
||||||
|
|
||||||
memmove(addressof(pr) + self.ACTION_OFFSET,
|
|
||||||
struct.pack('I', self.PF_CHANGE_GET_TICKET),
|
|
||||||
4) # action = PF_CHANGE_GET_TICKET
|
|
||||||
ioctl(pf_get_dev(), pf.DIOCCHANGERULE, pr)
|
|
||||||
|
|
||||||
memmove(addressof(pr) + self.ACTION_OFFSET,
|
|
||||||
struct.pack('I', self.PF_CHANGE_ADD_TAIL),
|
|
||||||
4) # action = PF_CHANGE_ADD_TAIL
|
|
||||||
ioctl(pf_get_dev(), pf.DIOCCHANGERULE, pr)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _inet_version(family):
|
|
||||||
return b'inet' if family == socket.AF_INET else b'inet6'
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _lo_addr(family):
|
|
||||||
return b'127.0.0.1' if family == socket.AF_INET else b'::1'
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def add_rules(anchor, rules):
|
|
||||||
assert isinstance(rules, bytes)
|
|
||||||
debug3("rules:\n" + rules.decode("ASCII"))
|
|
||||||
pfctl('-a %s -f /dev/stdin' % anchor, rules)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def has_skip_loopback():
|
|
||||||
return b'skip' in pfctl('-s Interfaces -i lo -v')[0]
|
|
||||||
|
|
||||||
|
|
||||||
class FreeBsd(Generic):
    RULE_ACTION_OFFSET = 2968

    def __new__(cls):
        class pfioc_natlook(Structure):
            pf_addr = Generic.pf_addr
            _fields_ = [("saddr", pf_addr),
                        ("daddr", pf_addr),
                        ("rsaddr", pf_addr),
                        ("rdaddr", pf_addr),
                        ("sxport", c_uint16),
                        ("dxport", c_uint16),
                        ("rsxport", c_uint16),
                        ("rdxport", c_uint16),
                        ("af", c_uint8),  # sa_family_t
                        ("proto", c_uint8),
                        ("proto_variant", c_uint8),
                        ("direction", c_uint8)]

        freebsd = Generic.__new__(cls)
        freebsd.pfioc_rule = c_char * 3040
        freebsd.pfioc_natlook = pfioc_natlook
        return freebsd

    def enable(self):
        returncode = ssubprocess.call(['kldload', 'pf'], env=get_env())
        # No env: No output.
        super(FreeBsd, self).enable()
        if returncode == 0:
            _pf_context['loaded_by_sshuttle'] = True

    def disable(self, anchor):
        super(FreeBsd, self).disable(anchor)
        if _pf_context['loaded_by_sshuttle'] and \
                _pf_context['started_by_sshuttle'] == 0:
            ssubprocess.call(['kldunload', 'pf'], env=get_env())
            # No env: No output.

    def add_anchors(self, anchor):
        status = pfctl('-s all')[0]
        if ('\nrdr-anchor "%s"' % anchor).encode('ASCII') not in status:
            self._add_anchor_rule(self.PF_RDR, anchor.encode('ASCII'))
        super(FreeBsd, self).add_anchors(anchor, status=status)

    def _add_anchor_rule(self, kind, name, pr=None):
        pr = pr or self.pfioc_rule()
        ppa = self.pfioc_pooladdr()

        ioctl(pf_get_dev(), self.DIOCBEGINADDRS, ppa)
        # pool ticket
        memmove(addressof(pr) + self.POOL_TICKET_OFFSET, ppa[4:8], 4)
        super(FreeBsd, self)._add_anchor_rule(kind, name, pr=pr)

    def add_rules(self, anchor, includes, port, dnsport, nslist, family):
        inet_version = self._inet_version(family)
        lo_addr = self._lo_addr(family)

        tables = []
        translating_rules = [
            b'rdr pass on lo0 %s proto tcp from ! %s to %s '
            b'-> %s port %r' % (inet_version, lo_addr, subnet, lo_addr, port)
            for exclude, subnet in includes if not exclude
        ]
        filtering_rules = [
            b'pass out route-to lo0 %s proto tcp '
            b'to %s keep state' % (inet_version, subnet)
            if not exclude else
            b'pass out %s proto tcp to %s' % (inet_version, subnet)
            for exclude, subnet in includes
        ]

        if nslist:
            tables.append(
                b'table <dns_servers> {%s}' %
                b','.join([ns[1].encode("ASCII") for ns in nslist]))
            translating_rules.append(
                b'rdr pass on lo0 %s proto udp to <dns_servers> '
                b'port 53 -> %s port %r' % (inet_version, lo_addr, dnsport))
            filtering_rules.append(
                b'pass out route-to lo0 %s proto udp to '
                b'<dns_servers> port 53 keep state' % inet_version)

        rules = b'\n'.join(tables + translating_rules + filtering_rules) \
            + b'\n'

        super(FreeBsd, self).add_rules(anchor, rules)

class OpenBsd(Generic):
    POOL_TICKET_OFFSET = 4
    RULE_ACTION_OFFSET = 3324
    ANCHOR_CALL_OFFSET = 1036

    def __init__(self):
        class pfioc_natlook(Structure):
            pf_addr = Generic.pf_addr
            _fields_ = [("saddr", pf_addr),
                        ("daddr", pf_addr),
                        ("rsaddr", pf_addr),
                        ("rdaddr", pf_addr),
                        ("rdomain", c_uint16),
                        ("rrdomain", c_uint16),
                        ("sxport", c_uint16),
                        ("dxport", c_uint16),
                        ("rsxport", c_uint16),
                        ("rdxport", c_uint16),
                        ("af", c_uint8),  # sa_family_t
                        ("proto", c_uint8),
                        ("proto_variant", c_uint8),
                        ("direction", c_uint8)]

        self.pfioc_rule = c_char * 3408
        self.pfioc_natlook = pfioc_natlook
        super(OpenBsd, self).__init__()

    def add_anchors(self, anchor):
        # before adding anchors and rules we must override the skip lo
        # that comes by default in openbsd pf.conf so the rules we will add,
        # which rely on translating/filtering packets on lo, can work
        if self.has_skip_loopback():
            pfctl('-f /dev/stdin', b'match on lo\n')
        super(OpenBsd, self).add_anchors(anchor)

    def add_rules(self, anchor, includes, port, dnsport, nslist, family):
        inet_version = self._inet_version(family)
        lo_addr = self._lo_addr(family)

        tables = []
        translating_rules = [
            b'pass in on lo0 %s proto tcp to %s '
            b'divert-to %s port %r' % (inet_version, subnet, lo_addr, port)
            for exclude, subnet in includes if not exclude
        ]
        filtering_rules = [
            b'pass out %s proto tcp to %s '
            b'route-to lo0 keep state' % (inet_version, subnet)
            if not exclude else
            b'pass out %s proto tcp to %s' % (inet_version, subnet)
            for exclude, subnet in includes
        ]

        if nslist:
            tables.append(
                b'table <dns_servers> {%s}' %
                b','.join([ns[1].encode("ASCII") for ns in nslist]))
            translating_rules.append(
                b'pass in on lo0 %s proto udp to <dns_servers> port 53 '
                b'rdr-to %s port %r' % (inet_version, lo_addr, dnsport))
            filtering_rules.append(
                b'pass out %s proto udp to <dns_servers> port 53 '
                b'route-to lo0 keep state' % inet_version)

        rules = b'\n'.join(tables + translating_rules + filtering_rules) \
            + b'\n'

        super(OpenBsd, self).add_rules(anchor, rules)

class Darwin(FreeBsd):
    RULE_ACTION_OFFSET = 3068

    def __init__(self):
        class pf_state_xport(Union):
            _fields_ = [("port", c_uint16),
                        ("call_id", c_uint16),
                        ("spi", c_uint32)]

        class pfioc_natlook(Structure):
            pf_addr = Generic.pf_addr
            _fields_ = [("saddr", pf_addr),
                        ("daddr", pf_addr),
                        ("rsaddr", pf_addr),
                        ("rdaddr", pf_addr),
                        ("sxport", pf_state_xport),
                        ("dxport", pf_state_xport),
                        ("rsxport", pf_state_xport),
                        ("rdxport", pf_state_xport),
                        ("af", c_uint8),  # sa_family_t
                        ("proto", c_uint8),
                        ("proto_variant", c_uint8),
                        ("direction", c_uint8)]

        self.pfioc_rule = c_char * 3104
        self.pfioc_natlook = pfioc_natlook
        super(Darwin, self).__init__()

    def enable(self):
        o = pfctl('-E')
        _pf_context['Xtoken'].append(re.search(b'Token : (.+)', o[1]).group(1))

    def disable(self, anchor):
        pfctl('-a %s -F all' % anchor)
        if _pf_context['Xtoken']:
            pfctl('-X %s' % _pf_context['Xtoken'].pop().decode("ASCII"))

    def add_anchors(self, anchor):
        # before adding anchors and rules we must override the skip lo
        # that in some cases ends up in the chain so the rules we will add,
        # which rely on translating/filtering packets on lo, can work
        if self.has_skip_loopback():
            pfctl('-f /dev/stdin', b'pass on lo\n')
        super(Darwin, self).add_anchors(anchor)

    def _add_natlook_ports(self, pnl, src_port, dst_port):
        pnl.sxport.port = socket.htons(src_port)
        pnl.dxport.port = socket.htons(dst_port)

    def _get_natlook_port(self, xport):
        return xport.port

class PfSense(FreeBsd):
    RULE_ACTION_OFFSET = 3040

    def __init__(self):
        self.pfioc_rule = c_char * 3112
        super(PfSense, self).__init__()


if sys.platform == 'darwin':
    pf = Darwin()
elif sys.platform.startswith('openbsd'):
    pf = OpenBsd()
elif platform.version().endswith('pfSense'):
    pf = PfSense()
else:
    pf = FreeBsd()

def pfctl(args, stdin=None):
    argv = ['pfctl'] + shlex.split(args)
    debug1('>> %s' % ' '.join(argv))
    p = ssubprocess.Popen(argv, stdin=ssubprocess.PIPE,
                          stdout=ssubprocess.PIPE,
                          stderr=ssubprocess.PIPE,
                          env=get_env())
    o = p.communicate(stdin)
    if p.returncode:
        log('%r returned %d, stdout and stderr follows: ' %
            (argv, p.returncode))
        log("stdout:\n%s" % o[0].decode("ascii"))
        log("stderr:\n%s" % o[1].decode("ascii"))
        raise Fatal('%r returned %d' % (argv, p.returncode))

    return o


def pf_get_dev():
    global _pf_fd
    if _pf_fd is None:
        _pf_fd = os.open('/dev/pf', os.O_RDWR)

    return _pf_fd


def pf_get_anchor(family, port):
    return 'sshuttle%s-%d' % ('' if family == socket.AF_INET else '6', port)

class Method(BaseMethod):

    def get_supported_features(self):
        result = super(Method, self).get_supported_features()
        result.ipv6 = True
        return result

    def get_tcp_dstip(self, sock):
        pfile = self.firewall.pfile

        try:
            peer = sock.getpeername()
        except socket.error:
            _, e = sys.exc_info()[:2]
            if e.args[0] == errno.EINVAL:
                return sock.getsockname()

        proxy = sock.getsockname()

        argv = (sock.family, socket.IPPROTO_TCP,
                peer[0].encode("ASCII"), peer[1],
                proxy[0].encode("ASCII"), proxy[1])
        out_line = b"QUERY_PF_NAT %d,%d,%s,%d,%s,%d\n" % argv
        pfile.write(out_line)
        pfile.flush()
        in_line = pfile.readline()
        debug2(out_line.decode("ASCII") + ' > ' + in_line.decode("ASCII"))
        if in_line.startswith(b'QUERY_PF_NAT_SUCCESS '):
            (ip, port) = in_line[21:].split(b',')
            return (ip.decode("ASCII"), int(port))

        return sock.getsockname()

    def setup_firewall(self, port, dnsport, nslist, family, subnets, udp,
                       user, group, tmark):
        if family not in [socket.AF_INET, socket.AF_INET6]:
            raise Exception(
                'Address family "%s" unsupported by pf method_name'
                % family_to_string(family))
        if udp:
            raise Exception("UDP not supported by pf method_name")

        if subnets:
            includes = []
            # If a given subnet is both included and excluded, list the
            # exclusion first; the table will ignore the second, opposite
            # definition
            for _, swidth, sexclude, snet, fport, lport \
                    in sorted(subnets, key=subnet_weight):
                includes.append((sexclude, b"%s/%d%s" % (
                    snet.encode("ASCII"),
                    swidth,
                    b" port %d:%d" % (fport, lport) if fport else b"")))

        anchor = pf_get_anchor(family, port)
        pf.add_anchors(anchor)
        pf.add_rules(anchor, includes, port, dnsport, nslist, family)
        pf.enable()

    def restore_firewall(self, port, family, udp, user, group):
        if family not in [socket.AF_INET, socket.AF_INET6]:
            raise Exception(
                'Address family "%s" unsupported by pf method_name'
                % family_to_string(family))
        if udp:
            raise Exception("UDP not supported by pf method_name")

        pf.disable(pf_get_anchor(family, port))

    def firewall_command(self, line):
        if line.startswith('QUERY_PF_NAT '):
            try:
                dst = pf.query_nat(*(line[13:].split(',')))
                sys.stdout.write('QUERY_PF_NAT_SUCCESS %s,%r\n' % dst)
            except IOError as e:
                sys.stdout.write('QUERY_PF_NAT_FAILURE %s\n' % e)

            sys.stdout.flush()
            return True
        else:
            return False

    def is_supported(self):
        if which("pfctl"):
            return True
        debug2("pf method not supported because 'pfctl' command is missing.")
        return False

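The DIOCNATLOOK, DIOCCHANGERULE and DIOCBEGINADDRS values computed in Generic.__init__ above are the BSD _IOWR('D', nr, struct) ioctl macro expanded by hand. A minimal sketch of that macro in Python, assuming the same IOC_IN/IOC_OUT bits and 13-bit size mask used above; the 76-byte size is only a hypothetical stand-in for sizeof(pfioc_natlook):

# Sketch of the _IOWR() macro that the constants above expand inline.
IOC_OUT = 0x40000000      # kernel copies the struct back out
IOC_IN = 0x80000000       # kernel copies the struct in
IOCPARM_MASK = 0x1fff     # 13-bit parameter-length field


def _IOWR(group, nr, size):
    return (IOC_IN | IOC_OUT) | ((size & IOCPARM_MASK) << 16) \
        | (ord(group) << 8) | nr


# e.g. DIOCNATLOOK ('D', 23) for a hypothetical 76-byte pfioc_natlook:
assert _IOWR('D', 23, 76) == 0xc04c4417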
@@ -1,266 +0,0 @@
import struct
from sshuttle.firewall import subnet_weight
from sshuttle.helpers import family_to_string
from sshuttle.linux import ipt, ipt_chain_exists
from sshuttle.methods import BaseMethod
from sshuttle.helpers import debug1, debug2, debug3, Fatal, which

import socket
import os


IP_TRANSPARENT = 19
IP_ORIGDSTADDR = 20
IP_RECVORIGDSTADDR = IP_ORIGDSTADDR
SOL_IPV6 = 41
IPV6_ORIGDSTADDR = 74
IPV6_RECVORIGDSTADDR = IPV6_ORIGDSTADDR


def recv_udp(listener, bufsize):
    debug3('Accept UDP python using recvmsg.')
    data, ancdata, _, srcip = listener.recvmsg(
        4096, socket.CMSG_SPACE(24))
    dstip = None
    family = None
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        if cmsg_level == socket.SOL_IP and cmsg_type == IP_ORIGDSTADDR:
            family, port = struct.unpack('=HH', cmsg_data[0:4])
            port = socket.htons(port)
            if family == socket.AF_INET:
                start = 4
                length = 4
            else:
                raise Fatal("Unsupported socket type '%s'" % family)
            ip = socket.inet_ntop(family, cmsg_data[start:start + length])
            dstip = (ip, port)
            break
        elif cmsg_level == SOL_IPV6 and cmsg_type == IPV6_ORIGDSTADDR:
            family, port = struct.unpack('=HH', cmsg_data[0:4])
            port = socket.htons(port)
            if family == socket.AF_INET6:
                start = 8
                length = 16
            else:
                raise Fatal("Unsupported socket type '%s'" % family)
            ip = socket.inet_ntop(family, cmsg_data[start:start + length])
            dstip = (ip, port)
            break
    return (srcip, dstip, data)


class Method(BaseMethod):

    def get_supported_features(self):
        result = super(Method, self).get_supported_features()
        result.ipv6 = True
        result.udp = True
        result.dns = True
        return result

    def get_tcp_dstip(self, sock):
        return sock.getsockname()

    def recv_udp(self, udp_listener, bufsize):
        srcip, dstip, data = recv_udp(udp_listener, bufsize)
        if not dstip:
            debug1(
                "-- ignored UDP from %r: "
                "couldn't determine destination IP address\n" % (srcip,))
            return None
        return srcip, dstip, data

    def setsockopt_error(self, e):
        """The tproxy method needs root permissions to successfully
        set the IP_TRANSPARENT option on sockets. This method is
        called when we receive a PermissionError when trying to do
        so."""
        raise Fatal("Insufficient permissions for tproxy method.\n"
                    "Your effective UID is %d, not 0. Try rerunning as root.\n"
                    % os.geteuid())

    def send_udp(self, sock, srcip, dstip, data):
        if not srcip:
            debug1(
                "-- ignored UDP to %r: "
                "couldn't determine source IP address\n" % (dstip,))
            return
        sender = socket.socket(sock.family, socket.SOCK_DGRAM)
        sender.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            sender.setsockopt(socket.SOL_IP, IP_TRANSPARENT, 1)
        except PermissionError as e:
            self.setsockopt_error(e)
        sender.bind(srcip)
        sender.sendto(data, dstip)
        sender.close()

    def setup_tcp_listener(self, tcp_listener):
        try:
            tcp_listener.setsockopt(socket.SOL_IP, IP_TRANSPARENT, 1)
        except PermissionError as e:
            self.setsockopt_error(e)

    def setup_udp_listener(self, udp_listener):
        try:
            udp_listener.setsockopt(socket.SOL_IP, IP_TRANSPARENT, 1)
        except PermissionError as e:
            self.setsockopt_error(e)

        if udp_listener.v4 is not None:
            udp_listener.v4.setsockopt(
                socket.SOL_IP, IP_RECVORIGDSTADDR, 1)
        if udp_listener.v6 is not None:
            udp_listener.v6.setsockopt(SOL_IPV6, IPV6_RECVORIGDSTADDR, 1)

    def setup_firewall(self, port, dnsport, nslist, family, subnets, udp,
                       user, group, tmark):
        if family not in [socket.AF_INET, socket.AF_INET6]:
            raise Exception(
                'Address family "%s" unsupported by tproxy method'
                % family_to_string(family))

        table = "mangle"

        def _ipt(*args):
            return ipt(family, table, *args)

        def _ipt_proto_ports(proto, fport, lport):
            return proto + ('--dport', '%d:%d' % (fport, lport)) \
                if fport else proto

        mark_chain = 'sshuttle-m-%s' % port
        tproxy_chain = 'sshuttle-t-%s' % port
        divert_chain = 'sshuttle-d-%s' % port

        # basic cleanup/setup of chains
        self.restore_firewall(port, family, udp, user, group)

        _ipt('-N', mark_chain)
        _ipt('-F', mark_chain)
        _ipt('-N', divert_chain)
        _ipt('-F', divert_chain)
        _ipt('-N', tproxy_chain)
        _ipt('-F', tproxy_chain)
        _ipt('-I', 'OUTPUT', '1', '-j', mark_chain)
        _ipt('-I', 'PREROUTING', '1', '-j', tproxy_chain)

        for _, ip in [i for i in nslist if i[0] == family]:
            _ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', tmark,
                 '--dest', '%s/32' % ip,
                 '-m', 'udp', '-p', 'udp', '--dport', '53')
            _ipt('-A', tproxy_chain, '-j', 'TPROXY',
                 '--tproxy-mark', tmark,
                 '--dest', '%s/32' % ip,
                 '-m', 'udp', '-p', 'udp', '--dport', '53',
                 '--on-port', str(dnsport))

        # Don't have packets sent to any of our local IP addresses go
        # through the tproxy or mark chains (except DNS ones).
        #
        # Without this fix, if a large subnet is redirected through
        # sshuttle (i.e., 0/0), then the user may be unable to receive
        # UDP responses or connect to their own machine using an IP
        # besides (127.0.0.1). Prior to including these lines, the
        # documentation reminded the user to use -x to exclude their
        # own IP addresses to receive UDP responses if they are
        # redirecting a large subnet through sshuttle (i.e., 0/0).
        _ipt('-A', tproxy_chain, '-j', 'RETURN', '-m', 'addrtype',
             '--dst-type', 'LOCAL')
        _ipt('-A', mark_chain, '-j', 'RETURN', '-m', 'addrtype',
             '--dst-type', 'LOCAL')

        _ipt('-A', divert_chain, '-j', 'MARK', '--set-mark', tmark)
        _ipt('-A', divert_chain, '-j', 'ACCEPT')
        _ipt('-A', tproxy_chain, '-m', 'socket', '-j', divert_chain,
             '-m', 'tcp', '-p', 'tcp')

        if udp:
            _ipt('-A', tproxy_chain, '-m', 'socket', '-j', divert_chain,
                 '-m', 'udp', '-p', 'udp')

        for _, swidth, sexclude, snet, fport, lport \
                in sorted(subnets, key=subnet_weight, reverse=True):
            tcp_ports = ('-p', 'tcp')
            tcp_ports = _ipt_proto_ports(tcp_ports, fport, lport)

            if sexclude:
                _ipt('-A', mark_chain, '-j', 'RETURN',
                     '--dest', '%s/%s' % (snet, swidth),
                     '-m', 'tcp',
                     *tcp_ports)
                _ipt('-A', tproxy_chain, '-j', 'RETURN',
                     '--dest', '%s/%s' % (snet, swidth),
                     '-m', 'tcp',
                     *tcp_ports)
            else:
                _ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', tmark,
                     '--dest', '%s/%s' % (snet, swidth),
                     '-m', 'tcp',
                     *tcp_ports)
                _ipt('-A', tproxy_chain, '-j', 'TPROXY',
                     '--tproxy-mark', tmark,
                     '--dest', '%s/%s' % (snet, swidth),
                     '-m', 'tcp',
                     *(tcp_ports + ('--on-port', str(port))))

            if udp:
                udp_ports = ('-p', 'udp')
                udp_ports = _ipt_proto_ports(udp_ports, fport, lport)

                if sexclude:
                    _ipt('-A', mark_chain, '-j', 'RETURN',
                         '--dest', '%s/%s' % (snet, swidth),
                         '-m', 'udp',
                         *udp_ports)
                    _ipt('-A', tproxy_chain, '-j', 'RETURN',
                         '--dest', '%s/%s' % (snet, swidth),
                         '-m', 'udp',
                         *udp_ports)
                else:
                    _ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', tmark,
                         '--dest', '%s/%s' % (snet, swidth),
                         '-m', 'udp',
                         *udp_ports)
                    _ipt('-A', tproxy_chain, '-j', 'TPROXY',
                         '--tproxy-mark', tmark,
                         '--dest', '%s/%s' % (snet, swidth),
                         '-m', 'udp',
                         *(udp_ports + ('--on-port', str(port))))

    def restore_firewall(self, port, family, udp, user, group):
        if family not in [socket.AF_INET, socket.AF_INET6]:
            raise Exception(
                'Address family "%s" unsupported by tproxy method'
                % family_to_string(family))

        table = "mangle"

        def _ipt(*args):
            return ipt(family, table, *args)

        mark_chain = 'sshuttle-m-%s' % port
        tproxy_chain = 'sshuttle-t-%s' % port
        divert_chain = 'sshuttle-d-%s' % port

        # basic cleanup/setup of chains
        if ipt_chain_exists(family, table, mark_chain):
            _ipt('-D', 'OUTPUT', '-j', mark_chain)
            _ipt('-F', mark_chain)
            _ipt('-X', mark_chain)

        if ipt_chain_exists(family, table, tproxy_chain):
            _ipt('-D', 'PREROUTING', '-j', tproxy_chain)
            _ipt('-F', tproxy_chain)
            _ipt('-X', tproxy_chain)

        if ipt_chain_exists(family, table, divert_chain):
            _ipt('-F', divert_chain)
            _ipt('-X', divert_chain)

    def is_supported(self):
        if which("iptables") and which("ip6tables"):
            return True
        debug2("tproxy method not supported because 'iptables' "
               "or 'ip6tables' commands are missing.\n")
        return False

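For a non-excluded subnet, the _ipt() calls in setup_firewall() above boil down to plain iptables -t mangle invocations. A rough sketch of the two rules generated for one included subnet; the subnet, port, mark and chain names are illustrative placeholders, not output from a real run:

# Illustrative only: approximate argv lists produced by _ipt() for one
# included subnet 10.0.0.0/8 with listen port 12300 and mark 0x01.
table, port, tmark = "mangle", 12300, "0x01"
mark_chain = "sshuttle-m-%d" % port
tproxy_chain = "sshuttle-t-%d" % port

expected = [
    ["iptables", "-t", table, "-A", mark_chain, "-j", "MARK",
     "--set-mark", tmark, "--dest", "10.0.0.0/8", "-m", "tcp", "-p", "tcp"],
    ["iptables", "-t", table, "-A", tproxy_chain, "-j", "TPROXY",
     "--tproxy-mark", tmark, "--dest", "10.0.0.0/8", "-m", "tcp", "-p", "tcp",
     "--on-port", str(port)],
]
for argv in expected:
    print(" ".join(argv))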
@@ -1,533 +0,0 @@
import os
import sys
from ipaddress import ip_address, ip_network
import threading
from collections import namedtuple
import socket
import subprocess
import re
from multiprocessing import shared_memory
from struct import Struct
from functools import wraps
from enum import IntEnum
import time
import traceback


from sshuttle.methods import BaseMethod
from sshuttle.helpers import log, debug3, debug1, debug2, get_verbose_level, Fatal

try:
    # https://reqrypt.org/windivert-doc.html#divert_iphdr
    # https://www.reqrypt.org/windivert-changelog.txt
    import pydivert
except ImportError:
    raise Exception("Could not import pydivert module. windivert requires https://pypi.org/project/pydivert")


ConnectionTuple = namedtuple(
    "ConnectionTuple",
    ["protocol", "ip_version", "src_addr", "src_port", "dst_addr", "dst_port", "state_epoch", "state"],
)


WINDIVERT_MAX_CONNECTIONS = int(os.environ.get('WINDIVERT_MAX_CONNECTIONS', 1024))


class IPProtocol(IntEnum):
    TCP = socket.IPPROTO_TCP
    UDP = socket.IPPROTO_UDP

    @property
    def filter(self):
        return "tcp" if self == IPProtocol.TCP else "udp"


class IPFamily(IntEnum):
    IPv4 = socket.AF_INET
    IPv6 = socket.AF_INET6

    @staticmethod
    def from_ip_version(version):
        return IPFamily.IPv6 if version == 4 else IPFamily.IPv4

    @property
    def filter(self):
        return "ip" if self == socket.AF_INET else "ipv6"

    @property
    def version(self):
        return 4 if self == socket.AF_INET else 6

    @property
    def loopback_addr(self):
        return ip_address("127.0.0.1" if self == socket.AF_INET else "::1")


class ConnState(IntEnum):
    TCP_SYN_SENT = 11  # SYN sent
    TCP_ESTABLISHED = 12  # SYN+ACK received
    TCP_FIN_WAIT_1 = 91  # FIN sent
    TCP_CLOSE_WAIT = 92  # FIN received

    @staticmethod
    def can_timeout(state):
        return state in (ConnState.TCP_SYN_SENT, ConnState.TCP_FIN_WAIT_1, ConnState.TCP_CLOSE_WAIT)


def repr_pkt(p):
    try:
        direction = p.direction.name
        if p.is_loopback:
            direction += "/lo"
    except AttributeError:  # windivert > 2.0
        direction = 'OUT' if p.address.Outbound == 1 else 'IN'
        if p.address.Loopback == 1:
            direction += '/lo'
    r = f"{direction} {p.src_addr}:{p.src_port}->{p.dst_addr}:{p.dst_port}"
    if p.tcp:
        t = p.tcp
        r += f" {len(t.payload)}B ("
        r += "+".join(
            f.upper() for f in ("fin", "syn", "rst", "psh", "ack", "urg", "ece", "cwr", "ns") if getattr(t, f)
        )
        r += f") SEQ#{t.seq_num}"
        if t.ack:
            r += f" ACK#{t.ack_num}"
        r += f" WZ={t.window_size}"
    else:
        r += f" {p.udp=} {p.icmpv4=} {p.icmpv6=}"
    return f"<Pkt {r}>"


def synchronized_method(lock):
    def decorator(method):
        @wraps(method)
        def wrapped(self, *args, **kwargs):
            with getattr(self, lock):
                return method(self, *args, **kwargs)

        return wrapped

    return decorator

class ConnTrack:
|
|
||||||
|
|
||||||
_instance = None
|
|
||||||
|
|
||||||
def __new__(cls, *args, **kwargs):
|
|
||||||
if not cls._instance:
|
|
||||||
cls._instance = object.__new__(cls)
|
|
||||||
return cls._instance
|
|
||||||
raise RuntimeError("ConnTrack can not be instantiated multiple times")
|
|
||||||
|
|
||||||
def __init__(self, name, max_connections=0) -> None:
|
|
||||||
self.struct_full_tuple = Struct(">" + "".join(("B", "B", "16s", "H", "16s", "H", "L", "B")))
|
|
||||||
self.struct_src_tuple = Struct(">" + "".join(("B", "B", "16s", "H")))
|
|
||||||
self.struct_state_tuple = Struct(">" + "".join(("L", "B")))
|
|
||||||
|
|
||||||
try:
|
|
||||||
self.max_connections = max_connections
|
|
||||||
self.shm_list = shared_memory.ShareableList(
|
|
||||||
[bytes(self.struct_full_tuple.size) for _ in range(max_connections)], name=name
|
|
||||||
)
|
|
||||||
self.is_owner = True
|
|
||||||
self.next_slot = 0
|
|
||||||
self.used_slots = set()
|
|
||||||
self.rlock = threading.RLock()
|
|
||||||
except FileExistsError:
|
|
||||||
self.is_owner = False
|
|
||||||
self.shm_list = shared_memory.ShareableList(name=name)
|
|
||||||
self.max_connections = len(self.shm_list)
|
|
||||||
|
|
||||||
debug2(
|
|
||||||
f"ConnTrack: is_owner={self.is_owner} cap={len(self.shm_list)} item_sz={self.struct_full_tuple.size}B"
|
|
||||||
f"shm_name={self.shm_list.shm.name} shm_sz={self.shm_list.shm.size}B"
|
|
||||||
)
|
|
||||||
|
|
||||||
@synchronized_method("rlock")
|
|
||||||
def add(self, proto, src_addr, src_port, dst_addr, dst_port, state):
|
|
||||||
if not self.is_owner:
|
|
||||||
raise RuntimeError("Only owner can mutate ConnTrack")
|
|
||||||
if len(self.used_slots) >= self.max_connections:
|
|
||||||
raise RuntimeError(f"No slot available in ConnTrack {len(self.used_slots)}/{self.max_connections}")
|
|
||||||
|
|
||||||
if self.get(proto, src_addr, src_port):
|
|
||||||
return
|
|
||||||
|
|
||||||
for _ in range(self.max_connections):
|
|
||||||
if self.next_slot not in self.used_slots:
|
|
||||||
break
|
|
||||||
self.next_slot = (self.next_slot + 1) % self.max_connections
|
|
||||||
else:
|
|
||||||
raise RuntimeError("No slot available in ConnTrack") # should not be here
|
|
||||||
|
|
||||||
src_addr = ip_address(src_addr)
|
|
||||||
dst_addr = ip_address(dst_addr)
|
|
||||||
assert src_addr.version == dst_addr.version
|
|
||||||
ip_version = src_addr.version
|
|
||||||
state_epoch = int(time.time())
|
|
||||||
entry = (proto, ip_version, src_addr.packed, src_port, dst_addr.packed, dst_port, state_epoch, state)
|
|
||||||
packed = self.struct_full_tuple.pack(*entry)
|
|
||||||
self.shm_list[self.next_slot] = packed
|
|
||||||
self.used_slots.add(self.next_slot)
|
|
||||||
proto = IPProtocol(proto)
|
|
||||||
debug3(
|
|
||||||
f"ConnTrack: added ({proto.name} {src_addr}:{src_port}->{dst_addr}:{dst_port} @{state_epoch}:{state.name}) to "
|
|
||||||
f"slot={self.next_slot} | #ActiveConn={len(self.used_slots)}"
|
|
||||||
)
|
|
||||||
|
|
||||||
@synchronized_method("rlock")
|
|
||||||
def update(self, proto, src_addr, src_port, state):
|
|
||||||
if not self.is_owner:
|
|
||||||
raise RuntimeError("Only owner can mutate ConnTrack")
|
|
||||||
src_addr = ip_address(src_addr)
|
|
||||||
packed = self.struct_src_tuple.pack(proto, src_addr.version, src_addr.packed, src_port)
|
|
||||||
for i in self.used_slots:
|
|
||||||
if self.shm_list[i].startswith(packed):
|
|
||||||
state_epoch = int(time.time())
|
|
||||||
self.shm_list[i] = self.shm_list[i][:-5] + self.struct_state_tuple.pack(state_epoch, state)
|
|
||||||
debug3(
|
|
||||||
f"ConnTrack: updated ({proto.name} {src_addr}:{src_port} @{state_epoch}:{state.name}) from slot={i} | "
|
|
||||||
f"#ActiveConn={len(self.used_slots)}"
|
|
||||||
)
|
|
||||||
return self._unpack(self.shm_list[i])
|
|
||||||
else:
|
|
||||||
debug3(
|
|
||||||
f"ConnTrack: ({proto.name} src={src_addr}:{src_port}) is not found to update to {state.name} | "
|
|
||||||
f"#ActiveConn={len(self.used_slots)}"
|
|
||||||
)
|
|
||||||
|
|
||||||
@synchronized_method("rlock")
|
|
||||||
def remove(self, proto, src_addr, src_port):
|
|
||||||
if not self.is_owner:
|
|
||||||
raise RuntimeError("Only owner can mutate ConnTrack")
|
|
||||||
src_addr = ip_address(src_addr)
|
|
||||||
packed = self.struct_src_tuple.pack(proto, src_addr.version, src_addr.packed, src_port)
|
|
||||||
for i in self.used_slots:
|
|
||||||
if self.shm_list[i].startswith(packed):
|
|
||||||
conn = self._unpack(self.shm_list[i])
|
|
||||||
self.shm_list[i] = b""
|
|
||||||
self.used_slots.remove(i)
|
|
||||||
debug3(
|
|
||||||
f"ConnTrack: removed ({proto.name} src={src_addr}:{src_port} state={conn.state.name}) from slot={i} | "
|
|
||||||
f"#ActiveConn={len(self.used_slots)}"
|
|
||||||
)
|
|
||||||
return conn
|
|
||||||
else:
|
|
||||||
debug3(
|
|
||||||
f"ConnTrack: ({proto.name} src={src_addr}:{src_port}) is not found to remove |"
|
|
||||||
f" #ActiveConn={len(self.used_slots)}"
|
|
||||||
)
|
|
||||||
|
|
||||||
def get(self, proto, src_addr, src_port):
|
|
||||||
src_addr = ip_address(src_addr)
|
|
||||||
packed = self.struct_src_tuple.pack(proto, src_addr.version, src_addr.packed, src_port)
|
|
||||||
for entry in self.shm_list:
|
|
||||||
if entry and entry.startswith(packed):
|
|
||||||
return self._unpack(entry)
|
|
||||||
|
|
||||||
def dump(self):
|
|
||||||
for entry in self.shm_list:
|
|
||||||
if not entry:
|
|
||||||
continue
|
|
||||||
conn = self._unpack(entry)
|
|
||||||
proto, ip_version, src_addr, src_port, dst_addr, dst_port, state_epoch, state = conn
|
|
||||||
log(f"{proto.name}/{ip_version} {src_addr}:{src_port} -> {dst_addr}:{dst_port} {state.name}@{state_epoch}")
|
|
||||||
|
|
||||||
@synchronized_method("rlock")
|
|
||||||
def gc(self, connection_timeout_sec=15):
|
|
||||||
# self.dump()
|
|
||||||
now = int(time.time())
|
|
||||||
n = 0
|
|
||||||
for i in tuple(self.used_slots):
|
|
||||||
state_packed = self.shm_list[i][-5:]
|
|
||||||
(state_epoch, state) = self.struct_state_tuple.unpack(state_packed)
|
|
||||||
if (now - state_epoch) < connection_timeout_sec:
|
|
||||||
continue
|
|
||||||
if ConnState.can_timeout(state):
|
|
||||||
conn = self._unpack(self.shm_list[i])
|
|
||||||
self.shm_list[i] = b""
|
|
||||||
self.used_slots.remove(i)
|
|
||||||
n += 1
|
|
||||||
debug3(
|
|
||||||
f"ConnTrack: GC: removed ({conn.protocol.name} src={conn.src_addr}:{conn.src_port} state={conn.state.name})"
|
|
||||||
f" from slot={i} | #ActiveConn={len(self.used_slots)}"
|
|
||||||
)
|
|
||||||
debug3(f"ConnTrack: GC: collected {n} connections | #ActiveConn={len(self.used_slots)}")
|
|
||||||
|
|
||||||
def _unpack(self, packed):
|
|
||||||
(
|
|
||||||
proto,
|
|
||||||
ip_version,
|
|
||||||
src_addr_packed,
|
|
||||||
src_port,
|
|
||||||
dst_addr_packed,
|
|
||||||
dst_port,
|
|
||||||
state_epoch,
|
|
||||||
state,
|
|
||||||
) = self.struct_full_tuple.unpack(packed)
|
|
||||||
dst_addr = ip_address(dst_addr_packed if ip_version == 6 else dst_addr_packed[:4]).exploded
|
|
||||||
src_addr = ip_address(src_addr_packed if ip_version == 6 else src_addr_packed[:4]).exploded
|
|
||||||
proto = IPProtocol(proto)
|
|
||||||
state = ConnState(state)
|
|
||||||
return ConnectionTuple(proto, ip_version, src_addr, src_port, dst_addr, dst_port, state_epoch, state)
|
|
||||||
|
|
||||||
def __iter__(self):
|
|
||||||
def conn_iter():
|
|
||||||
for i in self.used_slots:
|
|
||||||
yield self._unpack(self.shm_list[i])
|
|
||||||
|
|
||||||
return conn_iter()
|
|
||||||
|
|
||||||
def __repr__(self):
|
|
||||||
return f"<ConnTrack(n={len(self.used_slots) if self.is_owner else '?'},cap={len(self.shm_list)},owner={self.is_owner})>"
|
|
||||||
|
|
||||||
|
|
||||||
class Method(BaseMethod):
|
|
||||||
|
|
||||||
network_config = {}
|
|
||||||
|
|
||||||
def __init__(self, name):
|
|
||||||
super().__init__(name)
|
|
||||||
|
|
||||||
def _get_bind_address_for_port(self, port, family):
|
|
||||||
proto = "TCPv6" if family.version == 6 else "TCP"
|
|
||||||
for line in subprocess.check_output(["netstat", "-a", "-n", "-p", proto]).decode(errors='ignore').splitlines():
|
|
||||||
try:
|
|
||||||
_, local_addr, _, state, *_ = re.split(r"\s+", line.strip())
|
|
||||||
except ValueError:
|
|
||||||
continue
|
|
||||||
port_suffix = ":" + str(port)
|
|
||||||
if state == "LISTENING" and local_addr.endswith(port_suffix):
|
|
||||||
return ip_address(local_addr[:-len(port_suffix)].strip("[]"))
|
|
||||||
raise Fatal("Could not find listening address for {}/{}".format(port, proto))
|
|
||||||
|
|
||||||
def setup_firewall(self, proxy_port, dnsport, nslist, family, subnets, udp, user, group, tmark):
|
|
||||||
debug2(f"{proxy_port=}, {dnsport=}, {nslist=}, {family=}, {subnets=}, {udp=}, {user=}, {group=} {tmark=}")
|
|
||||||
|
|
||||||
if nslist or user or udp or group:
|
|
||||||
raise NotImplementedError("user, group, nslist, udp are not supported")
|
|
||||||
|
|
||||||
family = IPFamily(family)
|
|
||||||
|
|
||||||
proxy_ip = None
|
|
||||||
# using loopback only proxy binding won't work with windivert.
|
|
||||||
# See: https://github.com/basil00/Divert/issues/17#issuecomment-341100167 https://github.com/basil00/Divert/issues/82)
|
|
||||||
# As a workaround, finding another interface ip instead. (client should not bind proxy to loopback address)
|
|
||||||
proxy_bind_addr = self._get_bind_address_for_port(proxy_port, family)
|
|
||||||
if proxy_bind_addr.is_loopback:
|
|
||||||
raise Fatal("Windivert method requires proxy to be reachable by a non loopback address.")
|
|
||||||
if not proxy_bind_addr.is_unspecified:
|
|
||||||
proxy_ip = proxy_bind_addr
|
|
||||||
else:
|
|
||||||
local_addresses = [ip_address(info[4][0]) for info in socket.getaddrinfo(socket.gethostname(), 0, family=family)]
|
|
||||||
for addr in local_addresses:
|
|
||||||
if not addr.is_loopback and not addr.is_link_local:
|
|
||||||
proxy_ip = addr
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
raise Fatal("Windivert method requires proxy to be reachable by a non loopback address."
|
|
||||||
f"No address found for {family.name} in {local_addresses}")
|
|
||||||
debug2(f"Found non loopback address to connect to proxy: {proxy_ip}")
|
|
||||||
subnet_addresses = []
|
|
||||||
for (_, mask, exclude, network_addr, fport, lport) in subnets:
|
|
||||||
if fport and lport:
|
|
||||||
if lport > fport:
|
|
||||||
raise Fatal("lport must be less than or equal to fport")
|
|
||||||
ports = (fport, lport)
|
|
||||||
else:
|
|
||||||
ports = None
|
|
||||||
subnet_addresses.append((ip_network(f"{network_addr}/{mask}"), ports, exclude))
|
|
||||||
|
|
||||||
self.network_config[family] = {
|
|
||||||
"subnets": subnet_addresses,
|
|
||||||
"nslist": nslist,
|
|
||||||
"proxy_addr": (proxy_ip, proxy_port)
|
|
||||||
}
|
|
||||||
|
|
||||||
def wait_for_firewall_ready(self, sshuttle_pid):
|
|
||||||
debug2(f"network_config={self.network_config}")
|
|
||||||
self.conntrack = ConnTrack(f"sshuttle-windivert-{sshuttle_pid}", WINDIVERT_MAX_CONNECTIONS)
|
|
||||||
if not self.conntrack.is_owner:
|
|
||||||
raise Fatal("ConnTrack should be owner in wait_for_firewall_ready()")
|
|
||||||
thread_target_funcs = (self._egress_divert, self._ingress_divert, self._connection_gc)
|
|
||||||
ready_events = []
|
|
||||||
for fn in thread_target_funcs:
|
|
||||||
ev = threading.Event()
|
|
||||||
ready_events.append(ev)
|
|
||||||
|
|
||||||
def _target():
|
|
||||||
try:
|
|
||||||
fn(ev.set)
|
|
||||||
except Exception:
|
|
||||||
debug2(f"thread {fn.__name__} exiting due to: " + traceback.format_exc())
|
|
||||||
sys.stdin.close() # this will exist main thread
|
|
||||||
sys.stdout.close()
|
|
||||||
|
|
||||||
threading.Thread(name=fn.__name__, target=_target, daemon=True).start()
|
|
||||||
for ev in ready_events:
|
|
||||||
if not ev.wait(5): # at most 5 sec
|
|
||||||
raise Fatal("timeout in wait_for_firewall_ready()")
|
|
||||||
|
|
||||||
def restore_firewall(self, port, family, udp, user, group):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def get_supported_features(self):
|
|
||||||
result = super(Method, self).get_supported_features()
|
|
||||||
result.loopback_proxy_port = False
|
|
||||||
result.user = False
|
|
||||||
result.dns = False
|
|
||||||
# ipv6 only able to support with Windivert 2.x due to bugs in filter parsing
|
|
||||||
# TODO(nom3ad): Enable ipv6 once https://github.com/ffalcinelli/pydivert/pull/57 merged
|
|
||||||
result.ipv6 = False
|
|
||||||
return result
|
|
||||||
|
|
||||||
def get_tcp_dstip(self, sock):
|
|
||||||
if not hasattr(self, "conntrack"):
|
|
||||||
self.conntrack = ConnTrack(f"sshuttle-windivert-{os.getpid()}")
|
|
||||||
if self.conntrack.is_owner:
|
|
||||||
raise Fatal("ConnTrack should not be owner in get_tcp_dstip()")
|
|
||||||
|
|
||||||
src_addr, src_port = sock.getpeername()
|
|
||||||
c = self.conntrack.get(IPProtocol.TCP, src_addr, src_port)
|
|
||||||
if not c:
|
|
||||||
return (src_addr, src_port)
|
|
||||||
return (c.dst_addr, c.dst_port)
|
|
||||||
|
|
||||||
def is_supported(self):
|
|
||||||
if sys.platform == "win32":
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
def _egress_divert(self, ready_cb):
|
|
||||||
"""divert outgoing packets to proxy"""
|
|
||||||
proto = IPProtocol.TCP
|
|
||||||
filter = f"outbound and {proto.filter}"
|
|
||||||
af_filters = []
|
|
||||||
for af, c in self.network_config.items():
|
|
||||||
subnet_include_filters = []
|
|
||||||
subnet_exclude_filters = []
|
|
||||||
for ip_net, ports, exclude in c["subnets"]:
|
|
||||||
first_ip = ip_net.network_address.exploded
|
|
||||||
last_ip = ip_net.broadcast_address.exploded
|
|
||||||
if first_ip == last_ip:
|
|
||||||
_subnet_filter = f"{af.filter}.DstAddr=={first_ip}"
|
|
||||||
else:
|
|
||||||
_subnet_filter = f"{af.filter}.DstAddr>={first_ip} and {af.filter}.DstAddr<={last_ip}"
|
|
||||||
if ports:
|
|
||||||
if ports[0] == ports[1]:
|
|
||||||
_subnet_filter += f" and {proto.filter}.DstPort=={ports[0]}"
|
|
||||||
else:
|
|
||||||
_subnet_filter += f" and tcp.DstPort>={ports[0]} and tcp.DstPort<={ports[1]}"
|
|
||||||
(subnet_exclude_filters if exclude else subnet_include_filters).append(f"({_subnet_filter})")
|
|
||||||
_af_filter = f"{af.filter}"
|
|
||||||
if subnet_include_filters:
|
|
||||||
_af_filter += f" and ({' or '.join(subnet_include_filters)})"
|
|
||||||
if subnet_exclude_filters:
|
|
||||||
# TODO(noma3ad) use not() operator with Windivert2 after upgrade
|
|
||||||
_af_filter += f" and (({' or '.join(subnet_exclude_filters)})? false : true)"
|
|
||||||
proxy_ip, proxy_port = c["proxy_addr"]
|
|
||||||
# Avoids proxy outbound traffic getting directed to itself
|
|
||||||
proxy_guard_filter = f"(({af.filter}.DstAddr=={proxy_ip.exploded} and tcp.DstPort=={proxy_port})? false : true)"
|
|
||||||
_af_filter += f" and {proxy_guard_filter}"
|
|
||||||
af_filters.append(_af_filter)
|
|
||||||
if not af_filters:
|
|
||||||
raise Fatal("At least one ipv4 or ipv6 subnet is expected")
|
|
||||||
|
|
||||||
filter = f"{filter} and ({' or '.join(af_filters)})"
|
|
||||||
debug1(f"[EGRESS] {filter=}")
|
|
||||||
with pydivert.WinDivert(filter, layer=pydivert.Layer.NETWORK, flags=pydivert.Flag.DEFAULT) as w:
|
|
||||||
proxy_ipv4, proxy_ipv6 = None, None
|
|
||||||
if IPFamily.IPv4 in self.network_config:
|
|
||||||
proxy_ipv4 = self.network_config[IPFamily.IPv4]["proxy_addr"]
|
|
||||||
proxy_ipv4 = proxy_ipv4[0].exploded, proxy_ipv4[1]
|
|
||||||
if IPFamily.IPv6 in self.network_config:
|
|
||||||
proxy_ipv6 = self.network_config[IPFamily.IPv6]["proxy_addr"]
|
|
||||||
proxy_ipv6 = proxy_ipv6[0].exploded, proxy_ipv6[1]
|
|
||||||
ready_cb()
|
|
||||||
verbose = get_verbose_level()
|
|
||||||
for pkt in w:
|
|
||||||
verbose >= 3 and debug3("[EGRESS] " + repr_pkt(pkt))
|
|
||||||
if pkt.tcp.syn and not pkt.tcp.ack:
|
|
||||||
# SYN sent (start of 3-way handshake connection establishment from our side, we wait for SYN+ACK)
|
|
||||||
self.conntrack.add(
|
|
||||||
socket.IPPROTO_TCP,
|
|
||||||
pkt.src_addr,
|
|
||||||
pkt.src_port,
|
|
||||||
pkt.dst_addr,
|
|
||||||
pkt.dst_port,
|
|
||||||
ConnState.TCP_SYN_SENT,
|
|
||||||
)
|
|
||||||
if pkt.tcp.fin:
|
|
||||||
# FIN sent (start of graceful close our side, and we wait for ACK)
|
|
||||||
self.conntrack.update(IPProtocol.TCP, pkt.src_addr, pkt.src_port, ConnState.TCP_FIN_WAIT_1)
|
|
||||||
if pkt.tcp.rst:
|
|
||||||
# RST sent (initiate abrupt connection teardown from our side, so we don't expect any reply)
|
|
||||||
self.conntrack.remove(IPProtocol.TCP, pkt.src_addr, pkt.src_port)
|
|
||||||
|
|
||||||
# DNAT
|
|
||||||
if pkt.ipv4 and proxy_ipv4:
|
|
||||||
pkt.dst_addr, pkt.tcp.dst_port = proxy_ipv4
|
|
||||||
if pkt.ipv6 and proxy_ipv6:
|
|
||||||
pkt.dst_addr, pkt.tcp.dst_port = proxy_ipv6
|
|
||||||
|
|
||||||
# XXX: If we set loopback proxy address (DNAT), then we should do SNAT as well
|
|
||||||
# by setting src_addr to loopback address.
|
|
||||||
# Otherwise injecting packet will be ignored by Windows network stack
|
|
||||||
# as they packet has to cross public to private address space.
|
|
||||||
# See: https://github.com/basil00/Divert/issues/82
|
|
||||||
# Managing SNAT is more trickier, as we have to restore the original source IP address for reply packets.
|
|
||||||
# >>> pkt.dst_addr = proxy_ipv4
|
|
||||||
w.send(pkt, recalculate_checksum=True)
|
|
||||||
|
|
||||||
def _ingress_divert(self, ready_cb):
|
|
||||||
"""handles incoming packets from proxy"""
|
|
||||||
proto = IPProtocol.TCP
|
|
||||||
# Windivert treats all local process traffic as outbound, regardless of origin external/loopback iface
|
|
||||||
direction = "outbound"
|
|
||||||
proxy_addr_filters = []
|
|
||||||
for af, c in self.network_config.items():
|
|
||||||
if not c["subnets"]:
|
|
||||||
continue
|
|
||||||
proxy_ip, proxy_port = c["proxy_addr"]
|
|
||||||
# "ip.SrcAddr=={hex(int(proxy_ip))}" # only Windivert >=2 supports this
|
|
||||||
proxy_addr_filters.append(f"{af.filter}.SrcAddr=={proxy_ip.exploded} and tcp.SrcPort=={proxy_port}")
|
|
||||||
if not proxy_addr_filters:
|
|
||||||
raise Fatal("At least one ipv4 or ipv6 address is expected")
|
|
||||||
filter = f"{direction} and {proto.filter} and ({' or '.join(proxy_addr_filters)})"
|
|
||||||
debug1(f"[INGRESS] {filter=}")
|
|
||||||
with pydivert.WinDivert(filter, layer=pydivert.Layer.NETWORK, flags=pydivert.Flag.DEFAULT) as w:
|
|
||||||
ready_cb()
|
|
||||||
verbose = get_verbose_level()
|
|
||||||
for pkt in w:
|
|
||||||
verbose >= 3 and debug3("[INGRESS] " + repr_pkt(pkt))
|
|
||||||
if pkt.tcp.syn and pkt.tcp.ack:
|
|
||||||
# SYN+ACK received (connection established from proxy
|
|
||||||
conn = self.conntrack.update(IPProtocol.TCP, pkt.dst_addr, pkt.dst_port, ConnState.TCP_ESTABLISHED)
|
|
||||||
elif pkt.tcp.rst:
|
|
||||||
# RST received - Abrupt connection teardown initiated by proxy. Don't expect anymore packets
|
|
||||||
conn = self.conntrack.remove(IPProtocol.TCP, pkt.dst_addr, pkt.dst_port)
|
|
||||||
# https://wiki.wireshark.org/TCP-4-times-close.md
|
|
||||||
elif pkt.tcp.fin and pkt.tcp.ack:
|
|
||||||
# FIN+ACK received (Passive close by proxy. Don't expect any more packets. proxy expects an ACK)
|
|
||||||
conn = self.conntrack.remove(IPProtocol.TCP, pkt.dst_addr, pkt.dst_port)
|
|
||||||
elif pkt.tcp.fin:
|
|
||||||
# FIN received (proxy initiated graceful close. Expect a final ACK for a FIN packet)
|
|
||||||
conn = self.conntrack.update(IPProtocol.TCP, pkt.dst_addr, pkt.dst_port, ConnState.TCP_CLOSE_WAIT)
|
|
||||||
else:
|
|
||||||
# data fragments and ACKs
|
|
||||||
conn = self.conntrack.get(socket.IPPROTO_TCP, pkt.dst_addr, pkt.dst_port)
|
|
||||||
if not conn:
|
|
||||||
verbose >= 2 and debug2("Unexpected packet: " + repr_pkt(pkt))
|
|
||||||
continue
|
|
||||||
pkt.src_addr = conn.dst_addr
|
|
||||||
pkt.tcp.src_port = conn.dst_port
|
|
||||||
w.send(pkt, recalculate_checksum=True)
|
|
||||||
|
|
||||||
def _connection_gc(self, ready_cb):
|
|
||||||
ready_cb()
|
|
||||||
while True:
|
|
||||||
time.sleep(5)
|
|
||||||
self.conntrack.gc()
|
|
@@ -1,40 +0,0 @@
import os
import ctypes
import ctypes.util

from sshuttle.helpers import Fatal, debug1, debug2


CLONE_NEWNET = 0x40000000
NETNS_RUN_DIR = "/var/run/netns"


def enter_namespace(namespace, namespace_pid):
    if namespace:
        namespace_dir = f'{NETNS_RUN_DIR}/{namespace}'
    else:
        namespace_dir = f'/proc/{namespace_pid}/ns/net'

    if not os.path.exists(namespace_dir):
        raise Fatal('The namespace %r does not exist.' % namespace_dir)

    debug2('loading libc')
    libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)

    default_errcheck = libc.setns.errcheck

    def errcheck(ret, *args):
        if ret == -1:
            e = ctypes.get_errno()
            raise Fatal(e, os.strerror(e))
        if default_errcheck:
            return default_errcheck(ret, *args)

    libc.setns.errcheck = errcheck  # type: ignore

    debug1('Entering namespace %r' % namespace_dir)

    with open(namespace_dir) as fd:
        libc.setns(fd.fileno(), CLONE_NEWNET)

    debug1('Namespace %r successfully set' % namespace_dir)

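enter_namespace() is intended to be called once, early, so that everything the process does afterwards (sockets, firewall rules) happens inside the target network namespace. A minimal usage sketch, assuming a namespace created beforehand with `ip netns add testns`; the name and the fallback PID are made up for illustration:

# Illustrative only: prefer the named namespace, fall back to a PID's namespace.
try:
    enter_namespace('testns', None)   # opens /var/run/netns/testns
except Fatal:
    enter_namespace(None, 4242)       # opens /proc/4242/ns/net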
@@ -1,488 +0,0 @@
import re
import socket
import sys
from argparse import ArgumentParser, Action, ArgumentTypeError as Fatal

from sshuttle import __version__


# Subnet file, supporting empty lines and hash-started comment lines
def parse_subnetport_file(s):
    try:
        handle = open(s, 'r')
    except OSError:
        raise Fatal('Unable to open subnet file: %s' % s)

    raw_config_lines = handle.readlines()
    subnets = []
    for _, line in enumerate(raw_config_lines):
        line = line.strip()
        if not line:
            continue
        if line[0] == '#':
            continue
        subnets.append(parse_subnetport(line))

    return subnets


# 1.2.3.4/5:678, 1.2.3.4:567, 1.2.3.4/16 or just 1.2.3.4
# [1:2::3/64]:456, [1:2::3]:456, 1:2::3/64 or just 1:2::3
# example.com:123 or just example.com
#
# In addition, the port number can be specified as a range:
# 1.2.3.4:8000-8080.
#
# Can return multiple matches if the domain name used in the request
# has multiple IP addresses.
def parse_subnetport(s):

    if s.count(':') > 1:
        rx = r'(?:\[?(?:\*\.)?([\w\:]+)(?:/(\d+))?]?)(?::(\d+)(?:-(\d+))?)?$'
    else:
        rx = r'((?:\*\.)?[\w\.\-]+)(?:/(\d+))?(?::(\d+)(?:-(\d+))?)?$'

    m = re.match(rx, s)
    if not m:
        raise Fatal('%r is not a valid address/mask:port format' % s)

    # Ports range from fport to lport. If only one port is specified,
    # fport is defined and lport is None.
    #
    # cidr is the mask defined with the slash notation
    host, cidr, fport, lport = m.groups()
    try:
        addrinfo = socket.getaddrinfo(host, 0, 0, socket.SOCK_STREAM)
    except socket.gaierror:
        raise Fatal('Unable to resolve address: %s' % host)

    # If the address is a domain with multiple IPs and a mask is also
    # provided, proceed cautiously:
    if cidr is not None:
        addr_v6 = [a for a in addrinfo if a[0] == socket.AF_INET6]
        addr_v4 = [a for a in addrinfo if a[0] == socket.AF_INET]

        # Refuse to proceed if IPv4 and IPv6 addresses are present:
        if len(addr_v6) > 0 and len(addr_v4) > 0:
            raise Fatal("%s has IPv4 and IPv6 addresses, so the mask "
                        "of /%s is not supported. Specify the IP "
                        "addresses directly if you wish to specify "
                        "a mask." % (host, cidr))

        # Warn if a domain has multiple IPs of the same type (IPv4 vs
        # IPv6) and the mask is applied to all of the IPs.
        if len(addr_v4) > 1 or len(addr_v6) > 1:
            print("WARNING: %s has multiple IP addresses. The "
                  "mask of /%s is applied to all of the addresses."
                  % (host, cidr))

    rv = []
    for a in addrinfo:
        family, _, _, _, addr = a

        # Largest possible slash value we can use with this IP:
        max_cidr = 32 if family == socket.AF_INET else 128

        if cidr is None:  # if no mask, use largest mask
            cidr_to_use = max_cidr
        else:  # verify user-provided mask is appropriate
            cidr_to_use = int(cidr)
            if not 0 <= cidr_to_use <= max_cidr:
                raise Fatal('Slash in CIDR notation (/%d) is '
                            'not between 0 and %d'
                            % (cidr_to_use, max_cidr))

        rv.append((family, addr[0], cidr_to_use,
                   int(fport or 0), int(lport or fport or 0)))

    return rv


# 1.2.3.4:567 or just 1.2.3.4 or just 567
# [1:2::3]:456 or [1:2::3] or just [::]:567
# example.com:123 or just example.com
def parse_ipport(s):
    s = str(s)
    if s.isdigit():
        rx = r'()(\d+)$'
    elif ']' in s:
        rx = r'(?:\[([^]]+)])(?::(\d+))?$'
    else:
        rx = r'([\w\.\-]+)(?::(\d+))?$'

    m = re.match(rx, s)
    if not m:
        raise Fatal('%r is not a valid IP:port format' % s)

    host, port = m.groups()
    host = host or '0.0.0.0'
    port = int(port or 0)

    try:
        addrinfo = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
    except socket.gaierror:
        raise Fatal('Unable to resolve address: %s' % host)

    if len(addrinfo) > 1:
        print("WARNING: Host %s has more than one IP, only using one of them."
              % host)

    family, _, _, _, addr = min(addrinfo)
    # Note: addr contains (ip, port)
    return (family,) + addr[:2]


def parse_list(lst):
    """Parse a comma separated string into a list."""
    return re.split(r'[\s,]+', lst.strip()) if lst else []


def parse_namespace(namespace):
    try:
        assert re.fullmatch(
            r'(@?[a-z_A-Z]\w+(?:\.@?[a-z_A-Z]\w+)*)', namespace)
        return namespace
    except AssertionError:
        raise Fatal("%r is not a valid namespace name." % namespace)


class Concat(Action):
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        if nargs is not None:
            raise ValueError("nargs not supported")
        super(Concat, self).__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        curr_value = getattr(namespace, self.dest, None) or []
        setattr(namespace, self.dest, curr_value + values)


# Override one function in the ArgumentParser so that we can have
# better control for how we parse files containing arguments. We
# expect one argument per line, but strip whitespace/quotes from the
# beginning/end of the lines.
class MyArgumentParser(ArgumentParser):
    def convert_arg_line_to_args(self, arg_line):
        # Ignore comments
        if arg_line.startswith("#"):
            return []

        # strip whitespace at beginning and end of line
        arg_line = arg_line.strip()

        # When copying parameters from the command line to a file,
        # some users might copy the quotes they used on the command
        # line into the config file. We ignore these if the line
        # starts and ends with the same quote.
        if arg_line.startswith("'") and arg_line.endswith("'") or \
                arg_line.startswith('"') and arg_line.endswith('"'):
            arg_line = arg_line[1:-1]

        return [arg_line]

parser = MyArgumentParser(
    prog="sshuttle",
    usage="%(prog)s [-l [ip:]port] -r [user@]sshserver[:port] <subnets...>",
    fromfile_prefix_chars="@"
)
parser.add_argument(
    "subnets",
    metavar="IP/MASK[:PORT[-PORT]]...",
    nargs="*",
    type=parse_subnetport,
    help="""
    capture and forward traffic to these subnets (whitespace separated)
    """
)
parser.add_argument(
    "-l", "--listen",
    metavar="[IP:]PORT",
    help="""
    transproxy to this ip address and port number
    """
)
parser.add_argument(
    "-H", "--auto-hosts",
    action="store_true",
    help="""
    continuously scan for remote hostnames and update local /etc/hosts as
    they are found
    """
)
parser.add_argument(
    "-N", "--auto-nets",
    action="store_true",
    help="""
    automatically determine subnets to route
    """
)
parser.add_argument(
    "--dns",
    action="store_true",
    help="""
    capture local DNS requests and forward to the remote DNS server
    """
)
parser.add_argument(
    "--ns-hosts",
    metavar="IP[,IP]",
    default=[],
    type=parse_list,
    help="""
    capture and forward DNS requests made to the following servers
    (comma separated)
    """
)
parser.add_argument(
    "--to-ns",
    metavar="IP[:PORT]",
    type=parse_ipport,
    help="""
    the DNS server to forward requests to; defaults to servers in
    /etc/resolv.conf on remote side if not given.
    """
)

if sys.platform == 'win32':
    method_choices = ["auto", "windivert"]
else:
    method_choices = ["auto", "nft", "nat", "tproxy", "pf", "ipfw"]

parser.add_argument(
    "--method",
    choices=method_choices,
    metavar="TYPE",
    default="auto",
    help="""
    %(choices)s
    """
)
parser.add_argument(
    "--python",
    metavar="PATH",
    help="""
    path to python interpreter on the remote server
    """
)
parser.add_argument(
    "-r", "--remote",
    metavar="[USERNAME[:PASSWORD]@]ADDR[:PORT]",
    help="""
    ssh hostname (and optional username and password) of remote %(prog)s server
    """
)
parser.add_argument(
    "-x", "--exclude",
    metavar="IP/MASK[:PORT[-PORT]]",
    action="append",
    default=[],
    type=parse_subnetport,
    help="""
    exclude this subnet (can be used more than once)
    """
)
parser.add_argument(
    "-X", "--exclude-from",
    metavar="PATH",
    action=Concat,
    dest="exclude",
    type=parse_subnetport_file,
    help="""
    exclude the subnets in a file (whitespace separated)
    """
)
parser.add_argument(
    "-v", "--verbose",
    action="count",
    default=0,
    help="""
    increase debug message verbosity (can be used more than once)
    """
)
parser.add_argument(
    "-V", "--version",
    action="version",
version=__version__,
|
|
||||||
help="""
|
|
||||||
print the %(prog)s version number and exit
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"-e", "--ssh-cmd",
|
|
||||||
metavar="CMD",
|
|
||||||
default="ssh",
|
|
||||||
help="""
|
|
||||||
the command to use to connect to the remote [%(default)s]
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--no-cmd-delimiter",
|
|
||||||
action="store_false",
|
|
||||||
dest="add_cmd_delimiter",
|
|
||||||
help="""
|
|
||||||
do not add a double dash before the python command
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--remote-shell",
|
|
||||||
metavar="PROGRAM",
|
|
||||||
help="""
|
|
||||||
alternate remote shell program instead of defacto posix shell.
|
|
||||||
For Windows targets it would be either `cmd` or `powershell` unless something like git-bash is in use.
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--seed-hosts",
|
|
||||||
metavar="HOSTNAME[,HOSTNAME]",
|
|
||||||
default=[],
|
|
||||||
help="""
|
|
||||||
comma-separated list of hostnames for initial scan (may be used with
|
|
||||||
or without --auto-hosts)
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--no-latency-control",
|
|
||||||
action="store_false",
|
|
||||||
dest="latency_control",
|
|
||||||
help="""
|
|
||||||
sacrifice latency to improve bandwidth benchmarks
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--latency-buffer-size",
|
|
||||||
metavar="SIZE",
|
|
||||||
type=int,
|
|
||||||
default=32768,
|
|
||||||
dest="latency_buffer_size",
|
|
||||||
help="""
|
|
||||||
size of latency control buffer
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--wrap",
|
|
||||||
metavar="NUM",
|
|
||||||
type=int,
|
|
||||||
help="""
|
|
||||||
restart counting channel numbers after this number (for testing)
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--disable-ipv6",
|
|
||||||
action="store_true",
|
|
||||||
help="""
|
|
||||||
disable IPv6 support
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"-D", "--daemon",
|
|
||||||
action="store_true",
|
|
||||||
help="""
|
|
||||||
run in the background as a daemon
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"-s", "--subnets",
|
|
||||||
metavar="PATH",
|
|
||||||
action=Concat,
|
|
||||||
dest="subnets_file",
|
|
||||||
default=[],
|
|
||||||
type=parse_subnetport_file,
|
|
||||||
help="""
|
|
||||||
file where the subnets are stored, instead of on the command line
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--syslog",
|
|
||||||
action="store_true",
|
|
||||||
help="""
|
|
||||||
send log messages to syslog (default if you use --daemon)
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--pidfile",
|
|
||||||
metavar="PATH",
|
|
||||||
default="./sshuttle.pid",
|
|
||||||
help="""
|
|
||||||
pidfile name (only if using --daemon) [%(default)s]
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--user",
|
|
||||||
help="""
|
|
||||||
apply all the rules only to this linux user
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--group",
|
|
||||||
help="""
|
|
||||||
apply all the rules only to this linux group
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--firewall",
|
|
||||||
action="store_true",
|
|
||||||
help="""
|
|
||||||
(internal use only)
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--hostwatch",
|
|
||||||
action="store_true",
|
|
||||||
help="""
|
|
||||||
(internal use only)
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--sudoers-no-modify",
|
|
||||||
action="store_true",
|
|
||||||
help="""
|
|
||||||
Prints a sudo configuration to STDOUT which allows a user to
|
|
||||||
run sshuttle without a password. This option is INSECURE because,
|
|
||||||
with some cleverness, it also allows the user to run any command
|
|
||||||
as root without a password. The output also includes a suggested
|
|
||||||
method for you to install the configuration.
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--sudoers-user",
|
|
||||||
default="",
|
|
||||||
help="""
|
|
||||||
Set the user name or group with %%group_name for passwordless operation.
|
|
||||||
Default is the current user. Only works with the --sudoers-no-modify option.
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--no-sudo-pythonpath",
|
|
||||||
action="store_false",
|
|
||||||
dest="sudo_pythonpath",
|
|
||||||
help="""
|
|
||||||
do not set PYTHONPATH when invoking sudo
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"-t", "--tmark",
|
|
||||||
metavar="[MARK]",
|
|
||||||
default="0x01",
|
|
||||||
help="""
|
|
||||||
tproxy optional traffic mark with provided MARK value in
|
|
||||||
hexadecimal (default '0x01')
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
|
|
||||||
if sys.platform == 'linux':
|
|
||||||
net_ns_group = parser.add_mutually_exclusive_group(
|
|
||||||
required=False)
|
|
||||||
|
|
||||||
net_ns_group.add_argument(
|
|
||||||
'--namespace',
|
|
||||||
type=parse_namespace,
|
|
||||||
help="Run inside of a net namespace with the given name."
|
|
||||||
)
|
|
||||||
net_ns_group.add_argument(
|
|
||||||
'--namespace-pid',
|
|
||||||
type=int,
|
|
||||||
help="""
|
|
||||||
Run inside the net namespace used by the process with
|
|
||||||
the given pid."""
|
|
||||||
)
|
|
@@ -1,63 +0,0 @@
"""When sshuttle is run via a systemd service file, we can communicate
|
|
||||||
to systemd about the status of the sshuttle process. In particular, we
|
|
||||||
can send READY status to tell systemd that sshuttle has completed
|
|
||||||
startup and send STOPPING to indicate that sshuttle is beginning
|
|
||||||
shutdown.
|
|
||||||
|
|
||||||
For details, see:
|
|
||||||
https://www.freedesktop.org/software/systemd/man/sd_notify.html
|
|
||||||
"""
|
|
||||||
|
|
||||||
import socket
|
|
||||||
import os
|
|
||||||
|
|
||||||
from sshuttle.helpers import debug1
|
|
||||||
|
|
||||||
|
|
||||||
def _notify(message):
|
|
||||||
"""Send a notification message to systemd."""
|
|
||||||
addr = os.environ.get("NOTIFY_SOCKET", None)
|
|
||||||
|
|
||||||
if not addr or len(addr) == 1 or addr[0] not in ('/', '@'):
|
|
||||||
return False
|
|
||||||
|
|
||||||
addr = '\0' + addr[1:] if addr[0] == '@' else addr
|
|
||||||
|
|
||||||
try:
|
|
||||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
|
|
||||||
except (OSError, IOError) as e:
|
|
||||||
debug1("Error creating socket to notify systemd: %s" % e)
|
|
||||||
return False
|
|
||||||
|
|
||||||
if not message:
|
|
||||||
return False
|
|
||||||
|
|
||||||
assert isinstance(message, bytes)
|
|
||||||
|
|
||||||
try:
|
|
||||||
return (sock.sendto(message, addr) > 0)
|
|
||||||
except (OSError, IOError) as e:
|
|
||||||
debug1("Error notifying systemd: %s" % e)
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def send(*messages):
|
|
||||||
"""Send multiple messages to systemd."""
|
|
||||||
return _notify(b'\n'.join(messages))
|
|
||||||
|
|
||||||
|
|
||||||
def ready():
|
|
||||||
"""Constructs a message that is appropriate to send upon completion of
|
|
||||||
sshuttle startup."""
|
|
||||||
return b"READY=1"
|
|
||||||
|
|
||||||
|
|
||||||
def stop():
|
|
||||||
"""Constructs a message that is appropriate to send when sshuttle is
|
|
||||||
beginning to shutdown."""
|
|
||||||
return b"STOPPING=1"
|
|
||||||
|
|
||||||
|
|
||||||
def status(message):
|
|
||||||
"""Constructs a status message to be sent to systemd."""
|
|
||||||
return b"STATUS=%s" % message.encode('utf8')
|
|
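# Illustrative usage (not part of the original source; the import name below
# is assumed): callers batch messages so systemd receives a single datagram:
#
#   import sshuttle.sdnotify as sdnotify
#   sdnotify.send(sdnotify.ready(), sdnotify.status('Connected.'))
#   ...
#   sdnotify.send(sdnotify.stop())
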
@@ -1,442 +0,0 @@
import re
import struct
import socket
import traceback
import time
import sys
import os
import io


import sshuttle.ssnet as ssnet
import sshuttle.helpers as helpers
import sshuttle.hostwatch as hostwatch
import subprocess as ssubprocess
from sshuttle.ssnet import Handler, Proxy, Mux, MuxWrapper
from sshuttle.helpers import b, log, debug1, debug2, debug3, Fatal, \
    get_random_nameserver, which, get_env, SocketRWShim


def _ipmatch(ipstr):
    # FIXME: IPv4 only
    if ipstr == 'default':
        ipstr = '0.0.0.0/0'
    m = re.match(r'^(\d+(\.\d+(\.\d+(\.\d+)?)?)?)(?:/(\d+))?$', ipstr)
    if m:
        g = m.groups()
        ips = g[0]
        width = int(g[4] or 32)
        if g[1] is None:
            ips += '.0.0.0'
            width = min(width, 8)
        elif g[2] is None:
            ips += '.0.0'
            width = min(width, 16)
        elif g[3] is None:
            ips += '.0'
            width = min(width, 24)
        return (struct.unpack('!I', socket.inet_aton(ips))[0], width)


def _ipstr(ip, width):
    # FIXME: IPv4 only
    if width >= 32:
        return ip
    else:
        return "%s/%d" % (ip, width)


def _maskbits(netmask):
    # FIXME: IPv4 only
    if not netmask:
        return 32
    for i in range(32):
        if netmask[0] & _shl(1, i):
            return 32 - i
    return 0


def _shl(n, bits):
    return n * int(2 ** bits)

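# Illustrative example (not part of the original source): _ipmatch() returns a
# (packed_ip_as_int, prefix_width) pair and pads short dotted quads:
#
#   _ipmatch('10')            # -> (0x0A000000, 8), '10' padded to 10.0.0.0/8
#   _ipmatch('192.168.1/24')  # -> (0xC0A80100, 24)
#   _ipmatch('default')       # -> (0, 0), i.e. 0.0.0.0/0
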
def _route_netstat(line):
|
|
||||||
cols = line.split(None)
|
|
||||||
if len(cols) < 3:
|
|
||||||
return None, None
|
|
||||||
ipw = _ipmatch(cols[0])
|
|
||||||
maskw = _ipmatch(cols[2]) # linux only
|
|
||||||
mask = _maskbits(maskw) # returns 32 if maskw is null
|
|
||||||
return ipw, mask
|
|
||||||
|
|
||||||
|
|
||||||
def _route_iproute(line):
|
|
||||||
ipm = line.split(None, 1)[0]
|
|
||||||
if '/' not in ipm:
|
|
||||||
return None, None
|
|
||||||
ip, mask = ipm.split('/')
|
|
||||||
ipw = _ipmatch(ip)
|
|
||||||
return ipw, int(mask)
|
|
||||||
|
|
||||||
|
|
||||||
def _route_windows(line):
|
|
||||||
if " On-link " not in line:
|
|
||||||
return None, None
|
|
||||||
dest, net_mask = re.split(r'\s+', line.strip())[:2]
|
|
||||||
if net_mask == "255.255.255.255":
|
|
||||||
return None, None
|
|
||||||
for p in ('127.', '0.', '224.', '169.254.'):
|
|
||||||
if dest.startswith(p):
|
|
||||||
return None, None
|
|
||||||
ipw = _ipmatch(dest)
|
|
||||||
mask = _maskbits(_ipmatch(net_mask))
|
|
||||||
return ipw, mask
|
|
||||||
|
|
||||||
|
|
||||||
def _list_routes(argv, extract_route):
|
|
||||||
# FIXME: IPv4 only
|
|
||||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, env=get_env())
|
|
||||||
routes = []
|
|
||||||
for line in p.stdout:
|
|
||||||
if not line.strip():
|
|
||||||
continue
|
|
||||||
ipw, mask = extract_route(line.decode("ASCII"))
|
|
||||||
if not ipw:
|
|
||||||
continue
|
|
||||||
width = min(ipw[1], mask)
|
|
||||||
ip = ipw[0] & _shl(_shl(1, width) - 1, 32 - width)
|
|
||||||
routes.append(
|
|
||||||
(socket.AF_INET, socket.inet_ntoa(struct.pack('!I', ip)), width))
|
|
||||||
rv = p.wait()
|
|
||||||
if rv != 0:
|
|
||||||
log('WARNING: %r returned %d' % (argv, rv))
|
|
||||||
|
|
||||||
return routes
|
|
||||||
|
|
||||||
|
|
||||||
def list_routes():
|
|
||||||
if sys.platform == 'win32':
|
|
||||||
routes = _list_routes(['route', 'PRINT', '-4'], _route_windows)
|
|
||||||
else:
|
|
||||||
if which('ip'):
|
|
||||||
routes = _list_routes(['ip', 'route'], _route_iproute)
|
|
||||||
elif which('netstat'):
|
|
||||||
routes = _list_routes(['netstat', '-rn'], _route_netstat)
|
|
||||||
else:
|
|
||||||
log('WARNING: Neither "ip" nor "netstat" were found on the server. '
|
|
||||||
'--auto-nets feature will not work.')
|
|
||||||
routes = []
|
|
||||||
|
|
||||||
for (family, ip, width) in routes:
|
|
||||||
if not ip.startswith('0.') and not ip.startswith('127.'):
|
|
||||||
yield (family, ip, width)
|
|
||||||
|
|
||||||
|
|
||||||
def _exc_dump():
|
|
||||||
exc_info = sys.exc_info()
|
|
||||||
return ''.join(traceback.format_exception(*exc_info))
|
|
||||||
|
|
||||||
|
|
||||||
def start_hostwatch(seed_hosts, auto_hosts):
|
|
||||||
s1, s2 = socket.socketpair()
|
|
||||||
pid = os.fork()
|
|
||||||
if not pid:
|
|
||||||
# child
|
|
||||||
rv = 99
|
|
||||||
try:
|
|
||||||
try:
|
|
||||||
s2.close()
|
|
||||||
os.dup2(s1.fileno(), 1)
|
|
||||||
os.dup2(s1.fileno(), 0)
|
|
||||||
s1.close()
|
|
||||||
rv = hostwatch.hw_main(seed_hosts, auto_hosts) or 0
|
|
||||||
except Exception:
|
|
||||||
log('%s' % _exc_dump())
|
|
||||||
rv = 98
|
|
||||||
finally:
|
|
||||||
os._exit(rv)
|
|
||||||
s1.close()
|
|
||||||
return pid, s2
|
|
||||||
|
|
||||||
|
|
||||||
class Hostwatch:
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.pid = 0
|
|
||||||
self.sock = None
|
|
||||||
|
|
||||||
|
|
||||||
class DnsProxy(Handler):
|
|
||||||
|
|
||||||
def __init__(self, mux, chan, request, to_nameserver):
|
|
||||||
Handler.__init__(self, [])
|
|
||||||
self.timeout = time.time() + 30
|
|
||||||
self.mux = mux
|
|
||||||
self.chan = chan
|
|
||||||
self.tries = 0
|
|
||||||
self.request = request
|
|
||||||
self.peers = {}
|
|
||||||
self.to_ns_peer = None
|
|
||||||
self.to_ns_port = None
|
|
||||||
if to_nameserver is None:
|
|
||||||
self.to_nameserver = None
|
|
||||||
else:
|
|
||||||
self.to_ns_peer, self.to_ns_port = to_nameserver.split("@")
|
|
||||||
self.to_nameserver = self._addrinfo(self.to_ns_peer,
|
|
||||||
self.to_ns_port)
|
|
||||||
self.try_send()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _addrinfo(peer, port):
|
|
||||||
if int(port) == 0:
|
|
||||||
port = 53
|
|
||||||
family, _, _, _, sockaddr = socket.getaddrinfo(peer, port)[0]
|
|
||||||
return (family, sockaddr)
|
|
||||||
|
|
||||||
def try_send(self):
|
|
||||||
if self.tries >= 3:
|
|
||||||
return
|
|
||||||
self.tries += 1
|
|
||||||
|
|
||||||
if self.to_nameserver is None:
|
|
||||||
_, peer = get_random_nameserver()
|
|
||||||
port = 53
|
|
||||||
else:
|
|
||||||
peer = self.to_ns_peer
|
|
||||||
port = int(self.to_ns_port)
|
|
||||||
|
|
||||||
family, sockaddr = self._addrinfo(peer, port)
|
|
||||||
sock = socket.socket(family, socket.SOCK_DGRAM)
|
|
||||||
sock.connect(sockaddr)
|
|
||||||
|
|
||||||
self.peers[sock] = peer
|
|
||||||
|
|
||||||
debug2('DNS: sending to %r:%d (try %d)' % (peer, port, self.tries))
|
|
||||||
try:
|
|
||||||
sock.send(self.request)
|
|
||||||
self.socks.append(sock)
|
|
||||||
except socket.error:
|
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
if e.args[0] in ssnet.NET_ERRS:
|
|
||||||
# might have been spurious; try again.
|
|
||||||
# Note: these errors sometimes are reported by recv(),
|
|
||||||
# and sometimes by send(). We have to catch both.
|
|
||||||
debug2('DNS send to %r: %s' % (peer, e))
|
|
||||||
self.try_send()
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
log('DNS send to %r: %s' % (peer, e))
|
|
||||||
return
|
|
||||||
|
|
||||||
def callback(self, sock):
|
|
||||||
peer = self.peers[sock]
|
|
||||||
|
|
||||||
try:
|
|
||||||
data = sock.recv(4096)
|
|
||||||
except socket.error:
|
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
self.socks.remove(sock)
|
|
||||||
del self.peers[sock]
|
|
||||||
|
|
||||||
if e.args[0] in ssnet.NET_ERRS:
|
|
||||||
# might have been spurious; try again.
|
|
||||||
# Note: these errors sometimes are reported by recv(),
|
|
||||||
# and sometimes by send(). We have to catch both.
|
|
||||||
debug2('DNS recv from %r: %s' % (peer, e))
|
|
||||||
self.try_send()
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
log('DNS recv from %r: %s' % (peer, e))
|
|
||||||
return
|
|
||||||
debug2('DNS response: %d bytes' % len(data))
|
|
||||||
self.mux.send(self.chan, ssnet.CMD_DNS_RESPONSE, data)
|
|
||||||
self.ok = False
|
|
||||||
|
|
||||||
|
|
||||||
class UdpProxy(Handler):
|
|
||||||
|
|
||||||
def __init__(self, mux, chan, family):
|
|
||||||
sock = socket.socket(family, socket.SOCK_DGRAM)
|
|
||||||
Handler.__init__(self, [sock])
|
|
||||||
self.timeout = time.time() + 30
|
|
||||||
self.mux = mux
|
|
||||||
self.chan = chan
|
|
||||||
self.sock = sock
|
|
||||||
|
|
||||||
def send(self, dstip, data):
|
|
||||||
debug2('UDP: sending to %r port %d' % dstip)
|
|
||||||
try:
|
|
||||||
self.sock.sendto(data, dstip)
|
|
||||||
except socket.error:
|
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
log('UDP send to %r port %d: %s' % (dstip[0], dstip[1], e))
|
|
||||||
return
|
|
||||||
|
|
||||||
def callback(self, sock):
|
|
||||||
try:
|
|
||||||
data, peer = sock.recvfrom(4096)
|
|
||||||
except socket.error:
|
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
log('UDP recv from %r port %d: %s' % (peer[0], peer[1], e))
|
|
||||||
return
|
|
||||||
debug2('UDP response: %d bytes' % len(data))
|
|
||||||
hdr = b("%s,%r," % (peer[0], peer[1]))
|
|
||||||
self.mux.send(self.chan, ssnet.CMD_UDP_DATA, hdr + data)
|
|
||||||
|
|
||||||
|
|
||||||
def main(latency_control, latency_buffer_size, auto_hosts, to_nameserver,
|
|
||||||
auto_nets):
|
|
||||||
try:
|
|
||||||
helpers.logprefix = ' s: '
|
|
||||||
|
|
||||||
debug1('latency control setting = %r' % latency_control)
|
|
||||||
if latency_buffer_size:
|
|
||||||
import sshuttle.ssnet as ssnet
|
|
||||||
ssnet.LATENCY_BUFFER_SIZE = latency_buffer_size
|
|
||||||
|
|
||||||
# synchronization header
|
|
||||||
sys.stdout.write('\0\0SSHUTTLE0001')
|
|
||||||
sys.stdout.flush()
|
|
||||||
|
|
||||||
handlers = []
|
|
||||||
# get unbuffered stdin and stdout in binary mode. Equivalent to stdin.buffer/stdout.buffer (Only available in Python 3)
|
|
||||||
r, w = io.FileIO(0, mode='r'), io.FileIO(1, mode='w')
|
|
||||||
if sys.platform == 'win32':
|
|
||||||
def _deferred_exit():
|
|
||||||
time.sleep(1) # give enough time to write logs to stderr
|
|
||||||
os._exit(23)
|
|
||||||
shim = SocketRWShim(r, w, on_end=_deferred_exit)
|
|
||||||
mux = Mux(*shim.makefiles())
|
|
||||||
else:
|
|
||||||
mux = Mux(r, w)
|
|
||||||
handlers.append(mux)
|
|
||||||
|
|
||||||
debug1('auto-nets:' + str(auto_nets))
|
|
||||||
if auto_nets:
|
|
||||||
routes = list(list_routes())
|
|
||||||
debug1('available routes:')
|
|
||||||
for r in routes:
|
|
||||||
debug1(' %d/%s/%d' % r)
|
|
||||||
else:
|
|
||||||
routes = []
|
|
||||||
|
|
||||||
routepkt = ''
|
|
||||||
for r in routes:
|
|
||||||
routepkt += '%d,%s,%d\n' % r
|
|
||||||
mux.send(0, ssnet.CMD_ROUTES, b(routepkt))
|
|
||||||
|
|
||||||
hw = Hostwatch()
|
|
||||||
hw.leftover = b('')
|
|
||||||
|
|
||||||
def hostwatch_ready(sock):
|
|
||||||
assert hw.pid
|
|
||||||
content = hw.sock.recv(4096)
|
|
||||||
if content:
|
|
||||||
lines = (hw.leftover + content).split(b('\n'))
|
|
||||||
if lines[-1]:
|
|
||||||
# no terminating newline: entry isn't complete yet!
|
|
||||||
hw.leftover = lines.pop()
|
|
||||||
lines.append(b(''))
|
|
||||||
else:
|
|
||||||
hw.leftover = b('')
|
|
||||||
mux.send(0, ssnet.CMD_HOST_LIST, b('\n').join(lines))
|
|
||||||
else:
|
|
||||||
raise Fatal('hostwatch process died')
|
|
||||||
|
|
||||||
def got_host_req(data):
|
|
||||||
if not hw.pid:
|
|
||||||
(hw.pid, hw.sock) = start_hostwatch(
|
|
||||||
data.decode("ASCII").strip().split(), auto_hosts)
|
|
||||||
handlers.append(Handler(socks=[hw.sock],
|
|
||||||
callback=hostwatch_ready))
|
|
||||||
mux.got_host_req = got_host_req
|
|
||||||
|
|
||||||
def new_channel(channel, data):
|
|
||||||
(family, dstip, dstport) = data.decode("ASCII").split(',', 2)
|
|
||||||
family = int(family)
|
|
||||||
# AF_INET is the same constant on Linux and BSD but AF_INET6
|
|
||||||
# is different. As the client and server can be running on
|
|
||||||
# different platforms we can not just set the socket family
|
|
||||||
# to what comes in the wire.
|
|
||||||
if family != socket.AF_INET:
|
|
||||||
family = socket.AF_INET6
|
|
||||||
dstport = int(dstport)
|
|
||||||
outwrap = ssnet.connect_dst(family, dstip, dstport)
|
|
||||||
handlers.append(Proxy(MuxWrapper(mux, channel), outwrap))
|
|
||||||
mux.new_channel = new_channel
|
|
||||||
|
|
||||||
dnshandlers = {}
|
|
||||||
|
|
||||||
def dns_req(channel, data):
|
|
||||||
debug2('Incoming DNS request channel=%d.' % channel)
|
|
||||||
h = DnsProxy(mux, channel, data, to_nameserver)
|
|
||||||
handlers.append(h)
|
|
||||||
dnshandlers[channel] = h
|
|
||||||
mux.got_dns_req = dns_req
|
|
||||||
|
|
||||||
udphandlers = {}
|
|
||||||
|
|
||||||
def udp_req(channel, cmd, data):
|
|
||||||
debug2('Incoming UDP request channel=%d, cmd=%d' %
|
|
||||||
(channel, cmd))
|
|
||||||
if cmd == ssnet.CMD_UDP_DATA:
|
|
||||||
(dstip, dstport, data) = data.split(b(','), 2)
|
|
||||||
dstport = int(dstport)
|
|
||||||
debug2('is incoming UDP data. %r %d.' % (dstip, dstport))
|
|
||||||
h = udphandlers[channel]
|
|
||||||
h.send((dstip, dstport), data)
|
|
||||||
elif cmd == ssnet.CMD_UDP_CLOSE:
|
|
||||||
debug2('is incoming UDP close')
|
|
||||||
h = udphandlers[channel]
|
|
||||||
h.ok = False
|
|
||||||
del mux.channels[channel]
|
|
||||||
|
|
||||||
def udp_open(channel, data):
|
|
||||||
debug2('Incoming UDP open.')
|
|
||||||
family = int(data)
|
|
||||||
mux.channels[channel] = lambda cmd, data: udp_req(channel, cmd,
|
|
||||||
data)
|
|
||||||
if channel in udphandlers:
|
|
||||||
raise Fatal('UDP connection channel %d already open' %
|
|
||||||
channel)
|
|
||||||
else:
|
|
||||||
h = UdpProxy(mux, channel, family)
|
|
||||||
handlers.append(h)
|
|
||||||
udphandlers[channel] = h
|
|
||||||
mux.got_udp_open = udp_open
|
|
||||||
|
|
||||||
while mux.ok:
|
|
||||||
if hw.pid:
|
|
||||||
assert hw.pid > 0
|
|
||||||
(rpid, rv) = os.waitpid(hw.pid, os.WNOHANG)
|
|
||||||
if rpid:
|
|
||||||
raise Fatal(
|
|
||||||
'hostwatch exited unexpectedly: code 0x%04x' % rv)
|
|
||||||
|
|
||||||
ssnet.runonce(handlers, mux)
|
|
||||||
if latency_control:
|
|
||||||
mux.check_fullness()
|
|
||||||
|
|
||||||
if dnshandlers:
|
|
||||||
now = time.time()
|
|
||||||
remove = []
|
|
||||||
for channel, h in dnshandlers.items():
|
|
||||||
if h.timeout < now or not h.ok:
|
|
||||||
debug3('expiring dnsreqs channel=%d' % channel)
|
|
||||||
remove.append(channel)
|
|
||||||
h.ok = False
|
|
||||||
for channel in remove:
|
|
||||||
del dnshandlers[channel]
|
|
||||||
if udphandlers:
|
|
||||||
remove = []
|
|
||||||
for channel, h in udphandlers.items():
|
|
||||||
if not h.ok:
|
|
||||||
debug3('expiring UDP channel=%d' % channel)
|
|
||||||
remove.append(channel)
|
|
||||||
h.ok = False
|
|
||||||
for channel in remove:
|
|
||||||
del udphandlers[channel]
|
|
||||||
|
|
||||||
except Fatal as e:
|
|
||||||
log('fatal: %s' % e)
|
|
||||||
sys.exit(99)
|
|
sshuttle/ssh.py
@@ -1,255 +0,0 @@
import sys
import os
import re
import socket
import zlib
import importlib
import importlib.util
import subprocess as ssubprocess
import shlex
from shlex import quote
import ipaddress
from urllib.parse import urlparse

import sshuttle.helpers as helpers
from sshuttle.helpers import debug2, which, get_path, SocketRWShim, Fatal


def get_module_source(name):
    spec = importlib.util.find_spec(name)
    with open(spec.origin, "rt") as f:
        return f.read().encode("utf-8")


def empackage(z, name, data=None):
    if not data:
        data = get_module_source(name)
    content = z.compress(data)
    content += z.flush(zlib.Z_SYNC_FLUSH)

    return b'%s\n%d\n%s' % (name.encode("ASCII"), len(content), content)
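# Illustrative note (not part of the original source): each module is framed
# as b"<name>\n<compressed length>\n<compressed bytes>", e.g. a frame starting
# with b"sshuttle.helpers\n1234\n" tells the remote assembler to read 1234
# bytes of zlib-compressed source for that module (the length here is made up).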


def parse_hostport(rhostport):
    """
    parses the given rhostport variable, looking like this:

        [username[:password]@]host[:port]

    if only host is given, can be a hostname, IPv4/v6 address or a ssh alias
    from ~/.ssh/config

    and returns a tuple (username, password, port, host)
    """
    # leave use of default port to ssh command to prevent overwriting
    # ports configured in ~/.ssh/config when no port is given
    if rhostport is None or len(rhostport) == 0:
        return None, None, None, None
    port = None
    username = None
    password = None
    host = rhostport

    if "@" in host:
        # split username (and possible password) from the host[:port]
        username, host = host.rsplit("@", 1)
        # Fix #410 bad username error detect
        if ":" in username:
            # this will even allow for the username to be empty
            username, password = username.split(":", 1)

    if ":" in host:
        # IPv6 address and/or got a port specified

        # If it is an IPv6 address with port specification,
        # then it will look like: [::1]:22

        try:
            # try to parse host as an IP address,
            # if that works it is an IPv6 address
            host = str(ipaddress.ip_address(host))
        except ValueError:
            # if that fails parse as URL to get the port
            parsed = urlparse('//{}'.format(host))
            try:
                host = str(ipaddress.ip_address(parsed.hostname))
            except ValueError:
                # else if both fails, we have a hostname with port
                host = parsed.hostname
            port = parsed.port

    if password is None or len(password) == 0:
        password = None

    return username, password, port, host

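# Illustrative example (not part of the original source):
#
#   parse_hostport('alice:s3cret@example.com:2222')
#                                # -> ('alice', 's3cret', 2222, 'example.com')
#   parse_hostport('[::1]:22')   # -> (None, None, 22, '::1')
#   parse_hostport('myhost')     # -> (None, None, None, 'myhost')
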
def connect(ssh_cmd, rhostport, python, stderr, add_cmd_delimiter, remote_shell, options):
|
|
||||||
username, password, port, host = parse_hostport(rhostport)
|
|
||||||
if username:
|
|
||||||
rhost = "{}@{}".format(username, host)
|
|
||||||
else:
|
|
||||||
rhost = host
|
|
||||||
|
|
||||||
z = zlib.compressobj(1)
|
|
||||||
content = get_module_source('sshuttle.assembler')
|
|
||||||
optdata = ''.join("%s=%r\n" % (k, v) for (k, v) in list(options.items()))
|
|
||||||
optdata = optdata.encode("UTF8")
|
|
||||||
content2 = (empackage(z, 'sshuttle') +
|
|
||||||
empackage(z, 'sshuttle.cmdline_options', optdata) +
|
|
||||||
empackage(z, 'sshuttle.helpers') +
|
|
||||||
empackage(z, 'sshuttle.ssnet') +
|
|
||||||
empackage(z, 'sshuttle.hostwatch') +
|
|
||||||
empackage(z, 'sshuttle.server') +
|
|
||||||
b"\n")
|
|
||||||
|
|
||||||
# If the exec() program calls sys.exit(), it should exit python
|
|
||||||
# and the sys.exit(98) call won't be reached (so we try to only
|
|
||||||
# exit that way in the server). However, if the code that we
|
|
||||||
# exec() simply returns from main, then we will return from
|
|
||||||
# exec(). If the server's python process dies, it should stop
|
|
||||||
# executing and also won't reach sys.exit(98).
|
|
||||||
#
|
|
||||||
# So, we shouldn't reach sys.exit(98) and we certainly shouldn't
|
|
||||||
# reach it immediately after trying to start the server.
|
|
||||||
pyscript = r"""
|
|
||||||
import sys, os;
|
|
||||||
verbosity=%d;
|
|
||||||
stdin = os.fdopen(0, 'rb');
|
|
||||||
exec(compile(stdin.read(%d), 'assembler.py', 'exec'));
|
|
||||||
sys.exit(98);
|
|
||||||
""" % (helpers.verbose or 0, len(content))
|
|
||||||
pyscript = re.sub(r'\s+', ' ', pyscript.strip())
|
|
||||||
|
|
||||||
if not rhost:
|
|
||||||
# ignore the --python argument when running locally; we already know
|
|
||||||
# which python version works.
|
|
||||||
argv = [sys.executable, '-c', pyscript]
|
|
||||||
else:
|
|
||||||
if ssh_cmd:
|
|
||||||
sshl = shlex.split(ssh_cmd)
|
|
||||||
else:
|
|
||||||
sshl = ['ssh']
|
|
||||||
if port is not None:
|
|
||||||
portl = ["-p", str(port)]
|
|
||||||
else:
|
|
||||||
portl = []
|
|
||||||
if remote_shell == "cmd":
|
|
||||||
pycmd = '"%s" -c "%s"' % (python or 'python', pyscript)
|
|
||||||
elif remote_shell == "powershell":
|
|
||||||
for c in ('\'', ' ', ';', '(', ')', ','):
|
|
||||||
pyscript = pyscript.replace(c, '`' + c)
|
|
||||||
pycmd = '%s -c %s' % (python or 'python', pyscript)
|
|
||||||
else: # posix shell expected
|
|
||||||
if python:
|
|
||||||
pycmd = '"%s" -c "%s"' % (python, pyscript)
|
|
||||||
else:
|
|
||||||
# By default, we run the following code in a shell.
|
|
||||||
# However, with restricted shells and other unusual
|
|
||||||
# situations, there can be trouble. See the RESTRICTED
|
|
||||||
# SHELL section in "man bash" for more information. The
|
|
||||||
# code makes many assumptions:
|
|
||||||
#
|
|
||||||
# (1) That /bin/sh exists and that we can call it.
|
|
||||||
# Restricted shells often do *not* allow you to run
|
|
||||||
# programs specified with an absolute path like /bin/sh.
|
|
||||||
# Either way, if there is trouble with this, it should
|
|
||||||
# return error code 127.
|
|
||||||
#
|
|
||||||
# (2) python3 or python exists in the PATH and is
|
|
||||||
# executable. If they aren't, then exec won't work (see (4)
|
|
||||||
# below).
|
|
||||||
#
|
|
||||||
# (3) In /bin/sh, that we can redirect stderr in order to
|
|
||||||
# hide the version that "python3 -V" might print (some
|
|
||||||
# restricted shells don't allow redirection, see
|
|
||||||
# RESTRICTED SHELL section in 'man bash'). However, if we
|
|
||||||
# are in a restricted shell, we'd likely have trouble with
|
|
||||||
# assumption (1) above.
|
|
||||||
#
|
|
||||||
# (4) The 'exec' command should work except if we failed
|
|
||||||
# to exec python because it doesn't exist or isn't
|
|
||||||
# executable OR if exec isn't allowed (some restricted
|
|
||||||
# shells don't allow exec). If the exec succeeded, it will
|
|
||||||
# not return and not get to the "exit 97" command. If exec
|
|
||||||
# does return, we exit with code 97.
|
|
||||||
#
|
|
||||||
# Specifying the exact python program to run with --python
|
|
||||||
# avoids many of the issues above. However, if
|
|
||||||
# you have a restricted shell on remote, you may only be
|
|
||||||
# able to run python if it is in your PATH (and you can't
|
|
||||||
# run programs specified with an absolute path). In that
|
|
||||||
# case, sshuttle might not work at all since it is not
|
|
||||||
# possible to run python on the remote machine---even if
|
|
||||||
# it is present.
|
|
||||||
devnull = '/dev/null'
|
|
||||||
pycmd = ("P=python3; $P -V 2>%s || P=python; "
|
|
||||||
"exec \"$P\" -c %s; exit 97") % \
|
|
||||||
(devnull, quote(pyscript))
|
|
||||||
pycmd = ("/bin/sh -c {}".format(quote(pycmd)))
|
|
||||||
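# Illustrative note (not part of the original source): for this default posix
# branch the command handed to ssh ends up looking roughly like
#
#   /bin/sh -c 'P=python3; $P -V 2>/dev/null || P=python; exec "$P" -c <pyscript>; exit 97'
#
# where <pyscript> stands for the shell-quoted bootstrap built above.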
|
|
||||||
if password is not None:
|
|
||||||
os.environ['SSHPASS'] = str(password)
|
|
||||||
argv = (["sshpass", "-e"] + sshl +
|
|
||||||
portl + [rhost])
|
|
||||||
|
|
||||||
else:
|
|
||||||
argv = (sshl + portl + [rhost])
|
|
||||||
|
|
||||||
if add_cmd_delimiter:
|
|
||||||
argv += ['--', pycmd]
|
|
||||||
else:
|
|
||||||
argv += [pycmd]
|
|
||||||
|
|
||||||
# Our which() function searches for programs in get_path()
|
|
||||||
# directories (which include PATH). This step isn't strictly
|
|
||||||
# necessary if ssh is already in the user's PATH, but it makes the
|
|
||||||
# error message friendlier if the user incorrectly passes in a
|
|
||||||
# custom ssh command that we cannot find.
|
|
||||||
abs_path = which(argv[0])
|
|
||||||
if abs_path is None:
|
|
||||||
raise Fatal("Failed to find '%s' in path %s" % (argv[0], get_path()))
|
|
||||||
argv[0] = abs_path
|
|
||||||
|
|
||||||
if sys.platform != 'win32':
|
|
||||||
(s1, s2) = socket.socketpair()
|
|
||||||
pstdin, pstdout = os.dup(s1.fileno()), os.dup(s1.fileno())
|
|
||||||
|
|
||||||
def preexec_fn():
|
|
||||||
# runs in the child process
|
|
||||||
s2.close()
|
|
||||||
s1.close()
|
|
||||||
|
|
||||||
def get_server_io():
|
|
||||||
os.close(pstdin)
|
|
||||||
os.close(pstdout)
|
|
||||||
return s2.makefile("rb", buffering=0), s2.makefile("wb", buffering=0)
|
|
||||||
else:
|
|
||||||
# In Windows CPython, BSD sockets are not supported as subprocess stdio
|
|
||||||
# and select.select() used in ssnet.py won't work on Windows pipes.
|
|
||||||
# So we have to use both socketpair (for select.select) and pipes (for subprocess.Popen) together
|
|
||||||
# along with reader/writer threads to stream data between them
|
|
||||||
# NOTE: Their could be a better way. Need to investigate further on this.
|
|
||||||
# Either to use sockets as stdio for subprocess. Or to use pipes but with a select() alternative
|
|
||||||
# https://stackoverflow.com/questions/4993119/redirect-io-of-process-to-windows-socket
|
|
||||||
|
|
||||||
pstdin = ssubprocess.PIPE
|
|
||||||
pstdout = ssubprocess.PIPE
|
|
||||||
|
|
||||||
preexec_fn = None
|
|
||||||
|
|
||||||
def get_server_io():
|
|
||||||
shim = SocketRWShim(p.stdout, p.stdin, on_end=lambda: p.terminate())
|
|
||||||
return shim.makefiles()
|
|
||||||
|
|
||||||
# See: stackoverflow.com/questions/48671215/howto-workaround-of-close-fds-true-and-redirect-stdout-stderr-on-windows
|
|
||||||
close_fds = False if sys.platform == 'win32' else True
|
|
||||||
|
|
||||||
debug2("executing: %r" % argv)
|
|
||||||
p = ssubprocess.Popen(argv, stdin=pstdin, stdout=pstdout, preexec_fn=preexec_fn,
|
|
||||||
close_fds=close_fds, stderr=stderr, bufsize=0)
|
|
||||||
|
|
||||||
rfile, wfile = get_server_io()
|
|
||||||
wfile.write(content)
|
|
||||||
wfile.write(content2)
|
|
||||||
return p, rfile, wfile
|
|
@@ -1,31 +0,0 @@
import sys
import os
import subprocess as ssubprocess


_p = None


def start_syslog():
    global _p
    with open(os.devnull, 'w') as devnull:
        _p = ssubprocess.Popen(
            ['logger', '-p', 'daemon.err', '-t', 'sshuttle'],
            stdin=ssubprocess.PIPE,
            stdout=devnull,
            stderr=devnull
        )


def close_stdin():
    sys.stdin.close()


def stdout_to_syslog():
    sys.stdout.flush()
    os.dup2(_p.stdin.fileno(), sys.stdout.fileno())


def stderr_to_syslog():
    sys.stderr.flush()
    os.dup2(_p.stdin.fileno(), sys.stderr.fileno())
@@ -1,45 +0,0 @@
import os
import sys
import getpass
from uuid import uuid4


def build_config(user_name):
    """Generates a sudoers configuration to allow passwordless execution of sshuttle."""

    argv0 = os.path.abspath(sys.argv[0])
    is_python_script = argv0.endswith('.py')
    executable = f"{sys.executable} {argv0}" if is_python_script else argv0
    dist_packages = os.path.dirname(os.path.abspath(__file__))
    cmd_alias = f"SSHUTTLE{uuid4().hex[-3:].upper()}"

    template = f"""
# WARNING: If you intend to restrict a user to only running the
# sshuttle command as root, THIS CONFIGURATION IS INSECURE.
# When a user can run sshuttle as root (with or without a password),
# they can also run other commands as root because sshuttle itself
# can run a command specified by the user with the --ssh-cmd option.

# INSTRUCTIONS: Add this text to your sudo configuration to run
# sshuttle without needing to enter a sudo password. To use this
# configuration, run 'visudo /etc/sudoers.d/sshuttle_auto' as root and
# paste this text into the editor that it opens. If you want to give
# multiple users these privileges, you may wish to use different
# filenames for each one (i.e., /etc/sudoers.d/sshuttle_auto_john).

# This configuration was initially generated by the
# 'sshuttle --sudoers-no-modify' command.

Cmnd_Alias {cmd_alias} = /usr/bin/env PYTHONPATH={dist_packages} {executable} *

{user_name} ALL=NOPASSWD: {cmd_alias}
"""

    return template


def sudoers(user_name=None):
    user_name = user_name or getpass.getuser()
    content = build_config(user_name)
    sys.stdout.write(content)
    exit(0)

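# Illustrative usage (not part of the original source; file name and user are
# hypothetical): capture the generated policy and install it with visudo:
#
#   $ sshuttle --sudoers-no-modify --sudoers-user alice > sshuttle_auto
#   $ sudo visudo -c -f sshuttle_auto && sudo cp sshuttle_auto /etc/sudoers.d/
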
@@ -1,71 +1,60 @@
import sys
|
import struct, socket, errno, select
|
||||||
import struct
|
if not globals().get('skip_imports'):
|
||||||
import socket
|
from helpers import *
|
||||||
import errno
|
|
||||||
import select
|
|
||||||
import os
|
|
||||||
|
|
||||||
from sshuttle.helpers import b, log, debug1, debug2, debug3, Fatal, set_non_blocking_io
|
|
||||||
|
|
||||||
MAX_CHANNEL = 65535
|
MAX_CHANNEL = 65535
|
||||||
LATENCY_BUFFER_SIZE = 32768
|
|
||||||
|
# these don't exist in the socket module in python 2.3!
|
||||||
SHUT_RD = 0
|
SHUT_RD = 0
|
||||||
SHUT_WR = 1
|
SHUT_WR = 1
|
||||||
SHUT_RDWR = 2
|
SHUT_RDWR = 2
|
||||||
|
|
||||||
|
|
||||||
HDR_LEN = 8
|
HDR_LEN = 8
|
||||||
|
|
||||||
|
|
||||||
CMD_EXIT = 0x4200
|
CMD_EXIT = 0x4200
|
||||||
CMD_PING = 0x4201
|
CMD_PING = 0x4201
|
||||||
CMD_PONG = 0x4202
|
CMD_PONG = 0x4202
|
||||||
CMD_TCP_CONNECT = 0x4203
|
CMD_CONNECT = 0x4203
|
||||||
CMD_TCP_STOP_SENDING = 0x4204
|
CMD_STOP_SENDING = 0x4204
|
||||||
CMD_TCP_EOF = 0x4205
|
CMD_EOF = 0x4205
|
||||||
CMD_TCP_DATA = 0x4206
|
CMD_DATA = 0x4206
|
||||||
CMD_ROUTES = 0x4207
|
CMD_ROUTES = 0x4207
|
||||||
CMD_HOST_REQ = 0x4208
|
CMD_HOST_REQ = 0x4208
|
||||||
CMD_HOST_LIST = 0x4209
|
CMD_HOST_LIST = 0x4209
|
||||||
CMD_DNS_REQ = 0x420a
|
CMD_DNS_REQ = 0x420a
|
||||||
CMD_DNS_RESPONSE = 0x420b
|
CMD_DNS_RESPONSE = 0x420b
|
||||||
CMD_UDP_OPEN = 0x420c
|
|
||||||
CMD_UDP_DATA = 0x420d
|
|
||||||
CMD_UDP_CLOSE = 0x420e
|
|
||||||
|
|
||||||
cmd_to_name = {
|
cmd_to_name = {
|
||||||
CMD_EXIT: 'EXIT',
|
CMD_EXIT: 'EXIT',
|
||||||
CMD_PING: 'PING',
|
CMD_PING: 'PING',
|
||||||
CMD_PONG: 'PONG',
|
CMD_PONG: 'PONG',
|
||||||
CMD_TCP_CONNECT: 'TCP_CONNECT',
|
CMD_CONNECT: 'CONNECT',
|
||||||
CMD_TCP_STOP_SENDING: 'TCP_STOP_SENDING',
|
CMD_STOP_SENDING: 'STOP_SENDING',
|
||||||
CMD_TCP_EOF: 'TCP_EOF',
|
CMD_EOF: 'EOF',
|
||||||
CMD_TCP_DATA: 'TCP_DATA',
|
CMD_DATA: 'DATA',
|
||||||
CMD_ROUTES: 'ROUTES',
|
CMD_ROUTES: 'ROUTES',
|
||||||
CMD_HOST_REQ: 'HOST_REQ',
|
CMD_HOST_REQ: 'HOST_REQ',
|
||||||
CMD_HOST_LIST: 'HOST_LIST',
|
CMD_HOST_LIST: 'HOST_LIST',
|
||||||
CMD_DNS_REQ: 'DNS_REQ',
|
CMD_DNS_REQ: 'DNS_REQ',
|
||||||
CMD_DNS_RESPONSE: 'DNS_RESPONSE',
|
CMD_DNS_RESPONSE: 'DNS_RESPONSE',
|
||||||
CMD_UDP_OPEN: 'UDP_OPEN',
|
|
||||||
CMD_UDP_DATA: 'UDP_DATA',
|
|
||||||
CMD_UDP_CLOSE: 'UDP_CLOSE',
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
NET_ERRS = [errno.ECONNREFUSED, errno.ETIMEDOUT,
|
NET_ERRS = [errno.ECONNREFUSED, errno.ETIMEDOUT,
|
||||||
errno.EHOSTUNREACH, errno.ENETUNREACH,
|
errno.EHOSTUNREACH, errno.ENETUNREACH,
|
||||||
errno.EHOSTDOWN, errno.ENETDOWN,
|
errno.EHOSTDOWN, errno.ENETDOWN]
|
||||||
errno.ENETUNREACH, errno.ECONNABORTED,
|
|
||||||
errno.ECONNRESET]
|
|
||||||
|
|
||||||
|
|
||||||
def _add(socks, elem):
|
def _add(l, elem):
|
||||||
if elem not in socks:
|
if not elem in l:
|
||||||
socks.append(elem)
|
l.append(elem)
|
||||||
|
|
||||||
|
|
||||||
def _fds(socks):
|
def _fds(l):
|
||||||
out = []
|
out = []
|
||||||
for i in socks:
|
for i in l:
|
||||||
try:
|
try:
|
||||||
out.append(i.fileno())
|
out.append(i.fileno())
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
@ -77,13 +66,11 @@ def _fds(socks):
|
|||||||
def _nb_clean(func, *args):
|
def _nb_clean(func, *args):
|
||||||
try:
|
try:
|
||||||
return func(*args)
|
return func(*args)
|
||||||
except (OSError, socket.error):
|
except OSError, e:
|
||||||
# Note: In python2 socket.error != OSError (In python3, they are same)
|
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
if e.errno not in (errno.EWOULDBLOCK, errno.EAGAIN):
|
if e.errno not in (errno.EWOULDBLOCK, errno.EAGAIN):
|
||||||
raise
|
raise
|
||||||
else:
|
else:
|
||||||
debug3('%s: err was: %s' % (func.__name__, e))
|
debug3('%s: err was: %s\n' % (func.__name__, e))
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
@ -92,26 +79,18 @@ def _try_peername(sock):
|
|||||||
pn = sock.getpeername()
|
pn = sock.getpeername()
|
||||||
if pn:
|
if pn:
|
||||||
return '%s:%s' % (pn[0], pn[1])
|
return '%s:%s' % (pn[0], pn[1])
|
||||||
except socket.error:
|
except socket.error, e:
|
||||||
_, e = sys.exc_info()[:2]
|
if e.args[0] not in (errno.ENOTCONN, errno.ENOTSOCK):
|
||||||
if e.args[0] == errno.EINVAL:
|
|
||||||
pass
|
|
||||||
elif e.args[0] not in (errno.ENOTCONN, errno.ENOTSOCK):
|
|
||||||
raise
|
raise
|
||||||
except AttributeError:
|
|
||||||
pass
|
|
||||||
return 'unknown'
|
return 'unknown'
|
||||||
|
|
||||||
|
|
||||||
_swcount = 0
|
_swcount = 0
|
||||||
|
|
||||||
|
|
||||||
class SockWrapper:
|
class SockWrapper:
|
||||||
|
|
||||||
def __init__(self, rsock, wsock, connect_to=None, peername=None):
|
def __init__(self, rsock, wsock, connect_to=None, peername=None):
|
||||||
global _swcount
|
global _swcount
|
||||||
_swcount += 1
|
_swcount += 1
|
||||||
debug3('creating new SockWrapper (%d now exist)' % _swcount)
|
debug3('creating new SockWrapper (%d now exist)\n' % _swcount)
|
||||||
self.exc = None
|
self.exc = None
|
||||||
self.rsock = rsock
|
self.rsock = rsock
|
||||||
self.wsock = wsock
|
self.wsock = wsock
|
||||||
@ -124,9 +103,9 @@ class SockWrapper:
|
|||||||
def __del__(self):
|
def __del__(self):
|
||||||
global _swcount
|
global _swcount
|
||||||
_swcount -= 1
|
_swcount -= 1
|
||||||
debug1('%r: deleting (%d remain)' % (self, _swcount))
|
debug1('%r: deleting (%d remain)\n' % (self, _swcount))
|
||||||
if self.exc:
|
if self.exc:
|
||||||
debug1('%r: error was: %s' % (self, self.exc))
|
debug1('%r: error was: %s\n' % (self, self.exc))
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
if self.rsock == self.wsock:
|
if self.rsock == self.wsock:
|
||||||
@ -148,14 +127,19 @@ class SockWrapper:
|
|||||||
if not self.connect_to:
|
if not self.connect_to:
|
||||||
return # already connected
|
return # already connected
|
||||||
self.rsock.setblocking(False)
|
self.rsock.setblocking(False)
|
||||||
debug3('%r: trying connect to %r' % (self, self.connect_to))
|
debug3('%r: trying connect to %r\n' % (self, self.connect_to))
|
||||||
|
if socket.inet_aton(self.connect_to[0])[0] == '\0':
|
||||||
|
self.seterr(Exception("Can't connect to %r: "
|
||||||
|
"IP address starts with zero\n"
|
||||||
|
% (self.connect_to,)))
|
||||||
|
self.connect_to = None
|
||||||
|
return
|
||||||
try:
|
try:
|
||||||
self.rsock.connect(self.connect_to)
|
self.rsock.connect(self.connect_to)
|
||||||
# connected successfully (Linux)
|
# connected successfully (Linux)
|
||||||
self.connect_to = None
|
self.connect_to = None
|
||||||
except socket.error:
|
except socket.error, e:
|
||||||
_, e = sys.exc_info()[:2]
|
debug3('%r: connect result: %s\n' % (self, e))
|
||||||
debug3('%r: connect result: %s' % (self, e))
|
|
||||||
if e.args[0] == errno.EINVAL:
|
if e.args[0] == errno.EINVAL:
|
||||||
# this is what happens when you call connect() on a socket
|
# this is what happens when you call connect() on a socket
|
||||||
# that is now connected but returned EINPROGRESS last time,
|
# that is now connected but returned EINPROGRESS last time,
|
||||||
@ -165,28 +149,22 @@ class SockWrapper:
|
|||||||
realerr = self.rsock.getsockopt(socket.SOL_SOCKET,
|
realerr = self.rsock.getsockopt(socket.SOL_SOCKET,
|
||||||
socket.SO_ERROR)
|
socket.SO_ERROR)
|
||||||
e = socket.error(realerr, os.strerror(realerr))
|
e = socket.error(realerr, os.strerror(realerr))
|
||||||
debug3('%r: fixed connect result: %s' % (self, e))
|
debug3('%r: fixed connect result: %s\n' % (self, e))
|
||||||
if e.args[0] in [errno.EINPROGRESS, errno.EALREADY]:
|
if e.args[0] in [errno.EINPROGRESS, errno.EALREADY]:
|
||||||
pass # not connected yet
|
pass # not connected yet
|
||||||
elif sys.platform == 'win32' and e.args[0] == errno.WSAEWOULDBLOCK: # 10035
|
|
||||||
pass # not connected yet
|
|
||||||
elif e.args[0] == 0:
|
elif e.args[0] == 0:
|
||||||
if sys.platform == 'win32':
|
# connected successfully (weird Linux bug?)
|
||||||
# On Windows "real" error of EINVAL could be 0, when socket is in connecting state
|
# Sometimes Linux seems to return EINVAL when it isn't
|
||||||
pass
|
# invalid. This *may* be caused by a race condition
|
||||||
else:
|
# between connect() and getsockopt(SO_ERROR) (ie. it
|
||||||
# connected successfully (weird Linux bug?)
|
# finishes connecting in between the two, so there is no
|
||||||
# Sometimes Linux seems to return EINVAL when it isn't
|
# longer an error). However, I'm not sure of that.
|
||||||
# invalid. This *may* be caused by a race condition
|
#
|
||||||
# between connect() and getsockopt(SO_ERROR) (ie. it
|
# I did get at least one report that the problem went away
|
||||||
# finishes connecting in between the two, so there is no
|
# when we added this, however.
|
||||||
# longer an error). However, I'm not sure of that.
|
self.connect_to = None
|
||||||
#
|
|
||||||
# I did get at least one report that the problem went away
|
|
||||||
# when we added this, however.
|
|
||||||
self.connect_to = None
|
|
||||||
elif e.args[0] == errno.EISCONN:
|
elif e.args[0] == errno.EISCONN:
|
||||||
# connected successfully (BSD + Windows)
|
# connected successfully (BSD)
|
||||||
self.connect_to = None
|
self.connect_to = None
|
||||||
elif e.args[0] in NET_ERRS + [errno.EACCES, errno.EPERM]:
|
elif e.args[0] in NET_ERRS + [errno.EACCES, errno.EPERM]:
|
||||||
# a "normal" kind of error
|
# a "normal" kind of error
|
||||||
@ -197,21 +175,20 @@ class SockWrapper:
|
|||||||
|
|
||||||
def noread(self):
|
def noread(self):
|
||||||
if not self.shut_read:
|
if not self.shut_read:
|
||||||
debug2('%r: done reading' % self)
|
debug2('%r: done reading\n' % self)
|
||||||
self.shut_read = True
|
self.shut_read = True
|
||||||
|
#self.rsock.shutdown(SHUT_RD) # doesn't do anything anyway
|
||||||
|
|
||||||
def nowrite(self):
|
def nowrite(self):
|
||||||
if not self.shut_write:
|
if not self.shut_write:
|
||||||
debug2('%r: done writing' % self)
|
debug2('%r: done writing\n' % self)
|
||||||
self.shut_write = True
|
self.shut_write = True
|
||||||
try:
|
try:
|
||||||
self.wsock.shutdown(SHUT_WR)
|
self.wsock.shutdown(SHUT_WR)
|
||||||
except socket.error:
|
except socket.error, e:
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
self.seterr('nowrite: %s' % e)
|
self.seterr('nowrite: %s' % e)
|
||||||
|
|
||||||
@staticmethod
|
def too_full(self):
|
||||||
def too_full():
|
|
||||||
return False # fullness is determined by the socket's select() state
|
return False # fullness is determined by the socket's select() state
|
||||||
|
|
||||||
def uwrite(self, buf):
|
def uwrite(self, buf):
|
||||||
@ -219,20 +196,19 @@ class SockWrapper:
|
|||||||
return 0 # still connecting
|
return 0 # still connecting
|
||||||
self.wsock.setblocking(False)
|
self.wsock.setblocking(False)
|
||||||
try:
|
try:
|
||||||
return _nb_clean(self.wsock.send, buf)
|
return _nb_clean(os.write, self.wsock.fileno(), buf)
|
||||||
except OSError:
|
except OSError, e:
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
if e.errno == errno.EPIPE:
|
if e.errno == errno.EPIPE:
|
||||||
debug1('%r: uwrite: got EPIPE' % self)
|
debug1('%r: uwrite: got EPIPE\n' % self)
|
||||||
self.nowrite()
|
self.nowrite()
|
||||||
return 0
|
return 0
|
||||||
else:
|
else:
|
||||||
# unexpected error... stream is dead
|
# unexpected error... stream is dead
|
||||||
self.seterr('uwrite: %s' % e)
|
self.seterr('uwrite: %s' % e)
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
def write(self, buf):
|
def write(self, buf):
|
||||||
assert buf
|
assert(buf)
|
||||||
return self.uwrite(buf)
|
return self.uwrite(buf)
|
||||||
|
|
||||||
def uread(self):
|
def uread(self):
|
||||||
@ -242,11 +218,10 @@ class SockWrapper:
|
|||||||
return
|
return
|
||||||
self.rsock.setblocking(False)
|
self.rsock.setblocking(False)
|
||||||
try:
|
try:
|
||||||
return _nb_clean(self.rsock.recv, 65536)
|
return _nb_clean(os.read, self.rsock.fileno(), 65536)
|
||||||
except OSError:
|
except OSError, e:
|
||||||
_, e = sys.exc_info()[:2]
|
|
||||||
self.seterr('uread: %s' % e)
|
self.seterr('uread: %s' % e)
|
||||||
return b('') # unexpected error... we'll call it EOF
|
return '' # unexpected error... we'll call it EOF
|
||||||
|
|
||||||
def fill(self):
|
def fill(self):
|
||||||
if self.buf:
|
if self.buf:
|
||||||
@ -254,7 +229,7 @@ class SockWrapper:
|
|||||||
rb = self.uread()
|
rb = self.uread()
|
||||||
if rb:
|
if rb:
|
||||||
self.buf.append(rb)
|
self.buf.append(rb)
|
||||||
if rb == b(''): # empty string means EOF; None means temporarily empty
|
if rb == '': # empty string means EOF; None means temporarily empty
|
||||||
self.noread()
|
self.noread()
|
||||||
|
|
||||||
def copy_to(self, outwrap):
|
def copy_to(self, outwrap):
|
||||||
@ -268,8 +243,7 @@ class SockWrapper:
|
|||||||
|
|
||||||
|
|
||||||
class Handler:
|
class Handler:
|
||||||
|
def __init__(self, socks = None, callback = None):
|
||||||
def __init__(self, socks=None, callback=None):
|
|
||||||
self.ok = True
|
self.ok = True
|
||||||
self.socks = socks or []
|
self.socks = socks or []
|
||||||
if callback:
|
if callback:
|
||||||
@ -279,19 +253,18 @@ class Handler:
|
|||||||
for i in self.socks:
|
for i in self.socks:
|
||||||
_add(r, i)
|
_add(r, i)
|
||||||
|
|
||||||
def callback(self, sock):
|
def callback(self):
|
||||||
log('--no callback defined-- %r' % self)
|
log('--no callback defined-- %r\n' % self)
|
||||||
(r, _, _) = select.select(self.socks, [], [], 0)
|
(r,w,x) = select.select(self.socks, [], [], 0)
|
||||||
for s in r:
|
for s in r:
|
||||||
v = s.recv(4096)
|
v = s.recv(4096)
|
||||||
if not v:
|
if not v:
|
||||||
log('--closed-- %r' % self)
|
log('--closed-- %r\n' % self)
|
||||||
self.socks = []
|
self.socks = []
|
||||||
self.ok = False
|
self.ok = False
|
||||||
|
|
||||||
|
|
||||||
class Proxy(Handler):
|
class Proxy(Handler):
|
||||||
|
|
||||||
def __init__(self, wrap1, wrap2):
|
def __init__(self, wrap1, wrap2):
|
||||||
Handler.__init__(self, [wrap1.rsock, wrap1.wsock,
|
Handler.__init__(self, [wrap1.rsock, wrap1.wsock,
|
||||||
wrap2.rsock, wrap2.wsock])
|
wrap2.rsock, wrap2.wsock])
|
||||||
@ -299,11 +272,9 @@ class Proxy(Handler):
|
|||||||
self.wrap2 = wrap2
|
self.wrap2 = wrap2
|
||||||
|
|
||||||
def pre_select(self, r, w, x):
|
def pre_select(self, r, w, x):
|
||||||
if self.wrap1.shut_write:
|
if self.wrap1.shut_write: self.wrap2.noread()
|
||||||
self.wrap2.noread()
|
if self.wrap2.shut_write: self.wrap1.noread()
|
||||||
if self.wrap2.shut_write:
|
|
||||||
self.wrap1.noread()
|
|
||||||
|
|
||||||
if self.wrap1.connect_to:
|
if self.wrap1.connect_to:
|
||||||
_add(w, self.wrap1.rsock)
|
_add(w, self.wrap1.rsock)
|
||||||
elif self.wrap1.buf:
|
elif self.wrap1.buf:
|
||||||
@ -320,7 +291,7 @@ class Proxy(Handler):
|
|||||||
elif not self.wrap2.shut_read:
|
elif not self.wrap2.shut_read:
|
||||||
_add(r, self.wrap2.rsock)
|
_add(r, self.wrap2.rsock)
|
||||||
|
|
||||||
def callback(self, sock):
|
def callback(self):
|
||||||
self.wrap1.try_connect()
|
self.wrap1.try_connect()
|
||||||
self.wrap2.try_connect()
|
self.wrap2.try_connect()
|
||||||
self.wrap1.fill()
|
self.wrap1.fill()
|
||||||
@@ -334,33 +305,31 @@ class Proxy(Handler):
             self.wrap2.buf = []
             self.wrap2.noread()
         if (self.wrap1.shut_read and self.wrap2.shut_read and
                 not self.wrap1.buf and not self.wrap2.buf):
             self.ok = False
             self.wrap1.nowrite()
             self.wrap2.nowrite()
 
 
 class Mux(Handler):
-
-    def __init__(self, rfile, wfile):
-        Handler.__init__(self, [rfile, wfile])
-        self.rfile = rfile
-        self.wfile = wfile
+    def __init__(self, rsock, wsock):
+        Handler.__init__(self, [rsock, wsock])
+        self.rsock = rsock
+        self.wsock = wsock
         self.new_channel = self.got_dns_req = self.got_routes = None
-        self.got_udp_open = self.got_udp_data = self.got_udp_close = None
         self.got_host_req = self.got_host_list = None
         self.channels = {}
         self.chani = 0
         self.want = 0
-        self.inbuf = b('')
+        self.inbuf = ''
         self.outbuf = []
         self.fullness = 0
         self.too_full = False
-        self.send(0, CMD_PING, b('chicken'))
+        self.send(0, CMD_PING, 'chicken')
 
     def next_channel(self):
         # channel 0 is special, so we never allocate it
-        for _ in range(1024):
+        for timeout in xrange(1024):
             self.chani += 1
             if self.chani > MAX_CHANNEL:
                 self.chani = 1
@@ -369,52 +338,50 @@ class Mux(Handler):
 
     def amount_queued(self):
         total = 0
-        for byte in self.outbuf:
-            total += len(byte)
+        for b in self.outbuf:
+            total += len(b)
         return total
 
     def check_fullness(self):
-        if self.fullness > LATENCY_BUFFER_SIZE:
+        if self.fullness > 32768:
             if not self.too_full:
-                self.send(0, CMD_PING, b('rttest'))
+                self.send(0, CMD_PING, 'rttest')
             self.too_full = True
+        #ob = []
+        #for b in self.outbuf:
+        #    (s1,s2,c) = struct.unpack('!ccH', b[:4])
+        #    ob.append(c)
+        #log('outbuf: %d %r\n' % (self.amount_queued(), ob))
 
     def send(self, channel, cmd, data):
-        assert isinstance(data, bytes)
-        assert len(data) <= 65535
-        p = struct.pack('!ccHHH', b('S'), b('S'), channel, cmd, len(data)) \
-            + data
+        data = str(data)
+        assert(len(data) <= 65535)
+        p = struct.pack('!ccHHH', 'S', 'S', channel, cmd, len(data)) + data
         self.outbuf.append(p)
-        debug2(' > channel=%d cmd=%s len=%d (fullness=%d)'
-               % (channel, cmd_to_name.get(cmd, hex(cmd)),
+        debug2(' > channel=%d cmd=%s len=%d (fullness=%d)\n'
+               % (channel, cmd_to_name.get(cmd,hex(cmd)),
                   len(data), self.fullness))
-        # debug3('>>> data: %r' % data)
         self.fullness += len(data)
 
     def got_packet(self, channel, cmd, data):
-        debug2('< channel=%d cmd=%s len=%d'
-               % (channel, cmd_to_name.get(cmd, hex(cmd)), len(data)))
-        # debug3('<<< data: %r' % data)
+        debug2('< channel=%d cmd=%s len=%d\n'
+               % (channel, cmd_to_name.get(cmd,hex(cmd)), len(data)))
         if cmd == CMD_PING:
             self.send(0, CMD_PONG, data)
         elif cmd == CMD_PONG:
-            debug2('received PING response')
+            debug2('received PING response\n')
             self.too_full = False
             self.fullness = 0
         elif cmd == CMD_EXIT:
             self.ok = False
-        elif cmd == CMD_TCP_CONNECT:
-            assert not self.channels.get(channel)
+        elif cmd == CMD_CONNECT:
+            assert(not self.channels.get(channel))
             if self.new_channel:
                 self.new_channel(channel, data)
         elif cmd == CMD_DNS_REQ:
-            assert not self.channels.get(channel)
+            assert(not self.channels.get(channel))
             if self.got_dns_req:
                 self.got_dns_req(channel, data)
-        elif cmd == CMD_UDP_OPEN:
-            assert not self.channels.get(channel)
-            if self.got_udp_open:
-                self.got_udp_open(channel, data)
         elif cmd == CMD_ROUTES:
             if self.got_routes:
                 self.got_routes(data)
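Both sides of this hunk frame mux traffic the same way: an 8-byte header packed as '!ccHHH' ('S', 'S', channel, cmd, payload length) followed by the payload, capped at 65535 bytes; the change is only whether the payload is handled as str (this branch) or bytes (master). Below is a minimal sketch of that framing. The helper names mux_pack/mux_unpack and the 0x1000 command value are illustrative placeholders, not identifiers from the diff.

import struct

HDR_LEN = 8  # '!ccHHH' is 1 + 1 + 2 + 2 + 2 bytes

def mux_pack(channel, cmd, data):
    # Hypothetical helper: frame one mux packet the way Mux.send() does.
    assert len(data) <= 65535
    return struct.pack('!ccHHH', b'S', b'S', channel, cmd, len(data)) + data

def mux_unpack(buf):
    # Hypothetical helper: parse one complete packet from the front of buf,
    # mirroring Mux.handle(); returns (channel, cmd, data, rest) or None.
    if len(buf) < HDR_LEN:
        return None
    s1, s2, channel, cmd, datalen = struct.unpack('!ccHHH', buf[:HDR_LEN])
    assert s1 == b'S' and s2 == b'S'
    if len(buf) < HDR_LEN + datalen:
        return None
    return channel, cmd, buf[HDR_LEN:HDR_LEN + datalen], buf[HDR_LEN + datalen:]

# Example: a PING on the control channel (channel 0), as Mux.__init__ sends.
frame = mux_pack(0, 0x1000, b'chicken')   # 0x1000 stands in for CMD_PING
print(mux_unpack(frame))                  # (0, 4096, b'chicken', b'')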
@@ -433,46 +400,43 @@ class Mux(Handler):
         else:
             callback = self.channels.get(channel)
             if not callback:
-                log('warning: closed channel %d got cmd=%s len=%d'
-                    % (channel, cmd_to_name.get(cmd, hex(cmd)), len(data)))
+                log('warning: closed channel %d got cmd=%s len=%d\n'
+                    % (channel, cmd_to_name.get(cmd,hex(cmd)), len(data)))
             else:
                 callback(cmd, data)
 
     def flush(self):
-        set_non_blocking_io(self.wfile.fileno())
+        self.wsock.setblocking(False)
         if self.outbuf and self.outbuf[0]:
-            wrote = _nb_clean(self.wfile.write, self.outbuf[0])
-            # self.wfile.flush()
-            debug2('mux wrote: %r/%d' % (wrote, len(self.outbuf[0])))
+            wrote = _nb_clean(os.write, self.wsock.fileno(), self.outbuf[0])
+            debug2('mux wrote: %r/%d\n' % (wrote, len(self.outbuf[0])))
             if wrote:
                 self.outbuf[0] = self.outbuf[0][wrote:]
         while self.outbuf and not self.outbuf[0]:
             self.outbuf[0:1] = []
 
     def fill(self):
-        set_non_blocking_io(self.rfile.fileno())
+        self.rsock.setblocking(False)
         try:
-            # If LATENCY_BUFFER_SIZE is inappropriately large, we will
-            # get a MemoryError here. Read no more than 1MiB.
-            read = _nb_clean(self.rfile.read, min(1048576, LATENCY_BUFFER_SIZE))
-            debug2('mux read: %r' % len(read))
-        except OSError:
-            _, e = sys.exc_info()[:2]
+            b = _nb_clean(os.read, self.rsock.fileno(), 32768)
+        except OSError, e:
             raise Fatal('other end: %r' % e)
-        # log('<<< %r' % b)
-        if read == b(''): # EOF
+        #log('<<< %r\n' % b)
+        if b == '': # EOF
             self.ok = False
-        if read:
-            self.inbuf += read
+        if b:
+            self.inbuf += b
 
     def handle(self):
         self.fill()
+        #log('inbuf is: (%d,%d) %r\n'
+        #    % (self.want, len(self.inbuf), self.inbuf))
         while 1:
             if len(self.inbuf) >= (self.want or HDR_LEN):
-                (s1, s2, channel, cmd, datalen) = \
+                (s1,s2,channel,cmd,datalen) = \
                     struct.unpack('!ccHHH', self.inbuf[:HDR_LEN])
-                assert s1 == b('S')
-                assert s2 == b('S')
+                assert(s1 == 'S')
+                assert(s2 == 'S')
                 self.want = datalen + HDR_LEN
             if self.want and len(self.inbuf) >= self.want:
                 data = self.inbuf[HDR_LEN:self.want]
@@ -483,60 +447,48 @@ class Mux(Handler):
                 break
 
     def pre_select(self, r, w, x):
-        _add(r, self.rfile)
+        _add(r, self.rsock)
         if self.outbuf:
-            _add(w, self.wfile)
+            _add(w, self.wsock)
 
-    def callback(self, sock):
-        (r, w, _) = select.select([self.rfile], [self.wfile], [], 0)
-        if self.rfile in r:
+    def callback(self):
+        (r,w,x) = select.select([self.rsock], [self.wsock], [], 0)
+        if self.rsock in r:
             self.handle()
-        if self.outbuf and self.wfile in w:
+        if self.outbuf and self.wsock in w:
             self.flush()
 
 
 class MuxWrapper(SockWrapper):
-
     def __init__(self, mux, channel):
-        SockWrapper.__init__(self, mux.rfile, mux.wfile)
+        SockWrapper.__init__(self, mux.rsock, mux.wsock)
         self.mux = mux
         self.channel = channel
         self.mux.channels[channel] = self.got_packet
         self.socks = []
-        debug2('new channel: %d' % channel)
+        debug2('new channel: %d\n' % channel)
 
     def __del__(self):
         self.nowrite()
         SockWrapper.__del__(self)
 
     def __repr__(self):
-        return 'SW%r:Mux#%d' % (self.peername, self.channel)
+        return 'SW%r:Mux#%d' % (self.peername,self.channel)
 
     def noread(self):
         if not self.shut_read:
-            self.mux.send(self.channel, CMD_TCP_STOP_SENDING, b(''))
-            self.setnoread()
-
-    def setnoread(self):
-        if not self.shut_read:
-            debug2('%r: done reading' % self)
             self.shut_read = True
+            self.mux.send(self.channel, CMD_STOP_SENDING, '')
             self.maybe_close()
 
     def nowrite(self):
         if not self.shut_write:
-            self.mux.send(self.channel, CMD_TCP_EOF, b(''))
-            self.setnowrite()
-
-    def setnowrite(self):
-        if not self.shut_write:
-            debug2('%r: done writing' % self)
             self.shut_write = True
+            self.mux.send(self.channel, CMD_EOF, '')
             self.maybe_close()
 
     def maybe_close(self):
         if self.shut_read and self.shut_write:
-            debug2('%r: closing connection' % self)
             # remove the mux's reference to us. The python garbage collector
             # will then be able to reap our object.
             self.mux.channels[self.channel] = None
@@ -549,61 +501,59 @@ class MuxWrapper(SockWrapper):
             return 0 # too much already enqueued
         if len(buf) > 2048:
             buf = buf[:2048]
-        self.mux.send(self.channel, CMD_TCP_DATA, buf)
+        self.mux.send(self.channel, CMD_DATA, buf)
         return len(buf)
 
     def uread(self):
         if self.shut_read:
-            return b('') # EOF
+            return '' # EOF
         else:
             return None # no data available right now
 
     def got_packet(self, cmd, data):
-        if cmd == CMD_TCP_EOF:
-            # Remote side already knows the status - set flag but don't notify
-            self.setnoread()
-        elif cmd == CMD_TCP_STOP_SENDING:
-            # Remote side already knows the status - set flag but don't notify
-            self.setnowrite()
-        elif cmd == CMD_TCP_DATA:
+        if cmd == CMD_EOF:
+            self.noread()
+        elif cmd == CMD_STOP_SENDING:
+            self.nowrite()
+        elif cmd == CMD_DATA:
             self.buf.append(data)
         else:
             raise Exception('unknown command %d (%d bytes)'
                             % (cmd, len(data)))
 
 
-def connect_dst(family, ip, port):
-    debug2('Connecting to %s:%d' % (ip, port))
-    outsock = socket.socket(family)
+def connect_dst(ip, port):
+    debug2('Connecting to %s:%d\n' % (ip, port))
+    outsock = socket.socket()
+    outsock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
     return SockWrapper(outsock, outsock,
-                       connect_to=(ip, port),
-                       peername='%s:%d' % (ip, port))
+                       connect_to = (ip,port),
+                       peername = '%s:%d' % (ip,port))
 
 
 def runonce(handlers, mux):
     r = []
     w = []
     x = []
-    to_remove = [s for s in handlers if not s.ok]
+    to_remove = filter(lambda s: not s.ok, handlers)
     for h in to_remove:
         handlers.remove(h)
 
     for s in handlers:
-        s.pre_select(r, w, x)
-    debug2('Waiting: %d r=%r w=%r x=%r (fullness=%d/%d)'
+        s.pre_select(r,w,x)
+    debug2('Waiting: %d r=%r w=%r x=%r (fullness=%d/%d)\n'
           % (len(handlers), _fds(r), _fds(w), _fds(x),
              mux.fullness, mux.too_full))
-    (r, w, x) = select.select(r, w, x)
-    debug2(' Ready: %d r=%r w=%r x=%r'
+    (r,w,x) = select.select(r,w,x)
+    debug2(' Ready: %d r=%r w=%r x=%r\n'
          % (len(handlers), _fds(r), _fds(w), _fds(x)))
-    ready = r + w + x
+    ready = r+w+x
     did = {}
    for h in handlers:
        for s in h.socks:
            if s in ready:
-                h.callback(s)
+                h.callback()
                did[s] = 1
    for s in ready:
-        if s not in did:
+        if not s in did:
            raise Fatal('socket %r was not used by any handler' % s)
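In both branches runonce() drives every Handler the same way: each handler contributes the sockets it cares about in pre_select(), the loop blocks in select.select(), and any handler owning a ready socket has its callback invoked (with the ready socket as an argument on master, without one on this branch). A small self-contained sketch of that dispatch pattern follows; it uses a stand-in echo handler rather than sshuttle's classes and assumes the master-style callback(sock) signature.

import select

class EchoHandler:
    # Stand-in for ssnet's Handler protocol: .ok, .socks, pre_select(), callback().
    def __init__(self, sock):
        self.ok = True
        self.socks = [sock]

    def pre_select(self, r, w, x):
        r.append(self.socks[0])      # we only ever want to read

    def callback(self, sock):        # master passes the ready socket in
        data = sock.recv(4096)
        if not data:
            self.ok = False          # EOF: drop this handler next round
        else:
            sock.sendall(data)       # echo it straight back

def run_once(handlers):
    # Same shape as ssnet.runonce(): prune dead handlers, select, dispatch.
    handlers[:] = [h for h in handlers if h.ok]
    r, w, x = [], [], []
    for h in handlers:
        h.pre_select(r, w, x)
    r, w, x = select.select(r, w, x)
    ready = r + w + x
    for h in handlers:
        for s in h.socks:
            if s in ready:
                h.callback(s)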
16 ssyslog.py Normal file
@@ -0,0 +1,16 @@
+import sys, os
+from compat import ssubprocess
+
+
+_p = None
+def start_syslog():
+    global _p
+    _p = ssubprocess.Popen(['logger',
+                            '-p', 'daemon.notice',
+                            '-t', 'sshuttle'], stdin=ssubprocess.PIPE)
+
+
+def stderr_to_syslog():
+    sys.stdout.flush()
+    sys.stderr.flush()
+    os.dup2(_p.stdin.fileno(), 2)
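ssyslog.py pipes messages into a logger(1) child process: start_syslog() spawns 'logger -p daemon.notice -t sshuttle', and stderr_to_syslog() dup2()s that child's stdin over file descriptor 2 so everything later written to stderr lands in syslog. A hedged usage sketch follows; the real call sites live elsewhere in the tree and are not shown in this diff.

import sys
import ssyslog

ssyslog.start_syslog()        # spawn: logger -p daemon.notice -t sshuttle
ssyslog.stderr_to_syslog()    # point fd 2 at the logger's stdin
sys.stderr.write('hello from sshuttle\n')   # now ends up in syslog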
86 stresstest.py Executable file
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+import sys, os, socket, select, struct, time
+
+listener = socket.socket()
+listener.bind(('127.0.0.1', 0))
+listener.listen(500)
+
+servers = []
+clients = []
+remain = {}
+
+NUMCLIENTS = 50
+count = 0
+
+
+while 1:
+    if len(clients) < NUMCLIENTS:
+        c = socket.socket()
+        c.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        c.bind(('0.0.0.0', 0))
+        c.connect(listener.getsockname())
+        count += 1
+        if count >= 16384:
+            count = 1
+        print 'cli CREATING %d' % count
+        b = struct.pack('I', count) + 'x'*count
+        remain[c] = count
+        print 'cli >> %r' % len(b)
+        c.send(b)
+        c.shutdown(socket.SHUT_WR)
+        clients.append(c)
+        r = [listener]
+        time.sleep(0.1)
+    else:
+        r = [listener]+servers+clients
+    print 'select(%d)' % len(r)
+    r,w,x = select.select(r, [], [], 5)
+    assert(r)
+    for i in r:
+        if i == listener:
+            s,addr = listener.accept()
+            servers.append(s)
+        elif i in servers:
+            b = i.recv(4096)
+            print 'srv << %r' % len(b)
+            if not i in remain:
+                assert(len(b) >= 4)
+                want = struct.unpack('I', b[:4])[0]
+                b = b[4:]
+                #i.send('y'*want)
+            else:
+                want = remain[i]
+            if want < len(b):
+                print 'weird wanted %d bytes, got %d: %r' % (want, len(b), b)
+            assert(want >= len(b))
+            want -= len(b)
+            remain[i] = want
+            if not b: # EOF
+                if want:
+                    print 'weird: eof but wanted %d more' % want
+                assert(want == 0)
+                i.close()
+                servers.remove(i)
+                del remain[i]
+            else:
+                print 'srv >> %r' % len(b)
+                i.send('y'*len(b))
+                if not want:
+                    i.shutdown(socket.SHUT_WR)
+        elif i in clients:
+            b = i.recv(4096)
+            print 'cli << %r' % len(b)
+            want = remain[i]
+            if want < len(b):
+                print 'weird wanted %d bytes, got %d: %r' % (want, len(b), b)
+            assert(want >= len(b))
+            want -= len(b)
+            remain[i] = want
+            if not b: # EOF
+                if want:
+                    print 'weird: eof but wanted %d more' % want
+                assert(want == 0)
+                i.close()
+                clients.remove(i)
+                del remain[i]
+                listener.accept()
@@ -1,163 +0,0 @@
-import io
-import os
-from socket import AF_INET, AF_INET6
-
-from unittest.mock import Mock, patch, call
-
-import pytest
-
-import sshuttle.firewall
-
-
-def setup_daemon():
-    stdin = io.BytesIO(u"""ROUTES
-{inet},24,0,1.2.3.0,8000,9000
-{inet},32,1,1.2.3.66,8080,8080
-{inet6},64,0,2404:6800:4004:80c::,0,0
-{inet6},128,1,2404:6800:4004:80c::101f,80,80
-NSLIST
-{inet},1.2.3.33
-{inet6},2404:6800:4004:80c::33
-PORTS 1024,1025,1026,1027
-GO 1 - - 0x01 12345
-HOST 1.2.3.3,existing
-""".format(inet=AF_INET, inet6=AF_INET6).encode('ASCII'))
-    stdout = Mock()
-    return stdin, stdout
-
-
-def test_rewrite_etc_hosts(tmpdir):
-    orig_hosts = tmpdir.join("hosts.orig")
-    orig_hosts.write("1.2.3.3 existing\n")
-
-    new_hosts = tmpdir.join("hosts")
-    orig_hosts.copy(new_hosts)
-
-    hostmap = {
-        'myhost': '1.2.3.4',
-        'myotherhost': '1.2.3.5',
-    }
-    with patch('sshuttle.firewall.HOSTSFILE', new=str(new_hosts)):
-        sshuttle.firewall.rewrite_etc_hosts(hostmap, 10)
-
-    with new_hosts.open() as f:
-        line = f.readline()
-        s = line.split()
-        assert s == ['1.2.3.3', 'existing']
-
-        line = f.readline()
-        s = line.split()
-        assert s == ['1.2.3.4', 'myhost',
-                     '#', 'sshuttle-firewall-10', 'AUTOCREATED']
-
-        line = f.readline()
-        s = line.split()
-        assert s == ['1.2.3.5', 'myotherhost',
-                     '#', 'sshuttle-firewall-10', 'AUTOCREATED']
-
-        line = f.readline()
-        assert line == ""
-
-    with patch('sshuttle.firewall.HOSTSFILE', new=str(new_hosts)):
-        sshuttle.firewall.restore_etc_hosts(hostmap, 10)
-    assert orig_hosts.computehash() == new_hosts.computehash()
-
-
-@patch('os.link')
-@patch('os.rename')
-def test_rewrite_etc_hosts_no_overwrite(mock_link, mock_rename, tmpdir):
-    mock_link.side_effect = OSError
-    mock_rename.side_effect = OSError
-
-    with pytest.raises(OSError):
-        os.link('/test_from', '/test_to')
-
-    with pytest.raises(OSError):
-        os.rename('/test_from', '/test_to')
-
-    test_rewrite_etc_hosts(tmpdir)
-
-
-def test_subnet_weight():
-    subnets = [
-        (AF_INET, 16, 0, '192.168.0.0', 0, 0),
-        (AF_INET, 24, 0, '192.168.69.0', 0, 0),
-        (AF_INET, 32, 0, '192.168.69.70', 0, 0),
-        (AF_INET, 32, 1, '192.168.69.70', 0, 0),
-        (AF_INET, 32, 1, '192.168.69.70', 80, 80),
-        (AF_INET, 0, 1, '0.0.0.0', 0, 0),
-        (AF_INET, 0, 1, '0.0.0.0', 8000, 9000),
-        (AF_INET, 0, 1, '0.0.0.0', 8000, 8500),
-        (AF_INET, 0, 1, '0.0.0.0', 8000, 8000),
-        (AF_INET, 0, 1, '0.0.0.0', 400, 450)
-    ]
-    subnets_sorted = [
-        (AF_INET, 32, 1, '192.168.69.70', 80, 80),
-        (AF_INET, 0, 1, '0.0.0.0', 8000, 8000),
-        (AF_INET, 0, 1, '0.0.0.0', 400, 450),
-        (AF_INET, 0, 1, '0.0.0.0', 8000, 8500),
-        (AF_INET, 0, 1, '0.0.0.0', 8000, 9000),
-        (AF_INET, 32, 1, '192.168.69.70', 0, 0),
-        (AF_INET, 32, 0, '192.168.69.70', 0, 0),
-        (AF_INET, 24, 0, '192.168.69.0', 0, 0),
-        (AF_INET, 16, 0, '192.168.0.0', 0, 0),
-        (AF_INET, 0, 1, '0.0.0.0', 0, 0)
-    ]
-
-    assert subnets_sorted == sorted(subnets,
-                                    key=sshuttle.firewall.subnet_weight,
-                                    reverse=True)
-
-
-@patch('sshuttle.firewall.rewrite_etc_hosts')
-@patch('sshuttle.firewall.setup_daemon')
-@patch('sshuttle.firewall.get_method')
-def test_main(mock_get_method, mock_setup_daemon, mock_rewrite_etc_hosts):
-    stdin, stdout = setup_daemon()
-    mock_setup_daemon.return_value = stdin, stdout
-
-    mock_get_method("not_auto").name = "test"
-    mock_get_method.reset_mock()
-
-    sshuttle.firewall.main("not_auto", False)
-
-    assert mock_rewrite_etc_hosts.mock_calls == [
-        call({'1.2.3.3': 'existing'}, 1024),
-        call({}, 1024),
-    ]
-
-    assert stdout.mock_calls == [
-        call.write(b'READY test\n'),
-        call.flush(),
-        call.write(b'STARTED\n'),
-        call.flush()
-    ]
-    assert mock_setup_daemon.mock_calls == [call()]
-    assert mock_get_method.mock_calls == [
-        call('not_auto'),
-        call().is_supported(),
-        call().is_supported().__bool__(),
-        call().setup_firewall(
-            1024, 1026,
-            [(AF_INET6, u'2404:6800:4004:80c::33')],
-            AF_INET6,
-            [(AF_INET6, 64, False, u'2404:6800:4004:80c::', 0, 0),
-             (AF_INET6, 128, True, u'2404:6800:4004:80c::101f', 80, 80)],
-            True,
-            None,
-            None,
-            '0x01'),
-        call().setup_firewall(
-            1025, 1027,
-            [(AF_INET, u'1.2.3.33')],
-            AF_INET,
-            [(AF_INET, 24, False, u'1.2.3.0', 8000, 9000),
-             (AF_INET, 32, True, u'1.2.3.66', 8080, 8080)],
-            True,
-            None,
-            None,
-            '0x01'),
-        call().wait_for_firewall_ready(12345),
-        call().restore_firewall(1024, AF_INET6, True, None, None),
-        call().restore_firewall(1025, AF_INET, True, None, None),
-    ]