Mirror of https://github.com/sshuttle/sshuttle.git (synced 2025-06-20 09:57:42 +02:00)

Compare commits: master ... sshuttle-0.72

No commits in common. "master" and "sshuttle-0.72" have entirely different histories.

.github/dependabot.yml (vendored): 13 changed lines

@@ -1,13 +0,0 @@
version: 2
enable-beta-ecosystems: true
updates:
  - package-ecosystem: uv
    directory: "/"
    schedule:
      interval: daily
    open-pull-requests-limit: 10
  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: daily
    open-pull-requests-limit: 10

.github/workflows/codeql.yml (vendored): 70 changed lines

@@ -1,70 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ master ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master ]
  schedule:
    - cron: '31 21 * * 3'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://git.io/codeql-language-support

    steps:
    - name: Checkout repository
      uses: actions/checkout@v4

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v3
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
        # By default, queries listed here will override any specified in a config file.
        # Prefix the list here with "+" to use these queries and those in the config file.
        # queries: ./path/to/local/query, your-org/your-repo/queries@main

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v3

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    #    and modify them (or add more) to build your code if your project
    #    uses a compiled language

    #- run: |
    #   make bootstrap
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v3

.github/workflows/pythonpackage.yml (vendored): 38 changed lines

@@ -1,38 +0,0 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Python package

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
  workflow_dispatch: {}

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12"]
        poetry-version: ["main"]
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.4.30"
          enable-cache: true
          cache-dependency-glob: "uv.lock"
      - name: Install the project
        run: uv sync --all-extras --dev
      - name: Lint with flake8
        run: uv run flake8 sshuttle tests --count --show-source --statistics
      - name: Run the automated tests
        run: uv run pytest -v

.github/workflows/release-please.yml (vendored): 66 changed lines

@@ -1,66 +0,0 @@
on:
  push:
    branches:
      - master

name: release-please

jobs:

  release-please:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    outputs:
      release_created: ${{ steps.release.outputs.release_created }}
      tag_name: ${{ steps.release.outputs.tag_name }}
    steps:
      - uses: googleapis/release-please-action@v4
        id: release
        with:
          token: ${{ secrets.MY_RELEASE_PLEASE_TOKEN }}
          release-type: python

  build-pypi:
    name: Build for pypi
    needs: [release-please]
    if: ${{ needs.release-please.outputs.release_created == 'true' }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.12
        uses: actions/setup-python@v5
        with:
          python-version: 3.12
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.4.30"
          enable-cache: true
          cache-dependency-glob: "uv.lock"
      - name: Build project
        run: uv build
      - name: Store the distribution packages
        uses: actions/upload-artifact@v4
        with:
          name: python-package-distributions
          path: dist/

  upload-pypi:
    name: Upload to pypi
    needs: [build-pypi]
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/p/sshuttle
    permissions:
      id-token: write
    steps:
      - name: Download all the dists
        uses: actions/download-artifact@v4
        with:
          name: python-package-distributions
          path: dist/
      - name: Publish package distributions to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1

.gitignore (vendored): 14 changed lines

@@ -1,20 +1,6 @@
/tmp/
/.coverage
/.cache/
/.eggs/
/.tox/
/build/
/dist/
/sshuttle.egg-info/
/docs/_build/
*.pyc
*~
*.8
/.do_built
/.do_built.dir
/.redo
/.pytest_cache/
/.python-version
/.direnv/
/result
/.vscode/

@@ -1,24 +0,0 @@
strictness: medium

pylint:
  disable:
    - too-many-statements
    - too-many-locals
    - too-many-function-args
    - too-many-arguments
    - too-many-branches
    - bare-except
    - protected-access
    - no-else-return
    - unused-argument
    - method-hidden
    - arguments-differ
    - wrong-import-position
    - raising-bad-type

pep8:
  options:
    max-line-length: 79

mccabe:
  run: false

@@ -1,13 +0,0 @@
version: 2

build:
  os: ubuntu-20.04
  tools:
    python: "3.10"
  jobs:
    post_install:
      - pip install uv
      - UV_PROJECT_ENVIRONMENT=$READTHEDOCS_VIRTUALENV_PATH uv sync --all-extras --group docs --link-mode=copy

sphinx:
  configuration: docs/conf.py

@@ -1 +0,0 @@
python 3.10.6

CHANGELOG.md: 54 changed lines

@@ -1,54 +0,0 @@
# Changelog

## [1.3.1](https://github.com/sshuttle/sshuttle/compare/v1.3.0...v1.3.1) (2025-03-25)

### Bug Fixes

* add pycodestyle config ([5942376](https://github.com/sshuttle/sshuttle/commit/5942376090395d0a8dfe38fe012a519268199341))
* add python lint tools ([ae3c022](https://github.com/sshuttle/sshuttle/commit/ae3c022d1d67de92f1c4712d06eb8ae76c970624))
* correct bad version number at runtime ([7b66253](https://github.com/sshuttle/sshuttle/commit/7b662536ba92d724ed8f86a32a21282fea66047c))
* Restore "nft" method ([375810a](https://github.com/sshuttle/sshuttle/commit/375810a9a8910a51db22c9fe4c0658c39b16c9e7))

## [1.3.0](https://github.com/sshuttle/sshuttle/compare/v1.2.0...v1.3.0) (2025-02-23)

### Features

* switch to a network namespace on Linux ([8a123d9](https://github.com/sshuttle/sshuttle/commit/8a123d9762b84f168a8ca8c75f73e590954e122d))

### Bug Fixes

* prevent UnicodeDecodeError parsing iptables rule with comments ([cbe3d1e](https://github.com/sshuttle/sshuttle/commit/cbe3d1e402cac9d3fbc818fe0cb8a87be2e94348))
* remove temp build hack ([1f5e6ce](https://github.com/sshuttle/sshuttle/commit/1f5e6cea703db33761fb1c3f999b9624cf3bc7ad))
* support ':' sign in password ([7fa927e](https://github.com/sshuttle/sshuttle/commit/7fa927ef8ceea6b1b2848ca433b8b3e3b63f0509))

### Documentation

* replace nix-env with nix-shell ([340ccc7](https://github.com/sshuttle/sshuttle/commit/340ccc705ebd9499f14f799fcef0b5d2a8055fb4))
* update installation instructions ([a2d405a](https://github.com/sshuttle/sshuttle/commit/a2d405a6a7f9d1a301311a109f8411f2fe8deb37))

## [1.2.0](https://github.com/sshuttle/sshuttle/compare/v1.1.2...v1.2.0) (2025-02-07)

### Features

* Add release-please to build workflow ([d910b64](https://github.com/sshuttle/sshuttle/commit/d910b64be77fd7ef2a5f169b780bfda95e67318d))

### Bug Fixes

* Add support for Python 3.11 and Python 3.11 ([a3396a4](https://github.com/sshuttle/sshuttle/commit/a3396a443df14d3bafc3d25909d9221aa182b8fc))
* bad file descriptor error in windows, fix pytest errors ([d4d0fa9](https://github.com/sshuttle/sshuttle/commit/d4d0fa945d50606360aa7c5f026a0f190b026c68))
* drop Python 3.8 support ([1084c0f](https://github.com/sshuttle/sshuttle/commit/1084c0f2458c1595b00963b3bd54bd667e4cfc9f))
* ensure poetry works for Python 3.9 ([693ee40](https://github.com/sshuttle/sshuttle/commit/693ee40c485c70f353326eb0e8f721f984850f5c))
* fix broken workflow_dispatch CI rule ([4b6f7c6](https://github.com/sshuttle/sshuttle/commit/4b6f7c6a656a752552295863092d3b8af0b42b31))
* Remove more references to legacy Python versions ([339b522](https://github.com/sshuttle/sshuttle/commit/339b5221bc33254329f79f2374f6114be6f30aed))
* replace requirements.txt files with poetry ([85dc319](https://github.com/sshuttle/sshuttle/commit/85dc3199a332f9f9f0e4c6037c883a8f88dc09ca))
* replace requirements.txt files with poetry (2) ([d08f78a](https://github.com/sshuttle/sshuttle/commit/d08f78a2d9777951d7e18f6eaebbcdd279d7683a))
* replace requirements.txt files with poetry (3) ([62da705](https://github.com/sshuttle/sshuttle/commit/62da70510e8a1f93e8b38870fdebdbace965cd8e))
* replace requirements.txt files with poetry (4) ([9bcedf1](https://github.com/sshuttle/sshuttle/commit/9bcedf19049e5b3a8ae26818299cc518ec03a926))
* update nix flake to fix problems ([cda60a5](https://github.com/sshuttle/sshuttle/commit/cda60a52331c7102cff892b9b77c8321e276680a))
* use Python >= 3.10 for docs ([bf29464](https://github.com/sshuttle/sshuttle/commit/bf294643e283cef9fb285d44e307e958686caf46))

CHANGES.rst: 315 changed lines

@@ -1,315 +0,0 @@
==========
Change log
==========
Release notes now moved to https://github.com/sshuttle/sshuttle/releases/

These are the old release notes.


1.0.5 - 2020-12-29
------------------

Added
~~~~~
* IPv6 support in nft method.
* Intercept DNS requests sent by systemd-resolved.
* Set default tmark.
* Fix python2 server compatibility.
* Python 3.9 support.

Fixed
~~~~~
* Change license text to LGPL-2.1
* Fix #494 sshuttle caught in infinite select() loop.
* Include sshuttle version in verbose output.
* Add psutil as dependency in setup.py
* When subnets and excludes are specified with hostnames, use all IPs.
* Update/document client's handling of IPv4 and IPv6.
* Update sdnotify.py documentation.
* Allow no remote to work.
* Make prefixes in verbose output more consistent.
* Make nat and nft rules consistent; improve rule ordering.
* Make server and client handle resolv.conf differently.
* Fix handling OSError in FirewallClient#__init__
* Refactor automatic method selection.

Removed
~~~~~~~
* Drop testing of Python 3.5


1.0.4 - 2020-08-24
------------------

Fixed
~~~~~
* Allow Mux() flush/fill to work with python < 3.5
* Fix parse_hostport to always return string for host.
* Require -r/--remote parameter.
* Add missing package in OpenWRT documentation.
* Fix doc about --listen option.
* README: add Ubuntu.
* Increase IP4 ttl to 63 hops instead of 42.
* Fix formatting in installation.rst


1.0.3 - 2020-07-12
------------------

Fixed
~~~~~
* Ask setuptools to require Python 3.5 and above.
* Add missing import.
* Fix formatting typos in usage docs


1.0.2 - 2020-06-18
------------------

Fixed
~~~~~
* Leave use of default port to ssh command.
* Remove unwanted references to Python 2.7 in docs.
* Replace usage of deprecated imp.
* Fix connection with @ sign in username.


1.0.1 - 2020-06-05
------------------

Fixed
~~~~~
* Errors in python long_documentation.


1.0.0 - 2020-06-05
------------------

Added
~~~~~
* Python 3.8 support.
* sshpass support.
* Auto sudoers file (#269).
* option for latency control buffer size.
* Docs: FreeBSD'.
* Docs: Nix'.
* Docs: openwrt'.
* Docs: install instructions for Fedora'.
* Docs: install instructions for Arch Linux'.
* Docs: 'My VPN broke and need a solution fast'.

Removed
~~~~~~~
* Python 2.6 support.
* Python 2.7 support.

Fixed
~~~~~
* Remove debug message for getpeername failure.
* Fix crash triggered by port scans closing socket.
* Added "Running as a service" to docs.
* Systemd integration.
* Trap UnicodeError to handle cases where hostnames returned by DNS are invalid.
* Formatting error in CHANGES.rst
* Various errors in documentation.
* Nftables based method.
* Make hostwatch locale-independent (#379).
* Add tproxy udp port mark filter that was missed in #144, fixes #367.
* Capturing of local DNS servers.
* Crashing on ECONNABORTED.
* Size of pf_rule, which grew in OpenBSD 6.4.
* Use prompt for sudo, not needed for doas.
* Arch linux installation instructions.
* tests for existing PR-312 (#337).
* Hyphen in hostname.
* Assembler import (#319).


0.78.5 - 2019-01-28
-------------------

Added
~~~~~
* doas support as replacement for sudo on OpenBSD.
* Added ChromeOS section to documentation (#262)
* Add --no-sudo-pythonpath option

Fixed
~~~~~
* Fix forwarding to a single port.
* Various updates to documentation.
* Don't crash if we can't look up peername
* Fix missing string formatting argument
* Moved sshuttle/tests into tests.
* Updated bandit config.
* Replace path /dev/null by os.devnull.
* Added coverage report to tests.
* Fixes support for OpenBSD (6.1+) (#282).
* Close stdin, stdout, and stderr when using syslog or forking to daemon (#283).
* Changes pf exclusion rules precedence.
* Fix deadlock with iptables with large ruleset.
* docs: document --ns-hosts --to-ns and update --dns.
* Use subprocess.check_output instead of run.
* Fix potential deadlock condition in nft_get_handle.
* auto-nets: retrieve routes only if using auto-nets.


0.78.4 - 2018-04-02
-------------------

Added
~~~~~
* Add homebrew instructions.
* Route traffic by linux user.
* Add nat-like method using nftables instead of iptables.

Changed
~~~~~~~
* Talk to custom DNS server on pod, instead of the ones in /etc/resolv.conf.
* Add new option for overriding destination DNS server.
* Changed subnet parsing. Previously 10/8 become 10.0.0.0/8. Now it gets
  parsed as 0.0.0.10/8.
* Make hostwatch find both fqdn and hostname.
* Use versions of python3 greater than 3.5 when available (e.g. 3.6).

Removed
~~~~~~~
* Remove Python 2.6 from automatic tests.

Fixed
~~~~~
* Fix case where there is no --dns.
* [pf] Avoid port forwarding from loopback address.
* Use getaddrinfo to obtain a correct sockaddr.
* Skip empty lines on incoming routes data.
* Just skip empty lines of routes data instead of stopping processing.
* [pf] Load pf kernel module when enabling pf.
* [pf] Test double restore (ipv4, ipv6) disables only once; test kldload.
* Fixes UDP and DNS proxies binding to the same socket address.
* Mock socket bind to avoid depending on local IPs being available in test box.
* Fix no value passed for argument auto_hosts in hw_main call.
* Fixed incorrect license information in setup.py.
* Preserve peer and port properly.
* Make --to-dns and --ns-host work well together.
* Remove test that fails under OSX.
* Specify pip requirements for tests.
* Use flake8 to find Python syntax errors or undefined names.
* Fix compatibility with the sudoers file.
* Stop using SO_REUSEADDR on sockets.
* Declare 'verbosity' as global variable to placate linters.
* Adds 'cd sshuttle' after 'git' to README and docs.
* Documentation for loading options from configuration file.
* Load options from a file.
* Fix firewall.py.
* Move sdnotify after setting up firewall rules.
* Fix tests on Macos.


0.78.3 - 2017-07-09
-------------------
The "I should have done a git pull" first release.

Fixed
~~~~~
* Order first by port range and only then by swidth


0.78.2 - 2017-07-09
-------------------

Added
~~~~~
* Adds support for tunneling specific port ranges (#144).
* Add support for iproute2.
* Allow remote hosts with colons in the username.
* Re-introduce ipfw support for sshuttle on FreeBSD with support for --DNS option as well.
* Add support for PfSense.
* Tests and documentation for systemd integration.
* Allow subnets to be given only by file (-s).

Fixed
~~~~~
* Work around non tabular headers in BSD netstat.
* Fix UDP and DNS support on Python 2.7 with tproxy method.
* Fixed tests after adding support for iproute2.
* Small refactoring of netstat/iproute parsing.
* Set started_by_sshuttle False after disabling pf.
* Fix punctuation and explain Type=notify.
* Move pytest-runner to tests_require.
* Fix warning: closed channel got=STOP_SENDING.
* Support sdnotify for better systemd integration.
* Fix #117 to allow for no subnets via file (-s).
* Fix argument splitting for multi-word arguments.
* requirements.rst: Fix mistakes.
* Fix typo, space not required here.
* Update installation instructions.
* Support using run from different directory.
* Ensure we update sshuttle/version.py in run.
* Don't print python version in run.
* Add CWD to PYTHONPATH in run.


0.78.1 - 2016-08-06
-------------------
* Fix readthedocs versioning.
* Don't crash on ENETUNREACH.
* Various bug fixes.
* Improvements to BSD and OSX support.


0.78.0 - 2016-04-08
-------------------

* Don't force IPv6 if IPv6 nameservers supplied. Fixes #74.
* Call /bin/sh as users shell may not be POSIX compliant. Fixes #77.
* Use argparse for command line processing. Fixes #75.
* Remove useless --server option.
* Support multiple -s (subnet) options. Fixes #86.
* Make server parts work with old versions of Python. Fixes #81.


0.77.2 - 2016-03-07
-------------------

* Accidentally switched LGPL2 license with GPL2 license in 0.77.1 - now fixed.


0.77.1 - 2016-03-07
-------------------

* Use semantic versioning. http://semver.org/
* Update GPL 2 license text.
* New release to fix PyPI.


0.77 - 2016-03-03
-----------------

* Various bug fixes.
* Fix Documentation.
* Add fix for MacOS X issue.
* Add support for OpenBSD.


0.76 - 2016-01-17
-----------------

* Add option to disable IPv6 support.
* Update documentation.
* Move documentation, including man page, to Sphinx.
* Use setuptools-scm for automatic versioning.


0.75 - 2016-01-12
-----------------

* Revert change that broke sshuttle entry point.


0.74 - 2016-01-10
-----------------

* Add CHANGES.rst file.
* Numerous bug fixes.
* Python 3.5 fixes.
* PF fixes, especially for BSD.

LICENSE: 199 changed lines

@@ -1,14 +1,13 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
GNU LIBRARY GENERAL PUBLIC LICENSE
Version 2, June 1991

Copyright (C) 1991, 1999 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Copyright (C) 1991 Free Software Foundation, Inc.
675 Mass Ave, Cambridge, MA 02139, USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
[This is the first released version of the library GPL. It is
numbered 2 because it goes with version 2 of the ordinary GPL.]

Preamble

@@ -17,109 +16,97 @@ freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.

This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.
This license, the Library General Public License, applies to some
specially designated Free Software Foundation software, and to any
other libraries whose authors decide to use it. You can use it for
your libraries, too.

When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.

To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if
you distribute copies of the library, or if you modify it.

For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
code. If you link a program with the library, you must provide
complete object files to the recipients so that they can relink them
with the library, after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.

We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
Our method of protecting your rights has two steps: (1) copyright
the library, and (2) offer you this license which gives you legal
permission to copy, distribute and/or modify the library.

To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Also, for each distributor's protection, we want to make certain
that everyone understands that there is no warranty for this free
library. If the library is modified by someone else and passed on, we
want its recipients to know that what they have is not the original
version, so that any problems introduced by others will not reflect on
the original authors' reputations.

Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that companies distributing free
software will individually obtain patent licenses, thus in effect
transforming the program into proprietary software. To prevent this,
we have made it clear that any patent must be licensed for everyone's
free use or not licensed at all.

Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
Most GNU software, including some libraries, is covered by the ordinary
GNU General Public License, which was designed for utility programs. This
license, the GNU Library General Public License, applies to certain
designated libraries. This license is quite different from the ordinary
one; be sure to read it in full, and don't assume that anything in it is
the same as in the ordinary license.

When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
The reason we have a separate public license for some libraries is that
they blur the distinction we usually make between modifying or adding to a
program and simply using it. Linking a program with a library, without
changing the library, is in some sense simply using the library, and is
analogous to running a utility program or application program. However, in
a textual and legal sense, the linked executable is a combined work, a
derivative of the original library, and the ordinary General Public License
treats it as such.

We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
Because of this blurred distinction, using the ordinary General
Public License for libraries did not effectively promote software
sharing, because most developers did not use the libraries. We
concluded that weaker conditions might promote sharing better.

For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it becomes
a de-facto standard. To achieve this, non-free programs must be
allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.

In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.

Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
However, unrestricted linking of non-free programs would deprive the
users of those programs of all benefit from the free status of the
libraries themselves. This Library General Public License is intended to
permit developers of non-free programs to use free libraries, while
preserving your freedom as a user of such programs to change the free
libraries that are incorporated in them. (We have not seen how to achieve
this as regards changes in header files, but we have achieved it as regards
changes in the actual functions of the Library.) The hope is that this
will lead to faster development of free libraries.

The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
former contains code derived from the library, while the latter only
works together with the library.

Note that it is possible for a library to be covered by the ordinary
General Public License rather than by this special one.

GNU LESSER GENERAL PUBLIC LICENSE
GNU LIBRARY GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".
0. This License Agreement applies to any software library which
contains a notice placed by the copyright holder or other authorized
party saying it may be distributed under the terms of this Library
General Public License (also called "this License"). Each licensee is
addressed as "you".

A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs

@@ -268,7 +255,7 @@ distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.

6. As an exception to the Sections above, you may also combine or
6. As an exception to the Sections above, you may also compile or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit

@@ -295,31 +282,23 @@ of these things:
Library will not necessarily be able to recompile the application
to use the modified definitions.)

b) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.

c) Accompany the work with a written offer, valid for at
b) Accompany the work with a written offer, valid for at
least three years, to give the same user the materials
specified in Subsection 6a, above, for a charge no more
than the cost of performing this distribution.

d) If distribution of the work is made by offering access to copy
c) If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.

e) Verify that the user has already received a copy of these
d) Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.

For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
the source code distributed need not include anything that is normally
distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.

@@ -368,7 +347,7 @@ Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
You are not responsible for enforcing compliance by third parties to
this License.

11. If, as a consequence of a court judgment or allegation of patent

@@ -411,7 +390,7 @@ excluded. In such case, this License incorporates the limitation as if
written in the body of this License.

13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
versions of the Library General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.

@@ -457,7 +436,7 @@ DAMAGES.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Libraries
Appendix: How to Apply These Terms to Your New Libraries

If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that

@@ -474,18 +453,18 @@ convey the exclusion of warranty; and each file should have at least the
Copyright (C) <year> <name of author>

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
version 2 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
Library General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

Also add information on how to contact you by electronic and paper mail.

MANIFEST.in: 14 changed lines

@@ -1,14 +0,0 @@
include *.txt
include *.rst
include *.py
include MANIFEST.in
include LICENSE
include run
include tox.ini
exclude sshuttle/version.py
recursive-include docs *.bat
recursive-include docs *.py
recursive-include docs *.rst
recursive-include docs Makefile
recursive-include sshuttle *.py
recursive-exclude docs/_build *

README.md (new file): 221 changed lines

@@ -0,0 +1,221 @@
WARNING:
On MacOS 10.6 (at least up to 10.6.6), your network will
stop responding about 10 minutes after the first time you
start sshuttle, because of a MacOS kernel bug relating to
arp and the net.inet.ip.scopedroute sysctl. To fix it,
just switch your wireless off and on. Sshuttle makes the
kernel setting it changes permanent, so this won't happen
again, even after a reboot.

Required Software
=================

- You need PyXAPI, available here:
  http://www.pps.univ-paris-diderot.fr/~ylg/PyXAPI/
- Python 2.x, both locally and on the remote system


Additional Suggested Software
-----------------------------

- You may want to use autossh, available in various package management
  systems


sshuttle: where transparent proxy meets VPN meets ssh
=====================================================

As far as I know, sshuttle is the only program that solves the following
common case:

- Your client machine (or router) is Linux, FreeBSD, or MacOS.

- You have access to a remote network via ssh.

- You don't necessarily have admin access on the remote network.

- The remote network has no VPN, or only stupid/complex VPN
  protocols (IPsec, PPTP, etc). Or maybe you <i>are</i> the
  admin and you just got frustrated with the awful state of
  VPN tools.

- You don't want to create an ssh port forward for every
  single host/port on the remote network.

- You hate openssh's port forwarding because it's randomly
  slow and/or stupid.

- You can't use openssh's PermitTunnel feature because
  it's disabled by default on openssh servers; plus it does
  TCP-over-TCP, which has terrible performance (see below).


Prerequisites
-------------

- sudo, su, or logged in as root on your client machine.
  (The server doesn't need admin access.)

- If you use Linux on your client machine:
  iptables installed on the client, including at
  least the iptables DNAT, REDIRECT, and ttl modules.
  These are installed by default on most Linux distributions.
  (The server doesn't need iptables and doesn't need to be
  Linux.)

- If you use MacOS or BSD on your client machine:
  Your kernel needs to be compiled with `IPFIREWALL_FORWARD`
  (MacOS has this by default) and you need to have ipfw
  available. (The server doesn't need to be MacOS or BSD.)


Obtaining sshuttle
------------------

- First, go get PyXAPI from the link above

- Clone: `git clone https://github.com/sshuttle/sshuttle.git`


Usage on (Ubuntu) Linux
-----------------------

- `cd packaging; ./make_deb`

- `sudo dpkg -i ./sshuttle-VERSION.deb`

- Check out the files in `/etc/sshuttle`; configure them so your tunnel works

- `sudo service sshuttle start`


Usage on other Linuxes and OSes
-------------------------------

<tt>src/sshuttle -r username@sshserver 0.0.0.0/0 -vv</tt>

- There is a shortcut for 0.0.0.0/0 for those that value
  their wrists:
  <tt>src/sshuttle -r username@sshserver 0/0 -vv</tt>

- If you would also like your DNS queries to be proxied
  through the DNS server of the server you are connected to:
  <tt>src/sshuttle --dns -vvr username@sshserver 0/0</tt>

The above is probably what you want to use to prevent
local network attacks such as Firesheep and friends.

(You may be prompted for one or more passwords; first, the
local password to become root using either sudo or su, and
then the remote ssh password. Or you might have sudo and ssh set
up to not require passwords, in which case you won't be
prompted at all.)

Usage Notes
-----------

That's it! Now your local machine can access the remote network as if you
were right there. And if your "client" machine is a router, everyone on
your local network can make connections to your remote network.

You don't need to install sshuttle on the remote server;
the remote server just needs to have python available.
sshuttle will automatically upload and run its source code
to the remote python interpreter.
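That bootstrap step can be pictured with a minimal sketch, assuming ssh access
to a placeholder host called `sshserver` (this is not sshuttle's real
assembler protocol, just the general idea): the client pipes a Python program
into a remote interpreter started over ssh, so nothing needs to be installed
on the server first.

    # Minimal sketch of "run our code in the remote python" over ssh.
    # Not sshuttle's actual protocol; "sshserver" is a placeholder host.
    import subprocess

    REMOTE_PROGRAM = r"""
    import sys
    sys.stdout.write("remote python %s is ready\n" % sys.version.split()[0])
    sys.stdout.flush()
    """

    def bootstrap(host: str) -> str:
        # "python3 -" makes the remote interpreter read the program from stdin.
        proc = subprocess.run(
            ["ssh", host, "python3", "-"],
            input=REMOTE_PROGRAM.encode(),
            stdout=subprocess.PIPE,
            check=True,
        )
        return proc.stdout.decode()

    if __name__ == "__main__":
        print(bootstrap("sshserver"), end="")
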
This creates a transparent proxy server on your local machine for all IP
addresses that match 0.0.0.0/0. (You can use more specific IP addresses if
you want; use any number of IP addresses or subnets to change which
addresses get proxied. Using 0.0.0.0/0 proxies <i>everything</i>, which is
interesting if you don't trust the people on your local network.)

Any TCP session you initiate to one of the proxied IP addresses will be
captured by sshuttle and sent over an ssh session to the remote copy of
sshuttle, which will then regenerate the connection on that end, and funnel
the data back and forth through ssh.

Fun, right? A poor man's instant VPN, and you don't even have to have
admin access on the server.


Theory of Operation
-------------------

sshuttle is not exactly a VPN, and not exactly port forwarding. It's kind
of both, and kind of neither.

It's like a VPN, since it can forward every port on an entire network, not
just ports you specify. Conveniently, it lets you use the "real" IP
addresses of each host rather than faking port numbers on localhost.

On the other hand, the way it *works* is more like ssh port forwarding than
a VPN. Normally, a VPN forwards your data one packet at a time, and
doesn't care about individual connections; ie. it's "stateless" with respect
to the traffic. sshuttle is the opposite of stateless; it tracks every
single connection.

You could compare sshuttle to something like the old <a
href="http://en.wikipedia.org/wiki/Slirp">Slirp</a> program, which was a
userspace TCP/IP implementation that did something similar. But it
operated on a packet-by-packet basis on the client side, reassembling the
packets on the server side. That worked okay back in the "real live serial
port" days, because serial ports had predictable latency and buffering.

But you can't safely just forward TCP packets over a TCP session (like ssh),
because TCP's performance depends fundamentally on packet loss; it
<i>must</i> experience packet loss in order to know when to slow down! At
the same time, the outer TCP session (ssh, in this case) is a reliable
transport, which means that what you forward through the tunnel <i>never</i>
experiences packet loss. The ssh session itself experiences packet loss, of
course, but TCP fixes it up and ssh (and thus you) never know the
difference. But neither does your inner TCP session, and extremely screwy
performance ensues.

sshuttle assembles the TCP stream locally, multiplexes it statefully over
an ssh session, and disassembles it back into packets at the other end. So
it never ends up doing TCP-over-TCP. It's just data-over-TCP, which is
safe.
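The stateful multiplexing can be illustrated with a small sketch, assuming a
made-up `(channel, length)` framing rather than sshuttle's actual wire format:
each connection's data is wrapped in a tagged frame, all frames share one
reliable stream (the ssh session), and only application data, never raw TCP
packets, crosses the tunnel.

    # Minimal sketch of data-over-TCP multiplexing; the frame layout is
    # illustrative only, not sshuttle's real protocol.
    import struct

    HEADER = struct.Struct("!HI")  # channel id (16 bits) + payload length (32 bits)

    def mux_frame(channel: int, payload: bytes) -> bytes:
        """Wrap one connection's chunk of data in a (channel, length) header."""
        return HEADER.pack(channel, len(payload)) + payload

    def demux_frames(buf: bytearray):
        """Yield (channel, payload) for every complete frame in buf, leaving
        any trailing partial frame in place until more bytes arrive."""
        while len(buf) >= HEADER.size:
            channel, length = HEADER.unpack_from(buf)
            if len(buf) < HEADER.size + length:
                break  # wait for the rest of this frame
            payload = bytes(buf[HEADER.size:HEADER.size + length])
            del buf[:HEADER.size + length]
            yield channel, payload

    if __name__ == "__main__":
        stream = bytearray()
        stream += mux_frame(1, b"GET / HTTP/1.0\r\n\r\n")
        stream += mux_frame(2, b"hello from another connection")
        for channel, data in demux_frames(stream):
            print(channel, data)

Because the tunnel carries these frames rather than the packets themselves,
the inner connections never see the outer stream's retransmissions.
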
Useless Trivia
--------------

Back in 1998 (12 years ago! Yikes!), I released the first version of <a
href="http://alumnit.ca/wiki/?TunnelVisionReadMe">Tunnel Vision</a>, a
semi-intelligent VPN client for Linux. Unfortunately, I made two big mistakes:
I implemented the key exchange myself (oops), and I ended up doing
TCP-over-TCP (double oops). The resulting program worked okay - and people
used it for years - but the performance was always a bit funny. And nobody
ever found any security flaws in my key exchange, either, but that doesn't
mean anything. :)

The same year, dcoombs and I also released Fast Forward, a proxy server
supporting transparent proxying. Among other things, we used it for
automatically splitting traffic across more than one Internet connection (a
tool we called "Double Vision").

I was still in university at the time. A couple years after that, one of my
professors was working with some graduate students on the technology that
would eventually become <a href="http://www.slipstream.com/">Slipstream
Internet Acceleration</a>. He asked me to do a contract for him to build an
initial prototype of a transparent proxy server for mobile networks. The
idea was similar to sshuttle: if you reassemble and then disassemble the TCP
packets, you can reduce latency and improve performance vs. just forwarding
the packets over a plain VPN or mobile network. (It's unlikely that any of
my code has persisted in the Slipstream product today, but the concept is
still pretty cool. I'm still horrified that people use plain TCP on
complex mobile networks with crazily variable latency, for which it was
never really intended.)

That project I did for Slipstream was what first gave me the idea to merge
the concepts of Fast Forward, Double Vision, and Tunnel Vision into a single
program that was the best of all worlds. And here we are, at last, 10 years
later. You're welcome.

--
Avery Pennarun <apenwarr@gmail.com>

Mailing list:
Subscribe by sending a message to <sshuttle+subscribe@googlegroups.com>
List archives are at: http://groups.google.com/group/sshuttle

README.rst: 49 changed lines

@@ -1,49 +0,0 @@
sshuttle: where transparent proxy meets VPN meets ssh
=====================================================

As far as I know, sshuttle is the only program that solves the following
common case:

- Your client machine (or router) is Linux, FreeBSD, MacOS or Windows.

- You have access to a remote network via ssh.

- You don't necessarily have admin access on the remote network.

- The remote network has no VPN, or only stupid/complex VPN
  protocols (IPsec, PPTP, etc). Or maybe you *are* the
  admin and you just got frustrated with the awful state of
  VPN tools.

- You don't want to create an ssh port forward for every
  single host/port on the remote network.

- You hate openssh's port forwarding because it's randomly
  slow and/or stupid.

- You can't use openssh's PermitTunnel feature because
  it's disabled by default on openssh servers; plus it does
  TCP-over-TCP, which has `terrible performance`_.

.. _terrible performance: https://sshuttle.readthedocs.io/en/stable/how-it-works.html

Obtaining sshuttle
------------------

Please see the documentation_.

.. _Documentation: https://sshuttle.readthedocs.io/en/stable/installation.html

Documentation
-------------
The documentation for the stable version is available at:
https://sshuttle.readthedocs.org/

The documentation for the latest development version is available at:
https://sshuttle.readthedocs.org/en/latest/


Running as a service
--------------------
Sshuttle can also be run as a service and configured using a config management system:
https://medium.com/@mike.reider/using-sshuttle-as-a-service-bec2684a65fe

@@ -1,9 +0,0 @@
exclude_dirs:
  - tests
skips:
  - B101
  - B104
  - B404
  - B603
  - B606
  - B607

docs/Makefile: 177 changed lines

@@ -1,177 +0,0 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/sshuttle.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/sshuttle.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/sshuttle"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/sshuttle"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
||||
|
||||
xml:
|
||||
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
|
||||
@echo
|
||||
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
|
||||
|
||||
pseudoxml:
|
||||
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
|
||||
@echo
|
||||
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
|
@ -1 +0,0 @@
|
||||
.. include:: ../CHANGES.rst
|
@ -1,11 +0,0 @@
|
||||
Google ChromeOS
|
||||
===============
|
||||
|
||||
Currently there is no built-in support for running sshuttle directly on
|
||||
Google ChromeOS/Chromebooks.
|
||||
|
||||
What you can do instead is create a Linux VM with Crostini. In the default
|
||||
stretch/Debian 9 VM, you can then install sshuttle as on any Linux box, and
|
||||
it just works, as do xterms, ssvncviewer, etc.
|
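For example, once the Crostini VM is running, the usual Debian package
install from the Installation section should work inside it::

sudo apt-get install sshuttle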
||||
|
||||
https://www.reddit.com/r/Crostini/wiki/getstarted/crostini-setup-guide
|
261
docs/conf.py
@ -1,261 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# sshuttle documentation build configuration file, created by
|
||||
# sphinx-quickstart on Sun Jan 17 12:13:47 2016.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys
|
||||
import os
|
||||
sys.path.insert(0, os.path.abspath('..'))
|
||||
import sshuttle # NOQA
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.todo',
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
# source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = 'sshuttle'
|
||||
copyright = '2016, Brian May'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = sshuttle.__version__
|
||||
# The short X.Y version.
|
||||
version = '.'.join(release.split('.')[:2])
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
# language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
# today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
# today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
# default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
# add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
# add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
# show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
# modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built documents.
|
||||
# keep_warnings = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'furo'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
# html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
# html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
# html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
# html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
# html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
# html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# Add any extra paths that contain custom files (such as robots.txt or
|
||||
# .htaccess) here, relative to this directory. These files are copied
|
||||
# directly to the root of the documentation.
|
||||
# html_extra_path = []
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
# html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
# html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
# html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
# html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
# html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
# html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
# html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
# html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
# html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
# html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
# html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
# html_file_suffix = None
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'sshuttledoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
# 'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
# 'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
# 'preamble': '',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
('index', 'sshuttle.tex', 'sshuttle documentation', 'Brian May', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
# latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
# latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
# latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
# latex_show_urls = False
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
# latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
# latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('manpage', 'sshuttle', 'sshuttle documentation', ['Brian May'], 1)
|
||||
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
# man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
('index', 'sshuttle', 'sshuttle documentation',
|
||||
'Brian May', 'sshuttle', 'A transparent proxy-based VPN using ssh',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
# texinfo_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
# texinfo_domain_indices = True
|
||||
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
# texinfo_show_urls = 'footnote'
|
||||
|
||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||
# texinfo_no_detailmenu = False
|
@ -1,36 +0,0 @@
|
||||
How it works
|
||||
============
|
||||
sshuttle is not exactly a VPN, and not exactly port forwarding. It's kind
|
||||
of both, and kind of neither.
|
||||
|
||||
It's like a VPN, since it can forward every port on an entire network, not
|
||||
just ports you specify. Conveniently, it lets you use the "real" IP
|
||||
addresses of each host rather than faking port numbers on localhost.
|
||||
|
||||
On the other hand, the way it *works* is more like ssh port forwarding than
|
||||
a VPN. Normally, a VPN forwards your data one packet at a time, and
|
||||
doesn't care about individual connections; ie. it's "stateless" with respect
|
||||
to the traffic. sshuttle is the opposite of stateless; it tracks every
|
||||
single connection.
|
||||
|
||||
You could compare sshuttle to something like the old `Slirp
|
||||
<http://en.wikipedia.org/wiki/Slirp>`_ program, which was a userspace TCP/IP
|
||||
implementation that did something similar. But it operated on a
|
||||
packet-by-packet basis on the client side, reassembling the packets on the
|
||||
server side. That worked okay back in the "real live serial port" days,
|
||||
because serial ports had predictable latency and buffering.
|
||||
|
||||
But you can't safely just forward TCP packets over a TCP session (like ssh),
|
||||
because TCP's performance depends fundamentally on packet loss; it
|
||||
*must* experience packet loss in order to know when to slow down! At
|
||||
the same time, the outer TCP session (ssh, in this case) is a reliable
|
||||
transport, which means that what you forward through the tunnel *never*
|
||||
experiences packet loss. The ssh session itself experiences packet loss, of
|
||||
course, but TCP fixes it up and ssh (and thus you) never know the
|
||||
difference. But neither does your inner TCP session, and extremely screwy
|
||||
performance ensues.
|
||||
|
||||
sshuttle assembles the TCP stream locally, multiplexes it statefully over
|
||||
an ssh session, and disassembles it back into packets at the other end. So
|
||||
it never ends up doing TCP-over-TCP. It's just data-over-TCP, which is
|
||||
safe.
|
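To make that idea concrete, here is a deliberately tiny sketch in Python. It
is *not* sshuttle's real code or wire format; the frame layout and function
names are invented purely to illustrate "data-over-TCP": only payload bytes
from a locally terminated connection travel through the tunnel, and the far
end writes them to a brand-new outgoing TCP connection.

.. code-block:: python

import socket
import struct

def send_frame(mux_stream, channel_id, payload):
    # Invented frame layout: 2-byte channel id, 2-byte length, then payload.
    mux_stream.sendall(struct.pack('!HH', channel_id, len(payload)) + payload)

def pump_local_connection(local_sock, mux_stream, channel_id):
    # The client-side kernel terminates the redirected TCP connection, so all
    # we ever see here is a clean byte stream; no TCP packets are tunnelled.
    while True:
        data = local_sock.recv(65536)
        if not data:
            send_frame(mux_stream, channel_id, b'')  # empty frame signals EOF
            break
        send_frame(mux_stream, channel_id, data)

def open_remote_side(dest_host, dest_port):
    # On the server, each channel becomes an ordinary outgoing connection
    # whose congestion control is handled by the server-side kernel.
    return socket.create_connection((dest_host, dest_port))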
@ -1,28 +0,0 @@
|
||||
sshuttle: where transparent proxy meets VPN meets ssh
|
||||
=====================================================
|
||||
|
||||
:Date: |today|
|
||||
:Version: |version|
|
||||
|
||||
Contents:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
overview
|
||||
requirements
|
||||
installation
|
||||
usage
|
||||
platform
|
||||
Man Page <manpage>
|
||||
how-it-works
|
||||
support
|
||||
trivia
|
||||
changes
|
||||
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`search`
|
@ -1,84 +0,0 @@
|
||||
Installation
|
||||
============
|
||||
|
||||
- Ubuntu 16.04 or later::
|
||||
|
||||
apt-get install sshuttle
|
||||
|
||||
- Debian stretch or later::
|
||||
|
||||
apt-get install sshuttle
|
||||
|
||||
- Arch Linux::
|
||||
|
||||
pacman -S sshuttle
|
||||
|
||||
- Fedora::
|
||||
|
||||
dnf install sshuttle
|
||||
|
||||
- openSUSE::
|
||||
|
||||
zypper in sshuttle
|
||||
|
||||
- Gentoo::
|
||||
|
||||
emerge -av net-proxy/sshuttle
|
||||
|
||||
- NixOS::
|
||||
|
||||
nix-env -iA nixos.sshuttle
|
||||
|
||||
- From PyPI::
|
||||
|
||||
sudo pip install sshuttle
|
||||
|
||||
- Clone::
|
||||
|
||||
git clone https://github.com/sshuttle/sshuttle.git
|
||||
cd sshuttle
|
||||
sudo ./setup.py install
|
||||
|
||||
- FreeBSD::
|
||||
|
||||
# ports
|
||||
cd /usr/ports/net/py-sshuttle && make install clean
|
||||
# pkg
|
||||
pkg install py39-sshuttle
|
||||
|
||||
- OpenBSD::
|
||||
|
||||
pkg_add sshuttle
|
||||
|
||||
- macOS, via MacPorts::
|
||||
|
||||
sudo port selfupdate
|
||||
sudo port install sshuttle
|
||||
|
||||
It is also possible to install into a virtualenv as a non-root user.
|
||||
|
||||
- From PyPI::
|
||||
|
||||
python3 -m venv /tmp/sshuttle
|
||||
. /tmp/sshuttle/bin/activate
|
||||
pip install sshuttle
|
||||
|
||||
- Clone::
|
||||
|
||||
git clone https://github.com/sshuttle/sshuttle.git
|
||||
cd sshuttle
|
||||
python3 -m venv /tmp/sshuttle
|
||||
. /tmp/sshuttle/bin/activate
|
||||
python -m pip install .
|
||||
|
||||
- Homebrew::
|
||||
|
||||
brew install sshuttle
|
||||
|
||||
- Nix::
|
||||
|
||||
nix-shell -p sshuttle
|
||||
|
||||
- Windows::
|
||||
|
||||
pip install sshuttle
|
242
docs/make.bat
@ -1,242 +0,0 @@
|
||||
@ECHO OFF
|
||||
|
||||
REM Command file for Sphinx documentation
|
||||
|
||||
if "%SPHINXBUILD%" == "" (
|
||||
set SPHINXBUILD=sphinx-build
|
||||
)
|
||||
set BUILDDIR=_build
|
||||
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
|
||||
set I18NSPHINXOPTS=%SPHINXOPTS% .
|
||||
if NOT "%PAPER%" == "" (
|
||||
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
|
||||
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
|
||||
)
|
||||
|
||||
if "%1" == "" goto help
|
||||
|
||||
if "%1" == "help" (
|
||||
:help
|
||||
echo.Please use `make ^<target^>` where ^<target^> is one of
|
||||
echo. html to make standalone HTML files
|
||||
echo. dirhtml to make HTML files named index.html in directories
|
||||
echo. singlehtml to make a single large HTML file
|
||||
echo. pickle to make pickle files
|
||||
echo. json to make JSON files
|
||||
echo. htmlhelp to make HTML files and a HTML help project
|
||||
echo. qthelp to make HTML files and a qthelp project
|
||||
echo. devhelp to make HTML files and a Devhelp project
|
||||
echo. epub to make an epub
|
||||
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
|
||||
echo. text to make text files
|
||||
echo. man to make manual pages
|
||||
echo. texinfo to make Texinfo files
|
||||
echo. gettext to make PO message catalogs
|
||||
echo. changes to make an overview over all changed/added/deprecated items
|
||||
echo. xml to make Docutils-native XML files
|
||||
echo. pseudoxml to make pseudoxml-XML files for display purposes
|
||||
echo. linkcheck to check all external links for integrity
|
||||
echo. doctest to run all doctests embedded in the documentation if enabled
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "clean" (
|
||||
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
|
||||
del /q /s %BUILDDIR%\*
|
||||
goto end
|
||||
)
|
||||
|
||||
|
||||
%SPHINXBUILD% 2> nul
|
||||
if errorlevel 9009 (
|
||||
echo.
|
||||
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
|
||||
echo.installed, then set the SPHINXBUILD environment variable to point
|
||||
echo.to the full path of the 'sphinx-build' executable. Alternatively you
|
||||
echo.may add the Sphinx directory to PATH.
|
||||
echo.
|
||||
echo.If you don't have Sphinx installed, grab it from
|
||||
echo.http://sphinx-doc.org/
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
if "%1" == "html" (
|
||||
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "dirhtml" (
|
||||
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "singlehtml" (
|
||||
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pickle" (
|
||||
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the pickle files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "json" (
|
||||
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the JSON files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "htmlhelp" (
|
||||
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run HTML Help Workshop with the ^
|
||||
.hhp project file in %BUILDDIR%/htmlhelp.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "qthelp" (
|
||||
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run "qcollectiongenerator" with the ^
|
||||
.qhcp project file in %BUILDDIR%/qthelp, like this:
|
||||
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\sshuttle.qhcp
|
||||
echo.To view the help file:
|
||||
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\sshuttle.qhc
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "devhelp" (
|
||||
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "epub" (
|
||||
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The epub file is in %BUILDDIR%/epub.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latex" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdf" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf
|
||||
cd %BUILDDIR%/..
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdfja" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf-ja
|
||||
cd %BUILDDIR%/..
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "text" (
|
||||
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The text files are in %BUILDDIR%/text.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "man" (
|
||||
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The manual pages are in %BUILDDIR%/man.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "texinfo" (
|
||||
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "gettext" (
|
||||
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "changes" (
|
||||
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.The overview file is in %BUILDDIR%/changes.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "linkcheck" (
|
||||
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Link check complete; look for any errors in the above output ^
|
||||
or in %BUILDDIR%/linkcheck/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "doctest" (
|
||||
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of doctests in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/doctest/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "xml" (
|
||||
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The XML files are in %BUILDDIR%/xml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pseudoxml" (
|
||||
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
|
||||
goto end
|
||||
)
|
||||
|
||||
:end
|
503
docs/manpage.rst
@ -1,503 +0,0 @@
|
||||
sshuttle
|
||||
========
|
||||
|
||||
|
||||
Synopsis
|
||||
--------
|
||||
**sshuttle** [*options*] **-r** *[username@]sshserver[:port]* \<*subnets* ...\>
|
||||
|
||||
|
||||
Description
|
||||
-----------
|
||||
:program:`sshuttle` allows you to create a VPN connection from your
|
||||
machine to any remote server that you can connect to via ssh, as long
|
||||
as that server has a sufficiently new Python installation.
|
||||
|
||||
To work, you must have root access on the local machine,
|
||||
but you can have a normal account on the server.
|
||||
|
||||
It's valid to run :program:`sshuttle` more than once simultaneously on
|
||||
a single client machine, connecting to a different server
|
||||
every time, so you can be on more than one VPN at once.
|
||||
|
||||
If run on a router, :program:`sshuttle` can forward traffic for your
|
||||
entire subnet to the VPN.
|
||||
|
||||
|
||||
Options
|
||||
-------
|
||||
.. program:: sshuttle
|
||||
|
||||
.. option:: <subnets>
|
||||
|
||||
A list of subnets to route over the VPN, in the form
|
||||
``a.b.c.d[/width][port[-port]]``. Valid examples are 1.2.3.4 (a
|
||||
single IP address) and 1.2.3.4/32 (equivalent to 1.2.3.4),
|
||||
1.2.3.0/24 (a 24-bit subnet, ie. with a 255.255.255.0 netmask).
|
||||
Specify subnets 0/0 to match all IPv4 addresses and ::/0 to match
|
||||
all IPv6 addresses. Any of the previous examples are also valid if
|
||||
you append a port or a port range, so 1.2.3.4:8000 will only
|
||||
tunnel traffic destined for port 8000 of 1.2.3.4
|
||||
and 1.2.3.0/24:8000-9000 will tunnel traffic going to any port
|
||||
between 8000 and 9000 (inclusive) for all IPs in the 1.2.3.0/24
|
||||
subnet. A hostname can be provided instead of an IP address. If
|
||||
the hostname resolves to multiple IPs, all of the IPs are
|
||||
included. If a width is provided with a hostname, the width is
|
||||
applied to all of the hostname's IPs (if they are all either IPv4
|
||||
or IPv6). Widths cannot be supplied to hostnames that resolve to
|
||||
both IPv4 and IPv6. Valid examples are example.com,
|
||||
example.com:8000, example.com/24, example.com/24:8000 and
|
||||
example.com:8000-9000.
|
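Putting a few of the forms above together, an illustrative command line
(the server name, addresses and ports are placeholders) could look like::

sshuttle -r user@sshserver 1.2.3.0/24:8000-9000 example.com/24 ::/0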
||||
|
||||
.. option:: --method <auto|nat|nft|tproxy|pf|ipfw>
|
||||
|
||||
Which firewall method should sshuttle use? For auto, sshuttle attempts to
|
||||
guess the appropriate method depending on what it can find in PATH. The
|
||||
default value is auto.
|
||||
|
||||
.. option:: -l <[ip:]port>, --listen=<[ip:]port>
|
||||
|
||||
Use this ip address and port number as the transparent
|
||||
proxy port. By default :program:`sshuttle` finds an available
|
||||
port automatically and listens on IP 127.0.0.1
|
||||
(localhost), so you don't need to override it, and
|
||||
connections are only proxied from the local machine,
|
||||
not from outside machines. If you want to accept
|
||||
connections from other machines on your network (ie. to
|
||||
run :program:`sshuttle` on a router), try enabling IP Forwarding in
|
||||
your kernel, then using ``--listen 0.0.0.0:0``.
|
||||
You can use any name resolving to an IP address of the machine running
|
||||
:program:`sshuttle`, e.g. ``--listen localhost``.
|
||||
|
||||
For the nft, tproxy and pf methods this can be an IPv6 address. Use
|
||||
this option with comma separated values if required, to provide both
|
||||
IPv4 and IPv6 addresses, e.g. ``--listen 127.0.0.1:0,[::1]:0``.
|
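As a rough illustration of the router case on a Linux box (the server and
subnet are placeholders; the sysctl line is what "enabling IP Forwarding"
typically means there)::

sysctl -w net.ipv4.ip_forward=1
sshuttle --listen 0.0.0.0:0 -r user@sshserver 0/0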
||||
|
||||
.. option:: -H, --auto-hosts
|
||||
|
||||
Scan for remote hostnames and update the local /etc/hosts
|
||||
file with matching entries for as long as the VPN is
|
||||
open. This is nicer than changing your system's DNS
|
||||
(/etc/resolv.conf) settings, for several reasons. First,
|
||||
hostnames are added without domain names attached, so
|
||||
you can ``ssh thatserver`` without worrying if your local
|
||||
domain matches the remote one. Second, if you :program:`sshuttle`
|
||||
into more than one VPN at a time, it's impossible to
|
||||
use more than one DNS server at once anyway, but
|
||||
:program:`sshuttle` correctly merges /etc/hosts entries between
|
||||
all running copies. Third, if you're only routing a
|
||||
few subnets over the VPN, you probably would prefer to
|
||||
keep using your local DNS server for everything else.
|
||||
|
||||
:program:`sshuttle` tries to store a cache of the hostnames in
|
||||
~/.sshuttle.hosts on the remote host. Similarly, it tries to read
|
||||
the file when you later reconnect to the host with --auto-hosts
|
||||
enabled to quickly populate the host list. When troubleshooting
|
||||
this feature, try removing this file on the remote host when
|
||||
sshuttle is not running.
|
||||
|
||||
.. option:: -N, --auto-nets
|
||||
|
||||
In addition to the subnets provided on the command
|
||||
line, ask the server which subnets it thinks we should
|
||||
route, and route those automatically. The suggestions
|
||||
are taken automatically from the server's routing
|
||||
table.
|
||||
|
||||
This feature does not detect IPv6 routes. Specify IPv6 subnets
|
||||
manually. For example, specify the ``::/0`` subnet on the command
|
||||
line to route all IPv6 traffic.
|
||||
|
||||
.. option:: --dns
|
||||
|
||||
Capture local DNS requests and forward to the remote DNS
|
||||
server. All queries to any of the local system's DNS
|
||||
servers (/etc/resolv.conf and, if it exists,
|
||||
/run/systemd/resolve/resolv.conf) will be intercepted and
|
||||
resolved on the remote side of the tunnel instead, there
|
||||
using the DNS specified via the :option:`--to-ns` option,
|
||||
if specified. Only plain DNS traffic sent to these servers
|
||||
on port 53 are captured.
|
||||
|
||||
.. option:: --ns-hosts=<server1[,server2[,server3[...]]]>
|
||||
|
||||
Capture local DNS requests to the specified server(s)
|
||||
and forward to the remote DNS server. Contrary to the
|
||||
:option:`--dns` option, this flag allows you to specify the
|
||||
DNS server(s) whose queries should be intercepted,
|
||||
instead of intercepting all DNS traffic on the local
|
||||
machine. This can be useful when only certain DNS
|
||||
requests should be resolved on the remote side of the
|
||||
tunnel, e.g. in combination with dnsmasq.
|
||||
|
||||
.. option:: --to-ns=<server>
|
||||
|
||||
The DNS to forward requests to when remote DNS
|
||||
resolution is enabled. If not given, sshuttle will
|
||||
simply resolve using the system configured resolver on
|
||||
the remote side (via /etc/resolv.conf on the remote
|
||||
side).
|
||||
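For example, to resolve intercepted queries on a specific server reachable
from the remote side (the addresses here are placeholders)::

sshuttle --dns --to-ns 192.0.2.53 -r user@sshserver 0/0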
|
||||
.. option:: --python
|
||||
|
||||
Specify the name/path of the remote python interpreter. The
|
||||
default is to use ``python3`` (or ``python``, if ``python3``
|
||||
fails) in the remote system's PATH.
|
||||
|
||||
.. option:: -r <[username@]sshserver[:port]>, --remote=<[username@]sshserver[:port]>
|
||||
|
||||
The remote hostname and optional username and ssh
|
||||
port number to use for connecting to the remote server.
|
||||
For example, example.com, testuser@example.com,
|
||||
testuser@example.com:2222, or example.com:2244. This
|
||||
hostname is passed to ssh, so it will recognize any
|
||||
aliases and settings you may have configured in
|
||||
~/.ssh/config.
|
||||
|
||||
.. option:: -x <subnet>, --exclude=<subnet>
|
||||
|
||||
Explicitly exclude this subnet from forwarding. The
|
||||
format of this option is the same as the ``<subnets>``
|
||||
option. To exclude more than one subnet, specify the
|
||||
``-x`` option more than once. You can say something like
|
||||
``0/0 -x 1.2.3.0/24`` to forward everything except the
|
||||
local subnet over the VPN, for example.
|
||||
|
||||
.. option:: -X <file>, --exclude-from=<file>
|
||||
|
||||
Exclude the subnets specified in a file, one subnet per
|
||||
line. Useful when you have lots of subnets to exclude.
|
||||
|
||||
.. option:: -v, --verbose
|
||||
|
||||
Print more information about the session. This option
|
||||
can be used more than once for increased verbosity. By
|
||||
default, :program:`sshuttle` prints only error messages.
|
||||
|
||||
.. option:: -e, --ssh-cmd
|
||||
|
||||
The command to use to connect to the remote server. The
|
||||
default is just ``ssh``. Use this if your ssh client is
|
||||
in a non-standard location or you want to provide extra
|
||||
options to the ssh command, for example, ``-e 'ssh -v'``.
|
||||
|
||||
.. option:: --remote-shell
|
||||
|
||||
For Windows targets, specify the remote shell program to use instead of the de facto POSIX shell.
|
||||
It is usually either ``cmd`` or ``powershell``, unless something like git-bash is in use.
|
||||
|
||||
.. option:: --no-cmd-delimiter
|
||||
|
||||
Do not add a double dash (--) delimiter before invoking Python on
|
||||
the remote host. This option is useful when the ssh command used
|
||||
to connect is a custom command that does not interpret this
|
||||
delimiter correctly.
|
||||
|
||||
.. option:: --seed-hosts
|
||||
|
||||
A comma-separated list of hostnames to use to
|
||||
initialize the :option:`--auto-hosts` scan algorithm.
|
||||
:option:`--auto-hosts` does things like poll netstat output
|
||||
for lists of local hostnames, but can speed things up
|
||||
if you use this option to give it a few names to start
|
||||
from.
|
||||
|
||||
If this option is used *without* :option:`--auto-hosts`,
|
||||
then the listed hostnames will be scanned and added, but
|
||||
no further hostnames will be added.
|
||||
|
||||
.. option:: --no-latency-control
|
||||
|
||||
Sacrifice latency to improve bandwidth benchmarks. ssh
|
||||
uses really big socket buffers, which can overload the
|
||||
connection if you start doing large file transfers,
|
||||
thus making all your other sessions inside the same
|
||||
tunnel go slowly. Normally, :program:`sshuttle` tries to avoid
|
||||
this problem using a "fullness check" that allows only
|
||||
a certain amount of outstanding data to be buffered at
|
||||
a time. But on high-bandwidth links, this can leave a
|
||||
lot of your bandwidth underutilized. It also makes
|
||||
:program:`sshuttle` seem slow in bandwidth benchmarks (benchmarks
|
||||
rarely test ping latency, which is what :program:`sshuttle` is
|
||||
trying to control). This option disables the latency
|
||||
control feature, maximizing bandwidth usage. Use at
|
||||
your own risk.
|
||||
|
||||
.. option:: --latency-buffer-size
|
||||
|
||||
Set the size of the buffer used in latency control. The
|
||||
default is ``32768``. Changing this option allows a compromise
|
||||
to be made between latency and bandwidth without completely
|
||||
disabling latency control (with :option:`--no-latency-control`).
|
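For example, doubling the default buffer (the value is only illustrative)
trades a little extra latency for more throughput::

sshuttle --latency-buffer-size 65536 -r user@sshserver 0/0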
||||
|
||||
.. option:: -D, --daemon
|
||||
|
||||
Automatically fork into the background after connecting
|
||||
to the remote server. Implies :option:`--syslog`.
|
||||
|
||||
.. option:: -s <file>, --subnets=<file>
|
||||
|
||||
Include the subnets specified in a file instead of on the
|
||||
command line. One subnet per line.
|
||||
|
||||
.. option:: --syslog
|
||||
|
||||
After connecting, send all log messages to the
|
||||
:manpage:`syslog(3)` service instead of stderr. This is
|
||||
implicit if you use :option:`--daemon`.
|
||||
|
||||
.. option:: --pidfile=<pidfilename>
|
||||
|
||||
When using :option:`--daemon`, save :program:`sshuttle`'s pid to
|
||||
*pidfilename*. The default is ``sshuttle.pid`` in the
|
||||
current directory.
|
||||
|
||||
.. option:: --disable-ipv6
|
||||
|
||||
Disable IPv6 support for methods that support it (nat, nft,
|
||||
tproxy, and pf).
|
||||
|
||||
.. option:: --firewall
|
||||
|
||||
(internal use only) run the firewall manager. This is
|
||||
the only part of :program:`sshuttle` that must run as root. If
|
||||
you start :program:`sshuttle` as a non-root user, it will
|
||||
automatically run ``sudo`` or ``su`` to start the firewall
|
||||
manager, but the core of :program:`sshuttle` still runs as a
|
||||
normal user.
|
||||
|
||||
.. option:: --hostwatch
|
||||
|
||||
(internal use only) run the hostwatch daemon. This
|
||||
process runs on the server side and collects hostnames for
|
||||
the :option:`--auto-hosts` option. Using this option by itself
|
||||
makes it a lot easier to debug and test the :option:`--auto-hosts`
|
||||
feature.
|
||||
|
||||
.. option:: --sudoers-no-modify
|
||||
|
||||
sshuttle prints a configuration to stdout which allows a user to
|
||||
run sshuttle without a password. This option is INSECURE because,
|
||||
with some cleverness, it also allows the user to run any command
|
||||
as root without a password. The output also includes a suggested
|
||||
method for you to install the configuration.
|
||||
|
||||
Use --sudoers-user to modify the user that it applies to.
|
||||
|
||||
.. option:: --sudoers-user
|
||||
|
||||
Set the user name, or a group as ``%group_name``, for passwordless
|
||||
operation. The default is the current user. Set to ALL for all users
|
||||
(NOT RECOMMENDED: See note about security in --sudoers-no-modify
|
||||
documentation above). Only works with the --sudoers-no-modify
|
||||
option.
|
||||
|
||||
.. option:: -t <mark>, --tmark=<mark>
|
||||
|
||||
An option used by the tproxy method: Use the specified traffic
|
||||
mark. The mark must be a hexadecimal value. Defaults to 0x01.
|
||||
|
||||
.. option:: --version
|
||||
|
||||
Print program version.
|
||||
|
||||
|
||||
Configuration File
|
||||
------------------
|
||||
All the options described above can optionally be specified in a configuration
|
||||
file.
|
||||
|
||||
To run :program:`sshuttle` with options defined in, e.g., `/etc/sshuttle.conf`
|
||||
just pass the path to the file preceded by the `@` character, e.g.
|
||||
`@/etc/sshuttle.conf`.
|
||||
|
||||
When running :program:`sshuttle` with options defined in a configuration file,
|
||||
options can still be passed via the command line in addition to what is
|
||||
defined in the file. If a given option is defined both in the file and in
|
||||
the command line, the value in the command line will take precedence.
|
||||
|
||||
Arguments read from a file must be one per line, as shown below::
|
||||
|
||||
value
|
||||
--option1
|
||||
value1
|
||||
--option2
|
||||
value2
|
||||
|
||||
The configuration file supports comments for human-readable
|
||||
annotations. For example::
|
||||
|
||||
# company-internal API
|
||||
8.8.8.8/32
|
||||
# home IoT
|
||||
192.168.63.0/24
|
||||
|
||||
|
||||
Environment Variable
|
||||
--------------------
|
||||
|
||||
You can specify command line options with the `SSHUTTLE_ARGS` environment
|
||||
variable. If a given option is defined in both the environment variable and
|
||||
command line, the value on the command line will take precedence.
|
||||
|
||||
For example::
|
||||
|
||||
SSHUTTLE_ARGS="-e 'ssh -v' --dns" sshuttle -r example.com 0/0
|
||||
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
Use the following command to route all IPv4 TCP traffic through remote
|
||||
(-r) host example.com (and possibly other traffic too, depending on
|
||||
the selected --method). The 0/0 subnet, short for 0.0.0.0/0, matches
|
||||
all IPv4 addresses. The ::/0 subnet, matching all IPv6 addresses could
|
||||
be added to the example. We also exclude (-x) example.com:22 so that
|
||||
we can establish ssh connections from our local machine to the remote
|
||||
host without them being routed through sshuttle. Excluding the remote
|
||||
host may be necessary on some machines for sshuttle to work properly.
|
||||
Press Ctrl+C to exit. To also route DNS queries through sshuttle, try
|
||||
adding --dns. Add or remove -v options to see more or less
|
||||
information::
|
||||
|
||||
$ sshuttle -r example.com -x example.com:22 0/0
|
||||
|
||||
Starting sshuttle proxy (version ...).
|
||||
[local sudo] Password:
|
||||
fw: Starting firewall with Python version 3.9.5
|
||||
fw: ready method name nat.
|
||||
c : IPv6 disabled since it isn't supported by method nat.
|
||||
c : Method: nat
|
||||
c : IPv4: on
|
||||
c : IPv6: off (not available with nat method)
|
||||
c : UDP : off (not available with nat method)
|
||||
c : DNS : off (available)
|
||||
c : User: off (available)
|
||||
c : Subnets to forward through remote host (type, IP, cidr mask width, startPort, endPort):
|
||||
c : (<AddressFamily.AF_INET: 2>, '0.0.0.0', 0, 0, 0)
|
||||
c : Subnets to exclude from forwarding:
|
||||
c : (<AddressFamily.AF_INET: 2>, '...', 32, 22, 22)
|
||||
c : (<AddressFamily.AF_INET: 2>, '127.0.0.1', 32, 0, 0)
|
||||
c : TCP redirector listening on ('127.0.0.1', 12299).
|
||||
c : Starting client with Python version 3.9.5
|
||||
c : Connecting to server...
|
||||
user@example.com's password:
|
||||
s: Starting server with Python version 3.6.8
|
||||
s: latency control setting = True
|
||||
s: auto-nets:False
|
||||
c : Connected to server.
|
||||
fw: setting up.
|
||||
fw: iptables -w -t nat -N sshuttle-12299
|
||||
fw: iptables -w -t nat -F sshuttle-12299
|
||||
...
|
||||
Accept: 192.168.42.121:60554 -> 77.141.99.22:22.
|
||||
^C
|
||||
c : Keyboard interrupt: exiting.
|
||||
c : SW'unknown':Mux#1: deleting (1 remain)
|
||||
c : SW#7:192.168.42.121:60554: deleting (0 remain)
|
||||
|
||||
|
||||
Connect to a remote server, with automatic hostname
|
||||
and subnet guessing::
|
||||
|
||||
$ sshuttle -vNHr example.com -x example.com:22
|
||||
Starting sshuttle proxy (version ...).
|
||||
[local sudo] Password:
|
||||
fw: Starting firewall with Python version 3.9.5
|
||||
fw: ready method name nat.
|
||||
c : IPv6 disabled since it isn't supported by method nat.
|
||||
c : Method: nat
|
||||
c : IPv4: on
|
||||
c : IPv6: off (not available with nat method)
|
||||
c : UDP : off (not available with nat method)
|
||||
c : DNS : off (available)
|
||||
c : User: off (available)
|
||||
c : Subnets to forward through remote host (type, IP, cidr mask width, startPort, endPort):
|
||||
c : NOTE: Additional subnets to forward may be added below by --auto-nets.
|
||||
c : Subnets to exclude from forwarding:
|
||||
c : (<AddressFamily.AF_INET: 2>, '...', 32, 22, 22)
|
||||
c : (<AddressFamily.AF_INET: 2>, '127.0.0.1', 32, 0, 0)
|
||||
c : TCP redirector listening on ('127.0.0.1', 12300).
|
||||
c : Starting client with Python version 3.9.5
|
||||
c : Connecting to server...
|
||||
user@example.com's password:
|
||||
s: Starting server with Python version 3.6.8
|
||||
s: latency control setting = True
|
||||
s: auto-nets:True
|
||||
c : Connected to server.
|
||||
c : seed_hosts: []
|
||||
s: available routes:
|
||||
s: 77.141.99.0/24
|
||||
fw: setting up.
|
||||
fw: iptables -w -t nat -N sshuttle-12300
|
||||
fw: iptables -w -t nat -F sshuttle-12300
|
||||
...
|
||||
c : Accept: 192.168.42.121:60554 -> 77.141.99.22:22.
|
||||
^C
|
||||
c : Keyboard interrupt: exiting.
|
||||
c : SW'unknown':Mux#1: deleting (1 remain)
|
||||
c : SW#7:192.168.42.121:60554: deleting (0 remain)
|
||||
|
||||
Run :program:`sshuttle` with a `/etc/sshuttle.conf` configuration file::
|
||||
|
||||
$ sshuttle @/etc/sshuttle.conf
|
||||
|
||||
Use the options defined in `/etc/sshuttle.conf` but be more verbose::
|
||||
|
||||
$ sshuttle @/etc/sshuttle.conf -vvv
|
||||
|
||||
Override the remote server defined in `/etc/sshuttle.conf`::
|
||||
|
||||
$ sshuttle @/etc/sshuttle.conf -r otheruser@test.example.com
|
||||
|
||||
Example configuration file::
|
||||
|
||||
192.168.0.0/16
|
||||
--remote
|
||||
user@example.com
|
||||
|
||||
|
||||
Discussion
|
||||
----------
|
||||
When it starts, :program:`sshuttle` creates an ssh session to the
|
||||
server specified by the ``-r`` option.
|
||||
|
||||
After connecting to the remote server, :program:`sshuttle` uploads its
|
||||
(python) source code to the remote end and executes it
|
||||
there. Thus, you don't need to install :program:`sshuttle` on the
|
||||
remote server, and there are never :program:`sshuttle` version
|
||||
conflicts between client and server.
|
||||
|
||||
Unlike most VPNs, :program:`sshuttle` forwards sessions, not packets.
|
||||
That is, it uses kernel transparent proxying (`iptables
|
||||
REDIRECT` rules on Linux) to
|
||||
capture outgoing TCP sessions, then creates entirely
|
||||
separate TCP sessions out to the original destination at
|
||||
the other end of the tunnel.
|
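For illustration only, the nat method's redirection boils down to rules of
roughly this shape; this is not the exact rule set sshuttle generates, and
the chain name and port are placeholders taken from the example output
above::

iptables -t nat -A sshuttle-12300 -p tcp -d 0.0.0.0/0 -j REDIRECT --to-ports 12300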
||||
|
||||
Packet-level forwarding (eg. using the tun/tap devices on
|
||||
Linux) seems elegant at first, but it results in
|
||||
several problems, notably the 'tcp over tcp' problem. The
|
||||
tcp protocol depends fundamentally on packets being dropped
|
||||
in order to implement its congestion control algorithm; if
|
||||
you pass tcp packets through a tcp-based tunnel (such as
|
||||
ssh), the inner tcp packets will never be dropped, and so
|
||||
the inner tcp stream's congestion control will be
|
||||
completely broken, and performance will be terrible. Thus,
|
||||
packet-based VPNs (such as IPsec and openvpn) cannot use
|
||||
tcp-based encrypted streams like ssh or ssl, and have to
|
||||
implement their own encryption from scratch, which is very
|
||||
complex and error prone.
|
||||
|
||||
:program:`sshuttle`'s simplicity comes from the fact that it can
|
||||
safely use the existing ssh encrypted tunnel without
|
||||
incurring a performance penalty. It does this by letting
|
||||
the client-side kernel manage the incoming tcp stream, and
|
||||
the server-side kernel manage the outgoing tcp stream;
|
||||
there is no need for congestion control to be shared
|
||||
between the two separate streams, so a tcp-based tunnel is
|
||||
fine.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:manpage:`ssh(1)`, :manpage:`python(1)`
|
@ -1,8 +0,0 @@
|
||||
OpenWRT
|
||||
========
|
||||
|
||||
Run::
|
||||
|
||||
opkg install python3 python3-pip iptables-mod-extra iptables-mod-nat-extra iptables-mod-ipopt
|
||||
python3 /usr/bin/pip3 install sshuttle
|
||||
sshuttle -l 0.0.0.0 -r <IP> -x 192.168.1.1 0/0
|
@ -1,26 +0,0 @@
|
||||
Overview
|
||||
========
|
||||
|
||||
As far as I know, sshuttle is the only program that solves the following
|
||||
common case:
|
||||
|
||||
- Your client machine (or router) is Linux, MacOS, FreeBSD, OpenBSD or pfSense.
|
||||
|
||||
- You have access to a remote network via ssh.
|
||||
|
||||
- You don't necessarily have admin access on the remote network.
|
||||
|
||||
- The remote network has no VPN, or only stupid/complex VPN
|
||||
protocols (IPsec, PPTP, etc). Or maybe you *are* the
|
||||
admin and you just got frustrated with the awful state of
|
||||
VPN tools.
|
||||
|
||||
- You don't want to create an ssh port forward for every
|
||||
single host/port on the remote network.
|
||||
|
||||
- You hate openssh's port forwarding because it's randomly
|
||||
slow and/or stupid.
|
||||
|
||||
- You can't use openssh's PermitTunnel feature because
|
||||
it's disabled by default on openssh servers; plus it does
|
||||
TCP-over-TCP, which has terrible performance (see below).
|
@ -1,12 +0,0 @@
|
||||
Platform Specific Notes
|
||||
=======================
|
||||
|
||||
Contents:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
chromeos
|
||||
tproxy
|
||||
windows
|
||||
openwrt
|
@ -1,97 +0,0 @@
|
||||
Requirements
|
||||
============
|
||||
|
||||
Client side Requirements
|
||||
------------------------
|
||||
|
||||
- sudo, or root access on your client machine.
|
||||
(The server doesn't need admin access.)
|
||||
- Python 3.9 or greater.
|
||||
|
||||
|
||||
Linux with NAT method
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
Supports:
|
||||
|
||||
* IPv4 TCP
|
||||
* IPv4 DNS
|
||||
* IPv6 TCP
|
||||
* IPv6 DNS
|
||||
|
||||
Requires:
|
||||
|
||||
* iptables DNAT and REDIRECT modules. ip6tables for IPv6.
|
||||
|
||||
Linux with nft method
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
Supports
|
||||
|
||||
* IPv4 TCP
|
||||
* IPv4 DNS
|
||||
* IPv6 TCP
|
||||
* IPv6 DNS
|
||||
|
||||
Requires:
|
||||
|
||||
* nftables
|
||||
|
||||
Linux with TPROXY method
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Supports:
|
||||
|
||||
* IPv4 TCP
|
||||
* IPv4 UDP
|
||||
* IPv4 DNS
|
||||
* IPv6 TCP
|
||||
* IPv6 UDP
|
||||
* IPv6 DNS
|
||||
|
||||
|
||||
MacOS / FreeBSD / OpenBSD / pfSense
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Method: pf
|
||||
|
||||
Supports:
|
||||
|
||||
* IPv4 TCP
|
||||
* IPv4 DNS
|
||||
* IPv6 TCP
|
||||
* IPv6 DNS
|
||||
|
||||
Requires:
|
||||
|
||||
* You need to have the pfctl command.
|
||||
|
||||
Windows
|
||||
~~~~~~~
|
||||
|
||||
Experimental built-in support available. See :doc:`windows` for more information.
|
||||
|
||||
|
||||
Server side Requirements
|
||||
------------------------
|
||||
|
||||
- Python 3.9 or greater.
|
||||
|
||||
|
||||
Additional Suggested Software
|
||||
-----------------------------
|
||||
|
||||
- If you are using systemd, sshuttle can notify it when the connection to
|
||||
the remote end is established and the firewall rules are installed. For
|
||||
this feature to work you must configure the process start-up type for the
|
||||
sshuttle service unit to notify, as shown in the example below.
|
||||
|
||||
.. code-block:: ini
|
||||
:emphasize-lines: 6
|
||||
|
||||
[Unit]
|
||||
Description=sshuttle
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
ExecStart=/usr/bin/sshuttle --dns --remote <user>@<server> <subnets...>
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
@ -1,11 +0,0 @@
|
||||
Support
|
||||
=======
|
||||
|
||||
Mailing list:
|
||||
|
||||
* Subscribe by sending a message to <sshuttle+subscribe@googlegroups.com>
|
||||
* List archives are at: http://groups.google.com/group/sshuttle
|
||||
|
||||
Issue tracker and pull requests at github:
|
||||
|
||||
* https://github.com/sshuttle/sshuttle
|
@ -1,40 +0,0 @@
|
||||
TPROXY
|
||||
======
|
||||
TPROXY is the only method that supports UDP.
|
||||
|
||||
There are some things you need to consider for TPROXY to work:
|
||||
|
||||
- The following commands need to be run first as root. This only needs to be
|
||||
done once after booting up::
|
||||
|
||||
ip route add local default dev lo table 100
|
||||
ip rule add fwmark {TMARK} lookup 100
|
||||
ip -6 route add local default dev lo table 100
|
||||
ip -6 rule add fwmark {TMARK} lookup 100
|
||||
|
||||
where {TMARK} is the identifier mark passed with -t or --tmark flag
|
||||
as a hexadecimal string (default value is '0x01').
|
||||
|
||||
- The ``--auto-nets`` feature does not detect IPv6 routes automatically. Add IPv6
|
||||
routes manually. e.g. by adding ``'::/0'`` to the end of the command line.
|
||||
|
||||
- The client needs to be run as root, e.g.::
|
||||
|
||||
sudo SSH_AUTH_SOCK="$SSH_AUTH_SOCK" $HOME/tree/sshuttle.tproxy/sshuttle --method=tproxy ...
|
||||
|
||||
- You may need to exclude the IP address of the server you are connecting to.
|
||||
Otherwise sshuttle may attempt to intercept the ssh packets, which will not
|
||||
work. Use the ``--exclude`` parameter for this.
|
||||
|
||||
- You need the ``--method=tproxy`` parameter, as above.
|
||||
|
||||
- The routes for the outgoing packets must already exist. For example, if your
|
||||
connection does not have IPv6 support, no IPv6 routes will exist, IPv6
|
||||
packets will not be generated and sshuttle cannot intercept them::
|
||||
|
||||
telnet -6 www.google.com 80
|
||||
Trying 2404:6800:4001:805::1010...
|
||||
telnet: Unable to connect to remote host: Network is unreachable
|
||||
|
||||
Add some dummy routes to external interfaces. Make sure, however, that they
|
||||
get removed after sshuttle exits.
|
@ -1,35 +0,0 @@
Useless Trivia
==============

This section was written by the original author, Avery Pennarun
<apenwarr@gmail.com>.

Back in 1998, I released the first version of `Tunnel
Vision <http://alumnit.ca/wiki/?TunnelVisionReadMe>`_, a semi-intelligent VPN
client for Linux. Unfortunately, I made two big mistakes: I implemented the
key exchange myself (oops), and I ended up doing TCP-over-TCP (double oops).
The resulting program worked okay - and people used it for years - but the
performance was always a bit funny. And nobody ever found any security flaws
in my key exchange, either, but that doesn't mean anything. :)

The same year, dcoombs and I also released Fast Forward, a proxy server
supporting transparent proxying. Among other things, we used it for
automatically splitting traffic across more than one Internet connection (a
tool we called "Double Vision").

I was still in university at the time. A couple of years after that, one of my
professors was working with some graduate students on the technology that would
eventually become `Slipstream Internet Acceleration
<http://www.slipstream.com/>`_. He asked me to do a contract for him to build
an initial prototype of a transparent proxy server for mobile networks. The
idea was similar to sshuttle: if you reassemble and then disassemble the TCP
packets, you can reduce latency and improve performance vs. just forwarding
the packets over a plain VPN or mobile network. (It's unlikely that any of my
code has persisted in the Slipstream product today, but the concept is still
pretty cool. I'm still horrified that people use plain TCP on complex mobile
networks with crazily variable latency, for which it was never really
intended.)

That project I did for Slipstream was what first gave me the idea to merge
the concepts of Fast Forward, Double Vision, and Tunnel Vision into a single
program that was the best of all worlds. And here we are, at last.
You're welcome.
@ -1,93 +0,0 @@
Usage
=====

.. note::

    For information on usage with Windows, see the :doc:`windows` section.
    For information on using the TProxy method, see the :doc:`tproxy` section.

Forward all traffic::

    sshuttle -r username@sshserver 0.0.0.0/0

- Use the :option:`sshuttle -r` parameter to specify a remote server.
  On some systems, you may also need to use the :option:`sshuttle -x`
  parameter to exclude sshserver or sshserver:22 so that your local
  machine can communicate directly with sshserver without it being
  redirected by sshuttle (see the example at the end of these notes).

- By default sshuttle will automatically choose a method to use. Override with
  the :option:`sshuttle --method` parameter.

- There is a shortcut for 0.0.0.0/0 for those that value
  their wrists::

      sshuttle -r username@sshserver 0/0

- For 'My VPN broke and I need a temporary solution FAST to access local IPv4 addresses'::

      sshuttle --dns -NHr username@sshserver 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16

If you would also like your DNS queries to be proxied
through the DNS server of the server you are connected to::

    sshuttle --dns -r username@sshserver 0/0

The above is probably what you want to use to prevent
local network attacks such as Firesheep and friends.
See the documentation for the :option:`sshuttle --dns` parameter.

(You may be prompted for one or more passwords; first, the local password to
become root using sudo, and then the remote ssh password. Or you might have
sudo and ssh set up to not require passwords, in which case you won't be
prompted at all.)
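
As mentioned above, if connections to the ssh server itself end up being
redirected, you can exclude it explicitly. A minimal sketch (``sshserver``
stands for your server's hostname or address)::

    sshuttle -r username@sshserver -x sshserver 0/0
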
Usage Notes
-----------

That's it! Now your local machine can access the remote network as if you
were right there. And if your "client" machine is a router, everyone on
your local network can make connections to your remote network.

You don't need to install sshuttle on the remote server;
the remote server just needs to have python available.
sshuttle will automatically upload and run its source code
to the remote python interpreter.

This creates a transparent proxy server on your local machine for all IP
addresses that match 0.0.0.0/0. (You can use more specific IP addresses if
you want; use any number of IP addresses or subnets to change which
addresses get proxied. Using 0.0.0.0/0 proxies *everything*, which is
interesting if you don't trust the people on your local network.)

Any TCP session you initiate to one of the proxied IP addresses will be
captured by sshuttle and sent over an ssh session to the remote copy of
sshuttle, which will then regenerate the connection on that end, and funnel
the data back and forth through ssh.

Fun, right? A poor man's instant VPN, and you don't even have to have
admin access on the server.

Sudoers File
------------

sshuttle can generate a sudoers.d file for Linux and MacOS. This
allows one or more users to run sshuttle without entering the
local sudo password. **WARNING:** This option is *insecure*
because, with some cleverness, it also allows these users to run any
command (via the --ssh-cmd option) as root without a password.

To print a sudo configuration file and see a suggested way to install it, run::

    sshuttle --sudoers-no-modify

A custom user or group can be set with the
:option:`sshuttle --sudoers-no-modify --sudoers-user {user_descriptor}`
option. Valid values for this vary based on how your system is configured.
Values such as usernames, groups prepended with `%` and sudoers user
aliases will work. See the sudoers manual for more information on valid
user specifications. The option must be used with `--sudoers-no-modify`::

    sshuttle --sudoers-no-modify --sudoers-user mike
    sshuttle --sudoers-no-modify --sudoers-user %sudo

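For example, one possible way to review and install the generated file by hand
(a sketch, not the only way; the target name ``/etc/sudoers.d/sshuttle_auto``
is an arbitrary choice)::

    sshuttle --sudoers-no-modify > /tmp/sshuttle_sudoers
    visudo -c -f /tmp/sshuttle_sudoers    # syntax-check it before installing
    sudo install -m 0440 /tmp/sshuttle_sudoers /etc/sudoers.d/sshuttle_auto
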
@ -1,28 +0,0 @@
Microsoft Windows
=================

Experimental native support::

Experimental built-in support for Windows is available through the `windivert` method.
You have to install the https://pypi.org/project/pydivert package, and you need
Administrator privileges to use the windivert method.

Notes

- sshuttle should be executed from an admin shell (automatic admin elevation of the firewall process is not available)
- Only TCP over IPv4 is supported (IPv6, UDP and DNS are not available)

Use Linux VM on Windows::

An alternative is to create a Linux VM with Vagrant (or simply
VirtualBox if you like). In the Vagrant settings, remember to turn on the bridged
NIC. Then, run sshuttle inside the VM like below::

    sshuttle -l 0.0.0.0 -x 10.0.0.0/8 -x 192.168.0.0/16 0/0

10.0.0.0/8 excludes the NAT traffic of Vagrant and 192.168.0.0/16 excludes
traffic to the local area network (assuming that we're using the 192.168.0.0 subnet).

Assuming the VM has the IP 192.168.1.200 obtained on the bridged NIC (we can
configure that in Vagrant), we can then ask Windows to route all its traffic
via the VM by running the following in cmd.exe with admin rights::

    route add 0.0.0.0 mask 0.0.0.0 192.168.1.200
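
To undo this when you are finished (run in the same elevated cmd.exe;
192.168.1.200 is the example VM address used above)::

    route delete 0.0.0.0 mask 0.0.0.0 192.168.1.200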
133
flake.lock
generated
@ -1,133 +0,0 @@
|
||||
{
|
||||
"nodes": {
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731533236,
|
||||
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1740743217,
|
||||
"narHash": "sha256-brsCRzLqimpyhORma84c3W2xPbIidZlIc3JGIuQVSNI=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "b27ba4eb322d9d2bf2dc9ada9fd59442f50c8d7c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-24.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"pyproject-build-systems": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"pyproject-nix": [
|
||||
"pyproject-nix"
|
||||
],
|
||||
"uv2nix": [
|
||||
"uv2nix"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1740362541,
|
||||
"narHash": "sha256-S8Mno07MspggOv/xIz5g8hB2b/C5HPiX8E+rXzKY+5U=",
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "build-system-pkgs",
|
||||
"rev": "e151741c848ba92331af91f4e47640a1fb82be19",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "build-system-pkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"pyproject-nix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1739758351,
|
||||
"narHash": "sha256-Aoa4dEoC7Hf6+gFVk/SDquZTMFlmlfsgdTWuqQxzePs=",
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "pyproject.nix",
|
||||
"rev": "1329712f7f9af3a8b270764ba338a455b7323811",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "pyproject.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"pyproject-build-systems": "pyproject-build-systems",
|
||||
"pyproject-nix": "pyproject-nix",
|
||||
"uv2nix": "uv2nix"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"uv2nix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"pyproject-nix": [
|
||||
"pyproject-nix"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1740497536,
|
||||
"narHash": "sha256-K+8wsVooqhaqyxuvew3+62mgOfRLJ7whv7woqPU3Ypo=",
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "uv2nix",
|
||||
"rev": "d01fd3a141755ad5d5b93dd9fcbd76d6401f5bac",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "uv2nix",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
117
flake.nix
@ -1,117 +0,0 @@
|
||||
{
|
||||
description = "Transparent proxy server that works as a poor man's VPN. Forwards over ssh. Doesn't require admin. Works with Linux and MacOS. Supports DNS tunneling.";
|
||||
|
||||
inputs = {
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
|
||||
pyproject-nix = {
|
||||
url = "github:pyproject-nix/pyproject.nix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
uv2nix = {
|
||||
url = "github:pyproject-nix/uv2nix";
|
||||
inputs.pyproject-nix.follows = "pyproject-nix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
pyproject-build-systems = {
|
||||
url = "github:pyproject-nix/build-system-pkgs";
|
||||
inputs.pyproject-nix.follows = "pyproject-nix";
|
||||
inputs.uv2nix.follows = "uv2nix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
};
|
||||
|
||||
outputs =
|
||||
{
|
||||
self,
|
||||
nixpkgs,
|
||||
flake-utils,
|
||||
pyproject-nix,
|
||||
uv2nix,
|
||||
pyproject-build-systems,
|
||||
}:
|
||||
flake-utils.lib.eachDefaultSystem (
|
||||
system:
|
||||
let
|
||||
inherit (nixpkgs) lib;
|
||||
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
|
||||
python = pkgs.python312;
|
||||
|
||||
workspace = uv2nix.lib.workspace.loadWorkspace { workspaceRoot = ./.; };
|
||||
|
||||
# Create package overlay from workspace.
|
||||
overlay = workspace.mkPyprojectOverlay {
|
||||
sourcePreference = "sdist";
|
||||
};
|
||||
|
||||
# Extend generated overlay with build fixups
|
||||
#
|
||||
# Uv2nix can only work with what it has, and uv.lock is missing essential metadata to perform some builds.
|
||||
# This is an additional overlay implementing build fixups.
|
||||
# See:
|
||||
# - https://pyproject-nix.github.io/uv2nix/FAQ.html
|
||||
pyprojectOverrides =
|
||||
final: prev:
|
||||
# Implement build fixups here.
|
||||
# Note that uv2nix is _not_ using Nixpkgs buildPythonPackage.
|
||||
# It's using https://pyproject-nix.github.io/pyproject.nix/build.html
|
||||
let
|
||||
inherit (final) resolveBuildSystem;
|
||||
inherit (builtins) mapAttrs;
|
||||
|
||||
# Build system dependencies specified in the shape expected by resolveBuildSystem
|
||||
# The empty lists below are lists of optional dependencies.
|
||||
#
|
||||
# A package `foo` with specification written as:
|
||||
# `setuptools-scm[toml]` in pyproject.toml would be written as
|
||||
# `foo.setuptools-scm = [ "toml" ]` in Nix
|
||||
buildSystemOverrides = {
|
||||
chardet.setuptools = [ ];
|
||||
colorlog.setuptools = [ ];
|
||||
python-debian.setuptools = [ ];
|
||||
pluggy.setuptools = [ ];
|
||||
pathspec.flit-core = [ ];
|
||||
packaging.flit-core = [ ];
|
||||
};
|
||||
|
||||
in
|
||||
mapAttrs (
|
||||
name: spec:
|
||||
prev.${name}.overrideAttrs (old: {
|
||||
nativeBuildInputs = old.nativeBuildInputs ++ resolveBuildSystem spec;
|
||||
})
|
||||
) buildSystemOverrides;
|
||||
|
||||
pythonSet =
|
||||
(pkgs.callPackage pyproject-nix.build.packages {
|
||||
inherit python;
|
||||
}).overrideScope
|
||||
(
|
||||
lib.composeManyExtensions [
|
||||
pyproject-build-systems.overlays.default
|
||||
overlay
|
||||
pyprojectOverrides
|
||||
]
|
||||
);
|
||||
|
||||
inherit (pkgs.callPackages pyproject-nix.build.util { }) mkApplication;
|
||||
package = mkApplication {
|
||||
venv = pythonSet.mkVirtualEnv "sshuttle" workspace.deps.default;
|
||||
package = pythonSet.sshuttle;
|
||||
};
|
||||
in
|
||||
{
|
||||
packages = {
|
||||
sshuttle = package;
|
||||
default = package;
|
||||
};
|
||||
devShells.default = pkgs.mkShell {
|
||||
packages = [
|
||||
pkgs.uv
|
||||
];
|
||||
};
|
||||
}
|
||||
);
|
||||
}
|
28
packaging/control
Normal file
@ -0,0 +1,28 @@
|
||||
Package: sshuttle
|
||||
Version: 0+git
|
||||
Architecture: all
|
||||
Maintainer: Jim Wyllie <jwyllie83@gmail.com>
|
||||
Depends: iptables, python (>= 2.6)
|
||||
Suggests: autossh
|
||||
Section: net
|
||||
Priority: optional
|
||||
Homepage: http://github.com/sshuttle/sshuttle
|
||||
Description: "Full-featured" VPN over an SSH tunnel
|
||||
It allows full remote access somewhere where all you have is an SSH
|
||||
connection. It works well if you generally find yourself in the
|
||||
following situation:
|
||||
.
|
||||
- Your client machine (or router) is Linux, FreeBSD, or MacOS.
|
||||
- You have access to a remote network via ssh.
|
||||
- You don't necessarily have admin access on the remote network.
|
||||
- You do not wish to, or can't, use other VPN software
|
||||
- You don't want to create an ssh port forward for every
|
||||
single host/port on the remote network.
|
||||
- You hate openssh's port forwarding because it's randomly
|
||||
slow and/or stupid.
|
||||
- You can't use openssh's PermitTunnel feature because
|
||||
it's disabled by default on openssh servers; plus it does
|
||||
TCP-over-TCP, which has suboptimal performance
|
||||
.
|
||||
It also has hooks for more complicated setups (VPN-in-a-SSH-VPN, etc.) to allow
|
||||
you to set it up as you like.
|
46
packaging/make_deb
Executable file
@ -0,0 +1,46 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# This script puts together a .deb package suitable for installing on an Ubuntu
|
||||
# system
|
||||
|
||||
B="/tmp/sshuttle/build"
|
||||
|
||||
if [ ! -x /usr/bin/dpkg ]; then
|
||||
echo 'Unable to build: dpkg not found on system'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create the new directory structure
|
||||
mkdir -p ${B}/etc/sshuttle/pre-start.d
|
||||
mkdir -p ${B}/etc/sshuttle/post-stop.d
|
||||
mkdir -p ${B}/usr/share/sshuttle
|
||||
mkdir -p ${B}/usr/bin
|
||||
mkdir -p ${B}/etc/init
|
||||
mkdir -p ${B}/DEBIAN
|
||||
|
||||
# Copy over all of the files
|
||||
cp -r ../src/* ${B}/usr/share/sshuttle
|
||||
cp ../src/sshuttle ${B}/usr/bin
|
||||
cp -r sshuttle.conf ${B}/etc/init
|
||||
cp prefixes.conf ${B}/etc/sshuttle
|
||||
cp tunnel.conf ${B}/etc/sshuttle
|
||||
# Remove MacOS X stuff from .deb
|
||||
rm -r ${B}/usr/share/sshuttle/ui-macos
|
||||
|
||||
# Fix path to main.py
|
||||
sed -e 's:^DIR=.*$:DIR=/usr/share/sshuttle/:' -i ${B}/usr/bin/sshuttle
|
||||
|
||||
# Copy the control file over, as well
|
||||
cp control ${B}/DEBIAN
|
||||
|
||||
# Create the md5sum manifest
|
||||
if [ -x /usr/bin/md5sum ]; then
|
||||
cd ${B}
|
||||
find . -type f | egrep -v DEBIAN | sed -re 's/^..//' | xargs md5sum > ${B}/DEBIAN/md5sums
|
||||
cd ${OLDPWD}
|
||||
fi
|
||||
|
||||
# Build the debian package
|
||||
VERSION=$(egrep -e '^Version' control | sed -re 's/^[^:]*: //')
|
||||
dpkg --build ${B} ./sshuttle-${VERSION}.deb
|
||||
rm -rf ${B}
|
5
packaging/prefixes.conf
Normal file
@ -0,0 +1,5 @@
|
||||
# Output prefixes here, one per line. Prefix is in:
|
||||
# prefix/netmask format
|
||||
# Like this:
|
||||
# 192.168.0.0/16
|
||||
# 192.0.43.10/32
|
90
packaging/sshuttle.conf
Normal file
@ -0,0 +1,90 @@
|
||||
description "Create a transparent proxy over SSH"
|
||||
author "Jim Wyllie <jwyllie83@gmail.com>"
|
||||
|
||||
manual
|
||||
nice -5
|
||||
|
||||
# Edit this file with network prefixes that should be loaded through the SSH
|
||||
# tunnel.
|
||||
env PREFIX_LOCATION=/etc/sshuttle/prefixes.conf
|
||||
|
||||
# Routing table; defaults to 100
|
||||
env ROUTE_TABLE=100
|
||||
|
||||
# fwmark; defaults to 1
|
||||
env FWMARK=1
|
||||
|
||||
# SSH tunnel configuration file
|
||||
env SSHUTTLE_TUNNEL_FILE=/etc/sshuttle/tunnel.conf
|
||||
|
||||
# File containing the tunnel proxy name / host / whatever
|
||||
env TUNNEL_PROXY="/etc/sshuttle/tunnel.conf"
|
||||
|
||||
# Any other commands needed to run before or after loading the SSH tunnel.
|
||||
# This is where you can put any of your hacks to set up tunnels-in-tunnels,
|
||||
# etc. Scripts in this directory are executed in order.
|
||||
env MISC_START_DIR=/etc/sshuttle/pre-start.d
|
||||
env MISC_STOP_DIR=/etc/sshuttle/post-stop.d
|
||||
|
||||
start on (local-filesystems and net-device-up IFACE!=lo)
|
||||
stop on stopping network-services
|
||||
|
||||
#respawn
|
||||
|
||||
pre-start script
|
||||
# Make sure we have created the routes
|
||||
sudo ip rule add fwmark ${FWMARK} lookup ${ROUTE_TABLE}
|
||||
logger "Starting sshuttle..."
|
||||
|
||||
if [ -f "${PREFIX_LOCATION}" ]; then
|
||||
cat "${PREFIX_LOCATION}" | while read ROUTE; do
|
||||
|
||||
# Skip comments
|
||||
if [ -n "$(echo ${ROUTE} | egrep "^[ ]*#")" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# Skip empty lines
|
||||
if [ -z "${ROUTE}" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
logger "Adding route: ${ROUTE}"
|
||||
ip route add local ${ROUTE} dev lo table ${ROUTE_TABLE}
|
||||
done
|
||||
fi
|
||||
|
||||
for RUNFILE in ${MISC_START_DIR}/*; do
|
||||
logger "Executing ${RUNFILE}"
|
||||
/bin/sh -c "${RUNFILE}"
|
||||
done
|
||||
end script
|
||||
|
||||
post-stop script
|
||||
if [ -f "${PREFIX_LOCATION}" ]; then
|
||||
cat "${PREFIX_LOCATION}" | while read ROUTE; do
|
||||
|
||||
# Skip comments
|
||||
if [ -n "$(echo ${ROUTE} | egrep "^[ ]*#")" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# Skip empty lines
|
||||
if [ -z "${ROUTE}" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
logger "Deleting route: ${ROUTE}"
|
||||
ip route del local ${ROUTE} dev lo table ${ROUTE_TABLE}
|
||||
done
|
||||
fi
|
||||
|
||||
ip rule del fwmark ${FWMARK}
|
||||
|
||||
for RUNFILE in ${MISC_STOP_DIR}/*; do
|
||||
logger "Executing ${RUNFILE}"
|
||||
/bin/sh -c "${RUNFILE}"
|
||||
done
|
||||
end script
|
||||
|
||||
exec /usr/bin/sshuttle --dns --method=tproxy --listen 0.0.0.0 --remote sshuttle_tunnel -s /etc/sshuttle/prefixes.conf -e "ssh -F ${TUNNEL_PROXY}"
|
19
packaging/tunnel.conf
Normal file
@ -0,0 +1,19 @@
|
||||
# Here is where you can specify any SSH tunnel options See ssh_config(5) for
|
||||
# details. You need to leave the Host line intact, but everything else can
|
||||
# specify whatever you want
|
||||
Host sshuttle_tunnel
|
||||
|
||||
# REQUIRED: Set this to be the host to which you would like to connect your
|
||||
# tunnel
|
||||
#Hostname localhost
|
||||
|
||||
# REQUIRED: Set this to be the target SSH user on the remote system
|
||||
#User foo
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# The rest are all optional; see ssh_config(5) for the full list of what can
|
||||
# be specified. Some very commonly needed ones are below.
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# SSH key used for connecting
|
||||
#IdentityFile /path/to/key
|
@ -1,57 +0,0 @@
|
||||
[project]
|
||||
authors = [
|
||||
{name = "Brian May", email = "brian@linuxpenguins.xyz"},
|
||||
]
|
||||
license = {text = "LGPL-2.1"}
|
||||
requires-python = "<4.0,>=3.9"
|
||||
dependencies = []
|
||||
name = "sshuttle"
|
||||
version = "1.3.1"
|
||||
description = "Transparent proxy server that works as a poor man's VPN. Forwards over ssh. Doesn't require admin. Works with Linux and MacOS. Supports DNS tunneling."
|
||||
readme = "README.rst"
|
||||
classifiers = [
|
||||
"Development Status :: 5 - Production/Stable",
|
||||
"Intended Audience :: Developers",
|
||||
"Intended Audience :: End Users/Desktop",
|
||||
"License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)",
|
||||
"Operating System :: OS Independent",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
"Topic :: System :: Networking",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
sshuttle = "sshuttle.cmdline:main"
|
||||
|
||||
[dependency-groups]
|
||||
dev = [
|
||||
"pytest<9.0.0,>=8.0.1",
|
||||
"pytest-cov<7.0,>=4.1",
|
||||
"flake8<8.0.0,>=7.0.0",
|
||||
"pyflakes<4.0.0,>=3.2.0",
|
||||
"bump2version<2.0.0,>=1.0.1",
|
||||
"twine<7,>=5",
|
||||
"black>=25.1.0",
|
||||
"jedi-language-server>=0.44.0",
|
||||
"pylsp-mypy>=0.7.0",
|
||||
"python-lsp-server>=1.12.2",
|
||||
"ruff>=0.11.2",
|
||||
]
|
||||
docs = [
|
||||
"sphinx==8.1.3; python_version ~= \"3.10\"",
|
||||
"furo==2024.8.6",
|
||||
]
|
||||
|
||||
[tool.uv]
|
||||
default-groups = []
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
[tool.hatch.build.targets.sdist]
|
||||
exclude = [
|
||||
"/.jj"
|
||||
]
|
15
run
@ -1,15 +0,0 @@
|
||||
#!/usr/bin/env sh
|
||||
set -e
|
||||
export PYTHONPATH="$(dirname "$0"):$PYTHONPATH"
|
||||
export PATH="$(dirname "$0")/bin:$PATH"
|
||||
|
||||
python_best_version() {
|
||||
if [ -x "$(command -v python3)" ] &&
|
||||
python3 -c "import sys; sys.exit(not sys.version_info > (3, 5))"; then
|
||||
exec python3 "$@"
|
||||
else
|
||||
exec python "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
python_best_version -m "sshuttle" "$@"
|
@ -1,39 +0,0 @@
|
||||
# https://hub.docker.com/r/linuxserver/openssh-server/
|
||||
ARG BASE_IMAGE=docker.io/linuxserver/openssh-server:version-9.3_p2-r1
|
||||
|
||||
FROM ${BASE_IMAGE} as pyenv
|
||||
|
||||
# https://github.com/pyenv/pyenv/wiki#suggested-build-environment
|
||||
RUN apk add --no-cache build-base git libffi-dev openssl-dev bzip2-dev zlib-dev readline-dev sqlite-dev
|
||||
ENV PYENV_ROOT=/pyenv
|
||||
RUN curl https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash
|
||||
RUN /pyenv/bin/pyenv install 3.10
|
||||
RUN /pyenv/bin/pyenv install 3.11
|
||||
RUN /pyenv/bin/pyenv install 3.12
|
||||
RUN bash -xc 'rm -rf /pyenv/{.git,plugins} /pyenv/versions/*/lib/*/{test,config,config-*linux-gnu}' && \
|
||||
find /pyenv -type d -name __pycache__ -exec rm -rf {} + && \
|
||||
find /pyenv -type f -name '*.py[co]' -delete
|
||||
|
||||
FROM ${BASE_IMAGE}
|
||||
|
||||
RUN apk add --no-cache bash nginx iperf3
|
||||
|
||||
# pyenv setup
|
||||
ENV PYENV_ROOT=/pyenv
|
||||
ENV PATH=/pyenv/shims:/pyenv/bin:$PATH
|
||||
COPY --from=pyenv /pyenv /pyenv
|
||||
|
||||
# OpenSSH Server variables
|
||||
ENV PUID=1000
|
||||
ENV PGID=1000
|
||||
ENV PASSWORD_ACCESS=true
|
||||
ENV USER_NAME=test
|
||||
ENV USER_PASSWORD=test
|
||||
ENV LOG_STDOUT=true
|
||||
|
||||
# suppress linuxserver.io logo printing, change sshd config
|
||||
RUN sed -i '1 a exec &>/dev/null' /etc/s6-overlay/s6-rc.d/init-adduser/run
|
||||
|
||||
# https://www.linuxserver.io/blog/2019-09-14-customizing-our-containers
|
||||
# To customize the container and start other components
|
||||
COPY container.setup.sh /custom-cont-init.d/setup.sh
|
@ -1,21 +0,0 @@
# Container based test bed for sshuttle

```bash
test-bed up -d   # start containers

exec-sshuttle <node-id> [--copy-id] [--server-py=2.7|3.10] [--client-py=2.7|3.10] [--sshuttle-bin=/path/to/sshuttle] [sshuttle-args...]
# --copy-id      -> optionally do ssh-copy-id to make it passwordless for future runs
# --sshuttle-bin -> use another sshuttle binary instead of the one from the dev setup
# --server-py    -> Python version to use on the server (managed by pyenv)
# --client-py    -> Python version to use on the client (managed by pyenv)

exec-sshuttle node-1   # start sshuttle to connect to node-1

exec-tool curl node-1    # curl to the nginx instance running on node-1 via an IP that is only reachable via sshuttle
exec-tool iperf3 node-1  # measure throughput to node-1

run-benchmark node-1 --client-py=3.10

```

<https://learn.microsoft.com/en-us/windows-server/administration/openssh/openssh_server_configuration#configuring-the-default-shell-for-openssh-in-windows>
@ -1,34 +0,0 @@
|
||||
name: sshuttle-testbed
|
||||
|
||||
services:
|
||||
node-1:
|
||||
image: ghcr.io/sshuttle/sshuttle-testbed
|
||||
container_name: sshuttle-testbed-node-1
|
||||
hostname: node-1
|
||||
cap_add:
|
||||
- "NET_ADMIN"
|
||||
environment:
|
||||
- ADD_IP_ADDRESSES=10.55.1.77/24
|
||||
networks:
|
||||
default:
|
||||
ipv6_address: 2001:0DB8::551
|
||||
node-2:
|
||||
image: ghcr.io/sshuttle/sshuttle-testbed
|
||||
container_name: sshuttle-testbed-node-2
|
||||
hostname: node-2
|
||||
cap_add:
|
||||
- "NET_ADMIN"
|
||||
environment:
|
||||
- ADD_IP_ADDRESSES=10.55.2.77/32
|
||||
networks:
|
||||
default:
|
||||
ipv6_address: 2001:0DB8::552
|
||||
|
||||
networks:
|
||||
default:
|
||||
driver: bridge
|
||||
enable_ipv6: true
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 2001:0DB8::/112
|
||||
# internal: true
|
@ -1,65 +0,0 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
# shellcheck shell=bash
|
||||
|
||||
set -e
|
||||
|
||||
function with_set_x() {
|
||||
set -x
|
||||
"$@"
|
||||
{
|
||||
ec=$?
|
||||
set +x
|
||||
return $ec
|
||||
} 2>/dev/null
|
||||
}
|
||||
|
||||
|
||||
function log() {
|
||||
echo "$*" >&2
|
||||
}
|
||||
|
||||
log ">>> Setting up $(hostname) | id: $(id)\nIP:\n$(ip a)\nRoutes:\n$(ip r)\npyenv:\n$(pyenv versions)"
|
||||
|
||||
echo "
|
||||
AcceptEnv PYENV_VERSION
|
||||
" >> /etc/ssh/sshd_config
|
||||
|
||||
iface="$(ip route | awk '/default/ { print $5 }')"
|
||||
default_gw="$(ip route | awk '/default/ { print $3 }')"
|
||||
for addr in ${ADD_IP_ADDRESSES//,/ }; do
|
||||
log ">>> Adding $addr to interface $iface"
|
||||
net_addr=$(ipcalc -n "$addr" | awk -F= '{print $2}')
|
||||
with_set_x ip addr add "$addr" dev "$iface"
|
||||
with_set_x ip route add "$net_addr" via "$default_gw" dev "$iface" # so that sshuttle -N can discover routes
|
||||
done
|
||||
|
||||
log ">>> Starting iperf3 server"
|
||||
iperf3 --server --port 5001 &
|
||||
|
||||
mkdir -p /www
|
||||
echo "<h5>Hello from $(hostname)</h5>
|
||||
<pre>
|
||||
<u>ip address</u>
|
||||
$(ip address)
|
||||
<u>ip route</u>
|
||||
$(ip route)
|
||||
</pre>" >/www/index.html
|
||||
echo "
|
||||
daemon off;
|
||||
worker_processes 1;
|
||||
error_log /dev/stdout info;
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
server {
|
||||
access_log /dev/stdout;
|
||||
listen 8080 default_server;
|
||||
listen [::]:8080 default_server;
|
||||
root /www;
|
||||
}
|
||||
}" >/etc/nginx/nginx.conf
|
||||
|
||||
log ">>> Starting nginx"
|
||||
nginx &
|
@ -1,159 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
export MSYS_NO_PATHCONV=1
|
||||
|
||||
function with_set_x() {
|
||||
set -x
|
||||
"$@"
|
||||
{
|
||||
ec=$?
|
||||
set +x
|
||||
return $ec
|
||||
} 2>/dev/null
|
||||
}
|
||||
|
||||
function log() {
|
||||
echo "$*" >&2
|
||||
}
|
||||
|
||||
ssh_cmd='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
|
||||
ssh_copy_id=false
|
||||
args=()
|
||||
subnet_args=()
|
||||
while [[ $# -gt 0 ]]; do
|
||||
arg=$1
|
||||
shift
|
||||
case "$arg" in
|
||||
-v|-vv*)
|
||||
ssh_cmd+=" -v"
|
||||
args+=("$arg")
|
||||
;;
|
||||
-r)
|
||||
args+=("-r" "$1")
|
||||
shift
|
||||
;;
|
||||
--copy-id)
|
||||
ssh_copy_id=true
|
||||
;;
|
||||
--server-py=*)
|
||||
server_pyenv_ver="${arg#*=}"
|
||||
;;
|
||||
--client-py=*)
|
||||
client_pyenv_ver="${arg#*=}"
|
||||
;;
|
||||
-6)
|
||||
ipv6_only=true
|
||||
;;
|
||||
--sshuttle-bin=*)
|
||||
sshuttle_bin="${arg#*=}"
|
||||
;;
|
||||
-N|*/*)
|
||||
subnet_args+=("$arg")
|
||||
;;
|
||||
-*)
|
||||
args+=("$arg")
|
||||
;;
|
||||
*)
|
||||
if [[ -z "$target" ]]; then
|
||||
target=$arg
|
||||
else
|
||||
args+=("$arg")
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
if [[ ${#subnet_args[@]} -eq 0 ]]; then
|
||||
subnet_args=("-N")
|
||||
fi
|
||||
|
||||
if [[ $target == node-* ]]; then
|
||||
log "Target is a a test-bed node"
|
||||
port="2222"
|
||||
user_part="test:test"
|
||||
host=$("$(dirname "$0")/test-bed" get-ip "$target")
|
||||
index=${target#node-}
|
||||
if [[ $ipv6_only == true ]]; then
|
||||
args+=("2001:0DB8::/112")
|
||||
else
|
||||
args+=("10.55.$index.0/24")
|
||||
fi
|
||||
target="$user_part@$host:$port"
|
||||
if ! command -v sshpass >/dev/null; then
|
||||
log "sshpass is not found. You might have to manually enter ssh password: 'test'"
|
||||
fi
|
||||
if [[ -z $server_pyenv_ver ]]; then
|
||||
log "server-py argumwnt is not specified. Setting it to 3.8"
|
||||
server_pyenv_ver="3.8"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n $server_pyenv_ver ]]; then
|
||||
log "Would pass PYENV_VERRSION=$server_pyenv_ver to server. pyenv is required on server to make it work"
|
||||
pycmd="/pyenv/shims/python"
|
||||
ssh_cmd+=" -o SetEnv=PYENV_VERSION=${server_pyenv_ver:-'3'}"
|
||||
args=("--python=$pycmd" "${args[@]}")
|
||||
fi
|
||||
|
||||
if [[ $ssh_copy_id == true ]]; then
|
||||
log "Trying to make it passwordless"
|
||||
if [[ $target == *@* ]]; then
|
||||
user_part="${target%%@*}"
|
||||
host_part="${target#*@}"
|
||||
else
|
||||
user_part="$(whoami)"
|
||||
host_part="$target"
|
||||
fi
|
||||
if [[ $host_part == *:* ]]; then
|
||||
host="${host_part%:*}"
|
||||
port="${host_part#*:}"
|
||||
else
|
||||
host="$host_part"
|
||||
port="22"
|
||||
fi
|
||||
if [[ $user_part == *:* ]]; then
|
||||
user="${user_part%:*}"
|
||||
password="${user_part#*:}"
|
||||
else
|
||||
user="$user_part"
|
||||
password=""
|
||||
fi
|
||||
cmd=(ssh-copy-id -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p "$port" "$user@$host")
|
||||
if [[ -n $password ]] && command -v sshpass >/dev/null; then
|
||||
cmd=(sshpass -p "$password" "${cmd[@]}")
|
||||
fi
|
||||
with_set_x "${cmd[@]}"
|
||||
fi
|
||||
|
||||
if [[ -z $sshuttle_bin || "$sshuttle_bin" == dev ]]; then
|
||||
cd "$(dirname "$0")/.."
|
||||
export PYTHONPATH="."
|
||||
if [[ -n $client_pyenv_ver ]]; then
|
||||
log "Using pyenv version: $client_pyenv_ver"
|
||||
command -v pyenv &>/dev/null || { log "You have to install pyenv to use --client-py"; exit 1; }
|
||||
sshuttle_cmd=(/usr/bin/env PYENV_VERSION="$client_pyenv_ver" pyenv exec python -m sshuttle)
|
||||
else
|
||||
log "Using best python version availble"
|
||||
if [ -x "$(command -v python3)" ] &&
|
||||
python3 -c "import sys; sys.exit(not sys.version_info > (3, 5))"; then
|
||||
sshuttle_cmd=(python3 -m sshuttle)
|
||||
else
|
||||
sshuttle_cmd=(python -m sshuttle)
|
||||
fi
|
||||
fi
|
||||
else
|
||||
[[ -n $client_pyenv_ver ]] && log "Can't specify --client-py when --sshuttle-bin is specified" && exit 1
|
||||
sshuttle_cmd=("$sshuttle_bin")
|
||||
fi
|
||||
|
||||
if [[ " ${args[*]} " != *" --ssh-cmd "* ]]; then
|
||||
args=("--ssh-cmd" "$ssh_cmd" "${args[@]}")
|
||||
fi
|
||||
|
||||
if [[ " ${args[*]} " != *" -r "* ]]; then
|
||||
args=("-r" "$target" "${args[@]}")
|
||||
fi
|
||||
|
||||
set -x
|
||||
"${sshuttle_cmd[@]}" --version
|
||||
exec "${sshuttle_cmd[@]}" "${args[@]}" "${subnet_args[@]}"
|
@ -1,86 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
|
||||
function with_set_x() {
|
||||
set -x
|
||||
"$@"
|
||||
{
|
||||
ec=$?
|
||||
set +x
|
||||
return $ec
|
||||
} 2>/dev/null
|
||||
}
|
||||
|
||||
function log() {
|
||||
echo "$*" >&2
|
||||
}
|
||||
|
||||
|
||||
args=()
|
||||
while [[ $# -gt 0 ]]; do
|
||||
arg=$1
|
||||
shift
|
||||
case "$arg" in
|
||||
-6)
|
||||
ipv6_only=true
|
||||
continue
|
||||
;;
|
||||
-*) ;;
|
||||
*)
|
||||
if [[ -z $tool ]]; then
|
||||
tool=$arg
|
||||
continue
|
||||
elif [[ -z $node ]]; then
|
||||
node=$arg
|
||||
continue
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
args+=("$arg")
|
||||
done
|
||||
|
||||
tool=${tool:?"tool argument missing. should be one of iperf3,ping,curl,ab"}
|
||||
node=${node:?"node argument missing. should be 'node-1', 'node-2' etc"}
|
||||
|
||||
if [[ $node == node-* ]]; then
|
||||
index=${node#node-}
|
||||
if [[ $ipv6_only == true ]]; then
|
||||
host="2001:0DB8::55$index"
|
||||
else
|
||||
host="10.55.$index.77"
|
||||
fi
|
||||
else
|
||||
host=$node
|
||||
fi
|
||||
|
||||
connect_timeout_sec=3
|
||||
|
||||
case "$tool" in
|
||||
ping)
|
||||
with_set_x exec ping -W $connect_timeout_sec "${args[@]}" "$host"
|
||||
;;
|
||||
iperf3)
|
||||
port=5001
|
||||
with_set_x exec iperf3 --client "$host" --port=$port --connect-timeout=$((connect_timeout_sec * 1000)) "${args[@]}"
|
||||
;;
|
||||
curl)
|
||||
port=8080
|
||||
if [[ $host = *:* ]]; then
|
||||
host="[$host]"
|
||||
args+=(--ipv6)
|
||||
fi
|
||||
with_set_x exec curl "http://$host:$port/" -v --connect-timeout $connect_timeout_sec "${args[@]}"
|
||||
;;
|
||||
ab)
|
||||
port=8080
|
||||
if [[ " ${args[*]}" != *" -n "* && " ${args[*]}" != *" -c "* ]]; then
|
||||
args+=(-n 500 -c 50 "${args[@]}")
|
||||
fi
|
||||
with_set_x exec ab -s $connect_timeout_sec "${args[@]}" "http://$host:$port/"
|
||||
;;
|
||||
*)
|
||||
log "Unknown tool: $tool"
|
||||
exit 2
|
||||
;;
|
||||
esac
|
@ -1,40 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
function with_set_x() {
|
||||
set -x
|
||||
"$@"
|
||||
{
|
||||
ec=$?
|
||||
set +x
|
||||
return $ec
|
||||
} 2>/dev/null
|
||||
}
|
||||
|
||||
function log() {
|
||||
echo "$*" >&2
|
||||
}
|
||||
|
||||
./test-bed up -d
|
||||
|
||||
benchmark() {
|
||||
log -e "\n======== Benchmarking sshuttle | Args: [$*] ========"
|
||||
local node=$1
|
||||
shift
|
||||
with_set_x ./exec-sshuttle "$node" --listen 55771 "$@" &
|
||||
sshuttle_pid=$!
|
||||
trap 'kill -0 $sshuttle_pid &>/dev/null && kill -15 $sshuttle_pid' EXIT
|
||||
while ! nc -z localhost 55771; do sleep 0.1; done
|
||||
sleep 1
|
||||
./exec-tool iperf3 "$node" --time=4
|
||||
with_set_x kill -15 $sshuttle_pid
|
||||
wait $sshuttle_pid || true
|
||||
}
|
||||
|
||||
if [[ $# -gt 0 ]]; then
|
||||
benchmark "${@}"
|
||||
else
|
||||
benchmark node-1 --sshuttle-bin="${SSHUTTLE_BIN:-sshuttle}"
|
||||
benchmark node-1 --sshuttle-bin=dev
|
||||
fi
|
@ -1,9 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
export PYTHONPATH=.
|
||||
|
||||
set -x
|
||||
python -m flake8 sshuttle tests
|
||||
python -m pytest .
|
@ -1,42 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
if [[ -z $1 || $1 = -* ]]; then
|
||||
set -- up "$@"
|
||||
fi
|
||||
|
||||
function with_set_x() {
|
||||
set -x
|
||||
"$@"
|
||||
{
|
||||
ec=$?
|
||||
set +x
|
||||
return $ec
|
||||
} 2>/dev/null
|
||||
}
|
||||
|
||||
function build() {
|
||||
# podman build -t ghcr.io/sshuttle/sshuttle-testbed .
|
||||
with_set_x docker build --progress=plain -t ghcr.io/sshuttle/sshuttle-testbed -f Containerfile .
|
||||
}
|
||||
|
||||
function compose() {
|
||||
# podman-compose "$@"
|
||||
with_set_x docker compose "$@"
|
||||
}
|
||||
|
||||
function get-ip() {
|
||||
local container_name=sshuttle-testbed-"$1"
|
||||
docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$container_name"
|
||||
}
|
||||
|
||||
if [[ $1 == get-ip ]]; then
|
||||
shift
|
||||
get-ip "$@"
|
||||
else
|
||||
if [[ $* = *--build* ]]; then
|
||||
build
|
||||
fi
|
||||
compose "$@"
|
||||
fi
|
30
setup.cfg
@ -1,30 +0,0 @@
|
||||
[bumpversion]
|
||||
current_version = 1.3.1
|
||||
|
||||
[bumpversion:file:setup.py]
|
||||
|
||||
[bumpversion:file:pyproject.toml]
|
||||
|
||||
[bumpversion:file:sshuttle/version.py]
|
||||
|
||||
[aliases]
|
||||
test = pytest
|
||||
|
||||
[bdist_wheel]
|
||||
universal = 1
|
||||
|
||||
[upload]
|
||||
sign = true
|
||||
identity = 0x1784577F811F6EAC
|
||||
|
||||
[flake8]
|
||||
count = true
|
||||
show-source = true
|
||||
statistics = true
|
||||
max-line-length = 128
|
||||
|
||||
[pycodestyle]
|
||||
max-line-length = 128
|
||||
|
||||
[tool:pytest]
|
||||
addopts = --cov=sshuttle --cov-branch --cov-report=term-missing
|
10
src/Makefile
Normal file
@ -0,0 +1,10 @@
|
||||
all:
|
||||
|
||||
Makefile:
|
||||
@
|
||||
|
||||
%: FORCE
|
||||
+./do $@
|
||||
|
||||
.PHONY: FORCE
|
||||
|
11
src/all.do
Normal file
@ -0,0 +1,11 @@
|
||||
exec >&2
|
||||
UI=
|
||||
[ "$(uname)" = "Darwin" ] && UI=ui-macos/all
|
||||
redo-ifchange sshuttle.8 $UI
|
||||
|
||||
echo
|
||||
echo "What now?"
|
||||
[ -z "$UI" ] || echo "- Try the MacOS GUI: open ui-macos/Sshuttle*.app"
|
||||
echo "- Run sshuttle: ./sshuttle --dns -r HOSTNAME 0/0"
|
||||
echo "- Read the README: less README.md"
|
||||
echo "- Read the man page: less sshuttle.md"
|
27
src/assembler.py
Normal file
@ -0,0 +1,27 @@
|
||||
import sys
|
||||
import zlib
|
||||
|
||||
z = zlib.decompressobj()
|
||||
mainmod = sys.modules[__name__]
|
||||
while 1:
|
||||
name = sys.stdin.readline().strip()
|
||||
if name:
|
||||
nbytes = int(sys.stdin.readline())
|
||||
if verbosity >= 2:
|
||||
sys.stderr.write('server: assembling %r (%d bytes)\n'
|
||||
% (name, nbytes))
|
||||
content = z.decompress(sys.stdin.read(nbytes))
|
||||
exec compile(content, name, "exec")
|
||||
|
||||
# FIXME: this crushes everything into a single module namespace,
|
||||
# then makes each of the module names point at this one. Gross.
|
||||
assert(name.endswith('.py'))
|
||||
modname = name[:-3]
|
||||
mainmod.__dict__[modname] = mainmod
|
||||
else:
|
||||
break
|
||||
|
||||
verbose = verbosity
|
||||
sys.stderr.flush()
|
||||
sys.stdout.flush()
|
||||
main()
|
2
src/clean.do
Normal file
@ -0,0 +1,2 @@
|
||||
redo ui-macos/clean
|
||||
rm -f *~ */*~ .*~ */.*~ *.8 *.tmp */*.tmp *.pyc */*.pyc
|
776
src/client.py
Normal file
@ -0,0 +1,776 @@
|
||||
import struct
|
||||
import errno
|
||||
import re
|
||||
import signal
|
||||
import time
|
||||
import compat.ssubprocess as ssubprocess
|
||||
import helpers
|
||||
import os
|
||||
import ssnet
|
||||
import ssh
|
||||
import ssyslog
|
||||
import sys
|
||||
from ssnet import SockWrapper, Handler, Proxy, Mux, MuxWrapper
|
||||
from helpers import log, debug1, debug2, debug3, Fatal, islocal
|
||||
|
||||
recvmsg = None
|
||||
try:
|
||||
# try getting recvmsg from python
|
||||
import socket as pythonsocket
|
||||
getattr(pythonsocket.socket, "recvmsg")
|
||||
socket = pythonsocket
|
||||
recvmsg = "python"
|
||||
except AttributeError:
|
||||
# try getting recvmsg from socket_ext library
|
||||
try:
|
||||
import socket_ext
|
||||
getattr(socket_ext.socket, "recvmsg")
|
||||
socket = socket_ext
|
||||
recvmsg = "socket_ext"
|
||||
except ImportError:
|
||||
import socket
|
||||
|
||||
_extra_fd = os.open('/dev/null', os.O_RDONLY)
|
||||
|
||||
|
||||
def got_signal(signum, frame):
|
||||
log('exiting on signal %d\n' % signum)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
_pidname = None
|
||||
IP_TRANSPARENT = 19
|
||||
IP_ORIGDSTADDR = 20
|
||||
IP_RECVORIGDSTADDR = IP_ORIGDSTADDR
|
||||
SOL_IPV6 = 41
|
||||
IPV6_ORIGDSTADDR = 74
|
||||
IPV6_RECVORIGDSTADDR = IPV6_ORIGDSTADDR
|
||||
|
||||
|
||||
if recvmsg == "python":
|
||||
def recv_udp(listener, bufsize):
|
||||
debug3('Accept UDP python using recvmsg.\n')
|
||||
data, ancdata, msg_flags, srcip = listener.recvmsg(
|
||||
4096, socket.CMSG_SPACE(24))
|
||||
dstip = None
|
||||
family = None
|
||||
for cmsg_level, cmsg_type, cmsg_data in ancdata:
|
||||
if cmsg_level == socket.SOL_IP and cmsg_type == IP_ORIGDSTADDR:
|
||||
family, port = struct.unpack('=HH', cmsg_data[0:4])
|
||||
port = socket.htons(port)
|
||||
if family == socket.AF_INET:
|
||||
start = 4
|
||||
length = 4
|
||||
else:
|
||||
raise Fatal("Unsupported socket type '%s'" % family)
|
||||
ip = socket.inet_ntop(family, cmsg_data[start:start + length])
|
||||
dstip = (ip, port)
|
||||
break
|
||||
elif cmsg_level == SOL_IPV6 and cmsg_type == IPV6_ORIGDSTADDR:
|
||||
family, port = struct.unpack('=HH', cmsg_data[0:4])
|
||||
port = socket.htons(port)
|
||||
if family == socket.AF_INET6:
|
||||
start = 8
|
||||
length = 16
|
||||
else:
|
||||
raise Fatal("Unsupported socket type '%s'" % family)
|
||||
ip = socket.inet_ntop(family, cmsg_data[start:start + length])
|
||||
dstip = (ip, port)
|
||||
break
|
||||
return (srcip, dstip, data)
|
||||
elif recvmsg == "socket_ext":
|
||||
def recv_udp(listener, bufsize):
|
||||
debug3('Accept UDP using socket_ext recvmsg.\n')
|
||||
srcip, data, adata, flags = listener.recvmsg(
|
||||
(bufsize,), socket.CMSG_SPACE(24))
|
||||
dstip = None
|
||||
family = None
|
||||
for a in adata:
|
||||
if a.cmsg_level == socket.SOL_IP and a.cmsg_type == IP_ORIGDSTADDR:
|
||||
family, port = struct.unpack('=HH', a.cmsg_data[0:4])
|
||||
port = socket.htons(port)
|
||||
if family == socket.AF_INET:
|
||||
start = 4
|
||||
length = 4
|
||||
else:
|
||||
raise Fatal("Unsupported socket type '%s'" % family)
|
||||
ip = socket.inet_ntop(
|
||||
family, a.cmsg_data[start:start + length])
|
||||
dstip = (ip, port)
|
||||
break
|
||||
elif a.cmsg_level == SOL_IPV6 and a.cmsg_type == IPV6_ORIGDSTADDR:
|
||||
family, port = struct.unpack('=HH', a.cmsg_data[0:4])
|
||||
port = socket.htons(port)
|
||||
if family == socket.AF_INET6:
|
||||
start = 8
|
||||
length = 16
|
||||
else:
|
||||
raise Fatal("Unsupported socket type '%s'" % family)
|
||||
ip = socket.inet_ntop(
|
||||
family, a.cmsg_data[start:start + length])
|
||||
dstip = (ip, port)
|
||||
break
|
||||
return (srcip, dstip, data[0])
|
||||
else:
|
||||
def recv_udp(listener, bufsize):
|
||||
debug3('Accept UDP using recvfrom.\n')
|
||||
data, srcip = listener.recvfrom(bufsize)
|
||||
return (srcip, None, data)
|
||||
|
||||
|
||||
def check_daemon(pidfile):
|
||||
global _pidname
|
||||
_pidname = os.path.abspath(pidfile)
|
||||
try:
|
||||
oldpid = open(_pidname).read(1024)
|
||||
except IOError, e:
|
||||
if e.errno == errno.ENOENT:
|
||||
return # no pidfile, ok
|
||||
else:
|
||||
raise Fatal("can't read %s: %s" % (_pidname, e))
|
||||
if not oldpid:
|
||||
os.unlink(_pidname)
|
||||
return # invalid pidfile, ok
|
||||
oldpid = int(oldpid.strip() or 0)
|
||||
if oldpid <= 0:
|
||||
os.unlink(_pidname)
|
||||
return # invalid pidfile, ok
|
||||
try:
|
||||
os.kill(oldpid, 0)
|
||||
except OSError, e:
|
||||
if e.errno == errno.ESRCH:
|
||||
os.unlink(_pidname)
|
||||
return # outdated pidfile, ok
|
||||
elif e.errno == errno.EPERM:
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
raise Fatal("%s: sshuttle is already running (pid=%d)"
|
||||
% (_pidname, oldpid))
|
||||
|
||||
|
||||
def daemonize():
|
||||
if os.fork():
|
||||
os._exit(0)
|
||||
os.setsid()
|
||||
if os.fork():
|
||||
os._exit(0)
|
||||
|
||||
outfd = os.open(_pidname, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0666)
|
||||
try:
|
||||
os.write(outfd, '%d\n' % os.getpid())
|
||||
finally:
|
||||
os.close(outfd)
|
||||
os.chdir("/")
|
||||
|
||||
# Normal exit when killed, or try/finally won't work and the pidfile won't
|
||||
# be deleted.
|
||||
signal.signal(signal.SIGTERM, got_signal)
|
||||
|
||||
si = open('/dev/null', 'r+')
|
||||
os.dup2(si.fileno(), 0)
|
||||
os.dup2(si.fileno(), 1)
|
||||
si.close()
|
||||
|
||||
ssyslog.stderr_to_syslog()
|
||||
|
||||
|
||||
def daemon_cleanup():
|
||||
try:
|
||||
os.unlink(_pidname)
|
||||
except OSError, e:
|
||||
if e.errno == errno.ENOENT:
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
|
||||
pf_command_file = None
|
||||
|
||||
def pf_dst(sock):
|
||||
peer = sock.getpeername()
|
||||
proxy = sock.getsockname()
|
||||
|
||||
argv = (sock.family, socket.IPPROTO_TCP, peer[0], peer[1], proxy[0], proxy[1])
|
||||
pf_command_file.write("QUERY_PF_NAT %r,%r,%s,%r,%s,%r\n" % argv)
|
||||
pf_command_file.flush()
|
||||
line = pf_command_file.readline()
|
||||
debug2("QUERY_PF_NAT %r,%r,%s,%r,%s,%r" % argv + ' > ' + line)
|
||||
if line.startswith('QUERY_PF_NAT_SUCCESS '):
|
||||
(ip, port) = line[21:].split(',')
|
||||
return (ip, int(port))
|
||||
|
||||
return sock.getsockname()
|
||||
|
||||
def original_dst(sock):
|
||||
try:
|
||||
SO_ORIGINAL_DST = 80
|
||||
SOCKADDR_MIN = 16
|
||||
sockaddr_in = sock.getsockopt(socket.SOL_IP,
|
||||
SO_ORIGINAL_DST, SOCKADDR_MIN)
|
||||
(proto, port, a, b, c, d) = struct.unpack('!HHBBBB', sockaddr_in[:8])
|
||||
assert(socket.htons(proto) == socket.AF_INET)
|
||||
ip = '%d.%d.%d.%d' % (a, b, c, d)
|
||||
return (ip, port)
|
||||
except socket.error, e:
|
||||
if e.args[0] == errno.ENOPROTOOPT:
|
||||
return sock.getsockname()
|
||||
raise
|
||||
|
||||
|
||||
class MultiListener:
|
||||
|
||||
def __init__(self, type=socket.SOCK_STREAM, proto=0):
|
||||
self.v6 = socket.socket(socket.AF_INET6, type, proto)
|
||||
self.v4 = socket.socket(socket.AF_INET, type, proto)
|
||||
|
||||
def setsockopt(self, level, optname, value):
|
||||
if self.v6:
|
||||
self.v6.setsockopt(level, optname, value)
|
||||
if self.v4:
|
||||
self.v4.setsockopt(level, optname, value)
|
||||
|
||||
def add_handler(self, handlers, callback, method, mux):
|
||||
if self.v6:
|
||||
handlers.append(
|
||||
Handler(
|
||||
[self.v6],
|
||||
lambda: callback(self.v6, method, mux, handlers)))
|
||||
if self.v4:
|
||||
handlers.append(
|
||||
Handler(
|
||||
[self.v4],
|
||||
lambda: callback(self.v4, method, mux, handlers)))
|
||||
|
||||
def listen(self, backlog):
|
||||
if self.v6:
|
||||
self.v6.listen(backlog)
|
||||
if self.v4:
|
||||
try:
|
||||
self.v4.listen(backlog)
|
||||
except socket.error, e:
|
||||
# on some systems v4 bind will fail if the v6 succeeded,
|
||||
# in this case the v6 socket will receive v4 too.
|
||||
if e.errno == errno.EADDRINUSE and self.v6:
|
||||
self.v4 = None
|
||||
else:
|
||||
raise e
|
||||
|
||||
def bind(self, address_v6, address_v4):
|
||||
if address_v6 and self.v6:
|
||||
self.v6.bind(address_v6)
|
||||
else:
|
||||
self.v6 = None
|
||||
if address_v4 and self.v4:
|
||||
self.v4.bind(address_v4)
|
||||
else:
|
||||
self.v4 = None
|
||||
|
||||
def print_listening(self, what):
|
||||
if self.v6:
|
||||
listenip = self.v6.getsockname()
|
||||
debug1('%s listening on %r.\n' % (what, listenip))
|
||||
if self.v4:
|
||||
listenip = self.v4.getsockname()
|
||||
debug1('%s listening on %r.\n' % (what, listenip))
|
||||
|
||||
|
||||
class FirewallClient:
|
||||
|
||||
def __init__(self, port_v6, port_v4, subnets_include, subnets_exclude,
|
||||
dnsport_v6, dnsport_v4, method, udp):
|
||||
self.auto_nets = []
|
||||
self.subnets_include = subnets_include
|
||||
self.subnets_exclude = subnets_exclude
|
||||
argvbase = ([sys.argv[1], sys.argv[0], sys.argv[1]] +
|
||||
['-v'] * (helpers.verbose or 0) +
|
||||
['--firewall', str(port_v6), str(port_v4),
|
||||
str(dnsport_v6), str(dnsport_v4),
|
||||
method, str(int(udp))])
|
||||
if ssyslog._p:
|
||||
argvbase += ['--syslog']
|
||||
argv_tries = [
|
||||
['sudo', '-p', '[local sudo] Password: '] + argvbase,
|
||||
['su', '-c', ' '.join(argvbase)],
|
||||
argvbase
|
||||
]
|
||||
|
||||
# we can't use stdin/stdout=subprocess.PIPE here, as we normally would,
|
||||
# because stupid Linux 'su' requires that stdin be attached to a tty.
|
||||
# Instead, attach a *bidirectional* socket to its stdout, and use
|
||||
# that for talking in both directions.
|
||||
(s1, s2) = socket.socketpair()
|
||||
|
||||
def setup():
|
||||
# run in the child process
|
||||
s2.close()
|
||||
e = None
|
||||
if os.getuid() == 0:
|
||||
argv_tries = argv_tries[-1:] # last entry only
|
||||
for argv in argv_tries:
|
||||
try:
|
||||
if argv[0] == 'su':
|
||||
sys.stderr.write('[local su] ')
|
||||
self.p = ssubprocess.Popen(argv, stdout=s1, preexec_fn=setup)
|
||||
e = None
|
||||
break
|
||||
except OSError, e:
|
||||
pass
|
||||
self.argv = argv
|
||||
s1.close()
|
||||
self.pfile = s2.makefile('wb+')
|
||||
if e:
|
||||
log('Spawning firewall manager: %r\n' % self.argv)
|
||||
raise Fatal(e)
|
||||
line = self.pfile.readline()
|
||||
self.check()
|
||||
if line[0:5] != 'READY':
|
||||
raise Fatal('%r expected READY, got %r' % (self.argv, line))
|
||||
self.method = line[6:-1]
|
||||
|
||||
def check(self):
|
||||
rv = self.p.poll()
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (self.argv, rv))
|
||||
|
||||
def start(self):
|
||||
self.pfile.write('ROUTES\n')
|
||||
for (family, ip, width) in self.subnets_include + self.auto_nets:
|
||||
self.pfile.write('%d,%d,0,%s\n' % (family, width, ip))
|
||||
for (family, ip, width) in self.subnets_exclude:
|
||||
self.pfile.write('%d,%d,1,%s\n' % (family, width, ip))
|
||||
self.pfile.write('GO\n')
|
||||
self.pfile.flush()
|
||||
line = self.pfile.readline()
|
||||
self.check()
|
||||
if line != 'STARTED\n':
|
||||
raise Fatal('%r expected STARTED, got %r' % (self.argv, line))
|
||||
|
||||
def sethostip(self, hostname, ip):
|
||||
assert(not re.search(r'[^-\w]', hostname))
|
||||
assert(not re.search(r'[^0-9.]', ip))
|
||||
self.pfile.write('HOST %s,%s\n' % (hostname, ip))
|
||||
self.pfile.flush()
|
||||
|
||||
def done(self):
|
||||
self.pfile.close()
|
||||
rv = self.p.wait()
|
||||
if rv:
|
||||
raise Fatal('cleanup: %r returned %d' % (self.argv, rv))
|
||||
|
||||
|
||||
dnsreqs = {}
|
||||
udp_by_src = {}
|
||||
|
||||
|
||||
def expire_connections(now, mux):
|
||||
for chan, timeout in dnsreqs.items():
|
||||
if timeout < now:
|
||||
debug3('expiring dnsreqs channel=%d\n' % chan)
|
||||
del mux.channels[chan]
|
||||
del dnsreqs[chan]
|
||||
debug3('Remaining DNS requests: %d\n' % len(dnsreqs))
|
||||
for peer, (chan, timeout) in udp_by_src.items():
|
||||
if timeout < now:
|
||||
debug3('expiring UDP channel channel=%d peer=%r\n' % (chan, peer))
|
||||
mux.send(chan, ssnet.CMD_UDP_CLOSE, '')
|
||||
del mux.channels[chan]
|
||||
del udp_by_src[peer]
|
||||
debug3('Remaining UDP channels: %d\n' % len(udp_by_src))
|
||||
|
||||
|
||||
def onaccept_tcp(listener, method, mux, handlers):
|
||||
global _extra_fd
|
||||
try:
|
||||
sock, srcip = listener.accept()
|
||||
except socket.error, e:
|
||||
if e.args[0] in [errno.EMFILE, errno.ENFILE]:
|
||||
debug1('Rejected incoming connection: too many open files!\n')
|
||||
# free up an fd so we can eat the connection
|
||||
os.close(_extra_fd)
|
||||
try:
|
||||
sock, srcip = listener.accept()
|
||||
sock.close()
|
||||
finally:
|
||||
_extra_fd = os.open('/dev/null', os.O_RDONLY)
|
||||
return
|
||||
else:
|
||||
raise
|
||||
if method == "tproxy":
|
||||
dstip = sock.getsockname()
|
||||
elif method == "pf":
|
||||
dstip = pf_dst(sock)
|
||||
else:
|
||||
dstip = original_dst(sock)
|
||||
debug1('Accept TCP: %s:%r -> %s:%r.\n' % (srcip[0], srcip[1],
|
||||
dstip[0], dstip[1]))
|
||||
if dstip[1] == sock.getsockname()[1] and islocal(dstip[0], sock.family):
|
||||
debug1("-- ignored: that's my address!\n")
|
||||
sock.close()
|
||||
return
|
||||
chan = mux.next_channel()
|
||||
if not chan:
|
||||
log('warning: too many open channels. Discarded connection.\n')
|
||||
sock.close()
|
||||
return
|
||||
mux.send(chan, ssnet.CMD_TCP_CONNECT, '%d,%s,%s' %
|
||||
(sock.family, dstip[0], dstip[1]))
|
||||
outwrap = MuxWrapper(mux, chan)
|
||||
handlers.append(Proxy(SockWrapper(sock, sock), outwrap))
|
||||
expire_connections(time.time(), mux)
|
||||
|
||||
|
||||
def udp_done(chan, data, method, family, dstip):
|
||||
(src, srcport, data) = data.split(",", 2)
|
||||
srcip = (src, int(srcport))
|
||||
debug3('doing send from %r to %r\n' % (srcip, dstip,))
|
||||
|
||||
try:
|
||||
sender = socket.socket(family, socket.SOCK_DGRAM)
|
||||
sender.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
sender.setsockopt(socket.SOL_IP, IP_TRANSPARENT, 1)
|
||||
sender.bind(srcip)
|
||||
sender.sendto(data, dstip)
|
||||
sender.close()
|
||||
except socket.error, e:
|
||||
debug1('-- ignored socket error sending UDP data: %r\n' % e)
|
||||
|
||||
|
||||
def onaccept_udp(listener, method, mux, handlers):
|
||||
now = time.time()
|
||||
srcip, dstip, data = recv_udp(listener, 4096)
|
||||
if not dstip:
|
||||
debug1(
|
||||
"-- ignored UDP from %r: "
|
||||
"couldn't determine destination IP address\n" % (srcip,))
|
||||
return
|
||||
debug1('Accept UDP: %r -> %r.\n' % (srcip, dstip,))
|
||||
if srcip in udp_by_src:
|
||||
chan, timeout = udp_by_src[srcip]
|
||||
else:
|
||||
chan = mux.next_channel()
|
||||
mux.channels[chan] = lambda cmd, data: udp_done(
|
||||
chan, data, method, listener.family, dstip=srcip)
|
||||
mux.send(chan, ssnet.CMD_UDP_OPEN, listener.family)
|
||||
udp_by_src[srcip] = chan, now + 30
|
||||
|
||||
hdr = "%s,%r," % (dstip[0], dstip[1])
|
||||
mux.send(chan, ssnet.CMD_UDP_DATA, hdr + data)
|
||||
|
||||
expire_connections(now, mux)
|
||||
|
||||
|
||||
def dns_done(chan, data, method, sock, srcip, dstip, mux):
|
||||
debug3('dns_done: channel=%d src=%r dst=%r\n' % (chan, srcip, dstip))
|
||||
del mux.channels[chan]
|
||||
del dnsreqs[chan]
|
||||
if method == "tproxy":
|
||||
debug3('doing send from %r to %r\n' % (srcip, dstip,))
|
||||
sender = socket.socket(sock.family, socket.SOCK_DGRAM)
|
||||
sender.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
sender.setsockopt(socket.SOL_IP, IP_TRANSPARENT, 1)
|
||||
sender.bind(srcip)
|
||||
sender.sendto(data, dstip)
|
||||
sender.close()
|
||||
else:
|
||||
debug3('doing sendto %r\n' % (dstip,))
|
||||
sock.sendto(data, dstip)
|
||||
|
||||
|
||||
def ondns(listener, method, mux, handlers):
|
||||
now = time.time()
|
||||
srcip, dstip, data = recv_udp(listener, 4096)
|
||||
if method == "tproxy" and not dstip:
|
||||
debug1(
|
||||
"-- ignored UDP from %r: "
|
||||
"couldn't determine destination IP address\n" % (srcip,))
|
||||
return
|
||||
debug1('DNS request from %r to %r: %d bytes\n' % (srcip, dstip, len(data)))
|
||||
chan = mux.next_channel()
|
||||
dnsreqs[chan] = now + 30
|
||||
mux.send(chan, ssnet.CMD_DNS_REQ, data)
|
||||
mux.channels[chan] = lambda cmd, data: dns_done(
|
||||
chan, data, method, listener, srcip=dstip, dstip=srcip, mux=mux)
|
||||
expire_connections(now, mux)
|
||||
|
||||
|
||||
def _main(tcp_listener, udp_listener, fw, ssh_cmd, remotename,
|
||||
python, latency_control,
|
||||
dns_listener, method, seed_hosts, auto_nets,
|
||||
syslog, daemon):
|
||||
handlers = []
|
||||
if helpers.verbose >= 1:
|
||||
helpers.logprefix = 'c : '
|
||||
else:
|
||||
helpers.logprefix = 'client: '
|
||||
debug1('connecting to server...\n')
|
||||
|
||||
try:
|
||||
(serverproc, serversock) = ssh.connect(
|
||||
ssh_cmd, remotename, python,
|
||||
stderr=ssyslog._p and ssyslog._p.stdin,
|
||||
options=dict(latency_control=latency_control, method=method))
|
||||
except socket.error, e:
|
||||
if e.args[0] == errno.EPIPE:
|
||||
raise Fatal("failed to establish ssh session (1)")
|
||||
else:
|
||||
raise
|
||||
mux = Mux(serversock, serversock)
|
||||
handlers.append(mux)
|
||||
|
||||
expected = 'SSHUTTLE0001'
|
||||
|
||||
try:
|
||||
v = 'x'
|
||||
while v and v != '\0':
|
||||
v = serversock.recv(1)
|
||||
v = 'x'
|
||||
while v and v != '\0':
|
||||
v = serversock.recv(1)
|
||||
initstring = serversock.recv(len(expected))
|
||||
except socket.error, e:
|
||||
if e.args[0] == errno.ECONNRESET:
|
||||
raise Fatal("failed to establish ssh session (2)")
|
||||
else:
|
||||
raise
|
||||
|
||||
rv = serverproc.poll()
|
||||
if rv:
|
||||
raise Fatal('server died with error code %d' % rv)
|
||||
|
||||
if initstring != expected:
|
||||
raise Fatal('expected server init string %r; got %r'
|
||||
% (expected, initstring))
|
||||
debug1('connected.\n')
|
||||
print 'Connected.'
|
||||
sys.stdout.flush()
|
||||
if daemon:
|
||||
daemonize()
|
||||
log('daemonizing (%s).\n' % _pidname)
|
||||
elif syslog:
|
||||
debug1('switching to syslog.\n')
|
||||
ssyslog.stderr_to_syslog()
|
||||
|
||||
def onroutes(routestr):
|
||||
if auto_nets:
|
||||
for line in routestr.strip().split('\n'):
|
||||
(family, ip, width) = line.split(',', 2)
|
||||
fw.auto_nets.append((int(family), ip, int(width)))
|
||||
|
||||
# we definitely want to do this *after* starting ssh, or we might end
|
||||
# up intercepting the ssh connection!
|
||||
#
|
||||
# Moreover, now that we have the --auto-nets option, we have to wait
|
||||
# for the server to send us that message anyway. Even if we haven't
|
||||
# set --auto-nets, we might as well wait for the message first, then
|
||||
# ignore its contents.
|
||||
mux.got_routes = None
|
||||
fw.start()
|
||||
mux.got_routes = onroutes
|
||||
|
||||
def onhostlist(hostlist):
|
||||
debug2('got host list: %r\n' % hostlist)
|
||||
for line in hostlist.strip().split():
|
||||
if line:
|
||||
name, ip = line.split(',', 1)
|
||||
fw.sethostip(name, ip)
|
||||
mux.got_host_list = onhostlist
|
||||
|
||||
tcp_listener.add_handler(handlers, onaccept_tcp, method, mux)
|
||||
|
||||
if udp_listener:
|
||||
udp_listener.add_handler(handlers, onaccept_udp, method, mux)
|
||||
|
||||
if dns_listener:
|
||||
dns_listener.add_handler(handlers, ondns, method, mux)
|
||||
|
||||
if seed_hosts is not None:
|
||||
debug1('seed_hosts: %r\n' % seed_hosts)
|
||||
mux.send(0, ssnet.CMD_HOST_REQ, '\n'.join(seed_hosts))
|
||||
|
||||
while 1:
|
||||
rv = serverproc.poll()
|
||||
if rv:
|
||||
raise Fatal('server died with error code %d' % rv)
|
||||
|
||||
ssnet.runonce(handlers, mux)
|
||||
if latency_control:
|
||||
mux.check_fullness()
|
||||
mux.callback()
|
||||
|
||||
|
||||
def main(listenip_v6, listenip_v4,
|
||||
ssh_cmd, remotename, python, latency_control, dns,
|
||||
method, seed_hosts, auto_nets,
|
||||
subnets_include, subnets_exclude, syslog, daemon, pidfile):
|
||||
|
||||
if syslog:
|
||||
ssyslog.start_syslog()
|
||||
if daemon:
|
||||
try:
|
||||
check_daemon(pidfile)
|
||||
except Fatal, e:
|
||||
log("%s\n" % e)
|
||||
return 5
|
||||
debug1('Starting sshuttle proxy.\n')
|
||||
|
||||
if recvmsg is not None:
|
||||
debug1("recvmsg %s support enabled.\n" % recvmsg)
|
||||
|
||||
if method == "tproxy":
|
||||
if recvmsg is not None:
|
||||
debug1("tproxy UDP support enabled.\n")
|
||||
udp = True
|
||||
else:
|
||||
debug1("tproxy UDP support requires recvmsg function.\n")
|
||||
udp = False
|
||||
if dns and recvmsg is None:
|
||||
debug1("tproxy DNS support requires recvmsg function.\n")
|
||||
dns = False
|
||||
else:
|
||||
debug1("UDP support requires tproxy; disabling UDP.\n")
|
||||
udp = False
|
||||
|
||||
if listenip_v6 and listenip_v6[1] and listenip_v4 and listenip_v4[1]:
|
||||
# if both ports given, no need to search for a spare port
|
||||
ports = [0, ]
|
||||
else:
|
||||
# if at least one port missing, we have to search
|
||||
ports = xrange(12300, 9000, -1)
|
||||
|
||||
# search for free ports and try to bind
|
||||
last_e = None
|
||||
redirectport_v6 = 0
|
||||
redirectport_v4 = 0
|
||||
bound = False
|
||||
debug2('Binding redirector:')
|
||||
for port in ports:
|
||||
debug2(' %d' % port)
|
||||
tcp_listener = MultiListener()
|
||||
tcp_listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
|
||||
if udp:
|
||||
udp_listener = MultiListener(socket.SOCK_DGRAM)
|
||||
udp_listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
else:
|
||||
udp_listener = None
|
||||
|
||||
if listenip_v6 and listenip_v6[1]:
|
||||
lv6 = listenip_v6
|
||||
redirectport_v6 = lv6[1]
|
||||
elif listenip_v6:
|
||||
lv6 = (listenip_v6[0], port)
|
||||
redirectport_v6 = port
|
||||
else:
|
||||
lv6 = None
|
||||
redirectport_v6 = 0
|
||||
|
||||
if listenip_v4 and listenip_v4[1]:
|
||||
lv4 = listenip_v4
|
||||
redirectport_v4 = lv4[1]
|
||||
elif listenip_v4:
|
||||
lv4 = (listenip_v4[0], port)
|
||||
redirectport_v4 = port
|
||||
else:
|
||||
lv4 = None
|
||||
redirectport_v4 = 0
|
||||
|
||||
try:
|
||||
tcp_listener.bind(lv6, lv4)
|
||||
if udp_listener:
|
||||
udp_listener.bind(lv6, lv4)
|
||||
bound = True
|
||||
break
|
||||
except socket.error, e:
|
||||
if e.errno == errno.EADDRINUSE:
|
||||
last_e = e
|
||||
else:
|
||||
raise e
|
||||
debug2('\n')
|
||||
if not bound:
|
||||
assert(last_e)
|
||||
raise last_e
|
||||
tcp_listener.listen(10)
|
||||
tcp_listener.print_listening("TCP redirector")
|
||||
if udp_listener:
|
||||
udp_listener.print_listening("UDP redirector")
|
||||
|
||||
bound = False
|
||||
if dns:
|
||||
# search for spare port for DNS
|
||||
debug2('Binding DNS:')
|
||||
ports = xrange(12300, 9000, -1)
|
||||
for port in ports:
|
||||
debug2(' %d' % port)
|
||||
dns_listener = MultiListener(socket.SOCK_DGRAM)
|
||||
|
||||
if listenip_v6:
|
||||
lv6 = (listenip_v6[0], port)
|
||||
dnsport_v6 = port
|
||||
else:
|
||||
lv6 = None
|
||||
dnsport_v6 = 0
|
||||
|
||||
if listenip_v4:
|
||||
lv4 = (listenip_v4[0], port)
|
||||
dnsport_v4 = port
|
||||
else:
|
||||
lv4 = None
|
||||
dnsport_v4 = 0
|
||||
|
||||
try:
|
||||
dns_listener.bind(lv6, lv4)
|
||||
bound = True
|
||||
break
|
||||
except socket.error, e:
|
||||
if e.errno == errno.EADDRINUSE:
|
||||
last_e = e
|
||||
else:
|
||||
raise e
|
||||
debug2('\n')
|
||||
dns_listener.print_listening("DNS")
|
||||
if not bound:
|
||||
assert(last_e)
|
||||
raise last_e
|
||||
else:
|
||||
dnsport_v6 = 0
|
||||
dnsport_v4 = 0
|
||||
dns_listener = None
|
||||
|
||||
fw = FirewallClient(redirectport_v6, redirectport_v4, subnets_include,
|
||||
subnets_exclude, dnsport_v6, dnsport_v4, method, udp)
|
||||
|
||||
if fw.method == "tproxy":
|
||||
tcp_listener.setsockopt(socket.SOL_IP, IP_TRANSPARENT, 1)
|
||||
if udp_listener:
|
||||
udp_listener.setsockopt(socket.SOL_IP, IP_TRANSPARENT, 1)
|
||||
if udp_listener.v4 is not None:
|
||||
udp_listener.v4.setsockopt(
|
||||
socket.SOL_IP, IP_RECVORIGDSTADDR, 1)
|
||||
if udp_listener.v6 is not None:
|
||||
udp_listener.v6.setsockopt(SOL_IPV6, IPV6_RECVORIGDSTADDR, 1)
|
||||
if dns_listener:
|
||||
dns_listener.setsockopt(socket.SOL_IP, IP_TRANSPARENT, 1)
|
||||
if dns_listener.v4 is not None:
|
||||
dns_listener.v4.setsockopt(
|
||||
socket.SOL_IP, IP_RECVORIGDSTADDR, 1)
|
||||
if dns_listener.v6 is not None:
|
||||
dns_listener.v6.setsockopt(SOL_IPV6, IPV6_RECVORIGDSTADDR, 1)
|
||||
|
||||
if fw.method == "pf":
|
||||
global pf_command_file
|
||||
pf_command_file = fw.pfile
|
||||
|
||||
try:
|
||||
return _main(tcp_listener, udp_listener, fw, ssh_cmd, remotename,
|
||||
python, latency_control, dns_listener,
|
||||
fw.method, seed_hosts, auto_nets, syslog,
|
||||
daemon)
|
||||
finally:
|
||||
try:
|
||||
if daemon:
|
||||
# it's not our child anymore; can't waitpid
|
||||
fw.p.returncode = 0
|
||||
fw.done()
|
||||
finally:
|
||||
if daemon:
|
||||
daemon_cleanup()
|
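Aside (not part of the diff): the redirector and DNS binding loops above share the same pattern of counting down from port 12300 until bind() succeeds. A standalone sketch of that pattern, with a hypothetical helper name, written in the same Python 2 style as the code above:

import errno
import socket

def find_spare_port(host='127.0.0.1'):
    last_e = None
    for port in xrange(12300, 9000, -1):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.bind((host, port))
            return s, port          # caller keeps the bound socket
        except socket.error, e:
            s.close()
            if e.errno != errno.EADDRINUSE:
                raise               # only "address in use" means "try the next port"
            last_e = e
    raise last_e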
0
src/compat/__init__.py
Normal file
1305
src/compat/ssubprocess.py
Normal file
File diff suppressed because it is too large
7
src/default.8.do
Normal file
@ -0,0 +1,7 @@
|
||||
exec >&2
|
||||
if pandoc </dev/null 2>/dev/null; then
|
||||
pandoc -s -r markdown -w man -o $3 $2.md
|
||||
else
|
||||
echo "Warning: pandoc not installed; can't generate manpages."
|
||||
redo-always
|
||||
fi
|
175
src/do
Executable file
@ -0,0 +1,175 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# A minimal alternative to djb redo that doesn't support incremental builds.
|
||||
# For the full version, visit http://github.com/apenwarr/redo
|
||||
#
|
||||
# The author disclaims copyright to this source file and hereby places it in
|
||||
# the public domain. (2010 12 14)
|
||||
#
|
||||
|
||||
# By default, no output coloring.
|
||||
green=""
|
||||
bold=""
|
||||
plain=""
|
||||
|
||||
if [ -n "$TERM" -a "$TERM" != "dumb" ] && tty <&2 >/dev/null 2>&1; then
|
||||
green="$(printf '\033[32m')"
|
||||
bold="$(printf '\033[1m')"
|
||||
plain="$(printf '\033[m')"
|
||||
fi
|
||||
|
||||
_dirsplit()
|
||||
{
|
||||
base=${1##*/}
|
||||
dir=${1%$base}
|
||||
}
|
||||
|
||||
dirname()
|
||||
(
|
||||
_dirsplit "$1"
|
||||
dir=${dir%/}
|
||||
echo "${dir:-.}"
|
||||
)
|
||||
|
||||
_dirsplit "$0"
|
||||
export REDO=$(cd "${dir:-.}" && echo "$PWD/$base")
|
||||
|
||||
DO_TOP=
|
||||
if [ -z "$DO_BUILT" ]; then
|
||||
DO_TOP=1
|
||||
[ -n "$*" ] || set all # only toplevel redo has a default target
|
||||
export DO_BUILT=$PWD/.do_built
|
||||
: >>"$DO_BUILT"
|
||||
echo "Removing previously built files..." >&2
|
||||
sort -u "$DO_BUILT" | tee "$DO_BUILT.new" |
|
||||
while read f; do printf "%s\0%s.did\0" "$f" "$f"; done |
|
||||
xargs -0 rm -f 2>/dev/null
|
||||
mv "$DO_BUILT.new" "$DO_BUILT"
|
||||
DO_PATH=$DO_BUILT.dir
|
||||
export PATH=$DO_PATH:$PATH
|
||||
rm -rf "$DO_PATH"
|
||||
mkdir "$DO_PATH"
|
||||
for d in redo redo-ifchange; do
|
||||
ln -s "$REDO" "$DO_PATH/$d";
|
||||
done
|
||||
[ -e /bin/true ] && TRUE=/bin/true || TRUE=/usr/bin/true
|
||||
for d in redo-ifcreate redo-stamp redo-always; do
|
||||
ln -s $TRUE "$DO_PATH/$d";
|
||||
done
|
||||
fi
|
||||
|
||||
|
||||
_find_dofile_pwd()
|
||||
{
|
||||
dofile=default.$1.do
|
||||
while :; do
|
||||
dofile=default.${dofile#default.*.}
|
||||
[ -e "$dofile" -o "$dofile" = default.do ] && break
|
||||
done
|
||||
ext=${dofile#default}
|
||||
ext=${ext%.do}
|
||||
base=${1%$ext}
|
||||
}
|
||||
|
||||
|
||||
_find_dofile()
|
||||
{
|
||||
local prefix=
|
||||
while :; do
|
||||
_find_dofile_pwd "$1"
|
||||
[ -e "$dofile" ] && break
|
||||
[ "$PWD" = "/" ] && break
|
||||
target=${PWD##*/}/$target
|
||||
tmp=${PWD##*/}/$tmp
|
||||
prefix=${PWD##*/}/$prefix
|
||||
cd ..
|
||||
done
|
||||
base=$prefix$base
|
||||
}
|
||||
|
||||
|
||||
_run_dofile()
|
||||
{
|
||||
export DO_DEPTH="$DO_DEPTH "
|
||||
export REDO_TARGET=$PWD/$target
|
||||
local line1
|
||||
set -e
|
||||
read line1 <"$PWD/$dofile"
|
||||
cmd=${line1#"#!/"}
|
||||
if [ "$cmd" != "$line1" ]; then
|
||||
/$cmd "$PWD/$dofile" "$@" >"$tmp.tmp2"
|
||||
else
|
||||
:; . "$PWD/$dofile" >"$tmp.tmp2"
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
_do()
|
||||
{
|
||||
local dir=$1 target=$2 tmp=$3
|
||||
if [ ! -e "$target" ] || [ -d "$target" -a ! -e "$target.did" ]; then
|
||||
printf '%sdo %s%s%s%s\n' \
|
||||
"$green" "$DO_DEPTH" "$bold" "$dir$target" "$plain" >&2
|
||||
echo "$PWD/$target" >>"$DO_BUILT"
|
||||
dofile=$target.do
|
||||
base=$target
|
||||
ext=
|
||||
[ -e "$target.do" ] || _find_dofile "$target"
|
||||
if [ ! -e "$dofile" ]; then
|
||||
echo "do: $target: no .do file" >&2
|
||||
return 1
|
||||
fi
|
||||
[ ! -e "$DO_BUILT" ] || [ ! -d "$(dirname "$target")" ] ||
|
||||
: >>"$target.did"
|
||||
( _run_dofile "$target" "$base" "$tmp.tmp" )
|
||||
rv=$?
|
||||
if [ $rv != 0 ]; then
|
||||
printf "do: %s%s\n" "$DO_DEPTH" \
|
||||
"$dir$target: got exit code $rv" >&2
|
||||
rm -f "$tmp.tmp" "$tmp.tmp2"
|
||||
return $rv
|
||||
fi
|
||||
mv "$tmp.tmp" "$target" 2>/dev/null ||
|
||||
! test -s "$tmp.tmp2" ||
|
||||
mv "$tmp.tmp2" "$target" 2>/dev/null
|
||||
rm -f "$tmp.tmp2"
|
||||
else
|
||||
echo "do $DO_DEPTH$target exists." >&2
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# Make corrections for directories that don't actually exist yet.
|
||||
_dir_shovel()
|
||||
{
|
||||
local dir base
|
||||
xdir=$1 xbase=$2 xbasetmp=$2
|
||||
while [ ! -d "$xdir" -a -n "$xdir" ]; do
|
||||
_dirsplit "${xdir%/}"
|
||||
xbasetmp=${base}__$xbase
|
||||
xdir=$dir xbase=$base/$xbase
|
||||
echo "xbasetmp='$xbasetmp'" >&2
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
redo()
|
||||
{
|
||||
for i in "$@"; do
|
||||
_dirsplit "$i"
|
||||
_dir_shovel "$dir" "$base"
|
||||
dir=$xdir base=$xbase basetmp=$xbasetmp
|
||||
( cd "$dir" && _do "$dir" "$base" "$basetmp" ) || return 1
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
set -e
|
||||
redo "$@"
|
||||
|
||||
if [ -n "$DO_TOP" ]; then
|
||||
echo "Removing stamp files..." >&2
|
||||
[ ! -e "$DO_BUILT" ] ||
|
||||
while read f; do printf "%s.did\0" "$f"; done <"$DO_BUILT" |
|
||||
xargs -0 rm -f 2>/dev/null
|
||||
fi
|
832
src/firewall.py
Normal file
@ -0,0 +1,832 @@
|
||||
import errno
|
||||
import socket
|
||||
import select
|
||||
import signal
|
||||
import struct
|
||||
import compat.ssubprocess as ssubprocess
|
||||
import ssyslog
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
from helpers import log, debug1, debug3, islocal, Fatal, family_to_string, \
|
||||
resolvconf_nameservers
|
||||
from fcntl import ioctl
|
||||
from ctypes import c_char, c_uint8, c_uint16, c_uint32, Union, Structure, \
|
||||
sizeof, addressof, memmove
|
||||
|
||||
|
||||
# python doesn't have a definition for this
|
||||
IPPROTO_DIVERT = 254
|
||||
|
||||
|
||||
def nonfatal(func, *args):
|
||||
try:
|
||||
func(*args)
|
||||
except Fatal, e:
|
||||
log('error: %s\n' % e)
|
||||
|
||||
|
||||
def ipt_chain_exists(family, table, name):
|
||||
if family == socket.AF_INET6:
|
||||
cmd = 'ip6tables'
|
||||
elif family == socket.AF_INET:
|
||||
cmd = 'iptables'
|
||||
else:
|
||||
raise Exception('Unsupported family "%s"' % family_to_string(family))
|
||||
argv = [cmd, '-t', table, '-nL']
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE)
|
||||
for line in p.stdout:
|
||||
if line.startswith('Chain %s ' % name):
|
||||
return True
|
||||
rv = p.wait()
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
|
||||
|
||||
def _ipt(family, table, *args):
|
||||
if family == socket.AF_INET6:
|
||||
argv = ['ip6tables', '-t', table] + list(args)
|
||||
elif family == socket.AF_INET:
|
||||
argv = ['iptables', '-t', table] + list(args)
|
||||
else:
|
||||
raise Exception('Unsupported family "%s"' % family_to_string(family))
|
||||
debug1('>> %s\n' % ' '.join(argv))
|
||||
rv = ssubprocess.call(argv)
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
|
||||
|
||||
_no_ttl_module = False
|
||||
|
||||
|
||||
def _ipt_ttl(family, *args):
|
||||
global _no_ttl_module
|
||||
if not _no_ttl_module:
|
||||
# we avoid infinite loops by generating server-side connections
|
||||
# with ttl 42. This makes the client side not recapture those
|
||||
# connections, in case client == server.
|
||||
try:
|
||||
argsplus = list(args) + ['-m', 'ttl', '!', '--ttl', '42']
|
||||
_ipt(family, *argsplus)
|
||||
except Fatal:
|
||||
_ipt(family, *args)
|
||||
# we only get here if the non-ttl attempt succeeds
|
||||
log('sshuttle: warning: your iptables is missing '
|
||||
'the ttl module.\n')
|
||||
_no_ttl_module = True
|
||||
else:
|
||||
_ipt(family, *args)
|
||||
|
||||
|
||||
# We name the chain based on the transproxy port number so that it's possible
|
||||
# to run multiple copies of sshuttle at the same time. Of course, the
|
||||
# multiple copies shouldn't have overlapping subnets, or only the most-
|
||||
# recently-started one will win (because we use "-I OUTPUT 1" instead of
|
||||
# "-A OUTPUT").
|
||||
def do_iptables_nat(port, dnsport, family, subnets, udp):
|
||||
# only ipv4 supported with NAT
|
||||
if family != socket.AF_INET:
|
||||
raise Exception(
|
||||
'Address family "%s" unsupported by nat method'
|
||||
% family_to_string(family))
|
||||
if udp:
|
||||
raise Exception("UDP not supported by nat method")
|
||||
|
||||
table = "nat"
|
||||
|
||||
def ipt(*args):
|
||||
return _ipt(family, table, *args)
|
||||
|
||||
def ipt_ttl(*args):
|
||||
return _ipt_ttl(family, table, *args)
|
||||
|
||||
chain = 'sshuttle-%s' % port
|
||||
|
||||
# basic cleanup/setup of chains
|
||||
if ipt_chain_exists(family, table, chain):
|
||||
nonfatal(ipt, '-D', 'OUTPUT', '-j', chain)
|
||||
nonfatal(ipt, '-D', 'PREROUTING', '-j', chain)
|
||||
nonfatal(ipt, '-F', chain)
|
||||
ipt('-X', chain)
|
||||
|
||||
if subnets or dnsport:
|
||||
ipt('-N', chain)
|
||||
ipt('-F', chain)
|
||||
ipt('-I', 'OUTPUT', '1', '-j', chain)
|
||||
ipt('-I', 'PREROUTING', '1', '-j', chain)
|
||||
|
||||
if subnets:
|
||||
# create new subnet entries. Note that we're sorting in a very
|
||||
# particular order: we need to go from most-specific (largest swidth)
|
||||
# to least-specific, and at any given level of specificity, we want
|
||||
# excludes to come first. That's why the columns are in such a non-
|
||||
# intuitive order.
|
||||
for f, swidth, sexclude, snet \
|
||||
in sorted(subnets, key=lambda s: s[1], reverse=True):
|
||||
if sexclude:
|
||||
ipt('-A', chain, '-j', 'RETURN',
|
||||
'--dest', '%s/%s' % (snet, swidth),
|
||||
'-p', 'tcp')
|
||||
else:
|
||||
ipt_ttl('-A', chain, '-j', 'REDIRECT',
|
||||
'--dest', '%s/%s' % (snet, swidth),
|
||||
'-p', 'tcp',
|
||||
'--to-ports', str(port))
|
||||
|
||||
if dnsport:
|
||||
nslist = resolvconf_nameservers()
|
||||
for f, ip in filter(lambda i: i[0] == family, nslist):
|
||||
ipt_ttl('-A', chain, '-j', 'REDIRECT',
|
||||
'--dest', '%s/32' % ip,
|
||||
'-p', 'udp',
|
||||
'--dport', '53',
|
||||
'--to-ports', str(dnsport))
|
||||
|
||||
|
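Aside (not part of the diff): a small illustration of the subnet ordering used above. Tuples are (family, swidth, sexclude, snet); sorting on swidth in reverse puts the most specific rules first, so a narrow exclude can take effect before a broad redirect. The sample subnets are made up.

import socket

subnets = [
    (socket.AF_INET, 0, False, '0.0.0.0'),        # redirect everything ...
    (socket.AF_INET, 24, True, '192.168.1.0'),    # ... except the local LAN
]
for f, swidth, sexclude, snet in sorted(subnets, key=lambda s: s[1], reverse=True):
    print('%s %s/%d' % ('RETURN' if sexclude else 'REDIRECT', snet, swidth))
# RETURN 192.168.1.0/24
# REDIRECT 0.0.0.0/0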
||||
def do_iptables_tproxy(port, dnsport, family, subnets, udp):
|
||||
if family not in [socket.AF_INET, socket.AF_INET6]:
|
||||
raise Exception(
|
||||
'Address family "%s" unsupported by tproxy method'
|
||||
% family_to_string(family))
|
||||
|
||||
table = "mangle"
|
||||
|
||||
def ipt(*args):
|
||||
return _ipt(family, table, *args)
|
||||
|
||||
def ipt_ttl(*args):
|
||||
return _ipt_ttl(family, table, *args)
|
||||
|
||||
mark_chain = 'sshuttle-m-%s' % port
|
||||
tproxy_chain = 'sshuttle-t-%s' % port
|
||||
divert_chain = 'sshuttle-d-%s' % port
|
||||
|
||||
# basic cleanup/setup of chains
|
||||
if ipt_chain_exists(family, table, mark_chain):
|
||||
ipt('-D', 'OUTPUT', '-j', mark_chain)
|
||||
ipt('-F', mark_chain)
|
||||
ipt('-X', mark_chain)
|
||||
|
||||
if ipt_chain_exists(family, table, tproxy_chain):
|
||||
ipt('-D', 'PREROUTING', '-j', tproxy_chain)
|
||||
ipt('-F', tproxy_chain)
|
||||
ipt('-X', tproxy_chain)
|
||||
|
||||
if ipt_chain_exists(family, table, divert_chain):
|
||||
ipt('-F', divert_chain)
|
||||
ipt('-X', divert_chain)
|
||||
|
||||
if subnets or dnsport:
|
||||
ipt('-N', mark_chain)
|
||||
ipt('-F', mark_chain)
|
||||
ipt('-N', divert_chain)
|
||||
ipt('-F', divert_chain)
|
||||
ipt('-N', tproxy_chain)
|
||||
ipt('-F', tproxy_chain)
|
||||
ipt('-I', 'OUTPUT', '1', '-j', mark_chain)
|
||||
ipt('-I', 'PREROUTING', '1', '-j', tproxy_chain)
|
||||
ipt('-A', divert_chain, '-j', 'MARK', '--set-mark', '1')
|
||||
ipt('-A', divert_chain, '-j', 'ACCEPT')
|
||||
ipt('-A', tproxy_chain, '-m', 'socket', '-j', divert_chain,
|
||||
'-m', 'tcp', '-p', 'tcp')
|
||||
if subnets and udp:
|
||||
ipt('-A', tproxy_chain, '-m', 'socket', '-j', divert_chain,
|
||||
'-m', 'udp', '-p', 'udp')
|
||||
|
||||
if dnsport:
|
||||
nslist = resolvconf_nameservers()
|
||||
for f, ip in filter(lambda i: i[0] == family, nslist):
|
||||
ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', '1',
|
||||
'--dest', '%s/32' % ip,
|
||||
'-m', 'udp', '-p', 'udp', '--dport', '53')
|
||||
ipt('-A', tproxy_chain, '-j', 'TPROXY', '--tproxy-mark', '0x1/0x1',
|
||||
'--dest', '%s/32' % ip,
|
||||
'-m', 'udp', '-p', 'udp', '--dport', '53',
|
||||
'--on-port', str(dnsport))
|
||||
|
||||
if subnets:
|
||||
for f, swidth, sexclude, snet \
|
||||
in sorted(subnets, key=lambda s: s[1], reverse=True):
|
||||
if sexclude:
|
||||
ipt('-A', mark_chain, '-j', 'RETURN',
|
||||
'--dest', '%s/%s' % (snet, swidth),
|
||||
'-m', 'tcp', '-p', 'tcp')
|
||||
ipt('-A', tproxy_chain, '-j', 'RETURN',
|
||||
'--dest', '%s/%s' % (snet, swidth),
|
||||
'-m', 'tcp', '-p', 'tcp')
|
||||
else:
|
||||
ipt('-A', mark_chain, '-j', 'MARK',
|
||||
'--set-mark', '1',
|
||||
'--dest', '%s/%s' % (snet, swidth),
|
||||
'-m', 'tcp', '-p', 'tcp')
|
||||
ipt('-A', tproxy_chain, '-j', 'TPROXY',
|
||||
'--tproxy-mark', '0x1/0x1',
|
||||
'--dest', '%s/%s' % (snet, swidth),
|
||||
'-m', 'tcp', '-p', 'tcp',
|
||||
'--on-port', str(port))
|
||||
|
||||
if sexclude and udp:
|
||||
ipt('-A', mark_chain, '-j', 'RETURN',
|
||||
'--dest', '%s/%s' % (snet, swidth),
|
||||
'-m', 'udp', '-p', 'udp')
|
||||
ipt('-A', tproxy_chain, '-j', 'RETURN',
|
||||
'--dest', '%s/%s' % (snet, swidth),
|
||||
'-m', 'udp', '-p', 'udp')
|
||||
elif udp:
|
||||
ipt('-A', mark_chain, '-j', 'MARK',
|
||||
'--set-mark', '1',
|
||||
'--dest', '%s/%s' % (snet, swidth),
|
||||
'-m', 'udp', '-p', 'udp')
|
||||
ipt('-A', tproxy_chain, '-j', 'TPROXY',
|
||||
'--tproxy-mark', '0x1/0x1',
|
||||
'--dest', '%s/%s' % (snet, swidth),
|
||||
'-m', 'udp', '-p', 'udp',
|
||||
'--on-port', str(port))
|
||||
|
||||
|
||||
def ipfw_rule_exists(n):
|
||||
argv = ['ipfw', 'list']
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE)
|
||||
found = False
|
||||
for line in p.stdout:
|
||||
if line.startswith('%05d ' % n):
|
||||
if not ('ipttl 42' in line
|
||||
or ('skipto %d' % (n + 1)) in line
|
||||
or 'check-state' in line):
|
||||
log('non-sshuttle ipfw rule: %r\n' % line.strip())
|
||||
raise Fatal('non-sshuttle ipfw rule #%d already exists!' % n)
|
||||
found = True
|
||||
rv = p.wait()
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
return found
|
||||
|
||||
|
||||
_oldctls = {}
|
||||
|
||||
|
||||
def _fill_oldctls(prefix):
|
||||
argv = ['sysctl', prefix]
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE)
|
||||
for line in p.stdout:
|
||||
assert(line[-1] == '\n')
|
||||
(k, v) = line[:-1].split(': ', 1)
|
||||
_oldctls[k] = v
|
||||
rv = p.wait()
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
if not line:
|
||||
raise Fatal('%r returned no data' % (argv,))
|
||||
|
||||
|
||||
def _sysctl_set(name, val):
|
||||
argv = ['sysctl', '-w', '%s=%s' % (name, val)]
|
||||
debug1('>> %s\n' % ' '.join(argv))
|
||||
return ssubprocess.call(argv, stdout=open('/dev/null', 'w'))
|
||||
|
||||
|
||||
_changedctls = []
|
||||
|
||||
|
||||
def sysctl_set(name, val, permanent=False):
|
||||
PREFIX = 'net.inet.ip'
|
||||
assert(name.startswith(PREFIX + '.'))
|
||||
val = str(val)
|
||||
if not _oldctls:
|
||||
_fill_oldctls(PREFIX)
|
||||
if not (name in _oldctls):
|
||||
debug1('>> No such sysctl: %r\n' % name)
|
||||
return False
|
||||
oldval = _oldctls[name]
|
||||
if val != oldval:
|
||||
rv = _sysctl_set(name, val)
|
||||
if rv == 0 and permanent:
|
||||
debug1('>> ...saving permanently in /etc/sysctl.conf\n')
|
||||
f = open('/etc/sysctl.conf', 'a')
|
||||
f.write('\n'
|
||||
'# Added by sshuttle\n'
|
||||
'%s=%s\n' % (name, val))
|
||||
f.close()
|
||||
else:
|
||||
_changedctls.append(name)
|
||||
return True
|
||||
|
||||
|
||||
def _udp_unpack(p):
|
||||
src = (socket.inet_ntoa(p[12:16]), struct.unpack('!H', p[20:22])[0])
|
||||
dst = (socket.inet_ntoa(p[16:20]), struct.unpack('!H', p[22:24])[0])
|
||||
return src, dst
|
||||
|
||||
|
||||
def _udp_repack(p, src, dst):
|
||||
addrs = socket.inet_aton(src[0]) + socket.inet_aton(dst[0])
|
||||
ports = struct.pack('!HH', src[1], dst[1])
|
||||
return p[:12] + addrs + ports + p[24:]
|
||||
|
||||
|
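Aside (not part of the diff): _udp_unpack() and _udp_repack() above slice a raw IPv4+UDP packet at fixed offsets (source address at bytes 12-16, destination address at 16-20, ports at 20-24). A quick check with a hand-built header, assuming a 20-byte IP header with no options and made-up addresses:

import socket
import struct

pkt = ('\0' * 12                            # first 12 bytes of the IP header
       + socket.inet_aton('10.0.0.5')       # source IP
       + socket.inet_aton('1.1.1.1')        # destination IP
       + struct.pack('!HH', 9999, 53)       # UDP source/destination ports
       + 'payload')
print(_udp_unpack(pkt))
# (('10.0.0.5', 9999), ('1.1.1.1', 53))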
||||
_real_dns_server = [None]
|
||||
|
||||
|
||||
def _handle_diversion(divertsock, dnsport):
|
||||
p, tag = divertsock.recvfrom(4096)
|
||||
src, dst = _udp_unpack(p)
|
||||
debug3('got diverted packet from %r to %r\n' % (src, dst))
|
||||
if dst[1] == 53:
|
||||
# outgoing DNS
|
||||
debug3('...packet is a DNS request.\n')
|
||||
_real_dns_server[0] = dst
|
||||
dst = ('127.0.0.1', dnsport)
|
||||
elif src[1] == dnsport:
|
||||
if islocal(src[0], divertsock.family):
|
||||
debug3('...packet is a DNS response.\n')
|
||||
src = _real_dns_server[0]
|
||||
else:
|
||||
log('weird?! unexpected divert from %r to %r\n' % (src, dst))
|
||||
assert(0)
|
||||
newp = _udp_repack(p, src, dst)
|
||||
divertsock.sendto(newp, tag)
|
||||
|
||||
|
||||
def ipfw(*args):
|
||||
argv = ['ipfw', '-q'] + list(args)
|
||||
debug1('>> %s\n' % ' '.join(argv))
|
||||
rv = ssubprocess.call(argv)
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
|
||||
|
||||
def do_ipfw(port, dnsport, family, subnets, udp):
|
||||
# IPv6 not supported
|
||||
if family not in [socket.AF_INET, ]:
|
||||
raise Exception(
|
||||
'Address family "%s" unsupported by ipfw method'
|
||||
% family_to_string(family))
|
||||
if udp:
|
||||
raise Exception("UDP not supported by ipfw method")
|
||||
|
||||
sport = str(port)
|
||||
xsport = str(port + 1)
|
||||
|
||||
# cleanup any existing rules
|
||||
if ipfw_rule_exists(port):
|
||||
ipfw('delete', sport)
|
||||
|
||||
while _changedctls:
|
||||
name = _changedctls.pop()
|
||||
oldval = _oldctls[name]
|
||||
_sysctl_set(name, oldval)
|
||||
|
||||
if subnets or dnsport:
|
||||
sysctl_set('net.inet.ip.fw.enable', 1)
|
||||
changed = sysctl_set('net.inet.ip.scopedroute', 0, permanent=True)
|
||||
if changed:
|
||||
log("\n"
|
||||
" WARNING: ONE-TIME NETWORK DISRUPTION:\n"
|
||||
" =====================================\n"
|
||||
"sshuttle has changed a MacOS kernel setting to work around\n"
|
||||
"a bug in MacOS 10.6. This will cause your network to drop\n"
|
||||
"within 5-10 minutes unless you restart your network\n"
|
||||
"interface (change wireless networks or unplug/plug the\n"
|
||||
"ethernet port) NOW, then restart sshuttle. The fix is\n"
|
||||
"permanent; you only have to do this once.\n\n")
|
||||
sys.exit(1)
|
||||
|
||||
ipfw('add', sport, 'check-state', 'ip',
|
||||
'from', 'any', 'to', 'any')
|
||||
|
||||
if subnets:
|
||||
# create new subnet entries
|
||||
for f, swidth, sexclude, snet \
|
||||
in sorted(subnets, key=lambda s: s[1], reverse=True):
|
||||
if sexclude:
|
||||
ipfw('add', sport, 'skipto', xsport,
|
||||
'tcp',
|
||||
'from', 'any', 'to', '%s/%s' % (snet, swidth))
|
||||
else:
|
||||
ipfw('add', sport, 'fwd', '127.0.0.1,%d' % port,
|
||||
'tcp',
|
||||
'from', 'any', 'to', '%s/%s' % (snet, swidth),
|
||||
'not', 'ipttl', '42', 'keep-state', 'setup')
|
||||
|
||||
# This part is much crazier than it is on Linux, because MacOS (at least
|
||||
# 10.6, and probably other versions, and maybe FreeBSD too) doesn't
|
||||
# correctly fixup the dstip/dstport for UDP packets when it puts them
|
||||
# through a 'fwd' rule. It also doesn't fixup the srcip/srcport in the
|
||||
# response packet. In Linux iptables, all that happens magically for us,
|
||||
# so we just redirect the packets and relax.
|
||||
#
|
||||
# On MacOS, we have to fix the ports ourselves. For that, we use a
|
||||
# 'divert' socket, which receives raw packets and lets us mangle them.
|
||||
#
|
||||
# Here's how it works. Let's say the local DNS server is 1.1.1.1:53,
|
||||
# and the remote DNS server is 2.2.2.2:53, and the local transproxy port
|
||||
# is 10.0.0.1:12300, and a client machine is making a request from
|
||||
# 10.0.0.5:9999. We see a packet like this:
|
||||
# 10.0.0.5:9999 -> 1.1.1.1:53
|
||||
# Since the destip:port matches one of our local nameservers, it will
|
||||
# match a 'fwd' rule, thus grabbing it on the local machine. However,
|
||||
# the local kernel will then see a packet addressed to *:53 and
|
||||
# not know what to do with it; there's nobody listening on port 53. Thus,
|
||||
# we divert it, rewriting it into this:
|
||||
# 10.0.0.5:9999 -> 10.0.0.1:12300
|
||||
# This gets proxied out to the server, which sends it to 2.2.2.2:53,
|
||||
# and the answer comes back, and the proxy sends it back out like this:
|
||||
# 10.0.0.1:12300 -> 10.0.0.5:9999
|
||||
# But that's wrong! The original machine expected an answer from
|
||||
# 1.1.1.1:53, so we have to divert the *answer* and rewrite it:
|
||||
# 1.1.1.1:53 -> 10.0.0.5:9999
|
||||
#
|
||||
# See? Easy stuff.
|
||||
if dnsport:
|
||||
divertsock = socket.socket(socket.AF_INET, socket.SOCK_RAW,
|
||||
IPPROTO_DIVERT)
|
||||
divertsock.bind(('0.0.0.0', port)) # IP field is ignored
|
||||
|
||||
nslist = resolvconf_nameservers()
|
||||
for f, ip in filter(lambda i: i[0] == family, nslist):
|
||||
# relabel and then catch outgoing DNS requests
|
||||
ipfw('add', sport, 'divert', sport,
|
||||
'udp',
|
||||
'from', 'any', 'to', '%s/32' % ip, '53',
|
||||
'not', 'ipttl', '42')
|
||||
# relabel DNS responses
|
||||
ipfw('add', sport, 'divert', sport,
|
||||
'udp',
|
||||
'from', 'any', str(dnsport), 'to', 'any',
|
||||
'not', 'ipttl', '42')
|
||||
|
||||
def do_wait():
|
||||
while 1:
|
||||
r, w, x = select.select([sys.stdin, divertsock], [], [])
|
||||
if divertsock in r:
|
||||
_handle_diversion(divertsock, dnsport)
|
||||
if sys.stdin in r:
|
||||
return
|
||||
else:
|
||||
do_wait = None
|
||||
|
||||
return do_wait
|
||||
|
||||
|
||||
def pfctl(args, stdin = None):
|
||||
argv = ['pfctl'] + list(args.split(" "))
|
||||
debug1('>> %s\n' % ' '.join(argv))
|
||||
|
||||
p = ssubprocess.Popen(argv, stdin = ssubprocess.PIPE,
|
||||
stdout = ssubprocess.PIPE,
|
||||
stderr = ssubprocess.PIPE)
|
||||
o = p.communicate(stdin)
|
||||
if p.returncode:
|
||||
raise Fatal('%r returned %d' % (argv, p.returncode))
|
||||
|
||||
return o
|
||||
|
||||
_pf_context = {'started_by_sshuttle': False, 'Xtoken':''}
|
||||
|
||||
def do_pf(port, dnsport, family, subnets, udp):
|
||||
global _pf_started_by_sshuttle
|
||||
tables = []
|
||||
translating_rules = []
|
||||
filtering_rules = []
|
||||
|
||||
if subnets:
|
||||
includes=[]
|
||||
# If a given subnet is both included and excluded, list the exclusion
|
||||
# first; the table will ignore the second, opposite definition
|
||||
for f, swidth, sexclude, snet \
|
||||
in sorted(subnets, key=lambda s: (s[1], s[2]), reverse=True):
|
||||
includes.append("%s%s/%s" % ("!" if sexclude else "", snet, swidth))
|
||||
|
||||
tables.append('table <forward_subnets> {%s}' % ','.join(includes))
|
||||
translating_rules.append('rdr pass on lo0 proto tcp to <forward_subnets> -> 127.0.0.1 port %r' % port)
|
||||
filtering_rules.append('pass out route-to lo0 inet proto tcp to <forward_subnets> keep state')
|
||||
|
||||
if dnsport:
|
||||
nslist = resolvconf_nameservers()
|
||||
tables.append('table <dns_servers> {%s}' % ','.join([ns[1] for ns in nslist]))
|
||||
translating_rules.append('rdr pass on lo0 proto udp to <dns_servers> port 53 -> 127.0.0.1 port %r' % dnsport)
|
||||
filtering_rules.append('pass out route-to lo0 inet proto udp to <dns_servers> port 53 keep state')
|
||||
|
||||
rules = '\n'.join(tables + translating_rules + filtering_rules) + '\n'
|
||||
|
||||
pf_status = pfctl('-s all')[0]
|
||||
if not '\nrdr-anchor "sshuttle" all\n' in pf_status:
|
||||
pf_add_anchor_rule(PF_RDR, "sshuttle")
|
||||
if not '\nanchor "sshuttle" all\n' in pf_status:
|
||||
pf_add_anchor_rule(PF_PASS, "sshuttle")
|
||||
|
||||
pfctl('-a sshuttle -f /dev/stdin', rules)
|
||||
if sys.platform == "darwin":
|
||||
o = pfctl('-E')
|
||||
_pf_context['Xtoken'] = re.search(r'Token : (.+)', o[1]).group(1)
|
||||
elif 'INFO:\nStatus: Disabled' in pf_status:
|
||||
pfctl('-e')
|
||||
_pf_context['started_by_sshuttle'] = True
|
||||
else:
|
||||
pfctl('-a sshuttle -F all')
|
||||
if sys.platform == "darwin":
|
||||
pfctl('-X %s' % _pf_context['Xtoken'])
|
||||
elif _pf_context['started_by_sshuttle']:
|
||||
pfctl('-d')
|
||||
|
||||
|
||||
def program_exists(name):
|
||||
paths = (os.getenv('PATH') or os.defpath).split(os.pathsep)
|
||||
for p in paths:
|
||||
fn = '%s/%s' % (p, name)
|
||||
if os.path.exists(fn):
|
||||
return not os.path.isdir(fn) and os.access(fn, os.X_OK)
|
||||
|
||||
|
||||
hostmap = {}
|
||||
|
||||
|
||||
def rewrite_etc_hosts(port):
|
||||
HOSTSFILE = '/etc/hosts'
|
||||
BAKFILE = '%s.sbak' % HOSTSFILE
|
||||
APPEND = '# sshuttle-firewall-%d AUTOCREATED' % port
|
||||
old_content = ''
|
||||
st = None
|
||||
try:
|
||||
old_content = open(HOSTSFILE).read()
|
||||
st = os.stat(HOSTSFILE)
|
||||
except IOError, e:
|
||||
if e.errno == errno.ENOENT:
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
if old_content.strip() and not os.path.exists(BAKFILE):
|
||||
os.link(HOSTSFILE, BAKFILE)
|
||||
tmpname = "%s.%d.tmp" % (HOSTSFILE, port)
|
||||
f = open(tmpname, 'w')
|
||||
for line in old_content.rstrip().split('\n'):
|
||||
if line.find(APPEND) >= 0:
|
||||
continue
|
||||
f.write('%s\n' % line)
|
||||
for (name, ip) in sorted(hostmap.items()):
|
||||
f.write('%-30s %s\n' % ('%s %s' % (ip, name), APPEND))
|
||||
f.close()
|
||||
|
||||
if st:
|
||||
os.chown(tmpname, st.st_uid, st.st_gid)
|
||||
os.chmod(tmpname, st.st_mode)
|
||||
else:
|
||||
os.chown(tmpname, 0, 0)
|
||||
os.chmod(tmpname, 0644)
|
||||
os.rename(tmpname, HOSTSFILE)
|
||||
|
||||
|
||||
def restore_etc_hosts(port):
|
||||
global hostmap
|
||||
hostmap = {}
|
||||
rewrite_etc_hosts(port)
|
||||
|
||||
|
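Aside (not part of the diff): the exact line layout rewrite_etc_hosts() appends for each mapping, shown for a hypothetical host. The entry is padded to 30 columns and tagged so the cleanup pass can find and remove it later.

APPEND = '# sshuttle-firewall-%d AUTOCREATED' % 12300
print('%-30s %s' % ('%s %s' % ('10.1.2.3', 'fileserver'), APPEND))
# 10.1.2.3 fileserver            # sshuttle-firewall-12300 AUTOCREATED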
||||
# These are some classes and functions used to support pf in Yosemite.
|
||||
class pf_state_xport(Union):
|
||||
_fields_ = [("port", c_uint16),
|
||||
("call_id", c_uint16),
|
||||
("spi", c_uint32)]
|
||||
|
||||
class pf_addr(Structure):
|
||||
class _pfa(Union):
|
||||
_fields_ = [("v4", c_uint32), # struct in_addr
|
||||
("v6", c_uint32 * 4), # struct in6_addr
|
||||
("addr8", c_uint8 * 16),
|
||||
("addr16", c_uint16 * 8),
|
||||
("addr32", c_uint32 * 4)]
|
||||
|
||||
_fields_ = [("pfa", _pfa)]
|
||||
_anonymous_ = ("pfa",)
|
||||
|
||||
class pfioc_natlook(Structure):
|
||||
_fields_ = [("saddr", pf_addr),
|
||||
("daddr", pf_addr),
|
||||
("rsaddr", pf_addr),
|
||||
("rdaddr", pf_addr),
|
||||
("sxport", pf_state_xport),
|
||||
("dxport", pf_state_xport),
|
||||
("rsxport", pf_state_xport),
|
||||
("rdxport", pf_state_xport),
|
||||
("af", c_uint8), # sa_family_t
|
||||
("proto", c_uint8),
|
||||
("proto_variant", c_uint8),
|
||||
("direction", c_uint8)]
|
||||
|
||||
pfioc_rule = c_char * 3104 # sizeof(struct pfioc_rule)
|
||||
|
||||
pfioc_pooladdr = c_char * 1136 # sizeof(struct pfioc_pooladdr)
|
||||
|
||||
MAXPATHLEN = 1024
|
||||
|
||||
DIOCNATLOOK = ((0x40000000L | 0x80000000L) | ((sizeof(pfioc_natlook) & 0x1fff) << 16) | ((ord('D')) << 8) | (23))
|
||||
DIOCCHANGERULE = ((0x40000000L | 0x80000000L) | ((sizeof(pfioc_rule) & 0x1fff) << 16) | ((ord('D')) << 8) | (26))
|
||||
DIOCBEGINADDRS = ((0x40000000L | 0x80000000L) | ((sizeof(pfioc_pooladdr) & 0x1fff) << 16) | ((ord('D')) << 8) | (51))
|
||||
|
||||
PF_CHANGE_ADD_TAIL = 2
|
||||
PF_CHANGE_GET_TICKET = 6
|
||||
|
||||
PF_PASS = 0
|
||||
PF_RDR = 8
|
||||
|
||||
PF_OUT = 2
|
||||
|
||||
_pf_fd = None
|
||||
|
||||
def pf_get_dev():
|
||||
global _pf_fd
|
||||
if _pf_fd is None:
|
||||
_pf_fd = os.open('/dev/pf', os.O_RDWR)
|
||||
|
||||
return _pf_fd
|
||||
|
||||
def pf_query_nat(family, proto, src_ip, src_port, dst_ip, dst_port):
|
||||
[proto, family, src_port, dst_port] = [int(v) for v in [proto, family, src_port, dst_port]]
|
||||
|
||||
length = 4 if family == socket.AF_INET else 16
|
||||
|
||||
pnl = pfioc_natlook()
|
||||
pnl.proto = proto
|
||||
pnl.direction = PF_OUT
|
||||
pnl.af = family
|
||||
memmove(addressof(pnl.saddr), socket.inet_pton(pnl.af, src_ip), length)
|
||||
pnl.sxport.port = socket.htons(src_port)
|
||||
memmove(addressof(pnl.daddr), socket.inet_pton(pnl.af, dst_ip), length)
|
||||
pnl.dxport.port = socket.htons(dst_port)
|
||||
|
||||
ioctl(pf_get_dev(), DIOCNATLOOK, (c_char * sizeof(pnl)).from_address(addressof(pnl)))
|
||||
|
||||
ip = socket.inet_ntop(pnl.af, (c_char * length).from_address(addressof(pnl.rdaddr)))
|
||||
port = socket.ntohs(pnl.rdxport.port)
|
||||
return (ip, port)
|
||||
|
||||
def pf_add_anchor_rule(type, name):
|
||||
ACTION_OFFSET = 0
|
||||
POOL_TICKET_OFFSET = 8
|
||||
ANCHOR_CALL_OFFSET = 1040
|
||||
RULE_ACTION_OFFSET = 3068
|
||||
|
||||
pr = pfioc_rule()
|
||||
ppa = pfioc_pooladdr()
|
||||
|
||||
ioctl(pf_get_dev(), DIOCBEGINADDRS, ppa)
|
||||
|
||||
memmove(addressof(pr) + POOL_TICKET_OFFSET, ppa[4:8], 4) #pool_ticket
|
||||
memmove(addressof(pr) + ANCHOR_CALL_OFFSET, name, min(MAXPATHLEN, len(name))) #anchor_call = name
|
||||
memmove(addressof(pr) + RULE_ACTION_OFFSET, struct.pack('I', type), 4) #rule.action = type
|
||||
|
||||
memmove(addressof(pr) + ACTION_OFFSET, struct.pack('I', PF_CHANGE_GET_TICKET), 4) #action = PF_CHANGE_GET_TICKET
|
||||
ioctl(pf_get_dev(), DIOCCHANGERULE, pr)
|
||||
|
||||
memmove(addressof(pr) + ACTION_OFFSET, struct.pack('I', PF_CHANGE_ADD_TAIL), 4) #action = PF_CHANGE_ADD_TAIL
|
||||
ioctl(pf_get_dev(), DIOCCHANGERULE, pr)
|
||||
|
||||
|
||||
# This is some voodoo for setting up the kernel's transparent
|
||||
# proxying stuff. If subnets is empty, we just delete our sshuttle rules;
|
||||
# otherwise we delete them, then recreate them from scratch.
|
||||
#
|
||||
# This code is supposed to clean up after itself by deleting its rules on
|
||||
# exit. In case that fails, it's not the end of the world; future runs will
|
||||
# supersede it in the transproxy list, at least, so the leftover rules
|
||||
# are hopefully harmless.
|
||||
def main(port_v6, port_v4, dnsport_v6, dnsport_v4, method, udp, syslog):
|
||||
assert(port_v6 >= 0)
|
||||
assert(port_v6 <= 65535)
|
||||
assert(port_v4 >= 0)
|
||||
assert(port_v4 <= 65535)
|
||||
assert(dnsport_v6 >= 0)
|
||||
assert(dnsport_v6 <= 65535)
|
||||
assert(dnsport_v4 >= 0)
|
||||
assert(dnsport_v4 <= 65535)
|
||||
|
||||
if os.getuid() != 0:
|
||||
raise Fatal('you must be root (or enable su/sudo) to set the firewall')
|
||||
|
||||
if method == "auto":
|
||||
if program_exists('ipfw'):
|
||||
method = "ipfw"
|
||||
elif program_exists('iptables'):
|
||||
method = "nat"
|
||||
elif program_exists('pfctl'):
|
||||
method = "pf"
|
||||
else:
|
||||
raise Fatal("can't find either ipfw, iptables or pfctl; check your PATH")
|
||||
|
||||
if method == "nat":
|
||||
do_it = do_iptables_nat
|
||||
elif method == "tproxy":
|
||||
do_it = do_iptables_tproxy
|
||||
elif method == "ipfw":
|
||||
do_it = do_ipfw
|
||||
elif method == "pf":
|
||||
do_it = do_pf
|
||||
else:
|
||||
raise Exception('Unknown method "%s"' % method)
|
||||
|
||||
# because of limitations of the 'su' command, the *real* stdin/stdout
|
||||
# are both attached to stdout initially. Clone stdout into stdin so we
|
||||
# can read from it.
|
||||
os.dup2(1, 0)
|
||||
|
||||
if syslog:
|
||||
ssyslog.start_syslog()
|
||||
ssyslog.stderr_to_syslog()
|
||||
|
||||
debug1('firewall manager ready method %s.\n' % method)
|
||||
sys.stdout.write('READY %s\n' % method)
|
||||
sys.stdout.flush()
|
||||
|
||||
# don't disappear if our controlling terminal or stdout/stderr
|
||||
# disappears; we still have to clean up.
|
||||
signal.signal(signal.SIGHUP, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGTERM, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
|
||||
# ctrl-c shouldn't be passed along to me. When the main sshuttle dies,
|
||||
# I'll die automatically.
|
||||
os.setsid()
|
||||
|
||||
# we wait until we get some input before creating the rules. That way,
|
||||
# sshuttle can launch us as early as possible (and get sudo password
|
||||
# authentication as early in the startup process as possible).
|
||||
line = sys.stdin.readline(128)
|
||||
if not line:
|
||||
return # parent died; nothing to do
|
||||
|
||||
subnets = []
|
||||
if line != 'ROUTES\n':
|
||||
raise Fatal('firewall: expected ROUTES but got %r' % line)
|
||||
while 1:
|
||||
line = sys.stdin.readline(128)
|
||||
if not line:
|
||||
raise Fatal('firewall: expected route but got %r' % line)
|
||||
elif line == 'GO\n':
|
||||
break
|
||||
try:
|
||||
(family, width, exclude, ip) = line.strip().split(',', 3)
|
||||
except:
|
||||
raise Fatal('firewall: expected route or GO but got %r' % line)
|
||||
subnets.append((int(family), int(width), bool(int(exclude)), ip))
|
||||
|
||||
try:
|
||||
if line:
|
||||
debug1('firewall manager: starting transproxy.\n')
|
||||
|
||||
subnets_v6 = filter(lambda i: i[0] == socket.AF_INET6, subnets)
|
||||
if port_v6:
|
||||
do_wait = do_it(
|
||||
port_v6, dnsport_v6, socket.AF_INET6, subnets_v6, udp)
|
||||
elif len(subnets_v6) > 0:
|
||||
debug1("IPv6 subnets defined but IPv6 disabled\n")
|
||||
|
||||
subnets_v4 = filter(lambda i: i[0] == socket.AF_INET, subnets)
|
||||
if port_v4:
|
||||
do_wait = do_it(
|
||||
port_v4, dnsport_v4, socket.AF_INET, subnets_v4, udp)
|
||||
elif len(subnets_v4) > 0:
|
||||
debug1('IPv4 subnets defined but IPv4 disabled\n')
|
||||
|
||||
sys.stdout.write('STARTED\n')
|
||||
|
||||
try:
|
||||
sys.stdout.flush()
|
||||
except IOError:
|
||||
# the parent process died for some reason; he's surely been loud
|
||||
# enough, so no reason to report another error
|
||||
return
|
||||
|
||||
# Now we wait until EOF or any other kind of exception. We need
|
||||
# to stay running so that we don't need a *second* password
|
||||
# authentication at shutdown time - that cleanup is important!
|
||||
while 1:
|
||||
if do_wait:
|
||||
do_wait()
|
||||
line = sys.stdin.readline(128)
|
||||
if line.startswith('HOST '):
|
||||
(name, ip) = line[5:].strip().split(',', 1)
|
||||
hostmap[name] = ip
|
||||
rewrite_etc_hosts(port_v6 or port_v4)
|
||||
elif line.startswith('QUERY_PF_NAT '):
|
||||
try:
|
||||
dst = pf_query_nat(*(line[13:].split(',')))
|
||||
sys.stdout.write('QUERY_PF_NAT_SUCCESS %s,%r\n' % dst)
|
||||
except IOError, e:
|
||||
sys.stdout.write('QUERY_PF_NAT_FAILURE %s\n' % e)
|
||||
|
||||
sys.stdout.flush()
|
||||
elif line:
|
||||
raise Fatal('expected EOF, got %r' % line)
|
||||
else:
|
||||
break
|
||||
finally:
|
||||
try:
|
||||
debug1('firewall manager: undoing changes.\n')
|
||||
except:
|
||||
pass
|
||||
if port_v6:
|
||||
do_it(port_v6, 0, socket.AF_INET6, [], udp)
|
||||
if port_v4:
|
||||
do_it(port_v4, 0, socket.AF_INET, [], udp)
|
||||
restore_etc_hosts(port_v6 or port_v4)
|
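Aside (not part of the diff): an illustrative transcript of the line protocol firewall main() reads on stdin, as implemented above: a ROUTES header, one 'family,width,exclude,ip' line per subnet, GO, and then optional HOST lines while running. The subnet and host values are made up.

example_stdin = (
    'ROUTES\n'
    '2,24,0,192.168.1.0\n'      # AF_INET (2), /24, include
    '2,32,1,192.168.1.10\n'     # AF_INET (2), /32, exclude
    'GO\n'
    'HOST fileserver,192.168.1.20\n'
)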
91
src/helpers.py
Normal file
@ -0,0 +1,91 @@
|
||||
import sys
|
||||
import socket
|
||||
import errno
|
||||
|
||||
logprefix = ''
|
||||
verbose = 0
|
||||
|
||||
|
||||
def log(s):
|
||||
try:
|
||||
sys.stdout.flush()
|
||||
sys.stderr.write(logprefix + s)
|
||||
sys.stderr.flush()
|
||||
except IOError:
|
||||
# this could happen if stderr gets forcibly disconnected, eg. because
|
||||
# our tty closes. That sucks, but it's no reason to abort the program.
|
||||
pass
|
||||
|
||||
|
||||
def debug1(s):
|
||||
if verbose >= 1:
|
||||
log(s)
|
||||
|
||||
|
||||
def debug2(s):
|
||||
if verbose >= 2:
|
||||
log(s)
|
||||
|
||||
|
||||
def debug3(s):
|
||||
if verbose >= 3:
|
||||
log(s)
|
||||
|
||||
|
||||
class Fatal(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def list_contains_any(l, sub):
|
||||
for i in sub:
|
||||
if i in l:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def resolvconf_nameservers():
|
||||
l = []
|
||||
for line in open('/etc/resolv.conf'):
|
||||
words = line.lower().split()
|
||||
if len(words) >= 2 and words[0] == 'nameserver':
|
||||
if ':' in words[1]:
|
||||
l.append((socket.AF_INET6, words[1]))
|
||||
else:
|
||||
l.append((socket.AF_INET, words[1]))
|
||||
return l
|
||||
|
||||
|
||||
def resolvconf_random_nameserver():
|
||||
l = resolvconf_nameservers()
|
||||
if l:
|
||||
if len(l) > 1:
|
||||
# don't import this unless we really need it
|
||||
import random
|
||||
random.shuffle(l)
|
||||
return l[0]
|
||||
else:
|
||||
return (socket.AF_INET, '127.0.0.1')
|
||||
|
||||
|
||||
def islocal(ip, family):
|
||||
sock = socket.socket(family)
|
||||
try:
|
||||
try:
|
||||
sock.bind((ip, 0))
|
||||
except socket.error, e:
|
||||
if e.args[0] == errno.EADDRNOTAVAIL:
|
||||
return False # not a local IP
|
||||
else:
|
||||
raise
|
||||
finally:
|
||||
sock.close()
|
||||
return True # it's a local IP, or there would have been an error
|
||||
|
||||
|
||||
def family_to_string(family):
|
||||
if family == socket.AF_INET6:
|
||||
return "AF_INET6"
|
||||
elif family == socket.AF_INET:
|
||||
return "AF_INET"
|
||||
else:
|
||||
return str(family)
|
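Aside (not part of the diff): a quick usage sketch for a few of the helpers above; the addresses are examples only.

import socket

print(islocal('127.0.0.1', socket.AF_INET))      # True: we can bind this address
print(islocal('203.0.113.7', socket.AF_INET))    # False on most machines: bind() fails
print(family_to_string(socket.AF_INET6))         # 'AF_INET6'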
287
src/hostwatch.py
Normal file
@ -0,0 +1,287 @@
|
||||
import time
|
||||
import socket
|
||||
import re
|
||||
import select
|
||||
import errno
|
||||
import os
|
||||
import sys
|
||||
if not globals().get('skip_imports'):
|
||||
import compat.ssubprocess as ssubprocess
|
||||
import helpers
|
||||
from helpers import log, debug1, debug2, debug3
|
||||
|
||||
POLL_TIME = 60 * 15
|
||||
NETSTAT_POLL_TIME = 30
|
||||
CACHEFILE = os.path.expanduser('~/.sshuttle.hosts')
|
||||
|
||||
|
||||
_nmb_ok = True
|
||||
_smb_ok = True
|
||||
hostnames = {}
|
||||
queue = {}
|
||||
try:
|
||||
null = open('/dev/null', 'wb')
|
||||
except IOError, e:
|
||||
log('warning: %s\n' % e)
|
||||
null = os.popen("sh -c 'while read x; do :; done'", 'wb', 4096)
|
||||
|
||||
|
||||
def _is_ip(s):
|
||||
return re.match(r'\d+\.\d+\.\d+\.\d+$', s)
|
||||
|
||||
|
||||
def write_host_cache():
|
||||
tmpname = '%s.%d.tmp' % (CACHEFILE, os.getpid())
|
||||
try:
|
||||
f = open(tmpname, 'wb')
|
||||
for name, ip in sorted(hostnames.items()):
|
||||
f.write('%s,%s\n' % (name, ip))
|
||||
f.close()
|
||||
os.rename(tmpname, CACHEFILE)
|
||||
finally:
|
||||
try:
|
||||
os.unlink(tmpname)
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
def read_host_cache():
|
||||
try:
|
||||
f = open(CACHEFILE)
|
||||
except IOError, e:
|
||||
if e.errno == errno.ENOENT:
|
||||
return
|
||||
else:
|
||||
raise
|
||||
for line in f:
|
||||
words = line.strip().split(',')
|
||||
if len(words) == 2:
|
||||
(name, ip) = words
|
||||
name = re.sub(r'[^-\w]', '-', name).strip()
|
||||
ip = re.sub(r'[^0-9.]', '', ip).strip()
|
||||
if name and ip:
|
||||
found_host(name, ip)
|
||||
|
||||
|
||||
def found_host(hostname, ip):
|
||||
hostname = re.sub(r'\..*', '', hostname)
|
||||
hostname = re.sub(r'[^-\w]', '_', hostname)
|
||||
if (ip.startswith('127.') or ip.startswith('255.')
|
||||
or hostname == 'localhost'):
|
||||
return
|
||||
oldip = hostnames.get(hostname)
|
||||
if oldip != ip:
|
||||
hostnames[hostname] = ip
|
||||
debug1('Found: %s: %s\n' % (hostname, ip))
|
||||
sys.stdout.write('%s,%s\n' % (hostname, ip))
|
||||
write_host_cache()
|
||||
|
||||
|
||||
def _check_etc_hosts():
|
||||
debug2(' > hosts\n')
|
||||
for line in open('/etc/hosts'):
|
||||
line = re.sub(r'#.*', '', line)
|
||||
words = line.strip().split()
|
||||
if not words:
|
||||
continue
|
||||
ip = words[0]
|
||||
names = words[1:]
|
||||
if _is_ip(ip):
|
||||
debug3('< %s %r\n' % (ip, names))
|
||||
for n in names:
|
||||
check_host(n)
|
||||
found_host(n, ip)
|
||||
|
||||
|
||||
def _check_revdns(ip):
|
||||
debug2(' > rev: %s\n' % ip)
|
||||
try:
|
||||
r = socket.gethostbyaddr(ip)
|
||||
debug3('< %s\n' % r[0])
|
||||
check_host(r[0])
|
||||
found_host(r[0], ip)
|
||||
except socket.herror:
|
||||
pass
|
||||
|
||||
|
||||
def _check_dns(hostname):
|
||||
debug2(' > dns: %s\n' % hostname)
|
||||
try:
|
||||
ip = socket.gethostbyname(hostname)
|
||||
debug3('< %s\n' % ip)
|
||||
check_host(ip)
|
||||
found_host(hostname, ip)
|
||||
except socket.gaierror:
|
||||
pass
|
||||
|
||||
|
||||
def _check_netstat():
|
||||
debug2(' > netstat\n')
|
||||
argv = ['netstat', '-n']
|
||||
try:
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
|
||||
content = p.stdout.read()
|
||||
p.wait()
|
||||
except OSError, e:
|
||||
log('%r failed: %r\n' % (argv, e))
|
||||
return
|
||||
|
||||
for ip in re.findall(r'\d+\.\d+\.\d+\.\d+', content):
|
||||
debug3('< %s\n' % ip)
|
||||
check_host(ip)
|
||||
|
||||
|
||||
def _check_smb(hostname):
|
||||
return
|
||||
global _smb_ok
|
||||
if not _smb_ok:
|
||||
return
|
||||
argv = ['smbclient', '-U', '%', '-L', hostname]
|
||||
debug2(' > smb: %s\n' % hostname)
|
||||
try:
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
|
||||
lines = p.stdout.readlines()
|
||||
p.wait()
|
||||
except OSError, e:
|
||||
log('%r failed: %r\n' % (argv, e))
|
||||
_smb_ok = False
|
||||
return
|
||||
|
||||
lines.reverse()
|
||||
|
||||
# junk at top
|
||||
while lines:
|
||||
line = lines.pop().strip()
|
||||
if re.match(r'Server\s+', line):
|
||||
break
|
||||
|
||||
# server list section:
|
||||
# Server Comment
|
||||
# ------ -------
|
||||
while lines:
|
||||
line = lines.pop().strip()
|
||||
if not line or re.match(r'-+\s+-+', line):
|
||||
continue
|
||||
if re.match(r'Workgroup\s+Master', line):
|
||||
break
|
||||
words = line.split()
|
||||
hostname = words[0].lower()
|
||||
debug3('< %s\n' % hostname)
|
||||
check_host(hostname)
|
||||
|
||||
# workgroup list section:
|
||||
# Workgroup Master
|
||||
# --------- ------
|
||||
while lines:
|
||||
line = lines.pop().strip()
|
||||
if re.match(r'-+\s+', line):
|
||||
continue
|
||||
if not line:
|
||||
break
|
||||
words = line.split()
|
||||
(workgroup, hostname) = (words[0].lower(), words[1].lower())
|
||||
debug3('< group(%s) -> %s\n' % (workgroup, hostname))
|
||||
check_host(hostname)
|
||||
check_workgroup(workgroup)
|
||||
|
||||
if lines:
|
||||
assert(0)
|
||||
|
||||
|
||||
def _check_nmb(hostname, is_workgroup, is_master):
|
||||
return
|
||||
global _nmb_ok
|
||||
if not _nmb_ok:
|
||||
return
|
||||
argv = ['nmblookup'] + ['-M'] * is_master + ['--', hostname]
|
||||
debug2(' > n%d%d: %s\n' % (is_workgroup, is_master, hostname))
|
||||
try:
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
|
||||
lines = p.stdout.readlines()
|
||||
rv = p.wait()
|
||||
except OSError, e:
|
||||
log('%r failed: %r\n' % (argv, e))
|
||||
_nmb_ok = False
|
||||
return
|
||||
if rv:
|
||||
log('%r returned %d\n' % (argv, rv))
|
||||
return
|
||||
for line in lines:
|
||||
m = re.match(r'(\d+\.\d+\.\d+\.\d+) (\w+)<\w\w>\n', line)
|
||||
if m:
|
||||
g = m.groups()
|
||||
(ip, name) = (g[0], g[1].lower())
|
||||
debug3('< %s -> %s\n' % (name, ip))
|
||||
if is_workgroup:
|
||||
_enqueue(_check_smb, ip)
|
||||
else:
|
||||
found_host(name, ip)
|
||||
check_host(name)
|
||||
|
||||
|
||||
def check_host(hostname):
|
||||
if _is_ip(hostname):
|
||||
_enqueue(_check_revdns, hostname)
|
||||
else:
|
||||
_enqueue(_check_dns, hostname)
|
||||
_enqueue(_check_smb, hostname)
|
||||
_enqueue(_check_nmb, hostname, False, False)
|
||||
|
||||
|
||||
def check_workgroup(hostname):
|
||||
_enqueue(_check_nmb, hostname, True, False)
|
||||
_enqueue(_check_nmb, hostname, True, True)
|
||||
|
||||
|
||||
def _enqueue(op, *args):
|
||||
t = (op, args)
|
||||
if queue.get(t) is None:
|
||||
queue[t] = 0
|
||||
|
||||
|
||||
def _stdin_still_ok(timeout):
|
||||
r, w, x = select.select([sys.stdin.fileno()], [], [], timeout)
|
||||
if r:
|
||||
b = os.read(sys.stdin.fileno(), 4096)
|
||||
if not b:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def hw_main(seed_hosts):
|
||||
if helpers.verbose >= 2:
|
||||
helpers.logprefix = 'HH: '
|
||||
else:
|
||||
helpers.logprefix = 'hostwatch: '
|
||||
|
||||
read_host_cache()
|
||||
|
||||
_enqueue(_check_etc_hosts)
|
||||
_enqueue(_check_netstat)
|
||||
check_host('localhost')
|
||||
check_host(socket.gethostname())
|
||||
check_workgroup('workgroup')
|
||||
check_workgroup('-')
|
||||
for h in seed_hosts:
|
||||
check_host(h)
|
||||
|
||||
while 1:
|
||||
now = time.time()
|
||||
for t, last_polled in queue.items():
|
||||
(op, args) = t
|
||||
if not _stdin_still_ok(0):
|
||||
break
|
||||
maxtime = POLL_TIME
|
||||
if op == _check_netstat:
|
||||
maxtime = NETSTAT_POLL_TIME
|
||||
if now - last_polled > maxtime:
|
||||
queue[t] = time.time()
|
||||
op(*args)
|
||||
try:
|
||||
sys.stdout.flush()
|
||||
except IOError:
|
||||
break
|
||||
|
||||
# FIXME: use a smarter timeout based on oldest last_polled
|
||||
if not _stdin_still_ok(1):
|
||||
break
|
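Aside (not part of the diff): a small illustration of the name normalisation found_host() applies above before caching and reporting a host. The hostname is made up.

import re

name = 'web-01.example.com'
short = re.sub(r'[^-\w]', '_', re.sub(r'\..*', '', name))
print(short)    # prints 'web-01': the domain part is dropped, odd characters become '_'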
230
src/main.py
Normal file
@ -0,0 +1,230 @@
|
||||
import sys
|
||||
import re
|
||||
import socket
|
||||
import helpers
|
||||
import options
|
||||
import client
|
||||
import server
|
||||
import firewall
|
||||
import hostwatch
|
||||
from helpers import log, Fatal
|
||||
|
||||
|
||||
# 1.2.3.4/5 or just 1.2.3.4
|
||||
def parse_subnet4(s):
|
||||
m = re.match(r'(\d+)(?:\.(\d+)\.(\d+)\.(\d+))?(?:/(\d+))?$', s)
|
||||
if not m:
|
||||
raise Fatal('%r is not a valid IP subnet format' % s)
|
||||
(a, b, c, d, width) = m.groups()
|
||||
(a, b, c, d) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0))
|
||||
if width is None:
|
||||
width = 32
|
||||
else:
|
||||
width = int(width)
|
||||
if a > 255 or b > 255 or c > 255 or d > 255:
|
||||
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a, b, c, d))
|
||||
if width > 32:
|
||||
raise Fatal('*/%d is greater than the maximum of 32' % width)
|
||||
return(socket.AF_INET, '%d.%d.%d.%d' % (a, b, c, d), width)
|
||||
|
||||
|
||||
# 1:2::3/64 or just 1:2::3
|
||||
def parse_subnet6(s):
|
||||
m = re.match(r'(?:([a-fA-F\d:]+))?(?:/(\d+))?$', s)
|
||||
if not m:
|
||||
raise Fatal('%r is not a valid IP subnet format' % s)
|
||||
(net, width) = m.groups()
|
||||
if width is None:
|
||||
width = 128
|
||||
else:
|
||||
width = int(width)
|
||||
if width > 128:
|
||||
raise Fatal('*/%d is greater than the maximum of 128' % width)
|
||||
return(socket.AF_INET6, net, width)
|
||||
|
||||
|
||||
# Subnet file, supporting empty lines and hash-started comment lines
|
||||
def parse_subnet_file(s):
|
||||
try:
|
||||
handle = open(s, 'r')
|
||||
except OSError:
|
||||
raise Fatal('Unable to open subnet file: %s' % s)
|
||||
|
||||
raw_config_lines = handle.readlines()
|
||||
config_lines = []
|
||||
for line_no, line in enumerate(raw_config_lines):
|
||||
line = line.strip()
|
||||
if len(line) == 0:
|
||||
continue
|
||||
if line[0] == '#':
|
||||
continue
|
||||
config_lines.append(line)
|
||||
|
||||
return config_lines
|
||||
|
||||
|
||||
# list of:
|
||||
# 1.2.3.4/5 or just 1.2.3.4
|
||||
# 1:2::3/64 or just 1:2::3
|
||||
def parse_subnets(subnets_str):
|
||||
subnets = []
|
||||
for s in subnets_str:
|
||||
if ':' in s:
|
||||
subnet = parse_subnet6(s)
|
||||
else:
|
||||
subnet = parse_subnet4(s)
|
||||
subnets.append(subnet)
|
||||
return subnets
|
||||
|
||||
|
||||
# 1.2.3.4:567 or just 1.2.3.4 or just 567
|
||||
def parse_ipport4(s):
|
||||
s = str(s)
|
||||
m = re.match(r'(?:(\d+)\.(\d+)\.(\d+)\.(\d+))?(?::)?(?:(\d+))?$', s)
|
||||
if not m:
|
||||
raise Fatal('%r is not a valid IP:port format' % s)
|
||||
(a, b, c, d, port) = m.groups()
|
||||
(a, b, c, d, port) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0),
|
||||
int(port or 0))
|
||||
if a > 255 or b > 255 or c > 255 or d > 255:
|
||||
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a, b, c, d))
|
||||
if port > 65535:
|
||||
raise Fatal('*:%d is greater than the maximum of 65535' % port)
|
||||
if a is None:
|
||||
a = b = c = d = 0
|
||||
return ('%d.%d.%d.%d' % (a, b, c, d), port)
|
||||
|
||||
|
||||
# [1:2::3]:456 or [1:2::3] or 456
|
||||
def parse_ipport6(s):
|
||||
s = str(s)
|
||||
m = re.match(r'(?:\[([^]]*)])?(?::)?(?:(\d+))?$', s)
|
||||
if not m:
|
||||
raise Fatal('%s is not a valid IP:port format' % s)
|
||||
(ip, port) = m.groups()
|
||||
(ip, port) = (ip or '::', int(port or 0))
|
||||
return (ip, port)
|
||||
|
||||
|
||||
optspec = """
|
||||
sshuttle [-l [ip:]port] [-r [username@]sshserver[:port]] <subnets...>
|
||||
sshuttle --server
|
||||
sshuttle --firewall <port> <subnets...>
|
||||
sshuttle --hostwatch
|
||||
--
|
||||
l,listen= transproxy to this ip address and port number
|
||||
H,auto-hosts scan for remote hostnames and update local /etc/hosts
|
||||
N,auto-nets automatically determine subnets to route
|
||||
dns capture local DNS requests and forward to the remote DNS server
|
||||
method= auto, nat, tproxy, pf or ipfw
|
||||
python= path to python interpreter on the remote server
|
||||
r,remote= ssh hostname (and optional username) of remote sshuttle server
|
||||
x,exclude= exclude this subnet (can be used more than once)
|
||||
X,exclude-from= exclude the subnets in a file (whitespace separated)
|
||||
v,verbose increase debug message verbosity
|
||||
e,ssh-cmd= the command to use to connect to the remote [ssh]
|
||||
seed-hosts= with -H, use these hostnames for initial scan (comma-separated)
|
||||
no-latency-control sacrifice latency to improve bandwidth benchmarks
|
||||
wrap= restart counting channel numbers after this number (for testing)
|
||||
D,daemon run in the background as a daemon
|
||||
s,subnets= file where the subnets are stored, instead of on the command line
|
||||
syslog send log messages to syslog (default if you use --daemon)
|
||||
pidfile= pidfile name (only if using --daemon) [./sshuttle.pid]
|
||||
server (internal use only)
|
||||
firewall (internal use only)
|
||||
hostwatch (internal use only)
|
||||
"""
|
||||
o = options.Options(optspec)
|
||||
(opt, flags, extra) = o.parse(sys.argv[2:])
|
||||
|
||||
if opt.daemon:
|
||||
opt.syslog = 1
|
||||
if opt.wrap:
|
||||
import ssnet
|
||||
ssnet.MAX_CHANNEL = int(opt.wrap)
|
||||
helpers.verbose = opt.verbose
|
||||
|
||||
try:
|
||||
if opt.server:
|
||||
if len(extra) != 0:
|
||||
o.fatal('no arguments expected')
|
||||
server.latency_control = opt.latency_control
|
||||
sys.exit(server.main())
|
||||
elif opt.firewall:
|
||||
if len(extra) != 6:
|
||||
o.fatal('exactly six arguments expected')
|
||||
sys.exit(firewall.main(int(extra[0]), int(extra[1]),
|
||||
int(extra[2]), int(extra[3]),
|
||||
extra[4], int(extra[5]), opt.syslog))
|
||||
elif opt.hostwatch:
|
||||
sys.exit(hostwatch.hw_main(extra))
|
||||
else:
|
||||
if len(extra) < 1 and not opt.auto_nets and not opt.subnets:
|
||||
o.fatal('at least one subnet, subnet file, or -N expected')
|
||||
includes = extra
|
||||
excludes = ['127.0.0.0/8']
|
||||
for k, v in flags:
|
||||
if k in ('-x', '--exclude'):
|
||||
excludes.append(v)
|
||||
if k in ('-X', '--exclude-from'):
|
||||
excludes += open(v).read().split()
|
||||
remotename = opt.remote
|
||||
if remotename == '' or remotename == '-':
|
||||
remotename = None
|
||||
if opt.seed_hosts and not opt.auto_hosts:
|
||||
o.fatal('--seed-hosts only works if you also use -H')
|
||||
if opt.seed_hosts:
|
||||
sh = re.split(r'[\s,]+', (opt.seed_hosts or "").strip())
|
||||
elif opt.auto_hosts:
|
||||
sh = []
|
||||
else:
|
||||
sh = None
|
||||
if opt.subnets:
|
||||
includes = parse_subnet_file(opt.subnets)
|
||||
if not opt.method:
|
||||
method = "auto"
|
||||
elif opt.method in ["auto", "nat", "tproxy", "ipfw", "pf"]:
|
||||
method = opt.method
|
||||
else:
|
||||
o.fatal("method %s not supported" % opt.method)
|
||||
if not opt.listen:
|
||||
if opt.method == "tproxy":
|
||||
ipport_v6 = parse_ipport6('[::1]:0')
|
||||
else:
|
||||
ipport_v6 = None
|
||||
ipport_v4 = parse_ipport4('127.0.0.1:0')
|
||||
else:
|
||||
ipport_v6 = None
|
||||
ipport_v4 = None
|
||||
list = opt.listen.split(",")
|
||||
for ip in list:
|
||||
if '[' in ip and ']' in ip and opt.method == "tproxy":
|
||||
ipport_v6 = parse_ipport6(ip)
|
||||
else:
|
||||
ipport_v4 = parse_ipport4(ip)
|
||||
return_code = client.main(ipport_v6, ipport_v4,
|
||||
opt.ssh_cmd,
|
||||
remotename,
|
||||
opt.python,
|
||||
opt.latency_control,
|
||||
opt.dns,
|
||||
method,
|
||||
sh,
|
||||
opt.auto_nets,
|
||||
parse_subnets(includes),
|
||||
parse_subnets(excludes),
|
||||
opt.syslog, opt.daemon, opt.pidfile)
|
||||
|
||||
if return_code == 0:
|
||||
log('Normal exit code, exiting...')
|
||||
else:
|
||||
log('Abnormal exit code %d detected, failing...' % return_code)
|
||||
sys.exit(return_code)
|
||||
|
||||
except Fatal, e:
|
||||
log('fatal: %s\n' % e)
|
||||
sys.exit(99)
|
||||
except KeyboardInterrupt:
|
||||
log('\n')
|
||||
log('Keyboard interrupt: exiting.\n')
|
||||
sys.exit(1)
|
215  src/options.py (new file)
@@ -0,0 +1,215 @@
|
||||
"""Command-line options parser.
|
||||
With the help of an options spec string, easily parse command-line options.
|
||||
"""
|
||||
import sys
|
||||
import os
|
||||
import textwrap
|
||||
import getopt
|
||||
import re
|
||||
import struct
|
||||
|
||||
|
||||
class OptDict:
|
||||
|
||||
def __init__(self):
|
||||
self._opts = {}
|
||||
|
||||
def __setitem__(self, k, v):
|
||||
if k.startswith('no-') or k.startswith('no_'):
|
||||
k = k[3:]
|
||||
v = not v
|
||||
self._opts[k] = v
|
||||
|
||||
def __getitem__(self, k):
|
||||
if k.startswith('no-') or k.startswith('no_'):
|
||||
return not self._opts[k[3:]]
|
||||
return self._opts[k]
|
||||
|
||||
def __getattr__(self, k):
|
||||
return self[k]
|
||||
|
||||
|
||||
def _default_onabort(msg):
|
||||
sys.exit(97)
|
||||
|
||||
|
||||
def _intify(v):
|
||||
try:
|
||||
vv = int(v or '')
|
||||
if str(vv) == v:
|
||||
return vv
|
||||
except ValueError:
|
||||
pass
|
||||
return v
|
||||
|
||||
|
||||
def _atoi(v):
|
||||
try:
|
||||
return int(v or 0)
|
||||
except ValueError:
|
||||
return 0
|
||||
|
||||
|
||||
def _remove_negative_kv(k, v):
|
||||
if k.startswith('no-') or k.startswith('no_'):
|
||||
return k[3:], not v
|
||||
return k, v
|
||||
|
||||
|
||||
def _remove_negative_k(k):
|
||||
return _remove_negative_kv(k, None)[0]
|
||||
|
||||
|
||||
def _tty_width():
|
||||
if not hasattr(sys.stderr, "fileno"):
|
||||
return _atoi(os.environ.get('WIDTH')) or 70
|
||||
s = struct.pack("HHHH", 0, 0, 0, 0)
|
||||
try:
|
||||
import fcntl
|
||||
import termios
|
||||
s = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, s)
|
||||
except (IOError, ImportError):
|
||||
return _atoi(os.environ.get('WIDTH')) or 70
|
||||
(ysize, xsize, ypix, xpix) = struct.unpack('HHHH', s)
|
||||
return xsize or 70
|
||||
|
||||
|
||||
class Options:
|
||||
|
||||
"""Option parser.
|
||||
When constructed, the optspec string is mandatory: it specifies the
command synopsis plus the option flags and their descriptions.
|
||||
For more information about optspecs, consult the bup-options(1) man page.
|
||||
|
||||
Two optional arguments specify an alternative parsing function and an
|
||||
alternative behaviour on abort (after having output the usage string).
|
||||
|
||||
By default, the parser function is getopt.gnu_getopt, and the abort
|
||||
behaviour is to exit the program.
|
||||
"""
|
||||
|
||||
def __init__(self, optspec, optfunc=getopt.gnu_getopt,
|
||||
onabort=_default_onabort):
|
||||
self.optspec = optspec
|
||||
self._onabort = onabort
|
||||
self.optfunc = optfunc
|
||||
self._aliases = {}
|
||||
self._shortopts = 'h?'
|
||||
self._longopts = ['help']
|
||||
self._hasparms = {}
|
||||
self._defaults = {}
|
||||
self._usagestr = self._gen_usage()
|
||||
|
||||
def _gen_usage(self):
|
||||
out = []
|
||||
lines = self.optspec.strip().split('\n')
|
||||
lines.reverse()
|
||||
first_syn = True
|
||||
while lines:
|
||||
l = lines.pop()
|
||||
if l == '--':
|
||||
break
|
||||
out.append('%s: %s\n' % (first_syn and 'usage' or ' or', l))
|
||||
first_syn = False
|
||||
out.append('\n')
|
||||
last_was_option = False
|
||||
while lines:
|
||||
l = lines.pop()
|
||||
if l.startswith(' '):
|
||||
out.append('%s%s\n' % (last_was_option and '\n' or '',
|
||||
l.lstrip()))
|
||||
last_was_option = False
|
||||
elif l:
|
||||
(flags, extra) = l.split(' ', 1)
|
||||
extra = extra.strip()
|
||||
if flags.endswith('='):
|
||||
flags = flags[:-1]
|
||||
has_parm = 1
|
||||
else:
|
||||
has_parm = 0
|
||||
g = re.search(r'\[([^\]]*)\]$', extra)
|
||||
if g:
|
||||
defval = g.group(1)
|
||||
else:
|
||||
defval = None
|
||||
flagl = flags.split(',')
|
||||
flagl_nice = []
|
||||
for _f in flagl:
|
||||
f, dvi = _remove_negative_kv(_f, _intify(defval))
|
||||
self._aliases[f] = _remove_negative_k(flagl[0])
|
||||
self._hasparms[f] = has_parm
|
||||
self._defaults[f] = dvi
|
||||
if len(f) == 1:
|
||||
self._shortopts += f + (has_parm and ':' or '')
|
||||
flagl_nice.append('-' + f)
|
||||
else:
|
||||
f_nice = re.sub(r'\W', '_', f)
|
||||
self._aliases[f_nice] = _remove_negative_k(flagl[0])
|
||||
self._longopts.append(f + (has_parm and '=' or ''))
|
||||
self._longopts.append('no-' + f)
|
||||
flagl_nice.append('--' + _f)
|
||||
flags_nice = ', '.join(flagl_nice)
|
||||
if has_parm:
|
||||
flags_nice += ' ...'
|
||||
prefix = ' %-20s ' % flags_nice
|
||||
argtext = '\n'.join(textwrap.wrap(extra, width=_tty_width(),
|
||||
initial_indent=prefix,
|
||||
subsequent_indent=' ' * 28))
|
||||
out.append(argtext + '\n')
|
||||
last_was_option = True
|
||||
else:
|
||||
out.append('\n')
|
||||
last_was_option = False
|
||||
return ''.join(out).rstrip() + '\n'
|
||||
|
||||
def usage(self, msg=""):
|
||||
"""Print usage string to stderr and abort."""
|
||||
sys.stderr.write(self._usagestr)
|
||||
e = self._onabort and self._onabort(msg) or None
|
||||
if e:
|
||||
raise e
|
||||
|
||||
def fatal(self, s):
|
||||
"""Print an error message to stderr and abort with usage string."""
|
||||
msg = 'error: %s\n' % s
|
||||
sys.stderr.write(msg)
|
||||
return self.usage(msg)
|
||||
|
||||
def parse(self, args):
|
||||
"""Parse a list of arguments and return (options, flags, extra).
|
||||
|
||||
In the returned tuple, "options" is an OptDict with known options,
|
||||
"flags" is a list of option flags that were used on the command-line,
|
||||
and "extra" is a list of positional arguments.
|
||||
"""
|
||||
try:
|
||||
(flags, extra) = self.optfunc(
|
||||
args, self._shortopts, self._longopts)
|
||||
except getopt.GetoptError, e:
|
||||
self.fatal(e)
|
||||
|
||||
opt = OptDict()
|
||||
|
||||
for k, v in self._defaults.iteritems():
|
||||
k = self._aliases[k]
|
||||
opt[k] = v
|
||||
|
||||
for (k, v) in flags:
|
||||
k = k.lstrip('-')
|
||||
if k in ('h', '?', 'help'):
|
||||
self.usage()
|
||||
if k.startswith('no-'):
|
||||
k = self._aliases[k[3:]]
|
||||
v = 0
|
||||
else:
|
||||
k = self._aliases[k]
|
||||
if not self._hasparms[k]:
|
||||
assert(v == '')
|
||||
v = (opt._opts.get(k) or 0) + 1
|
||||
else:
|
||||
v = _intify(v)
|
||||
opt[k] = v
|
||||
for (f1, f2) in self._aliases.iteritems():
|
||||
opt[f1] = opt._opts.get(f2)
|
||||
return (opt, flags, extra)
|
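A minimal usage sketch for the parser above, mirroring how main.py drives it; the spec and argument values here are made up for illustration (Python 2, like the module itself):

import options

demo_spec = """
demo [options...] <args...>
--
v,verbose  increase verbosity
p,port=    port number to use [8080]
"""

o = options.Options(demo_spec)
(opt, flags, extra) = o.parse(['-v', '--port', '1234', 'leftover'])
print(opt.verbose)   # 1 (flags without '=' are counted)
print(opt.port)      # 1234 (values are int-ified when possible)
print(extra)         # ['leftover']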
335  src/server.py (new file)
@@ -0,0 +1,335 @@
|
||||
import re
|
||||
import struct
|
||||
import socket
|
||||
import traceback
|
||||
import time
|
||||
import sys
|
||||
import os
|
||||
if not globals().get('skip_imports'):
|
||||
import ssnet
|
||||
import helpers
|
||||
import hostwatch
|
||||
import compat.ssubprocess as ssubprocess
|
||||
from ssnet import Handler, Proxy, Mux, MuxWrapper
|
||||
from helpers import log, debug1, debug2, debug3, Fatal, \
|
||||
resolvconf_random_nameserver
|
||||
|
||||
|
||||
if not globals().get('latency_control'):
|
||||
latency_control = None
|
||||
|
||||
|
||||
def _ipmatch(ipstr):
|
||||
if ipstr == 'default':
|
||||
ipstr = '0.0.0.0/0'
|
||||
m = re.match(r'^(\d+(\.\d+(\.\d+(\.\d+)?)?)?)(?:/(\d+))?$', ipstr)
|
||||
if m:
|
||||
g = m.groups()
|
||||
ips = g[0]
|
||||
width = int(g[4] or 32)
|
||||
if g[1] is None:
|
||||
ips += '.0.0.0'
|
||||
width = min(width, 8)
|
||||
elif g[2] is None:
|
||||
ips += '.0.0'
|
||||
width = min(width, 16)
|
||||
elif g[3] is None:
|
||||
ips += '.0'
|
||||
width = min(width, 24)
|
||||
return (struct.unpack('!I', socket.inet_aton(ips))[0], width)
|
||||
|
||||
|
||||
def _ipstr(ip, width):
|
||||
if width >= 32:
|
||||
return ip
|
||||
else:
|
||||
return "%s/%d" % (ip, width)
|
||||
|
||||
|
||||
def _maskbits(netmask):
|
||||
if not netmask:
|
||||
return 32
|
||||
for i in range(32):
|
||||
if netmask[0] & _shl(1, i):
|
||||
return 32 - i
|
||||
return 0
|
||||
|
||||
|
||||
def _shl(n, bits):
|
||||
return n * int(2 ** bits)
|
||||
|
||||
|
||||
def _list_routes():
|
||||
argv = ['netstat', '-rn']
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE)
|
||||
routes = []
|
||||
for line in p.stdout:
|
||||
cols = re.split(r'\s+', line)
|
||||
ipw = _ipmatch(cols[0])
|
||||
if not ipw:
|
||||
continue # some lines won't be parseable; never mind
|
||||
maskw = _ipmatch(cols[2]) # linux only
|
||||
mask = _maskbits(maskw) # returns 32 if maskw is null
|
||||
width = min(ipw[1], mask)
|
||||
ip = ipw[0] & _shl(_shl(1, width) - 1, 32 - width)
|
||||
routes.append(
|
||||
(socket.AF_INET, socket.inet_ntoa(struct.pack('!I', ip)), width))
|
||||
rv = p.wait()
|
||||
if rv != 0:
|
||||
log('WARNING: %r returned %d\n' % (argv, rv))
|
||||
log('WARNING: That prevents --auto-nets from working.\n')
|
||||
return routes
|
||||
|
||||
|
||||
def list_routes():
|
||||
for (family, ip, width) in _list_routes():
|
||||
if not ip.startswith('0.') and not ip.startswith('127.'):
|
||||
yield (family, ip, width)
|
||||
|
||||
|
||||
def _exc_dump():
|
||||
exc_info = sys.exc_info()
|
||||
return ''.join(traceback.format_exception(*exc_info))
|
||||
|
||||
|
||||
def start_hostwatch(seed_hosts):
|
||||
s1, s2 = socket.socketpair()
|
||||
pid = os.fork()
|
||||
if not pid:
|
||||
# child
|
||||
rv = 99
|
||||
try:
|
||||
try:
|
||||
s2.close()
|
||||
os.dup2(s1.fileno(), 1)
|
||||
os.dup2(s1.fileno(), 0)
|
||||
s1.close()
|
||||
rv = hostwatch.hw_main(seed_hosts) or 0
|
||||
except Exception:
|
||||
log('%s\n' % _exc_dump())
|
||||
rv = 98
|
||||
finally:
|
||||
os._exit(rv)
|
||||
s1.close()
|
||||
return pid, s2
|
||||
|
||||
|
||||
class Hostwatch:
|
||||
|
||||
def __init__(self):
|
||||
self.pid = 0
|
||||
self.sock = None
|
||||
|
||||
|
||||
class DnsProxy(Handler):
|
||||
|
||||
def __init__(self, mux, chan, request):
|
||||
# FIXME! IPv4 specific
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
Handler.__init__(self, [sock])
|
||||
self.timeout = time.time() + 30
|
||||
self.mux = mux
|
||||
self.chan = chan
|
||||
self.tries = 0
|
||||
self.peer = None
|
||||
self.request = request
|
||||
self.sock = sock
|
||||
# FIXME! IPv4 specific
|
||||
self.sock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
|
||||
self.try_send()
|
||||
|
||||
def try_send(self):
|
||||
if self.tries >= 3:
|
||||
return
|
||||
self.tries += 1
|
||||
# FIXME! Support IPv6 nameservers
|
||||
self.peer = resolvconf_random_nameserver()[1]
|
||||
self.sock.connect((self.peer, 53))
|
||||
debug2('DNS: sending to %r\n' % self.peer)
|
||||
try:
|
||||
self.sock.send(self.request)
|
||||
except socket.error, e:
|
||||
if e.args[0] in ssnet.NET_ERRS:
|
||||
# might have been spurious; try again.
|
||||
# Note: these errors sometimes are reported by recv(),
|
||||
# and sometimes by send(). We have to catch both.
|
||||
debug2('DNS send to %r: %s\n' % (self.peer, e))
|
||||
self.try_send()
|
||||
return
|
||||
else:
|
||||
log('DNS send to %r: %s\n' % (self.peer, e))
|
||||
return
|
||||
|
||||
def callback(self):
|
||||
try:
|
||||
data = self.sock.recv(4096)
|
||||
except socket.error, e:
|
||||
if e.args[0] in ssnet.NET_ERRS:
|
||||
# might have been spurious; try again.
|
||||
# Note: these errors sometimes are reported by recv(),
|
||||
# and sometimes by send(). We have to catch both.
|
||||
debug2('DNS recv from %r: %s\n' % (self.peer, e))
|
||||
self.try_send()
|
||||
return
|
||||
else:
|
||||
log('DNS recv from %r: %s\n' % (self.peer, e))
|
||||
return
|
||||
debug2('DNS response: %d bytes\n' % len(data))
|
||||
self.mux.send(self.chan, ssnet.CMD_DNS_RESPONSE, data)
|
||||
self.ok = False
|
||||
|
||||
|
||||
class UdpProxy(Handler):
|
||||
|
||||
def __init__(self, mux, chan, family):
|
||||
sock = socket.socket(family, socket.SOCK_DGRAM)
|
||||
Handler.__init__(self, [sock])
|
||||
self.timeout = time.time() + 30
|
||||
self.mux = mux
|
||||
self.chan = chan
|
||||
self.sock = sock
|
||||
if family == socket.AF_INET:
|
||||
self.sock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
|
||||
|
||||
def send(self, dstip, data):
|
||||
debug2('UDP: sending to %r port %d\n' % dstip)
|
||||
try:
|
||||
self.sock.sendto(data, dstip)
|
||||
except socket.error, e:
|
||||
log('UDP send to %r port %d: %s\n' % (dstip[0], dstip[1], e))
|
||||
return
|
||||
|
||||
def callback(self):
|
||||
try:
|
||||
data, peer = self.sock.recvfrom(4096)
|
||||
except socket.error, e:
|
||||
log('UDP recv: %s\n' % e)
|
||||
return
|
||||
debug2('UDP response: %d bytes\n' % len(data))
|
||||
hdr = "%s,%r," % (peer[0], peer[1])
|
||||
self.mux.send(self.chan, ssnet.CMD_UDP_DATA, hdr + data)
|
||||
|
||||
|
||||
def main():
|
||||
if helpers.verbose >= 1:
|
||||
helpers.logprefix = ' s: '
|
||||
else:
|
||||
helpers.logprefix = 'server: '
|
||||
debug1('latency control setting = %r\n' % latency_control)
|
||||
|
||||
routes = list(list_routes())
|
||||
debug1('available routes:\n')
|
||||
for r in routes:
|
||||
debug1(' %d/%s/%d\n' % r)
|
||||
|
||||
# synchronization header
|
||||
sys.stdout.write('\0\0SSHUTTLE0001')
|
||||
sys.stdout.flush()
|
||||
|
||||
handlers = []
|
||||
mux = Mux(socket.fromfd(sys.stdin.fileno(),
|
||||
socket.AF_INET, socket.SOCK_STREAM),
|
||||
socket.fromfd(sys.stdout.fileno(),
|
||||
socket.AF_INET, socket.SOCK_STREAM))
|
||||
handlers.append(mux)
|
||||
routepkt = ''
|
||||
for r in routes:
|
||||
routepkt += '%d,%s,%d\n' % r
|
||||
mux.send(0, ssnet.CMD_ROUTES, routepkt)
|
||||
|
||||
hw = Hostwatch()
|
||||
hw.leftover = ''
|
||||
|
||||
def hostwatch_ready():
|
||||
assert(hw.pid)
|
||||
content = hw.sock.recv(4096)
|
||||
if content:
|
||||
lines = (hw.leftover + content).split('\n')
|
||||
if lines[-1]:
|
||||
# no terminating newline: entry isn't complete yet!
|
||||
hw.leftover = lines.pop()
|
||||
lines.append('')
|
||||
else:
|
||||
hw.leftover = ''
|
||||
mux.send(0, ssnet.CMD_HOST_LIST, '\n'.join(lines))
|
||||
else:
|
||||
raise Fatal('hostwatch process died')
|
||||
|
||||
def got_host_req(data):
|
||||
if not hw.pid:
|
||||
(hw.pid, hw.sock) = start_hostwatch(data.strip().split())
|
||||
handlers.append(Handler(socks=[hw.sock],
|
||||
callback=hostwatch_ready))
|
||||
mux.got_host_req = got_host_req
|
||||
|
||||
def new_channel(channel, data):
|
||||
(family, dstip, dstport) = data.split(',', 2)
|
||||
family = int(family)
|
||||
dstport = int(dstport)
|
||||
outwrap = ssnet.connect_dst(family, dstip, dstport)
|
||||
handlers.append(Proxy(MuxWrapper(mux, channel), outwrap))
|
||||
mux.new_channel = new_channel
|
||||
|
||||
dnshandlers = {}
|
||||
|
||||
def dns_req(channel, data):
|
||||
debug2('Incoming DNS request channel=%d.\n' % channel)
|
||||
h = DnsProxy(mux, channel, data)
|
||||
handlers.append(h)
|
||||
dnshandlers[channel] = h
|
||||
mux.got_dns_req = dns_req
|
||||
|
||||
udphandlers = {}
|
||||
|
||||
def udp_req(channel, cmd, data):
|
||||
debug2('Incoming UDP request channel=%d, cmd=%d\n' % (channel, cmd))
|
||||
if cmd == ssnet.CMD_UDP_DATA:
|
||||
(dstip, dstport, data) = data.split(",", 2)
|
||||
dstport = int(dstport)
|
||||
debug2('is incoming UDP data. %r %d.\n' % (dstip, dstport))
|
||||
h = udphandlers[channel]
|
||||
h.send((dstip, dstport), data)
|
||||
elif cmd == ssnet.CMD_UDP_CLOSE:
|
||||
debug2('is incoming UDP close\n')
|
||||
h = udphandlers[channel]
|
||||
h.ok = False
|
||||
del mux.channels[channel]
|
||||
|
||||
def udp_open(channel, data):
|
||||
debug2('Incoming UDP open.\n')
|
||||
family = int(data)
|
||||
mux.channels[channel] = lambda cmd, data: udp_req(channel, cmd, data)
|
||||
if channel in udphandlers:
|
||||
raise Fatal('UDP connection channel %d already open' % channel)
|
||||
else:
|
||||
h = UdpProxy(mux, channel, family)
|
||||
handlers.append(h)
|
||||
udphandlers[channel] = h
|
||||
mux.got_udp_open = udp_open
|
||||
|
||||
while mux.ok:
|
||||
if hw.pid:
|
||||
assert(hw.pid > 0)
|
||||
(rpid, rv) = os.waitpid(hw.pid, os.WNOHANG)
|
||||
if rpid:
|
||||
raise Fatal(
|
||||
'hostwatch exited unexpectedly: code 0x%04x\n' % rv)
|
||||
|
||||
ssnet.runonce(handlers, mux)
|
||||
if latency_control:
|
||||
mux.check_fullness()
|
||||
mux.callback()
|
||||
|
||||
if dnshandlers:
|
||||
now = time.time()
|
||||
for channel, h in dnshandlers.items():
|
||||
if h.timeout < now or not h.ok:
|
||||
debug3('expiring dnsreqs channel=%d\n' % channel)
|
||||
del dnshandlers[channel]
|
||||
h.ok = False
|
||||
if udphandlers:
|
||||
for channel, h in udphandlers.items():
|
||||
if not h.ok:
|
||||
debug3('expiring UDP channel=%d\n' % channel)
|
||||
del udphandlers[channel]
|
||||
h.ok = False
|
104  src/ssh.py (new file)
@@ -0,0 +1,104 @@
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import zlib
|
||||
import compat.ssubprocess as ssubprocess
|
||||
import helpers
|
||||
from helpers import debug2
|
||||
|
||||
|
||||
def readfile(name):
|
||||
basedir = os.path.dirname(os.path.abspath(sys.argv[0]))
|
||||
path = [basedir] + sys.path
|
||||
for d in path:
|
||||
fullname = os.path.join(d, name)
|
||||
if os.path.exists(fullname):
|
||||
return open(fullname, 'rb').read()
|
||||
raise Exception("can't find file %r in any of %r" % (name, path))
|
||||
|
||||
|
||||
def empackage(z, filename, data=None):
|
||||
(path, basename) = os.path.split(filename)
|
||||
if not data:
|
||||
data = readfile(filename)
|
||||
content = z.compress(data)
|
||||
content += z.flush(zlib.Z_SYNC_FLUSH)
|
||||
return '%s\n%d\n%s' % (basename, len(content), content)
|
||||
|
||||
|
||||
def connect(ssh_cmd, rhostport, python, stderr, options):
|
||||
portl = []
|
||||
|
||||
if (rhostport or '').count(':') > 1:
|
||||
if rhostport.count(']') or rhostport.count('['):
|
||||
result = rhostport.split(']')
|
||||
rhost = result[0].strip('[')
|
||||
if len(result) > 1:
|
||||
result[1] = result[1].strip(':')
|
||||
if result[1] != '':
|
||||
portl = ['-p', str(int(result[1]))]
|
||||
# can't disambiguate IPv6 colons and a port number. pass the hostname
|
||||
# through.
|
||||
else:
|
||||
rhost = rhostport
|
||||
else: # IPv4
|
||||
l = (rhostport or '').split(':', 1)
|
||||
rhost = l[0]
|
||||
if len(l) > 1:
|
||||
portl = ['-p', str(int(l[1]))]
|
||||
|
||||
if rhost == '-':
|
||||
rhost = None
|
||||
|
||||
z = zlib.compressobj(1)
|
||||
content = readfile('assembler.py')
|
||||
optdata = ''.join("%s=%r\n" % (k, v) for (k, v) in options.items())
|
||||
content2 = (empackage(z, 'cmdline_options.py', optdata) +
|
||||
empackage(z, 'helpers.py') +
|
||||
empackage(z, 'compat/ssubprocess.py') +
|
||||
empackage(z, 'ssnet.py') +
|
||||
empackage(z, 'hostwatch.py') +
|
||||
empackage(z, 'server.py') +
|
||||
"\n")
|
||||
|
||||
pyscript = r"""
|
||||
import sys;
|
||||
skip_imports=1;
|
||||
verbosity=%d;
|
||||
exec compile(sys.stdin.read(%d), "assembler.py", "exec")
|
||||
""" % (helpers.verbose or 0, len(content))
|
||||
pyscript = re.sub(r'\s+', ' ', pyscript.strip())
|
||||
|
||||
if not rhost:
|
||||
# ignore the --python argument when running locally; we already know
|
||||
# which python version works.
|
||||
argv = [sys.argv[1], '-c', pyscript]
|
||||
else:
|
||||
if ssh_cmd:
|
||||
sshl = ssh_cmd.split(' ')
|
||||
else:
|
||||
sshl = ['ssh']
|
||||
if python:
|
||||
pycmd = "'%s' -c '%s'" % (python, pyscript)
|
||||
else:
|
||||
pycmd = ("P=python2; $P -V 2>/dev/null || P=python; "
|
||||
"exec \"$P\" -c '%s'") % pyscript
|
||||
argv = (sshl +
|
||||
portl +
|
||||
[rhost, '--', pycmd])
|
||||
(s1, s2) = socket.socketpair()
|
||||
|
||||
def setup():
|
||||
# runs in the child process
|
||||
s2.close()
|
||||
s1a, s1b = os.dup(s1.fileno()), os.dup(s1.fileno())
|
||||
s1.close()
|
||||
debug2('executing: %r\n' % argv)
|
||||
p = ssubprocess.Popen(argv, stdin=s1a, stdout=s1b, preexec_fn=setup,
|
||||
close_fds=True, stderr=stderr)
|
||||
os.close(s1a)
|
||||
os.close(s1b)
|
||||
s2.sendall(content)
|
||||
s2.sendall(content2)
|
||||
return p, s2
|
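How connect() above splits its rhostport argument into the ssh host and the optional '-p' port arguments (illustrative values, shown as comments):

# 'example.com'            -> rhost='example.com',       portl=[]
# 'user@example.com:2222'  -> rhost='user@example.com',  portl=['-p', '2222']
# '[2001:db8::1]:2222'     -> rhost='2001:db8::1',       portl=['-p', '2222']
# '2001:db8::1'            -> rhost='2001:db8::1',       portl=[]
#                             (bare IPv6 colons are ambiguous, so a port
#                              requires the '[addr]:port' form)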
12  src/sshuttle (new executable file)
@@ -0,0 +1,12 @@
|
||||
#!/bin/sh
|
||||
EXE=$0
|
||||
for i in 1 2 3 4 5 6 7 8 9 10; do
|
||||
[ -L "$EXE" ] || break
|
||||
EXE=$(readlink "$EXE")
|
||||
done
|
||||
DIR=$(dirname "$EXE")
|
||||
if python2 -V 2>/dev/null; then
|
||||
exec python2 "$DIR/main.py" python2 "$@"
|
||||
else
|
||||
exec python "$DIR/main.py" python "$@"
|
||||
fi
|
284  src/sshuttle.md (new file)
@@ -0,0 +1,284 @@
|
||||
% sshuttle(8) Sshuttle 0.46
|
||||
% Avery Pennarun <apenwarr@gmail.com>
|
||||
% 2011-01-25
|
||||
|
||||
# NAME
|
||||
|
||||
sshuttle - a transparent proxy-based VPN using ssh
|
||||
|
||||
# SYNOPSIS
|
||||
|
||||
sshuttle [options...] [-r [username@]sshserver[:port]] \<subnets...\>
|
||||
|
||||
|
||||
# DESCRIPTION
|
||||
|
||||
sshuttle allows you to create a VPN connection from your
|
||||
machine to any remote server that you can connect to via
|
||||
ssh, as long as that server has python 2.3 or higher.
|
||||
|
||||
To work, you must have root access on the local machine,
|
||||
but you can have a normal account on the server.
|
||||
|
||||
It's valid to run sshuttle more than once simultaneously on
|
||||
a single client machine, connecting to a different server
|
||||
every time, so you can be on more than one VPN at once.
|
||||
|
||||
If run on a router, sshuttle can forward traffic for your
|
||||
entire subnet to the VPN.
|
||||
|
||||
|
||||
# OPTIONS
|
||||
|
||||
\<subnets...\>
|
||||
: a list of subnets to route over the VPN, in the form
|
||||
`a.b.c.d[/width]`. Valid examples are 1.2.3.4 (a
|
||||
single IP address), 1.2.3.4/32 (equivalent to 1.2.3.4),
|
||||
1.2.3.0/24 (a 24-bit subnet, ie. with a 255.255.255.0
|
||||
netmask), and 0/0 ('just route everything through the
|
||||
VPN').
|
||||
|
||||
-l, --listen=*[ip:]port*
|
||||
: use this ip address and port number as the transparent
|
||||
proxy port. By default sshuttle finds an available
|
||||
port automatically and listens on IP 127.0.0.1
|
||||
(localhost), so you don't need to override it, and
|
||||
connections are only proxied from the local machine,
|
||||
not from outside machines. If you want to accept
|
||||
connections from other machines on your network (ie. to
|
||||
run sshuttle on a router) try enabling IP Forwarding in
|
||||
your kernel, then using `--listen 0.0.0.0:0`.
|
||||
|
||||
-H, --auto-hosts
|
||||
: scan for remote hostnames and update the local /etc/hosts
|
||||
file with matching entries for as long as the VPN is
|
||||
open. This is nicer than changing your system's DNS
|
||||
(/etc/resolv.conf) settings, for several reasons. First,
|
||||
hostnames are added without domain names attached, so
|
||||
you can `ssh thatserver` without worrying if your local
|
||||
domain matches the remote one. Second, if you sshuttle
|
||||
into more than one VPN at a time, it's impossible to
|
||||
use more than one DNS server at once anyway, but
|
||||
sshuttle correctly merges /etc/hosts entries between
|
||||
all running copies. Third, if you're only routing a
|
||||
few subnets over the VPN, you probably would prefer to
|
||||
keep using your local DNS server for everything else.
|
||||
|
||||
-N, --auto-nets
|
||||
: in addition to the subnets provided on the command
|
||||
line, ask the server which subnets it thinks we should
|
||||
route, and route those automatically. The suggestions
|
||||
are taken automatically from the server's routing
|
||||
table.
|
||||
|
||||
--dns
|
||||
: capture local DNS requests and forward to the remote DNS
|
||||
server.
|
||||
|
||||
--python
|
||||
: specify the name/path of the remote python interpreter.
|
||||
The default is just `python`, which means to use the
|
||||
default python interpreter on the remote system's PATH.
|
||||
|
||||
-r, --remote=*[username@]sshserver[:port]*
|
||||
: the remote hostname and optional username and ssh
|
||||
port number to use for connecting to the remote server.
|
||||
For example, example.com, testuser@example.com,
|
||||
testuser@example.com:2222, or example.com:2244.
|
||||
|
||||
-x, --exclude=*subnet*
|
||||
: explicitly exclude this subnet from forwarding. The
|
||||
format of this option is the same as the `<subnets>`
|
||||
option. To exclude more than one subnet, specify the
|
||||
`-x` option more than once. You can say something like
|
||||
`0/0 -x 1.2.3.0/24` to forward everything except the
|
||||
local subnet over the VPN, for example.
|
||||
|
||||
-X, --exclude-from=*file*
|
||||
: exclude the subnets specified in a file, one subnet per
|
||||
line. Useful when you have lots of subnets to exclude.
|
||||
|
||||
-v, --verbose
|
||||
: print more information about the session. This option
|
||||
can be used more than once for increased verbosity. By
|
||||
default, sshuttle prints only error messages.
|
||||
|
||||
-e, --ssh-cmd
|
||||
: the command to use to connect to the remote server. The
|
||||
default is just `ssh`. Use this if your ssh client is
|
||||
in a non-standard location or you want to provide extra
|
||||
options to the ssh command, for example, `-e 'ssh -v'`.
|
||||
|
||||
--seed-hosts
|
||||
: a comma-separated list of hostnames to use to
|
||||
initialize the `--auto-hosts` scan algorithm.
|
||||
`--auto-hosts` does things like poll local SMB servers
|
||||
for lists of local hostnames, but can speed things up
|
||||
if you use this option to give it a few names to start
|
||||
from.
|
||||
|
||||
--no-latency-control
|
||||
: sacrifice latency to improve bandwidth benchmarks. ssh
|
||||
uses really big socket buffers, which can overload the
|
||||
connection if you start doing large file transfers,
|
||||
thus making all your other sessions inside the same
|
||||
tunnel go slowly. Normally, sshuttle tries to avoid
|
||||
this problem using a "fullness check" that allows only
|
||||
a certain amount of outstanding data to be buffered at
|
||||
a time. But on high-bandwidth links, this can leave a
|
||||
lot of your bandwidth underutilized. It also makes
|
||||
sshuttle seem slow in bandwidth benchmarks (benchmarks
|
||||
rarely test ping latency, which is what sshuttle is
|
||||
trying to control). This option disables the latency
|
||||
control feature, maximizing bandwidth usage. Use at
|
||||
your own risk.
|
||||
|
||||
-D, --daemon
|
||||
: automatically fork into the background after connecting
|
||||
to the remote server. Implies `--syslog`.
|
||||
|
||||
--syslog
|
||||
: after connecting, send all log messages to the
|
||||
`syslog`(3) service instead of stderr. This is
|
||||
implicit if you use `--daemon`.
|
||||
|
||||
--pidfile=*pidfilename*
|
||||
: when using `--daemon`, save sshuttle's pid to
|
||||
*pidfilename*. The default is `sshuttle.pid` in the
|
||||
current directory.
|
||||
|
||||
--server
|
||||
: (internal use only) run the sshuttle server on
|
||||
stdin/stdout. This is what the client runs on
|
||||
the remote end.
|
||||
|
||||
--firewall
|
||||
: (internal use only) run the firewall manager. This is
|
||||
the only part of sshuttle that must run as root. If
|
||||
you start sshuttle as a non-root user, it will
|
||||
automatically run `sudo` or `su` to start the firewall
|
||||
manager, but the core of sshuttle still runs as a
|
||||
normal user.
|
||||
|
||||
--hostwatch
|
||||
: (internal use only) run the hostwatch daemon. This
|
||||
process runs on the server side and collects hostnames for
|
||||
the `--auto-hosts` option. Using this option by itself
|
||||
makes it a lot easier to debug and test the `--auto-hosts`
|
||||
feature.
|
||||
|
||||
|
||||
# EXAMPLES
|
||||
|
||||
Test locally by proxying all local connections, without using ssh:
|
||||
|
||||
$ sshuttle -v 0/0
|
||||
|
||||
Starting sshuttle proxy.
|
||||
Listening on ('0.0.0.0', 12300).
|
||||
[local sudo] Password:
|
||||
firewall manager ready.
|
||||
c : connecting to server...
|
||||
s: available routes:
|
||||
s: 192.168.42.0/24
|
||||
c : connected.
|
||||
firewall manager: starting transproxy.
|
||||
c : Accept: 192.168.42.106:50035 -> 192.168.42.121:139.
|
||||
c : Accept: 192.168.42.121:47523 -> 77.141.99.22:443.
|
||||
...etc...
|
||||
^C
|
||||
firewall manager: undoing changes.
|
||||
KeyboardInterrupt
|
||||
c : Keyboard interrupt: exiting.
|
||||
c : SW#8:192.168.42.121:47523: deleting
|
||||
c : SW#6:192.168.42.106:50035: deleting
|
||||
|
||||
Test connection to a remote server, with automatic hostname
|
||||
and subnet guessing:
|
||||
|
||||
$ sshuttle -vNHr example.org
|
||||
|
||||
Starting sshuttle proxy.
|
||||
Listening on ('0.0.0.0', 12300).
|
||||
firewall manager ready.
|
||||
c : connecting to server...
|
||||
s: available routes:
|
||||
s: 77.141.99.0/24
|
||||
c : connected.
|
||||
c : seed_hosts: []
|
||||
firewall manager: starting transproxy.
|
||||
hostwatch: Found: testbox1: 1.2.3.4
|
||||
hostwatch: Found: mytest2: 5.6.7.8
|
||||
hostwatch: Found: domaincontroller: 99.1.2.3
|
||||
c : Accept: 192.168.42.121:60554 -> 77.141.99.22:22.
|
||||
^C
|
||||
firewall manager: undoing changes.
|
||||
c : Keyboard interrupt: exiting.
|
||||
c : SW#6:192.168.42.121:60554: deleting
|
||||
|
||||
|
||||
# DISCUSSION
|
||||
|
||||
When it starts, sshuttle creates an ssh session to the
|
||||
server specified by the `-r` option. If `-r` is omitted,
|
||||
it will start both its client and server locally, which is
|
||||
sometimes useful for testing.
|
||||
|
||||
After connecting to the remote server, sshuttle uploads its
|
||||
(python) source code to the remote end and executes it
|
||||
there. Thus, you don't need to install sshuttle on the
|
||||
remote server, and there are never sshuttle version
|
||||
conflicts between client and server.
|
||||
|
||||
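A minimal sketch of that bootstrap step (illustrative only: the real
client ships a zlib-compressed bundle of its modules, as ssh.py's
empackage() shows, and runs the stub through ssh rather than a local
interpreter):

    import subprocess, sys

    payload = b"import sys; sys.stdout.write('hello from the far side\n')\n"
    stub = ("import sys; "
            "exec(compile(sys.stdin.read(%d), 'payload.py', 'exec'))" % len(payload))

    # In real use argv would be ['ssh', host, '--', "python -c '<stub>'"];
    # a local interpreter keeps the sketch self-contained.
    p = subprocess.Popen([sys.executable, '-c', stub], stdin=subprocess.PIPE)
    p.stdin.write(payload)
    p.stdin.close()
    p.wait()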
Unlike most VPNs, sshuttle forwards sessions, not packets.
|
||||
That is, it uses kernel transparent proxying (`iptables
|
||||
REDIRECT` rules on Linux, or `ipfw fwd` rules on BSD) to
|
||||
capture outgoing TCP sessions, then creates entirely
|
||||
separate TCP sessions out to the original destination at
|
||||
the other end of the tunnel.
|
||||
|
||||
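A hedged illustration of that capture step on Linux: redirect outbound
TCP for a subnet to the local transproxy port with an iptables REDIRECT
rule in the nat table, roughly as below. This is a simplified stand-in
for sshuttle's firewall manager, not its actual rule set; it needs root,
and the chain name is made up:

    import subprocess

    def redirect_subnet(subnet, listen_port, chain='sshuttle-demo'):
        def ipt(*args):
            subprocess.check_call(['iptables', '-t', 'nat'] + list(args))
        ipt('-N', chain)                       # a private chain for our rules
        ipt('-I', 'OUTPUT', '1', '-j', chain)  # hook locally generated traffic
        ipt('-A', chain, '-p', 'tcp',          # matching TCP goes to the proxy port
            '--dest', subnet,
            '-j', 'REDIRECT', '--to-ports', str(listen_port))

    # redirect_subnet('1.2.3.0/24', 12300)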
Packet-level forwarding (eg. using the tun/tap devices on
|
||||
Linux) seems elegant at first, but it results in
|
||||
several problems, notably the 'tcp over tcp' problem. The
|
||||
tcp protocol depends fundamentally on packets being dropped
|
||||
in order to implement its congestion control algorithm; if
|
||||
you pass tcp packets through a tcp-based tunnel (such as
|
||||
ssh), the inner tcp packets will never be dropped, and so
|
||||
the inner tcp stream's congestion control will be
|
||||
completely broken, and performance will be terrible. Thus,
|
||||
packet-based VPNs (such as IPsec and openvpn) cannot use
|
||||
tcp-based encrypted streams like ssh or ssl, and have to
|
||||
implement their own encryption from scratch, which is very
|
||||
complex and error prone.
|
||||
|
||||
sshuttle's simplicity comes from the fact that it can
|
||||
safely use the existing ssh encrypted tunnel without
|
||||
incurring a performance penalty. It does this by letting
|
||||
the client-side kernel manage the incoming tcp stream, and
|
||||
the server-side kernel manage the outgoing tcp stream;
|
||||
there is no need for congestion control to be shared
|
||||
between the two separate streams, so a tcp-based tunnel is
|
||||
fine.
|
||||
|
||||
|
||||
# BUGS
|
||||
|
||||
On MacOS 10.6 (at least up to 10.6.6), your network will
|
||||
stop responding about 10 minutes after the first time you
|
||||
start sshuttle, because of a MacOS kernel bug relating to
|
||||
arp and the net.inet.ip.scopedroute sysctl. To fix it,
|
||||
just switch your wireless off and on. Sshuttle makes the
|
||||
kernel setting it changes permanent, so this won't happen
|
||||
again, even after a reboot.
|
||||
|
||||
On MacOS, sshuttle will set the kernel boot flag
|
||||
net.inet.ip.scopedroute to 0, which interferes with OS X
|
||||
Internet Sharing and some VPN clients. To reset this flag,
|
||||
you can remove any reference to net.inet.ip.scopedroute from
|
||||
/Library/Preferences/SystemConfiguration/com.apple.Boot.plist
|
||||
and reboot.
|
||||
|
||||
|
||||
# SEE ALSO
|
||||
|
||||
`ssh`(1), `python`(1)
|
@@ -1,21 +1,22 @@
|
||||
import sys
|
||||
import struct
|
||||
import socket
|
||||
import errno
|
||||
import select
|
||||
import os
|
||||
|
||||
from sshuttle.helpers import b, log, debug1, debug2, debug3, Fatal, set_non_blocking_io
|
||||
if not globals().get('skip_imports'):
|
||||
from helpers import log, debug1, debug2, debug3, Fatal
|
||||
|
||||
MAX_CHANNEL = 65535
|
||||
LATENCY_BUFFER_SIZE = 32768
|
||||
|
||||
# these don't exist in the socket module in python 2.3!
|
||||
SHUT_RD = 0
|
||||
SHUT_WR = 1
|
||||
SHUT_RDWR = 2
|
||||
|
||||
|
||||
HDR_LEN = 8
|
||||
|
||||
|
||||
CMD_EXIT = 0x4200
|
||||
CMD_PING = 0x4201
|
||||
CMD_PONG = 0x4202
|
||||
@@ -53,19 +54,17 @@ cmd_to_name = {
|
||||
|
||||
NET_ERRS = [errno.ECONNREFUSED, errno.ETIMEDOUT,
|
||||
errno.EHOSTUNREACH, errno.ENETUNREACH,
|
||||
errno.EHOSTDOWN, errno.ENETDOWN,
|
||||
errno.ENETUNREACH, errno.ECONNABORTED,
|
||||
errno.ECONNRESET]
|
||||
errno.EHOSTDOWN, errno.ENETDOWN]
|
||||
|
||||
|
||||
def _add(socks, elem):
|
||||
if elem not in socks:
|
||||
socks.append(elem)
|
||||
def _add(l, elem):
|
||||
if not elem in l:
|
||||
l.append(elem)
|
||||
|
||||
|
||||
def _fds(socks):
|
||||
def _fds(l):
|
||||
out = []
|
||||
for i in socks:
|
||||
for i in l:
|
||||
try:
|
||||
out.append(i.fileno())
|
||||
except AttributeError:
|
||||
@@ -77,13 +76,11 @@ def _fds(socks):
|
||||
def _nb_clean(func, *args):
|
||||
try:
|
||||
return func(*args)
|
||||
except (OSError, socket.error):
|
||||
# Note: In python2 socket.error != OSError (In python3, they are same)
|
||||
_, e = sys.exc_info()[:2]
|
||||
except OSError, e:
|
||||
if e.errno not in (errno.EWOULDBLOCK, errno.EAGAIN):
|
||||
raise
|
||||
else:
|
||||
debug3('%s: err was: %s' % (func.__name__, e))
|
||||
debug3('%s: err was: %s\n' % (func.__name__, e))
|
||||
return None
|
||||
|
||||
|
||||
@@ -92,14 +89,9 @@ def _try_peername(sock):
|
||||
pn = sock.getpeername()
|
||||
if pn:
|
||||
return '%s:%s' % (pn[0], pn[1])
|
||||
except socket.error:
|
||||
_, e = sys.exc_info()[:2]
|
||||
if e.args[0] == errno.EINVAL:
|
||||
pass
|
||||
elif e.args[0] not in (errno.ENOTCONN, errno.ENOTSOCK):
|
||||
except socket.error, e:
|
||||
if e.args[0] not in (errno.ENOTCONN, errno.ENOTSOCK):
|
||||
raise
|
||||
except AttributeError:
|
||||
pass
|
||||
return 'unknown'
|
||||
|
||||
|
||||
@@ -111,7 +103,7 @@ class SockWrapper:
|
||||
def __init__(self, rsock, wsock, connect_to=None, peername=None):
|
||||
global _swcount
|
||||
_swcount += 1
|
||||
debug3('creating new SockWrapper (%d now exist)' % _swcount)
|
||||
debug3('creating new SockWrapper (%d now exist)\n' % _swcount)
|
||||
self.exc = None
|
||||
self.rsock = rsock
|
||||
self.wsock = wsock
|
||||
@@ -124,9 +116,9 @@ class SockWrapper:
|
||||
def __del__(self):
|
||||
global _swcount
|
||||
_swcount -= 1
|
||||
debug1('%r: deleting (%d remain)' % (self, _swcount))
|
||||
debug1('%r: deleting (%d remain)\n' % (self, _swcount))
|
||||
if self.exc:
|
||||
debug1('%r: error was: %s' % (self, self.exc))
|
||||
debug1('%r: error was: %s\n' % (self, self.exc))
|
||||
|
||||
def __repr__(self):
|
||||
if self.rsock == self.wsock:
|
||||
@@ -148,14 +140,13 @@ class SockWrapper:
|
||||
if not self.connect_to:
|
||||
return # already connected
|
||||
self.rsock.setblocking(False)
|
||||
debug3('%r: trying connect to %r' % (self, self.connect_to))
|
||||
debug3('%r: trying connect to %r\n' % (self, self.connect_to))
|
||||
try:
|
||||
self.rsock.connect(self.connect_to)
|
||||
# connected successfully (Linux)
|
||||
self.connect_to = None
|
||||
except socket.error:
|
||||
_, e = sys.exc_info()[:2]
|
||||
debug3('%r: connect result: %s' % (self, e))
|
||||
except socket.error, e:
|
||||
debug3('%r: connect result: %s\n' % (self, e))
|
||||
if e.args[0] == errno.EINVAL:
|
||||
# this is what happens when you call connect() on a socket
|
||||
# that is now connected but returned EINPROGRESS last time,
|
||||
@@ -165,16 +156,10 @@ class SockWrapper:
|
||||
realerr = self.rsock.getsockopt(socket.SOL_SOCKET,
|
||||
socket.SO_ERROR)
|
||||
e = socket.error(realerr, os.strerror(realerr))
|
||||
debug3('%r: fixed connect result: %s' % (self, e))
|
||||
debug3('%r: fixed connect result: %s\n' % (self, e))
|
||||
if e.args[0] in [errno.EINPROGRESS, errno.EALREADY]:
|
||||
pass # not connected yet
|
||||
elif sys.platform == 'win32' and e.args[0] == errno.WSAEWOULDBLOCK: # 10035
|
||||
pass # not connected yet
|
||||
elif e.args[0] == 0:
|
||||
if sys.platform == 'win32':
|
||||
# On Windows "real" error of EINVAL could be 0, when socket is in connecting state
|
||||
pass
|
||||
else:
|
||||
# connected successfully (weird Linux bug?)
|
||||
# Sometimes Linux seems to return EINVAL when it isn't
|
||||
# invalid. This *may* be caused by a race condition
|
||||
@@ -186,7 +171,7 @@ class SockWrapper:
|
||||
# when we added this, however.
|
||||
self.connect_to = None
|
||||
elif e.args[0] == errno.EISCONN:
|
||||
# connected successfully (BSD + Windows)
|
||||
# connected successfully (BSD)
|
||||
self.connect_to = None
|
||||
elif e.args[0] in NET_ERRS + [errno.EACCES, errno.EPERM]:
|
||||
# a "normal" kind of error
|
||||
@@ -197,21 +182,20 @@ class SockWrapper:
|
||||
|
||||
def noread(self):
|
||||
if not self.shut_read:
|
||||
debug2('%r: done reading' % self)
|
||||
debug2('%r: done reading\n' % self)
|
||||
self.shut_read = True
|
||||
# self.rsock.shutdown(SHUT_RD) # doesn't do anything anyway
|
||||
|
||||
def nowrite(self):
|
||||
if not self.shut_write:
|
||||
debug2('%r: done writing' % self)
|
||||
debug2('%r: done writing\n' % self)
|
||||
self.shut_write = True
|
||||
try:
|
||||
self.wsock.shutdown(SHUT_WR)
|
||||
except socket.error:
|
||||
_, e = sys.exc_info()[:2]
|
||||
except socket.error, e:
|
||||
self.seterr('nowrite: %s' % e)
|
||||
|
||||
@staticmethod
|
||||
def too_full():
|
||||
def too_full(self):
|
||||
return False # fullness is determined by the socket's select() state
|
||||
|
||||
def uwrite(self, buf):
|
||||
@@ -219,11 +203,10 @@ class SockWrapper:
|
||||
return 0 # still connecting
|
||||
self.wsock.setblocking(False)
|
||||
try:
|
||||
return _nb_clean(self.wsock.send, buf)
|
||||
except OSError:
|
||||
_, e = sys.exc_info()[:2]
|
||||
return _nb_clean(os.write, self.wsock.fileno(), buf)
|
||||
except OSError, e:
|
||||
if e.errno == errno.EPIPE:
|
||||
debug1('%r: uwrite: got EPIPE' % self)
|
||||
debug1('%r: uwrite: got EPIPE\n' % self)
|
||||
self.nowrite()
|
||||
return 0
|
||||
else:
|
||||
@@ -232,7 +215,7 @@ class SockWrapper:
|
||||
return 0
|
||||
|
||||
def write(self, buf):
|
||||
assert buf
|
||||
assert(buf)
|
||||
return self.uwrite(buf)
|
||||
|
||||
def uread(self):
|
||||
@@ -242,11 +225,10 @@ class SockWrapper:
|
||||
return
|
||||
self.rsock.setblocking(False)
|
||||
try:
|
||||
return _nb_clean(self.rsock.recv, 65536)
|
||||
except OSError:
|
||||
_, e = sys.exc_info()[:2]
|
||||
return _nb_clean(os.read, self.rsock.fileno(), 65536)
|
||||
except OSError, e:
|
||||
self.seterr('uread: %s' % e)
|
||||
return b('') # unexpected error... we'll call it EOF
|
||||
return '' # unexpected error... we'll call it EOF
|
||||
|
||||
def fill(self):
|
||||
if self.buf:
|
||||
@@ -254,7 +236,7 @@ class SockWrapper:
|
||||
rb = self.uread()
|
||||
if rb:
|
||||
self.buf.append(rb)
|
||||
if rb == b(''): # empty string means EOF; None means temporarily empty
|
||||
if rb == '': # empty string means EOF; None means temporarily empty
|
||||
self.noread()
|
||||
|
||||
def copy_to(self, outwrap):
|
||||
@@ -279,13 +261,13 @@ class Handler:
|
||||
for i in self.socks:
|
||||
_add(r, i)
|
||||
|
||||
def callback(self, sock):
|
||||
log('--no callback defined-- %r' % self)
|
||||
(r, _, _) = select.select(self.socks, [], [], 0)
|
||||
def callback(self):
|
||||
log('--no callback defined-- %r\n' % self)
|
||||
(r, w, x) = select.select(self.socks, [], [], 0)
|
||||
for s in r:
|
||||
v = s.recv(4096)
|
||||
if not v:
|
||||
log('--closed-- %r' % self)
|
||||
log('--closed-- %r\n' % self)
|
||||
self.socks = []
|
||||
self.ok = False
|
||||
|
||||
@@ -320,7 +302,7 @@ class Proxy(Handler):
|
||||
elif not self.wrap2.shut_read:
|
||||
_add(r, self.wrap2.rsock)
|
||||
|
||||
def callback(self, sock):
|
||||
def callback(self):
|
||||
self.wrap1.try_connect()
|
||||
self.wrap2.try_connect()
|
||||
self.wrap1.fill()
|
||||
@@ -342,25 +324,25 @@
|
||||
|
||||
class Mux(Handler):
|
||||
|
||||
def __init__(self, rfile, wfile):
|
||||
Handler.__init__(self, [rfile, wfile])
|
||||
self.rfile = rfile
|
||||
self.wfile = wfile
|
||||
def __init__(self, rsock, wsock):
|
||||
Handler.__init__(self, [rsock, wsock])
|
||||
self.rsock = rsock
|
||||
self.wsock = wsock
|
||||
self.new_channel = self.got_dns_req = self.got_routes = None
|
||||
self.got_udp_open = self.got_udp_data = self.got_udp_close = None
|
||||
self.got_host_req = self.got_host_list = None
|
||||
self.channels = {}
|
||||
self.chani = 0
|
||||
self.want = 0
|
||||
self.inbuf = b('')
|
||||
self.inbuf = ''
|
||||
self.outbuf = []
|
||||
self.fullness = 0
|
||||
self.too_full = False
|
||||
self.send(0, CMD_PING, b('chicken'))
|
||||
self.send(0, CMD_PING, 'chicken')
|
||||
|
||||
def next_channel(self):
|
||||
# channel 0 is special, so we never allocate it
|
||||
for _ in range(1024):
|
||||
for timeout in xrange(1024):
|
||||
self.chani += 1
|
||||
if self.chani > MAX_CHANNEL:
|
||||
self.chani = 1
|
||||
@@ -369,50 +351,52 @@ class Mux(Handler):
|
||||
|
||||
def amount_queued(self):
|
||||
total = 0
|
||||
for byte in self.outbuf:
|
||||
total += len(byte)
|
||||
for b in self.outbuf:
|
||||
total += len(b)
|
||||
return total
|
||||
|
||||
def check_fullness(self):
|
||||
if self.fullness > LATENCY_BUFFER_SIZE:
|
||||
if self.fullness > 32768:
|
||||
if not self.too_full:
|
||||
self.send(0, CMD_PING, b('rttest'))
|
||||
self.send(0, CMD_PING, 'rttest')
|
||||
self.too_full = True
|
||||
#ob = []
|
||||
# for b in self.outbuf:
|
||||
# (s1,s2,c) = struct.unpack('!ccH', b[:4])
|
||||
# ob.append(c)
|
||||
#log('outbuf: %d %r\n' % (self.amount_queued(), ob))
|
||||
|
||||
def send(self, channel, cmd, data):
|
||||
assert isinstance(data, bytes)
|
||||
assert len(data) <= 65535
|
||||
p = struct.pack('!ccHHH', b('S'), b('S'), channel, cmd, len(data)) \
|
||||
+ data
|
||||
data = str(data)
|
||||
assert(len(data) <= 65535)
|
||||
p = struct.pack('!ccHHH', 'S', 'S', channel, cmd, len(data)) + data
|
||||
self.outbuf.append(p)
|
||||
debug2(' > channel=%d cmd=%s len=%d (fullness=%d)'
|
||||
debug2(' > channel=%d cmd=%s len=%d (fullness=%d)\n'
|
||||
% (channel, cmd_to_name.get(cmd, hex(cmd)),
|
||||
len(data), self.fullness))
|
||||
# debug3('>>> data: %r' % data)
|
||||
self.fullness += len(data)
|
||||
|
||||
def got_packet(self, channel, cmd, data):
|
||||
debug2('< channel=%d cmd=%s len=%d'
|
||||
debug2('< channel=%d cmd=%s len=%d\n'
|
||||
% (channel, cmd_to_name.get(cmd, hex(cmd)), len(data)))
|
||||
# debug3('<<< data: %r' % data)
|
||||
if cmd == CMD_PING:
|
||||
self.send(0, CMD_PONG, data)
|
||||
elif cmd == CMD_PONG:
|
||||
debug2('received PING response')
|
||||
debug2('received PING response\n')
|
||||
self.too_full = False
|
||||
self.fullness = 0
|
||||
elif cmd == CMD_EXIT:
|
||||
self.ok = False
|
||||
elif cmd == CMD_TCP_CONNECT:
|
||||
assert not self.channels.get(channel)
|
||||
assert(not self.channels.get(channel))
|
||||
if self.new_channel:
|
||||
self.new_channel(channel, data)
|
||||
elif cmd == CMD_DNS_REQ:
|
||||
assert not self.channels.get(channel)
|
||||
assert(not self.channels.get(channel))
|
||||
if self.got_dns_req:
|
||||
self.got_dns_req(channel, data)
|
||||
elif cmd == CMD_UDP_OPEN:
|
||||
assert not self.channels.get(channel)
|
||||
assert(not self.channels.get(channel))
|
||||
if self.got_udp_open:
|
||||
self.got_udp_open(channel, data)
|
||||
elif cmd == CMD_ROUTES:
|
||||
@@ -433,46 +417,43 @@ class Mux(Handler):
|
||||
else:
|
||||
callback = self.channels.get(channel)
|
||||
if not callback:
|
||||
log('warning: closed channel %d got cmd=%s len=%d'
|
||||
log('warning: closed channel %d got cmd=%s len=%d\n'
|
||||
% (channel, cmd_to_name.get(cmd, hex(cmd)), len(data)))
|
||||
else:
|
||||
callback(cmd, data)
|
||||
|
||||
def flush(self):
|
||||
set_non_blocking_io(self.wfile.fileno())
|
||||
self.wsock.setblocking(False)
|
||||
if self.outbuf and self.outbuf[0]:
|
||||
wrote = _nb_clean(self.wfile.write, self.outbuf[0])
|
||||
# self.wfile.flush()
|
||||
debug2('mux wrote: %r/%d' % (wrote, len(self.outbuf[0])))
|
||||
wrote = _nb_clean(os.write, self.wsock.fileno(), self.outbuf[0])
|
||||
debug2('mux wrote: %r/%d\n' % (wrote, len(self.outbuf[0])))
|
||||
if wrote:
|
||||
self.outbuf[0] = self.outbuf[0][wrote:]
|
||||
while self.outbuf and not self.outbuf[0]:
|
||||
self.outbuf[0:1] = []
|
||||
|
||||
def fill(self):
|
||||
set_non_blocking_io(self.rfile.fileno())
|
||||
self.rsock.setblocking(False)
|
||||
try:
|
||||
# If LATENCY_BUFFER_SIZE is inappropriately large, we will
|
||||
# get a MemoryError here. Read no more than 1MiB.
|
||||
read = _nb_clean(self.rfile.read, min(1048576, LATENCY_BUFFER_SIZE))
|
||||
debug2('mux read: %r' % len(read))
|
||||
except OSError:
|
||||
_, e = sys.exc_info()[:2]
|
||||
b = _nb_clean(os.read, self.rsock.fileno(), 32768)
|
||||
except OSError, e:
|
||||
raise Fatal('other end: %r' % e)
|
||||
# log('<<< %r' % b)
|
||||
if read == b(''): # EOF
|
||||
#log('<<< %r\n' % b)
|
||||
if b == '': # EOF
|
||||
self.ok = False
|
||||
if read:
|
||||
self.inbuf += read
|
||||
if b:
|
||||
self.inbuf += b
|
||||
|
||||
def handle(self):
|
||||
self.fill()
|
||||
# log('inbuf is: (%d,%d) %r\n'
|
||||
# % (self.want, len(self.inbuf), self.inbuf))
|
||||
while 1:
|
||||
if len(self.inbuf) >= (self.want or HDR_LEN):
|
||||
(s1, s2, channel, cmd, datalen) = \
|
||||
struct.unpack('!ccHHH', self.inbuf[:HDR_LEN])
|
||||
assert s1 == b('S')
|
||||
assert s2 == b('S')
|
||||
assert(s1 == 'S')
|
||||
assert(s2 == 'S')
|
||||
self.want = datalen + HDR_LEN
|
||||
if self.want and len(self.inbuf) >= self.want:
|
||||
data = self.inbuf[HDR_LEN:self.want]
|
||||
@@ -483,27 +464,27 @@ class Mux(Handler):
|
||||
break
|
||||
|
||||
def pre_select(self, r, w, x):
|
||||
_add(r, self.rfile)
|
||||
_add(r, self.rsock)
|
||||
if self.outbuf:
|
||||
_add(w, self.wfile)
|
||||
_add(w, self.wsock)
|
||||
|
||||
def callback(self, sock):
|
||||
(r, w, _) = select.select([self.rfile], [self.wfile], [], 0)
|
||||
if self.rfile in r:
|
||||
def callback(self):
|
||||
(r, w, x) = select.select([self.rsock], [self.wsock], [], 0)
|
||||
if self.rsock in r:
|
||||
self.handle()
|
||||
if self.outbuf and self.wfile in w:
|
||||
if self.outbuf and self.wsock in w:
|
||||
self.flush()
|
||||
|
||||
|
||||
class MuxWrapper(SockWrapper):
|
||||
|
||||
def __init__(self, mux, channel):
|
||||
SockWrapper.__init__(self, mux.rfile, mux.wfile)
|
||||
SockWrapper.__init__(self, mux.rsock, mux.wsock)
|
||||
self.mux = mux
|
||||
self.channel = channel
|
||||
self.mux.channels[channel] = self.got_packet
|
||||
self.socks = []
|
||||
debug2('new channel: %d' % channel)
|
||||
debug2('new channel: %d\n' % channel)
|
||||
|
||||
def __del__(self):
|
||||
self.nowrite()
|
||||
@@ -514,29 +495,18 @@ class MuxWrapper(SockWrapper):
|
||||
|
||||
def noread(self):
|
||||
if not self.shut_read:
|
||||
self.mux.send(self.channel, CMD_TCP_STOP_SENDING, b(''))
|
||||
self.setnoread()
|
||||
|
||||
def setnoread(self):
|
||||
if not self.shut_read:
|
||||
debug2('%r: done reading' % self)
|
||||
self.shut_read = True
|
||||
self.mux.send(self.channel, CMD_TCP_STOP_SENDING, '')
|
||||
self.maybe_close()
|
||||
|
||||
def nowrite(self):
|
||||
if not self.shut_write:
|
||||
self.mux.send(self.channel, CMD_TCP_EOF, b(''))
|
||||
self.setnowrite()
|
||||
|
||||
def setnowrite(self):
|
||||
if not self.shut_write:
|
||||
debug2('%r: done writing' % self)
|
||||
self.shut_write = True
|
||||
self.mux.send(self.channel, CMD_TCP_EOF, '')
|
||||
self.maybe_close()
|
||||
|
||||
def maybe_close(self):
|
||||
if self.shut_read and self.shut_write:
|
||||
debug2('%r: closing connection' % self)
|
||||
# remove the mux's reference to us. The python garbage collector
|
||||
# will then be able to reap our object.
|
||||
self.mux.channels[self.channel] = None
|
||||
@@ -554,17 +524,15 @@ class MuxWrapper(SockWrapper):
|
||||
|
||||
def uread(self):
|
||||
if self.shut_read:
|
||||
return b('') # EOF
|
||||
return '' # EOF
|
||||
else:
|
||||
return None # no data available right now
|
||||
|
||||
def got_packet(self, cmd, data):
|
||||
if cmd == CMD_TCP_EOF:
|
||||
# Remote side already knows the status - set flag but don't notify
|
||||
self.setnoread()
|
||||
self.noread()
|
||||
elif cmd == CMD_TCP_STOP_SENDING:
|
||||
# Remote side already knows the status - set flag but don't notify
|
||||
self.setnowrite()
|
||||
self.nowrite()
|
||||
elif cmd == CMD_TCP_DATA:
|
||||
self.buf.append(data)
|
||||
else:
|
||||
@@ -573,37 +541,37 @@ class MuxWrapper(SockWrapper):
|
||||
|
||||
|
||||
def connect_dst(family, ip, port):
|
||||
debug2('Connecting to %s:%d' % (ip, port))
|
||||
debug2('Connecting to %s:%d\n' % (ip, port))
|
||||
outsock = socket.socket(family)
|
||||
|
||||
outsock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
|
||||
return SockWrapper(outsock, outsock,
|
||||
connect_to=(ip, port),
|
||||
peername='%s:%d' % (ip, port))
|
||||
peername = '%s:%d' % (ip, port))
|
||||
|
||||
|
||||
def runonce(handlers, mux):
|
||||
r = []
|
||||
w = []
|
||||
x = []
|
||||
to_remove = [s for s in handlers if not s.ok]
|
||||
to_remove = filter(lambda s: not s.ok, handlers)
|
||||
for h in to_remove:
|
||||
handlers.remove(h)
|
||||
|
||||
for s in handlers:
|
||||
s.pre_select(r, w, x)
|
||||
debug2('Waiting: %d r=%r w=%r x=%r (fullness=%d/%d)'
|
||||
debug2('Waiting: %d r=%r w=%r x=%r (fullness=%d/%d)\n'
|
||||
% (len(handlers), _fds(r), _fds(w), _fds(x),
|
||||
mux.fullness, mux.too_full))
|
||||
(r, w, x) = select.select(r, w, x)
|
||||
debug2(' Ready: %d r=%r w=%r x=%r'
|
||||
debug2(' Ready: %d r=%r w=%r x=%r\n'
|
||||
% (len(handlers), _fds(r), _fds(w), _fds(x)))
|
||||
ready = r + w + x
|
||||
did = {}
|
||||
for h in handlers:
|
||||
for s in h.socks:
|
||||
if s in ready:
|
||||
h.callback(s)
|
||||
h.callback()
|
||||
did[s] = 1
|
||||
for s in ready:
|
||||
if s not in did:
|
||||
if not s in did:
|
||||
raise Fatal('socket %r was not used by any handler' % s)
|
19  src/ssyslog.py (new file)
@@ -0,0 +1,19 @@
|
||||
import sys
|
||||
import os
|
||||
from compat import ssubprocess
|
||||
|
||||
|
||||
_p = None
|
||||
|
||||
|
||||
def start_syslog():
|
||||
global _p
|
||||
_p = ssubprocess.Popen(['logger',
|
||||
'-p', 'daemon.notice',
|
||||
'-t', 'sshuttle'], stdin=ssubprocess.PIPE)
|
||||
|
||||
|
||||
def stderr_to_syslog():
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
os.dup2(_p.stdin.fileno(), 2)
|
89  src/stresstest.py (new executable file)
@@ -0,0 +1,89 @@
|
||||
#!/usr/bin/env python
|
||||
import socket
|
||||
import select
|
||||
import struct
|
||||
import time
|
||||
|
||||
listener = socket.socket()
|
||||
listener.bind(('127.0.0.1', 0))
|
||||
listener.listen(500)
|
||||
|
||||
servers = []
|
||||
clients = []
|
||||
remain = {}
|
||||
|
||||
NUMCLIENTS = 50
|
||||
count = 0
|
||||
|
||||
|
||||
while 1:
|
||||
if len(clients) < NUMCLIENTS:
|
||||
c = socket.socket()
|
||||
c.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
c.bind(('0.0.0.0', 0))
|
||||
c.connect(listener.getsockname())
|
||||
count += 1
|
||||
if count >= 16384:
|
||||
count = 1
|
||||
print 'cli CREATING %d' % count
|
||||
b = struct.pack('I', count) + 'x' * count
|
||||
remain[c] = count
|
||||
print 'cli >> %r' % len(b)
|
||||
c.send(b)
|
||||
c.shutdown(socket.SHUT_WR)
|
||||
clients.append(c)
|
||||
r = [listener]
|
||||
time.sleep(0.1)
|
||||
else:
|
||||
r = [listener] + servers + clients
|
||||
print 'select(%d)' % len(r)
|
||||
r, w, x = select.select(r, [], [], 5)
|
||||
assert(r)
|
||||
for i in r:
|
||||
if i == listener:
|
||||
s, addr = listener.accept()
|
||||
servers.append(s)
|
||||
elif i in servers:
|
||||
b = i.recv(4096)
|
||||
print 'srv << %r' % len(b)
|
||||
if not i in remain:
|
||||
assert(len(b) >= 4)
|
||||
want = struct.unpack('I', b[:4])[0]
|
||||
b = b[4:]
|
||||
# i.send('y'*want)
|
||||
else:
|
||||
want = remain[i]
|
||||
if want < len(b):
|
||||
print 'weird wanted %d bytes, got %d: %r' % (want, len(b), b)
|
||||
assert(want >= len(b))
|
||||
want -= len(b)
|
||||
remain[i] = want
|
||||
if not b: # EOF
|
||||
if want:
|
||||
print 'weird: eof but wanted %d more' % want
|
||||
assert(want == 0)
|
||||
i.close()
|
||||
servers.remove(i)
|
||||
del remain[i]
|
||||
else:
|
||||
print 'srv >> %r' % len(b)
|
||||
i.send('y' * len(b))
|
||||
if not want:
|
||||
i.shutdown(socket.SHUT_WR)
|
||||
elif i in clients:
|
||||
b = i.recv(4096)
|
||||
print 'cli << %r' % len(b)
|
||||
want = remain[i]
|
||||
if want < len(b):
|
||||
print 'weird wanted %d bytes, got %d: %r' % (want, len(b), b)
|
||||
assert(want >= len(b))
|
||||
want -= len(b)
|
||||
remain[i] = want
|
||||
if not b: # EOF
|
||||
if want:
|
||||
print 'weird: eof but wanted %d more' % want
|
||||
assert(want == 0)
|
||||
i.close()
|
||||
clients.remove(i)
|
||||
del remain[i]
|
||||
listener.accept()
|
8  src/ui-macos/.gitignore (vendored, new file)
@@ -0,0 +1,8 @@
|
||||
*.pyc
|
||||
*~
|
||||
/*.nib
|
||||
/debug.app
|
||||
/sources.list
|
||||
/Sshuttle VPN.app
|
||||
/*.tar.gz
|
||||
/*.zip
|
BIN  src/ui-macos/ChickenErrorTemplate.pdf (new file, binary not shown)
BIN  src/ui-macos/ChickenIdleTemplate.pdf (new file, binary not shown)
BIN  src/ui-macos/ChickenRunningTemplate.pdf (new file, binary not shown)
40  src/ui-macos/Info.plist (new file)
@@ -0,0 +1,40 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>CFBundleDevelopmentRegion</key>
|
||||
<string>English</string>
|
||||
<key>CFBundleDisplayName</key>
|
||||
<string>Sshuttle VPN</string>
|
||||
<key>CFBundleExecutable</key>
|
||||
<string>Sshuttle</string>
|
||||
<key>CFBundleIconFile</key>
|
||||
<string>app.icns</string>
|
||||
<key>CFBundleIdentifier</key>
|
||||
<string>ca.apenwarr.Sshuttle</string>
|
||||
<key>CFBundleInfoDictionaryVersion</key>
|
||||
<string>6.0</string>
|
||||
<key>CFBundleName</key>
|
||||
<string>Sshuttle VPN</string>
|
||||
<key>CFBundlePackageType</key>
|
||||
<string>APPL</string>
|
||||
<key>CFBundleShortVersionString</key>
|
||||
<string>0.0.0</string>
|
||||
<key>CFBundleSignature</key>
|
||||
<string>????</string>
|
||||
<key>CFBundleVersion</key>
|
||||
<string>0.0.0</string>
|
||||
<key>LSUIElement</key>
|
||||
<string>1</string>
|
||||
<key>LSHasLocalizedDisplayName</key>
|
||||
<false/>
|
||||
<key>NSAppleScriptEnabled</key>
|
||||
<false/>
|
||||
<key>NSHumanReadableCopyright</key>
|
||||
<string>GNU LGPL Version 2</string>
|
||||
<key>NSMainNibFile</key>
|
||||
<string>MainMenu</string>
|
||||
<key>NSPrincipalClass</key>
|
||||
<string>NSApplication</string>
|
||||
</dict>
|
||||
</plist>
|
2609
src/ui-macos/MainMenu.xib
Normal file
File diff suppressed because it is too large
10
src/ui-macos/UserDefaults.plist
Normal file
@@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>startAtLogin</key>
    <false/>
    <key>autoReconnect</key>
    <true/>
</dict>
</plist>
1
src/ui-macos/all.do
Normal file
@@ -0,0 +1 @@
redo-ifchange debug.app dist
BIN
src/ui-macos/app.icns
Normal file
Binary file not shown.
30
src/ui-macos/askpass.py
Normal file
@@ -0,0 +1,30 @@
import re
import subprocess


def askpass(prompt):
    prompt = prompt.replace('"', "'")

    if 'yes/no' in prompt:
        return "yes"

    script = """
    tell application "Finder"
        activate
        display dialog "%s" \
            with title "Sshuttle SSH Connection" \
            default answer "" \
            with icon caution \
            with hidden answer
    end tell
    """ % prompt

    p = subprocess.Popen(['osascript', '-e', script], stdout=subprocess.PIPE)
    out = p.stdout.read()
    rv = p.wait()
    if rv:
        return None
    g = re.match("text returned:(.*), button returned:.*", out)
    if not g:
        return None
    return g.group(1)
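The regular expression at the end of askpass() pulls the typed text out of osascript's record-style output. A rough illustration of what it extracts (the sample output line below is invented for the example; the exact record formatting can vary between macOS releases):

    import re

    sample = 'text returned:secret123, button returned:OK'
    g = re.match("text returned:(.*), button returned:.*", sample)
    print g.group(1)   # -> 'secret123'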
1
src/ui-macos/bits/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
/runpython
1
src/ui-macos/bits/PkgInfo
Normal file
@@ -0,0 +1 @@
APPL????
23
src/ui-macos/bits/runpython.c
Normal file
@@ -0,0 +1,23 @@
/*
 * This rather pointless program acts like the python interpreter, except
 * it's intended to sit inside a MacOS .app package, so that its argv[0]
 * will point inside the package.
 *
 * NSApplicationMain() looks for Info.plist using the path in argv[0], which
 * goes wrong if your interpreter is /usr/bin/python.
 */
#include <Python/Python.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    char *path = strdup(argv[0]), *cptr;
    char *args[] = {argv[0], "../Resources/main.py", NULL};
    cptr = strrchr(path, '/');
    if (cptr)
        *cptr = 0;
    chdir(path);
    free(path);
    return Py_Main(2, args);
}
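In other words, runpython re-launches Python on a script located relative to its own executable inside the bundle, so argv[0] keeps pointing into the .app. A small sketch of the path arithmetic involved (the sample .app path is hypothetical):

    import os

    def resources_main(argv0):
        macos_dir = os.path.dirname(argv0)   # .../Contents/MacOS, i.e. the chdir() target above
        return os.path.normpath(os.path.join(macos_dir, '../Resources/main.py'))

    print resources_main('/Applications/Sshuttle VPN.app/Contents/MacOS/Sshuttle')
    # -> /Applications/Sshuttle VPN.app/Contents/Resources/main.py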
20
src/ui-macos/bits/runpython.do
Normal file
@@ -0,0 +1,20 @@
exec >&2
redo-ifchange runpython.c
ARCHES=""
printf "Platforms: "
if [ -d /usr/libexec/gcc/darwin ]; then
    for d in /usr/libexec/gcc/darwin/*; do
        PLAT=$(basename "$d")
        [ "$PLAT" != "ppc64" ] || continue  # fails for some reason on my Mac
        ARCHES="$ARCHES -arch $PLAT"
        printf "$PLAT "
    done
fi
printf "\n"
PYTHON_LDFLAGS=$(python-config --ldflags)
PYTHON_INCLUDES=$(python-config --includes)
gcc $ARCHES \
    -Wall -o $3 runpython.c \
    $PYTHON_INCLUDES \
    $PYTHON_LDFLAGS \
    -framework Python
4
src/ui-macos/clean.do
Normal file
@@ -0,0 +1,4 @@
exec >&2
find . -name '*~' | xargs rm -f
rm -rf *.app *.zip *.tar.gz
rm -f bits/runpython *.nib sources.list
15
src/ui-macos/debug.app.do
Normal file
@@ -0,0 +1,15 @@
redo-ifchange bits/runpython MainMenu.nib
rm -rf debug.app
mkdir debug.app debug.app/Contents
cd debug.app/Contents
ln -s ../.. Resources
ln -s ../.. English.lproj
ln -s ../../Info.plist .
ln -s ../../app.icns .

mkdir MacOS
cd MacOS
ln -s ../../../bits/runpython Sshuttle

cd ../../..
redo-ifchange $(find debug.app -type f)
28
src/ui-macos/default.app.do
Normal file
@@ -0,0 +1,28 @@
TOP=$PWD
redo-ifchange sources.list
redo-ifchange Info.plist bits/runpython \
    $(while read name newname; do echo "$name"; done <sources.list)

rm -rf "$2.app"
mkdir "$2.app" "$2.app/Contents"
cd "$2.app/Contents"

cp "$TOP/Info.plist" .

mkdir MacOS
cp "$TOP/bits/runpython" MacOS/Sshuttle

mkdir Resources

cd "$TOP"
while read name newname; do
    [ -z "$name" ] && continue
    : "${newname:=$name}"
    outname=$2.app/Contents/Resources/$newname
    outdir=$(dirname "$outname")
    [ -d "$outdir" ] || mkdir "$outdir"
    cp "${name-$newname}" "$outname"
done <sources.list

cd "$2.app"
redo-ifchange $(find . -type f)
5
src/ui-macos/default.app.tar.gz.do
Normal file
@@ -0,0 +1,5 @@
exec >&2
IFS="
"
redo-ifchange $2.app
tar -czf $3 $2.app/
5
src/ui-macos/default.app.zip.do
Normal file
@@ -0,0 +1,5 @@
exec >&2
IFS="
"
redo-ifchange $2.app
zip -q -r $3 $2.app/
2
src/ui-macos/default.nib.do
Normal file
@@ -0,0 +1,2 @@
redo-ifchange $2.xib
ibtool --compile $3 $2.xib
1
src/ui-macos/dist.do
Normal file
@@ -0,0 +1 @@
redo-ifchange "Sshuttle VPN.app.zip" "Sshuttle VPN.app.tar.gz"
19
src/ui-macos/git-export.do
Normal file
@@ -0,0 +1,19 @@
# update a local branch with pregenerated output files, so people can download
# the completed tarballs from github. Since we don't have any real binaries,
# our final distribution package contains mostly blobs from the source code,
# so this doesn't cost us much extra space in the repo.
BRANCH=dist/macos
redo-ifchange 'Sshuttle VPN.app'
git update-ref refs/heads/$BRANCH origin/$BRANCH '' 2>/dev/null || true

export GIT_INDEX_FILE=$PWD/gitindex.tmp
rm -f "$GIT_INDEX_FILE"
git add -f 'Sshuttle VPN.app'

MSG="MacOS precompiled app package for $(git describe)"
TREE=$(git write-tree --prefix=ui-macos)
git show-ref refs/heads/$BRANCH >/dev/null && PARENT="-p refs/heads/$BRANCH"
COMMITID=$(echo "$MSG" | git commit-tree $TREE $PARENT)

git update-ref refs/heads/$BRANCH $COMMITID
rm -f "$GIT_INDEX_FILE"
401
src/ui-macos/main.py
Normal file
@@ -0,0 +1,401 @@
import sys
import os
import pty
from AppKit import (
    objc,
    NSApp,
    NSApplicationMain,
    NSAttributedString,
    NSFileHandle,
    NSFileHandleDataAvailableNotification,
    NSImage,
    NSMenu,
    NSMenuItem,
    NSNotificationCenter,
    NSObject,
    NSStatusBar,
    NSVariableStatusItemLength,
)
import my
import models
import askpass


def sshuttle_args(host, auto_nets, auto_hosts, dns, nets, debug,
                  no_latency_control):
    argv = [my.bundle_path('sshuttle/sshuttle', ''), '-r', host]
    assert(argv[0])
    if debug:
        argv.append('-v')
    if auto_nets:
        argv.append('--auto-nets')
    if auto_hosts:
        argv.append('--auto-hosts')
    if dns:
        argv.append('--dns')
    if no_latency_control:
        argv.append('--no-latency-control')
    argv += nets
    return argv


class _Callback(NSObject):

    def initWithFunc_(self, func):
        self = super(_Callback, self).init()
        self.func = func
        return self

    def func_(self, obj):
        return self.func(obj)


class Callback:

    def __init__(self, func):
        self.obj = _Callback.alloc().initWithFunc_(func)
        self.sel = self.obj.func_


class Runner:

    def __init__(self, argv, logfunc, promptfunc, serverobj):
        print 'in __init__'
        self.id = argv
        self.rv = None
        self.pid = None
        self.fd = None
        self.logfunc = logfunc
        self.promptfunc = promptfunc
        self.serverobj = serverobj
        self.buf = ''
        self.logfunc('\nConnecting to %s.\n' % self.serverobj.host())
        print 'will run: %r' % argv
        self.serverobj.setConnected_(False)
        pid, fd = pty.fork()
        if pid == 0:
            # child
            try:
                os.execvp(argv[0], argv)
            except Exception, e:
                sys.stderr.write('failed to start: %r\n' % e)
                raise
            finally:
                os._exit(42)
        # parent
        self.pid = pid
        self.file = NSFileHandle.alloc()\
            .initWithFileDescriptor_closeOnDealloc_(fd, True)
        self.cb = Callback(self.gotdata)
        NSNotificationCenter.defaultCenter()\
            .addObserver_selector_name_object_(
                self.cb.obj, self.cb.sel,
                NSFileHandleDataAvailableNotification, self.file)
        self.file.waitForDataInBackgroundAndNotify()

    def __del__(self):
        self.wait()

    def _try_wait(self, options):
        if self.rv is None and self.pid > 0:
            pid, code = os.waitpid(self.pid, options)
            if pid == self.pid:
                if os.WIFEXITED(code):
                    self.rv = os.WEXITSTATUS(code)
                else:
                    self.rv = -os.WSTOPSIG(code)
                self.serverobj.setConnected_(False)
                self.serverobj.setError_('VPN process died')
                self.logfunc('Disconnected.\n')
        print 'wait_result: %r' % self.rv
        return self.rv

    def wait(self):
        rv = None
        while rv is None:
            self.gotdata(None)
            rv = self._try_wait(os.WNOHANG)

    def poll(self):
        return self._try_wait(os.WNOHANG)

    def kill(self):
        assert(self.pid > 0)
        print 'killing: pid=%r rv=%r' % (self.pid, self.rv)
        if self.rv is None:
            self.logfunc('Disconnecting from %s.\n' % self.serverobj.host())
            os.kill(self.pid, 15)
            self.wait()

    def gotdata(self, notification):
        print 'gotdata!'
        d = str(self.file.availableData())
        if d:
            self.logfunc(d)
            self.buf = self.buf + d
            if 'Connected.\r\n' in self.buf:
                self.serverobj.setConnected_(True)
            self.buf = self.buf[-4096:]
            if self.buf.strip().endswith(':'):
                lastline = self.buf.rstrip().split('\n')[-1]
                resp = self.promptfunc(lastline)
                add = ' (response)\n'
                self.buf += add
                self.logfunc(add)
                self.file.writeData_(my.Data(resp + '\n'))
            self.file.waitForDataInBackgroundAndNotify()
        self.poll()
        # print 'gotdata done!'


class SshuttleApp(NSObject):

    def initialize(self):
        d = my.PList('UserDefaults')
        my.Defaults().registerDefaults_(d)


class SshuttleController(NSObject):
    # Interface builder outlets
    startAtLoginField = objc.IBOutlet()
    autoReconnectField = objc.IBOutlet()
    debugField = objc.IBOutlet()
    routingField = objc.IBOutlet()
    prefsWindow = objc.IBOutlet()
    serversController = objc.IBOutlet()
    logField = objc.IBOutlet()
    latencyControlField = objc.IBOutlet()

    servers = []
    conns = {}

    def _connect(self, server):
        host = server.host()
        print 'connecting %r' % host
        self.fill_menu()

        def logfunc(msg):
            print 'log! (%d bytes)' % len(msg)
            self.logField.textStorage()\
                .appendAttributedString_(NSAttributedString.alloc()
                                         .initWithString_(msg))
            self.logField.didChangeText()

        def promptfunc(prompt):
            print 'prompt! %r' % prompt
            return askpass.askpass(prompt)
        nets_mode = server.autoNets()
        if nets_mode == models.NET_MANUAL:
            manual_nets = ["%s/%d" % (i.subnet(), i.width())
                           for i in server.nets()]
        elif nets_mode == models.NET_ALL:
            manual_nets = ['0/0']
        else:
            manual_nets = []
        noLatencyControl = (server.latencyControl() != models.LAT_INTERACTIVE)
        conn = Runner(sshuttle_args(host,
                                    auto_nets=nets_mode == models.NET_AUTO,
                                    auto_hosts=server.autoHosts(),
                                    dns=server.useDns(),
                                    nets=manual_nets,
                                    debug=self.debugField.state(),
                                    no_latency_control=noLatencyControl),
                      logfunc=logfunc, promptfunc=promptfunc,
                      serverobj=server)
        self.conns[host] = conn

    def _disconnect(self, server):
        host = server.host()
        print 'disconnecting %r' % host
        conn = self.conns.get(host)
        if conn:
            conn.kill()
        self.fill_menu()
        self.logField.textStorage().setAttributedString_(
            NSAttributedString.alloc().initWithString_(''))

    @objc.IBAction
    def cmd_connect(self, sender):
        server = sender.representedObject()
        server.setWantConnect_(True)

    @objc.IBAction
    def cmd_disconnect(self, sender):
        server = sender.representedObject()
        server.setWantConnect_(False)

    @objc.IBAction
    def cmd_show(self, sender):
        self.prefsWindow.makeKeyAndOrderFront_(self)
        NSApp.activateIgnoringOtherApps_(True)

    @objc.IBAction
    def cmd_quit(self, sender):
        NSStatusBar.systemStatusBar().removeStatusItem_(self.statusitem)
        NSApp.performSelector_withObject_afterDelay_(NSApp.terminate_,
                                                     None, 0.0)

    def fill_menu(self):
        menu = self.menu
        menu.removeAllItems()

        def additem(name, func, obj):
            it = menu.addItemWithTitle_action_keyEquivalent_(name, None, "")
            it.setRepresentedObject_(obj)
            it.setTarget_(self)
            it.setAction_(func)

        def addnote(name):
            additem(name, None, None)

        any_inprogress = None
        any_conn = None
        any_err = None
        if len(self.servers):
            for i in self.servers:
                host = i.host()
                title = i.title()
                want = i.wantConnect()
                connected = i.connected()
                numnets = len(list(i.nets()))
                if not host:
                    additem('Connect Untitled', None, i)
                elif i.autoNets() == models.NET_MANUAL and not numnets:
                    additem('Connect %s (no routes)' % host, None, i)
                elif want:
                    any_conn = i
                    additem('Disconnect %s' % title, self.cmd_disconnect, i)
                else:
                    additem('Connect %s' % title, self.cmd_connect, i)
                if not want:
                    msg = 'Off'
                elif i.error():
                    msg = 'ERROR - try reconnecting'
                    any_err = i
                elif connected:
                    msg = 'Connected'
                else:
                    msg = 'Connecting...'
                    any_inprogress = i
                addnote(' State: %s' % msg)
        else:
            addnote('No servers defined yet')

        menu.addItem_(NSMenuItem.separatorItem())
        additem('Preferences...', self.cmd_show, None)
        additem('Quit Sshuttle VPN', self.cmd_quit, None)

        if any_err:
            self.statusitem.setImage_(self.img_err)
            self.statusitem.setTitle_('Error!')
        elif any_conn:
            self.statusitem.setImage_(self.img_running)
            if any_inprogress:
                self.statusitem.setTitle_('Connecting...')
            else:
                self.statusitem.setTitle_('')
        else:
            self.statusitem.setImage_(self.img_idle)
            self.statusitem.setTitle_('')

    def load_servers(self):
        l = my.Defaults().arrayForKey_('servers') or []
        sl = []
        for s in l:
            host = s.get('host', None)
            if not host:
                continue

            nets = s.get('nets', [])
            nl = []
            for n in nets:
                subnet = n[0]
                width = n[1]
                net = models.SshuttleNet.alloc().init()
                net.setSubnet_(subnet)
                net.setWidth_(width)
                nl.append(net)

            autoNets = s.get('autoNets', models.NET_AUTO)
            autoHosts = s.get('autoHosts', True)
            useDns = s.get('useDns', autoNets == models.NET_ALL)
            latencyControl = s.get('latencyControl', models.LAT_INTERACTIVE)
            srv = models.SshuttleServer.alloc().init()
            srv.setHost_(host)
            srv.setAutoNets_(autoNets)
            srv.setAutoHosts_(autoHosts)
            srv.setNets_(nl)
            srv.setUseDns_(useDns)
            srv.setLatencyControl_(latencyControl)
            sl.append(srv)
        self.serversController.addObjects_(sl)
        self.serversController.setSelectionIndex_(0)

    def save_servers(self):
        l = []
        for s in self.servers:
            host = s.host()
            if not host:
                continue
            nets = []
            for n in s.nets():
                subnet = n.subnet()
                if not subnet:
                    continue
                nets.append((subnet, n.width()))
            d = dict(host=s.host(),
                     nets=nets,
                     autoNets=s.autoNets(),
                     autoHosts=s.autoHosts(),
                     useDns=s.useDns(),
                     latencyControl=s.latencyControl())
            l.append(d)
        my.Defaults().setObject_forKey_(l, 'servers')
        self.fill_menu()

    def awakeFromNib(self):
        self.routingField.removeAllItems()
        tf = self.routingField.addItemWithTitle_
        tf('Send all traffic through this server')
        tf('Determine automatically')
        tf('Custom...')

        self.latencyControlField.removeAllItems()
        tf = self.latencyControlField.addItemWithTitle_
        tf('Fast transfer')
        tf('Low latency')

        # Hmm, even when I mark this as !enabled in the .nib, it still comes
        # through as enabled. So let's just disable it here (since we don't
        # support this feature yet).
        self.startAtLoginField.setEnabled_(False)
        self.startAtLoginField.setState_(False)
        self.autoReconnectField.setEnabled_(False)
        self.autoReconnectField.setState_(False)

        self.load_servers()

        # Initialize our menu item
        self.menu = NSMenu.alloc().initWithTitle_('Sshuttle')
        bar = NSStatusBar.systemStatusBar()
        statusitem = bar.statusItemWithLength_(NSVariableStatusItemLength)
        self.statusitem = statusitem
        self.img_idle = NSImage.imageNamed_('ChickenIdleTemplate')
        self.img_running = NSImage.imageNamed_('ChickenRunningTemplate')
        self.img_err = NSImage.imageNamed_('ChickenErrorTemplate')
        statusitem.setImage_(self.img_idle)
        statusitem.setMenu_(self.menu)
        self.fill_menu()

        models.configchange_callback = my.DelayedCallback(self.save_servers)

        def sc(server):
            if server.wantConnect():
                self._connect(server)
            else:
                self._disconnect(server)
        models.setconnect_callback = sc


# Note: NSApplicationMain calls sys.exit(), so this never returns.
NSApplicationMain(sys.argv)
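One detail worth noting in Runner.gotdata() above: ssh runs on a pty, and whenever the accumulated output ends with ':' the last line is treated as an interactive prompt and handed to promptfunc (and from there to askpass). A stripped-down illustration of that heuristic (the sample buffer is invented):

    buf = "debug1: Authenticating to example.com...\nroot@example.com's password:"
    if buf.strip().endswith(':'):
        lastline = buf.rstrip().split('\n')[-1]
        print repr(lastline)   # -> "root@example.com's password:"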
189
src/ui-macos/models.py
Normal file
@@ -0,0 +1,189 @@
from AppKit import (objc, NSObject)
import my


configchange_callback = setconnect_callback = None
objc_validator = objc.signature('@@:N^@o^@')


def config_changed():
    if configchange_callback:
        configchange_callback()


def _validate_ip(v):
    parts = v.split('.')[:4]
    if len(parts) < 4:
        parts += ['0'] * (4 - len(parts))
    for i in range(4):
        n = my.atoi(parts[i])
        if n < 0:
            n = 0
        elif n > 255:
            n = 255
        parts[i] = str(n)
    return '.'.join(parts)


def _validate_width(v):
    n = my.atoi(v)
    if n < 0:
        n = 0
    elif n > 32:
        n = 32
    return n


class SshuttleNet(NSObject):

    def subnet(self):
        return getattr(self, '_k_subnet', None)

    def setSubnet_(self, v):
        self._k_subnet = v
        config_changed()

    @objc_validator
    def validateSubnet_error_(self, value, error):
        # print 'validateSubnet!'
        return True, _validate_ip(value), error

    def width(self):
        return getattr(self, '_k_width', 24)

    def setWidth_(self, v):
        self._k_width = v
        config_changed()

    @objc_validator
    def validateWidth_error_(self, value, error):
        # print 'validateWidth!'
        return True, _validate_width(value), error

NET_ALL = 0
NET_AUTO = 1
NET_MANUAL = 2

LAT_BANDWIDTH = 0
LAT_INTERACTIVE = 1


class SshuttleServer(NSObject):

    def init(self):
        self = super(SshuttleServer, self).init()
        config_changed()
        return self

    def wantConnect(self):
        return getattr(self, '_k_wantconnect', False)

    def setWantConnect_(self, v):
        self._k_wantconnect = v
        self.setError_(None)
        config_changed()
        if setconnect_callback:
            setconnect_callback(self)

    def connected(self):
        return getattr(self, '_k_connected', False)

    def setConnected_(self, v):
        print 'setConnected of %r to %r' % (self, v)
        self._k_connected = v
        if v:
            self.setError_(None)  # connected ok, so no error
        config_changed()

    def error(self):
        return getattr(self, '_k_error', None)

    def setError_(self, v):
        self._k_error = v
        config_changed()

    def isValid(self):
        if not self.host():
            return False
        if self.autoNets() == NET_MANUAL and not len(list(self.nets())):
            return False
        return True

    def title(self):
        host = self.host()
        if not host:
            return host
        an = self.autoNets()
        suffix = ""
        if an == NET_ALL:
            suffix = " (all traffic)"
        elif an == NET_MANUAL:
            n = self.nets()
            suffix = ' (%d subnet%s)' % (len(n), len(n) != 1 and 's' or '')
        return self.host() + suffix

    def setTitle_(self, v):
        # title is always auto-generated
        config_changed()

    def host(self):
        return getattr(self, '_k_host', None)

    def setHost_(self, v):
        self._k_host = v
        self.setTitle_(None)
        config_changed()

    @objc_validator
    def validateHost_error_(self, value, error):
        # print 'validatehost! %r %r %r' % (self, value, error)
        while value.startswith('-'):
            value = value[1:]
        return True, value, error

    def nets(self):
        return getattr(self, '_k_nets', [])

    def setNets_(self, v):
        self._k_nets = v
        self.setTitle_(None)
        config_changed()

    def netsHidden(self):
        # print 'checking netsHidden'
        return self.autoNets() != NET_MANUAL

    def setNetsHidden_(self, v):
        config_changed()
        # print 'setting netsHidden to %r' % v

    def autoNets(self):
        return getattr(self, '_k_autoNets', NET_AUTO)

    def setAutoNets_(self, v):
        self._k_autoNets = v
        self.setNetsHidden_(-1)
        self.setUseDns_(v == NET_ALL)
        self.setTitle_(None)
        config_changed()

    def autoHosts(self):
        return getattr(self, '_k_autoHosts', True)

    def setAutoHosts_(self, v):
        self._k_autoHosts = v
        config_changed()

    def useDns(self):
        return getattr(self, '_k_useDns', False)

    def setUseDns_(self, v):
        self._k_useDns = v
        config_changed()

    def latencyControl(self):
        return getattr(self, '_k_latencyControl', LAT_INTERACTIVE)

    def setLatencyControl_(self, v):
        self._k_latencyControl = v
        config_changed()
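The two module-level validators above only clamp their input rather than rejecting it: octets are padded and limited to 0-255, and mask widths to 0-32 (my.atoi() turns unparseable text into 0). For example, given the functions defined in the file above:

    print _validate_ip('10.1')          # -> '10.1.0.0'    (missing octets padded with 0)
    print _validate_ip('300.1.2.999')   # -> '255.1.2.255' (octets clamped to 0..255)
    print _validate_width('48')         # -> 32            (mask width clamped to 0..32)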
70
src/ui-macos/my.py
Normal file
@@ -0,0 +1,70 @@
import os
from AppKit import (
    NSBundle,
    NSData,
    NSDictionary,
    NSImage,
    NSUserDefaults,
)
import PyObjCTools.AppHelper


def bundle_path(name, typ):
    if typ:
        return NSBundle.mainBundle().pathForResource_ofType_(name, typ)
    else:
        return os.path.join(NSBundle.mainBundle().resourcePath(), name)


# Load an NSData using a python string
def Data(s):
    return NSData.alloc().initWithBytes_length_(s, len(s))


# Load a property list from a file in the application bundle.
def PList(name):
    path = bundle_path(name, 'plist')
    return NSDictionary.dictionaryWithContentsOfFile_(path)


# Load an NSImage from a file in the application bundle.
def Image(name, ext):
    bytes = open(bundle_path(name, ext)).read()
    img = NSImage.alloc().initWithData_(Data(bytes))
    return img


# Return the NSUserDefaults shared object.
def Defaults():
    return NSUserDefaults.standardUserDefaults()


# Usage:
#   f = DelayedCallback(func, args...)
# later:
#   f()
#
# When you call f(), it will schedule a call to func() next time the
# ObjC event loop iterates. Multiple calls to f() in a single iteration
# will only result in one call to func().
#
def DelayedCallback(func, *args, **kwargs):
    flag = [0]

    def _go():
        if flag[0]:
            print 'running %r (flag=%r)' % (func, flag)
            flag[0] = 0
            func(*args, **kwargs)

    def call():
        flag[0] += 1
        PyObjCTools.AppHelper.callAfter(_go)
    return call


def atoi(s):
    try:
        return int(s)
    except ValueError:
        return 0
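DelayedCallback() above coalesces a burst of calls into a single func() invocation per event-loop pass. A framework-free sketch of the same idea (here `schedule` and the manual drain loop stand in for PyObjCTools.AppHelper.callAfter and the Cocoa run loop; both stand-ins are assumptions for the example):

    pending = []

    def schedule(fn):          # stands in for PyObjCTools.AppHelper.callAfter
        pending.append(fn)

    def delayed(func):
        flag = [0]

        def _go():
            if flag[0]:
                flag[0] = 0
                func()

        def call():
            flag[0] += 1
            schedule(_go)
        return call

    saves = []
    f = delayed(lambda: saves.append('saved'))
    f(); f(); f()              # three requests during one "iteration"
    for fn in pending:         # drain the fake event loop once
        fn()
    print saves                # ['saved'] -- only one actual save happened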
4
src/ui-macos/run.do
Normal file
@@ -0,0 +1,4 @@
redo-ifchange debug.app
exec >&2
./debug.app/Contents/MacOS/Sshuttle
Some files were not shown because too many files have changed in this diff.