Mirror of https://github.com/sshuttle/sshuttle.git (synced 2025-06-20 18:07:44 +02:00)

Compare commits: "sshuttle-0.51-macos-bin" ... "master"

No commits in common. "sshuttle-0.51-macos-bin" and "master" have entirely different histories.
.github/dependabot.yml (new file, 13 lines)
@@ -0,0 +1,13 @@
version: 2
enable-beta-ecosystems: true
updates:
  - package-ecosystem: uv
    directory: "/"
    schedule:
      interval: daily
    open-pull-requests-limit: 10
  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: daily
    open-pull-requests-limit: 10

.github/workflows/codeql.yml (new file, 70 lines)
@@ -0,0 +1,70 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ master ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master ]
  schedule:
    - cron: '31 21 * * 3'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
        # Learn more about CodeQL language support at https://git.io/codeql-language-support

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v3

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      # and modify them (or add more) to build your code if your project
      # uses a compiled language

      #- run: |
      #   make bootstrap
      #   make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3

.github/workflows/pythonpackage.yml (new file, 38 lines)
@@ -0,0 +1,38 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Python package

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
  workflow_dispatch: {}

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12"]
        poetry-version: ["main"]
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.4.30"
          enable-cache: true
          cache-dependency-glob: "uv.lock"
      - name: Install the project
        run: uv sync --all-extras --dev
      - name: Lint with flake8
        run: uv run flake8 sshuttle tests --count --show-source --statistics
      - name: Run the automated tests
        run: uv run pytest -v

.github/workflows/release-please.yml (new file, 66 lines)
@@ -0,0 +1,66 @@
on:
  push:
    branches:
      - master

name: release-please

jobs:

  release-please:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    outputs:
      release_created: ${{ steps.release.outputs.release_created }}
      tag_name: ${{ steps.release.outputs.tag_name }}
    steps:
      - uses: googleapis/release-please-action@v4
        id: release
        with:
          token: ${{ secrets.MY_RELEASE_PLEASE_TOKEN }}
          release-type: python

  build-pypi:
    name: Build for pypi
    needs: [release-please]
    if: ${{ needs.release-please.outputs.release_created == 'true' }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.12
        uses: actions/setup-python@v5
        with:
          python-version: 3.12
      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.4.30"
          enable-cache: true
          cache-dependency-glob: "uv.lock"
      - name: Build project
        run: uv build
      - name: Store the distribution packages
        uses: actions/upload-artifact@v4
        with:
          name: python-package-distributions
          path: dist/

  upload-pypi:
    name: Upload to pypi
    needs: [build-pypi]
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/p/sshuttle
    permissions:
      id-token: write
    steps:
      - name: Download all the dists
        uses: actions/download-artifact@v4
        with:
          name: python-package-distributions
          path: dist/
      - name: Publish package distributions to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1

.gitignore (new file, 20 lines)
@@ -0,0 +1,20 @@
/tmp/
/.coverage
/.cache/
/.eggs/
/.tox/
/build/
/dist/
/sshuttle.egg-info/
/docs/_build/
*.pyc
*~
*.8
/.do_built
/.do_built.dir
/.redo
/.pytest_cache/
/.python-version
/.direnv/
/result
/.vscode/

.prospector.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
strictness: medium

pylint:
  disable:
    - too-many-statements
    - too-many-locals
    - too-many-function-args
    - too-many-arguments
    - too-many-branches
    - bare-except
    - protected-access
    - no-else-return
    - unused-argument
    - method-hidden
    - arguments-differ
    - wrong-import-position
    - raising-bad-type

pep8:
  options:
    max-line-length: 79

mccabe:
  run: false

.readthedocs.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
version: 2

build:
  os: ubuntu-20.04
  tools:
    python: "3.10"
  jobs:
    post_install:
      - pip install uv
      - UV_PROJECT_ENVIRONMENT=$READTHEDOCS_VIRTUALENV_PATH uv sync --all-extras --group docs --link-mode=copy

sphinx:
  configuration: docs/conf.py

.tool-versions (new file, 1 line)
@@ -0,0 +1 @@
python 3.10.6

CHANGELOG.md (new file, 54 lines)
@@ -0,0 +1,54 @@
# Changelog

## [1.3.1](https://github.com/sshuttle/sshuttle/compare/v1.3.0...v1.3.1) (2025-03-25)


### Bug Fixes

* add pycodestyle config ([5942376](https://github.com/sshuttle/sshuttle/commit/5942376090395d0a8dfe38fe012a519268199341))
* add python lint tools ([ae3c022](https://github.com/sshuttle/sshuttle/commit/ae3c022d1d67de92f1c4712d06eb8ae76c970624))
* correct bad version number at runtime ([7b66253](https://github.com/sshuttle/sshuttle/commit/7b662536ba92d724ed8f86a32a21282fea66047c))
* Restore "nft" method ([375810a](https://github.com/sshuttle/sshuttle/commit/375810a9a8910a51db22c9fe4c0658c39b16c9e7))

## [1.3.0](https://github.com/sshuttle/sshuttle/compare/v1.2.0...v1.3.0) (2025-02-23)


### Features

* switch to a network namespace on Linux ([8a123d9](https://github.com/sshuttle/sshuttle/commit/8a123d9762b84f168a8ca8c75f73e590954e122d))


### Bug Fixes

* prevent UnicodeDecodeError parsing iptables rule with comments ([cbe3d1e](https://github.com/sshuttle/sshuttle/commit/cbe3d1e402cac9d3fbc818fe0cb8a87be2e94348))
* remove temp build hack ([1f5e6ce](https://github.com/sshuttle/sshuttle/commit/1f5e6cea703db33761fb1c3f999b9624cf3bc7ad))
* support ':' sign in password ([7fa927e](https://github.com/sshuttle/sshuttle/commit/7fa927ef8ceea6b1b2848ca433b8b3e3b63f0509))


### Documentation

* replace nix-env with nix-shell ([340ccc7](https://github.com/sshuttle/sshuttle/commit/340ccc705ebd9499f14f799fcef0b5d2a8055fb4))
* update installation instructions ([a2d405a](https://github.com/sshuttle/sshuttle/commit/a2d405a6a7f9d1a301311a109f8411f2fe8deb37))

## [1.2.0](https://github.com/sshuttle/sshuttle/compare/v1.1.2...v1.2.0) (2025-02-07)


### Features

* Add release-please to build workflow ([d910b64](https://github.com/sshuttle/sshuttle/commit/d910b64be77fd7ef2a5f169b780bfda95e67318d))


### Bug Fixes

* Add support for Python 3.11 and Python 3.11 ([a3396a4](https://github.com/sshuttle/sshuttle/commit/a3396a443df14d3bafc3d25909d9221aa182b8fc))
* bad file descriptor error in windows, fix pytest errors ([d4d0fa9](https://github.com/sshuttle/sshuttle/commit/d4d0fa945d50606360aa7c5f026a0f190b026c68))
* drop Python 3.8 support ([1084c0f](https://github.com/sshuttle/sshuttle/commit/1084c0f2458c1595b00963b3bd54bd667e4cfc9f))
* ensure poetry works for Python 3.9 ([693ee40](https://github.com/sshuttle/sshuttle/commit/693ee40c485c70f353326eb0e8f721f984850f5c))
* fix broken workflow_dispatch CI rule ([4b6f7c6](https://github.com/sshuttle/sshuttle/commit/4b6f7c6a656a752552295863092d3b8af0b42b31))
* Remove more references to legacy Python versions ([339b522](https://github.com/sshuttle/sshuttle/commit/339b5221bc33254329f79f2374f6114be6f30aed))
* replace requirements.txt files with poetry ([85dc319](https://github.com/sshuttle/sshuttle/commit/85dc3199a332f9f9f0e4c6037c883a8f88dc09ca))
* replace requirements.txt files with poetry (2) ([d08f78a](https://github.com/sshuttle/sshuttle/commit/d08f78a2d9777951d7e18f6eaebbcdd279d7683a))
* replace requirements.txt files with poetry (3) ([62da705](https://github.com/sshuttle/sshuttle/commit/62da70510e8a1f93e8b38870fdebdbace965cd8e))
* replace requirements.txt files with poetry (4) ([9bcedf1](https://github.com/sshuttle/sshuttle/commit/9bcedf19049e5b3a8ae26818299cc518ec03a926))
* update nix flake to fix problems ([cda60a5](https://github.com/sshuttle/sshuttle/commit/cda60a52331c7102cff892b9b77c8321e276680a))
* use Python >= 3.10 for docs ([bf29464](https://github.com/sshuttle/sshuttle/commit/bf294643e283cef9fb285d44e307e958686caf46))

CHANGES.rst (new file, 315 lines)
@@ -0,0 +1,315 @@
==========
Change log
==========
Release notes now moved to https://github.com/sshuttle/sshuttle/releases/

These are the old release notes.


1.0.5 - 2020-12-29
------------------

Added
~~~~~
* IPv6 support in nft method.
* Intercept DNS requests sent by systemd-resolved.
* Set default tmark.
* Fix python2 server compatibility.
* Python 3.9 support.

Fixed
~~~~~
* Change license text to LGPL-2.1
* Fix #494 sshuttle caught in infinite select() loop.
* Include sshuttle version in verbose output.
* Add psutil as dependency in setup.py
* When subnets and excludes are specified with hostnames, use all IPs.
* Update/document client's handling of IPv4 and IPv6.
* Update sdnotify.py documentation.
* Allow no remote to work.
* Make prefixes in verbose output more consistent.
* Make nat and nft rules consistent; improve rule ordering.
* Make server and client handle resolv.conf differently.
* Fix handling OSError in FirewallClient#__init__
* Refactor automatic method selection.

Removed
~~~~~~~
* Drop testing of Python 3.5


1.0.4 - 2020-08-24
------------------

Fixed
~~~~~
* Allow Mux() flush/fill to work with python < 3.5
* Fix parse_hostport to always return string for host.
* Require -r/--remote parameter.
* Add missing package in OpenWRT documentation.
* Fix doc about --listen option.
* README: add Ubuntu.
* Increase IP4 ttl to 63 hops instead of 42.
* Fix formatting in installation.rst


1.0.3 - 2020-07-12
------------------

Fixed
~~~~~
* Ask setuptools to require Python 3.5 and above.
* Add missing import.
* Fix formatting typos in usage docs


1.0.2 - 2020-06-18
------------------

Fixed
~~~~~
* Leave use of default port to ssh command.
* Remove unwanted references to Python 2.7 in docs.
* Replace usage of deprecated imp.
* Fix connection with @ sign in username.


1.0.1 - 2020-06-05
------------------

Fixed
~~~~~
* Errors in python long_documentation.


1.0.0 - 2020-06-05
------------------

Added
~~~~~
* Python 3.8 support.
* sshpass support.
* Auto sudoers file (#269).
* option for latency control buffer size.
* Docs: FreeBSD'.
* Docs: Nix'.
* Docs: openwrt'.
* Docs: install instructions for Fedora'.
* Docs: install instructions for Arch Linux'.
* Docs: 'My VPN broke and need a solution fast'.

Removed
~~~~~~~
* Python 2.6 support.
* Python 2.7 support.

Fixed
~~~~~
* Remove debug message for getpeername failure.
* Fix crash triggered by port scans closing socket.
* Added "Running as a service" to docs.
* Systemd integration.
* Trap UnicodeError to handle cases where hostnames returned by DNS are invalid.
* Formatting error in CHANGES.rst
* Various errors in documentation.
* Nftables based method.
* Make hostwatch locale-independent (#379).
* Add tproxy udp port mark filter that was missed in #144, fixes #367.
* Capturing of local DNS servers.
* Crashing on ECONNABORTED.
* Size of pf_rule, which grew in OpenBSD 6.4.
* Use prompt for sudo, not needed for doas.
* Arch linux installation instructions.
* tests for existing PR-312 (#337).
* Hyphen in hostname.
* Assembler import (#319).


0.78.5 - 2019-01-28
-------------------

Added
~~~~~
* doas support as replacement for sudo on OpenBSD.
* Added ChromeOS section to documentation (#262)
* Add --no-sudo-pythonpath option

Fixed
~~~~~
* Fix forwarding to a single port.
* Various updates to documentation.
* Don't crash if we can't look up peername
* Fix missing string formatting argument
* Moved sshuttle/tests into tests.
* Updated bandit config.
* Replace path /dev/null by os.devnull.
* Added coverage report to tests.
* Fixes support for OpenBSD (6.1+) (#282).
* Close stdin, stdout, and stderr when using syslog or forking to daemon (#283).
* Changes pf exclusion rules precedence.
* Fix deadlock with iptables with large ruleset.
* docs: document --ns-hosts --to-ns and update --dns.
* Use subprocess.check_output instead of run.
* Fix potential deadlock condition in nft_get_handle.
* auto-nets: retrieve routes only if using auto-nets.


0.78.4 - 2018-04-02
-------------------

Added
~~~~~
* Add homebrew instructions.
* Route traffic by linux user.
* Add nat-like method using nftables instead of iptables.

Changed
~~~~~~~
* Talk to custom DNS server on pod, instead of the ones in /etc/resolv.conf.
* Add new option for overriding destination DNS server.
* Changed subnet parsing. Previously 10/8 become 10.0.0.0/8. Now it gets
  parsed as 0.0.0.10/8.
* Make hostwatch find both fqdn and hostname.
* Use versions of python3 greater than 3.5 when available (e.g. 3.6).

Removed
~~~~~~~
* Remove Python 2.6 from automatic tests.

Fixed
~~~~~
* Fix case where there is no --dns.
* [pf] Avoid port forwarding from loopback address.
* Use getaddrinfo to obtain a correct sockaddr.
* Skip empty lines on incoming routes data.
* Just skip empty lines of routes data instead of stopping processing.
* [pf] Load pf kernel module when enabling pf.
* [pf] Test double restore (ipv4, ipv6) disables only once; test kldload.
* Fixes UDP and DNS proxies binding to the same socket address.
* Mock socket bind to avoid depending on local IPs being available in test box.
* Fix no value passed for argument auto_hosts in hw_main call.
* Fixed incorrect license information in setup.py.
* Preserve peer and port properly.
* Make --to-dns and --ns-host work well together.
* Remove test that fails under OSX.
* Specify pip requirements for tests.
* Use flake8 to find Python syntax errors or undefined names.
* Fix compatibility with the sudoers file.
* Stop using SO_REUSEADDR on sockets.
* Declare 'verbosity' as global variable to placate linters.
* Adds 'cd sshuttle' after 'git' to README and docs.
* Documentation for loading options from configuration file.
* Load options from a file.
* Fix firewall.py.
* Move sdnotify after setting up firewall rules.
* Fix tests on Macos.


0.78.3 - 2017-07-09
-------------------
The "I should have done a git pull" first release.

Fixed
~~~~~
* Order first by port range and only then by swidth


0.78.2 - 2017-07-09
-------------------

Added
~~~~~
* Adds support for tunneling specific port ranges (#144).
* Add support for iproute2.
* Allow remote hosts with colons in the username.
* Re-introduce ipfw support for sshuttle on FreeBSD with support for --DNS option as well.
* Add support for PfSense.
* Tests and documentation for systemd integration.
* Allow subnets to be given only by file (-s).

Fixed
~~~~~
* Work around non tabular headers in BSD netstat.
* Fix UDP and DNS support on Python 2.7 with tproxy method.
* Fixed tests after adding support for iproute2.
* Small refactoring of netstat/iproute parsing.
* Set started_by_sshuttle False after disabling pf.
* Fix punctuation and explain Type=notify.
* Move pytest-runner to tests_require.
* Fix warning: closed channel got=STOP_SENDING.
* Support sdnotify for better systemd integration.
* Fix #117 to allow for no subnets via file (-s).
* Fix argument splitting for multi-word arguments.
* requirements.rst: Fix mistakes.
* Fix typo, space not required here.
* Update installation instructions.
* Support using run from different directory.
* Ensure we update sshuttle/version.py in run.
* Don't print python version in run.
* Add CWD to PYTHONPATH in run.


0.78.1 - 2016-08-06
-------------------
* Fix readthedocs versioning.
* Don't crash on ENETUNREACH.
* Various bug fixes.
* Improvements to BSD and OSX support.


0.78.0 - 2016-04-08
-------------------

* Don't force IPv6 if IPv6 nameservers supplied. Fixes #74.
* Call /bin/sh as users shell may not be POSIX compliant. Fixes #77.
* Use argparse for command line processing. Fixes #75.
* Remove useless --server option.
* Support multiple -s (subnet) options. Fixes #86.
* Make server parts work with old versions of Python. Fixes #81.


0.77.2 - 2016-03-07
-------------------

* Accidentally switched LGPL2 license with GPL2 license in 0.77.1 - now fixed.


0.77.1 - 2016-03-07
-------------------

* Use semantic versioning. http://semver.org/
* Update GPL 2 license text.
* New release to fix PyPI.


0.77 - 2016-03-03
-----------------

* Various bug fixes.
* Fix Documentation.
* Add fix for MacOS X issue.
* Add support for OpenBSD.


0.76 - 2016-01-17
-----------------

* Add option to disable IPv6 support.
* Update documentation.
* Move documentation, including man page, to Sphinx.
* Use setuptools-scm for automatic versioning.


0.75 - 2016-01-12
-----------------

* Revert change that broke sshuttle entry point.


0.74 - 2016-01-10
-----------------

* Add CHANGES.rst file.
* Numerous bug fixes.
* Python 3.5 fixes.
* PF fixes, especially for BSD.

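The 0.78.4 note above about subnet parsing is easy to misread, so here is a small, self-contained illustration of the two interpretations it describes, using only the Python standard library. This is a sketch, not sshuttle's actual parser; the prefix "10/8" is simply the example value from that entry.

# Illustration of the 0.78.4 subnet-parsing note (not sshuttle's own code).
# Older releases right-padded "10/8" to 10.0.0.0/8; the note says it is now
# read as the bare address 10, i.e. 0.0.0.10/8.
import ipaddress

right_padded = ipaddress.ip_network("10.0.0.0/8")                # old reading
bare_address = ipaddress.ip_network("0.0.0.10/8", strict=False)  # new reading

print(right_padded)   # 10.0.0.0/8
print(bare_address)   # 0.0.0.0/8 (the host bits of 0.0.0.10 are masked off)
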
LICENSE (new file, 502 lines)
@@ -0,0 +1,502 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999

Copyright (C) 1991, 1999 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

[The remainder of the file is the unmodified standard text of the GNU Lesser
General Public License, version 2.1: the preamble, terms and conditions
(sections 0 through 16, including NO WARRANTY), and the "How to Apply These
Terms to Your New Libraries" appendix.]

MANIFEST.in (new file, 14 lines)
@@ -0,0 +1,14 @@
include *.txt
include *.rst
include *.py
include MANIFEST.in
include LICENSE
include run
include tox.ini
exclude sshuttle/version.py
recursive-include docs *.bat
recursive-include docs *.py
recursive-include docs *.rst
recursive-include docs Makefile
recursive-include sshuttle *.py
recursive-exclude docs/_build *

README.rst (new file, 49 lines)
@@ -0,0 +1,49 @@
sshuttle: where transparent proxy meets VPN meets ssh
=====================================================

As far as I know, sshuttle is the only program that solves the following
common case:

- Your client machine (or router) is Linux, FreeBSD, MacOS or Windows.

- You have access to a remote network via ssh.

- You don't necessarily have admin access on the remote network.

- The remote network has no VPN, or only stupid/complex VPN
  protocols (IPsec, PPTP, etc). Or maybe you *are* the
  admin and you just got frustrated with the awful state of
  VPN tools.

- You don't want to create an ssh port forward for every
  single host/port on the remote network.

- You hate openssh's port forwarding because it's randomly
  slow and/or stupid.

- You can't use openssh's PermitTunnel feature because
  it's disabled by default on openssh servers; plus it does
  TCP-over-TCP, which has `terrible performance`_.

.. _terrible performance: https://sshuttle.readthedocs.io/en/stable/how-it-works.html

Obtaining sshuttle
------------------

Please see the documentation_.

.. _Documentation: https://sshuttle.readthedocs.io/en/stable/installation.html

Documentation
-------------
The documentation for the stable version is available at:
https://sshuttle.readthedocs.org/

The documentation for the latest development version is available at:
https://sshuttle.readthedocs.org/en/latest/


Running as a service
--------------------
Sshuttle can also be run as a service and configured using a config management system:
https://medium.com/@mike.reider/using-sshuttle-as-a-service-bec2684a65fe

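To make the scenario the README describes concrete, here is a minimal, hypothetical sketch of starting sshuttle for a single remote subnet. It assumes sshuttle is installed and on PATH; "user@example.com" and "192.168.0.0/24" are placeholder values. The flags used (-r, --dns, --auto-hosts) are the same ones the deleted macOS front-end code further below builds into its argv.

# Hypothetical wrapper, not part of sshuttle: launch the client for one subnet.
import subprocess

def start_sshuttle(remote="user@example.com", subnets=("192.168.0.0/24",)):
    # -r selects the ssh login to tunnel through; --dns also forwards DNS
    # lookups; --auto-hosts collects hostnames seen on the far end.
    argv = ["sshuttle", "-r", remote, "--dns", "--auto-hosts", *subnets]
    return subprocess.Popen(argv)

if __name__ == "__main__":
    start_sshuttle().wait()   # runs until interrupted with Ctrl-C
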
(deleted file; filename not shown in this view)
@@ -1,40 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
  <key>CFBundleDevelopmentRegion</key>
  <string>English</string>
  <key>CFBundleDisplayName</key>
  <string>Sshuttle VPN</string>
  <key>CFBundleExecutable</key>
  <string>Sshuttle</string>
  <key>CFBundleIconFile</key>
  <string>app.icns</string>
  <key>CFBundleIdentifier</key>
  <string>ca.apenwarr.Sshuttle</string>
  <key>CFBundleInfoDictionaryVersion</key>
  <string>6.0</string>
  <key>CFBundleName</key>
  <string>Sshuttle VPN</string>
  <key>CFBundlePackageType</key>
  <string>APPL</string>
  <key>CFBundleShortVersionString</key>
  <string>0.0.0</string>
  <key>CFBundleSignature</key>
  <string>????</string>
  <key>CFBundleVersion</key>
  <string>0.0.0</string>
  <key>LSUIElement</key>
  <string>1</string>
  <key>LSHasLocalizedDisplayName</key>
  <false/>
  <key>NSAppleScriptEnabled</key>
  <false/>
  <key>NSHumanReadableCopyright</key>
  <string>GNU LGPL Version 2</string>
  <key>NSMainNibFile</key>
  <string>MainMenu</string>
  <key>NSPrincipalClass</key>
  <string>NSApplication</string>
</dict>
</plist>

Binary file not shown.
Binary file not shown.
(deleted file; filename not shown in this view)
@@ -1,10 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
  <key>startAtLogin</key>
  <false/>
  <key>autoReconnect</key>
  <true/>
</dict>
</plist>

Binary file not shown.
@@ -1,28 +0,0 @@
import sys, os, re, subprocess

def askpass(prompt):
    prompt = prompt.replace('"', "'")

    if 'yes/no' in prompt:
        return "yes"

    script="""
        tell application "Finder"
            activate
            display dialog "%s" \
                with title "Sshuttle SSH Connection" \
                default answer "" \
                with icon caution \
                with hidden answer
        end tell
    """ % prompt

    p = subprocess.Popen(['osascript', '-e', script], stdout=subprocess.PIPE)
    out = p.stdout.read()
    rv = p.wait()
    if rv:
        return None
    g = re.match("text returned:(.*), button returned:.*", out)
    if not g:
        return None
    return g.group(1)
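The deleted helper above is Python 2 and drives the macOS password dialog
through osascript. Purely for comparison, a present-day sketch of the same
idea might read as follows; this is an illustration, not code from the
repository, and the AppleScript text and result-parsing regex are simplified
assumptions:

import re
import subprocess


def askpass(prompt):
    # Host-key style yes/no questions are simply confirmed.
    if 'yes/no' in prompt:
        return 'yes'
    script = (
        'tell application "Finder"\n'
        '  activate\n'
        '  display dialog "%s" with title "Sshuttle SSH Connection" '
        'default answer "" with icon caution with hidden answer\n'
        'end tell'
    ) % prompt.replace('"', "'")
    result = subprocess.run(['osascript', '-e', script],
                            capture_output=True, text=True)
    if result.returncode:
        return None  # dialog cancelled, or osascript failed
    m = re.search(r'text returned:(.*), button returned:', result.stdout)
    return m.group(1) if m else None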
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,367 +0,0 @@
|
||||
import sys, os, pty
|
||||
from AppKit import *
|
||||
import my, models, askpass
|
||||
|
||||
def sshuttle_args(host, auto_nets, auto_hosts, dns, nets, debug,
|
||||
no_latency_control):
|
||||
argv = [my.bundle_path('sshuttle/sshuttle', ''), '-r', host]
|
||||
assert(argv[0])
|
||||
if debug:
|
||||
argv.append('-v')
|
||||
if auto_nets:
|
||||
argv.append('--auto-nets')
|
||||
if auto_hosts:
|
||||
argv.append('--auto-hosts')
|
||||
if dns:
|
||||
argv.append('--dns')
|
||||
if no_latency_control:
|
||||
argv.append('--no-latency-control')
|
||||
argv += nets
|
||||
return argv
|
||||
|
||||
|
||||
class _Callback(NSObject):
|
||||
def initWithFunc_(self, func):
|
||||
self = super(_Callback, self).init()
|
||||
self.func = func
|
||||
return self
|
||||
def func_(self, obj):
|
||||
return self.func(obj)
|
||||
|
||||
|
||||
class Callback:
|
||||
def __init__(self, func):
|
||||
self.obj = _Callback.alloc().initWithFunc_(func)
|
||||
self.sel = self.obj.func_
|
||||
|
||||
|
||||
class Runner:
|
||||
def __init__(self, argv, logfunc, promptfunc, serverobj):
|
||||
print 'in __init__'
|
||||
self.id = argv
|
||||
self.rv = None
|
||||
self.pid = None
|
||||
self.fd = None
|
||||
self.logfunc = logfunc
|
||||
self.promptfunc = promptfunc
|
||||
self.serverobj = serverobj
|
||||
self.buf = ''
|
||||
self.logfunc('\nConnecting to %s.\n' % self.serverobj.host())
|
||||
print 'will run: %r' % argv
|
||||
self.serverobj.setConnected_(False)
|
||||
pid,fd = pty.fork()
|
||||
if pid == 0:
|
||||
# child
|
||||
try:
|
||||
os.execvp(argv[0], argv)
|
||||
except Exception, e:
|
||||
sys.stderr.write('failed to start: %r\n' % e)
|
||||
raise
|
||||
finally:
|
||||
os._exit(42)
|
||||
# parent
|
||||
self.pid = pid
|
||||
self.file = NSFileHandle.alloc()\
|
||||
.initWithFileDescriptor_closeOnDealloc_(fd, True)
|
||||
self.cb = Callback(self.gotdata)
|
||||
NSNotificationCenter.defaultCenter()\
|
||||
.addObserver_selector_name_object_(self.cb.obj, self.cb.sel,
|
||||
NSFileHandleDataAvailableNotification, self.file)
|
||||
self.file.waitForDataInBackgroundAndNotify()
|
||||
|
||||
def __del__(self):
|
||||
self.wait()
|
||||
|
||||
def _try_wait(self, options):
|
||||
if self.rv == None and self.pid > 0:
|
||||
pid,code = os.waitpid(self.pid, options)
|
||||
if pid == self.pid:
|
||||
if os.WIFEXITED(code):
|
||||
self.rv = os.WEXITSTATUS(code)
|
||||
else:
|
||||
self.rv = -os.WSTOPSIG(code)
|
||||
self.serverobj.setConnected_(False)
|
||||
self.serverobj.setError_('VPN process died')
|
||||
self.logfunc('Disconnected.\n')
|
||||
print 'wait_result: %r' % self.rv
|
||||
return self.rv
|
||||
|
||||
def wait(self):
|
||||
return self._try_wait(0)
|
||||
|
||||
def poll(self):
|
||||
return self._try_wait(os.WNOHANG)
|
||||
|
||||
def kill(self):
|
||||
assert(self.pid > 0)
|
||||
print 'killing: pid=%r rv=%r' % (self.pid, self.rv)
|
||||
if self.rv == None:
|
||||
self.logfunc('Disconnecting from %s.\n' % self.serverobj.host())
|
||||
os.kill(self.pid, 15)
|
||||
self.wait()
|
||||
|
||||
def gotdata(self, notification):
|
||||
print 'gotdata!'
|
||||
d = str(self.file.availableData())
|
||||
if d:
|
||||
self.logfunc(d)
|
||||
self.buf = self.buf + d
|
||||
if 'Connected.\r\n' in self.buf:
|
||||
self.serverobj.setConnected_(True)
|
||||
self.buf = self.buf[-4096:]
|
||||
if self.buf.strip().endswith(':'):
|
||||
lastline = self.buf.rstrip().split('\n')[-1]
|
||||
resp = self.promptfunc(lastline)
|
||||
add = ' (response)\n'
|
||||
self.buf += add
|
||||
self.logfunc(add)
|
||||
self.file.writeData_(my.Data(resp + '\n'))
|
||||
self.file.waitForDataInBackgroundAndNotify()
|
||||
self.poll()
|
||||
#print 'gotdata done!'
|
||||
|
||||
|
||||
class SshuttleApp(NSObject):
|
||||
def initialize(self):
|
||||
d = my.PList('UserDefaults')
|
||||
my.Defaults().registerDefaults_(d)
|
||||
|
||||
|
||||
class SshuttleController(NSObject):
|
||||
# Interface builder outlets
|
||||
startAtLoginField = objc.IBOutlet()
|
||||
autoReconnectField = objc.IBOutlet()
|
||||
debugField = objc.IBOutlet()
|
||||
routingField = objc.IBOutlet()
|
||||
prefsWindow = objc.IBOutlet()
|
||||
serversController = objc.IBOutlet()
|
||||
logField = objc.IBOutlet()
|
||||
latencyControlField = objc.IBOutlet()
|
||||
|
||||
servers = []
|
||||
conns = {}
|
||||
|
||||
def _connect(self, server):
|
||||
host = server.host()
|
||||
print 'connecting %r' % host
|
||||
self.fill_menu()
|
||||
def logfunc(msg):
|
||||
print 'log! (%d bytes)' % len(msg)
|
||||
self.logField.textStorage()\
|
||||
.appendAttributedString_(NSAttributedString.alloc()\
|
||||
.initWithString_(msg))
|
||||
self.logField.didChangeText()
|
||||
def promptfunc(prompt):
|
||||
print 'prompt! %r' % prompt
|
||||
return askpass.askpass(prompt)
|
||||
nets_mode = server.autoNets()
|
||||
if nets_mode == models.NET_MANUAL:
|
||||
manual_nets = ["%s/%d" % (i.subnet(), i.width())
|
||||
for i in server.nets()]
|
||||
elif nets_mode == models.NET_ALL:
|
||||
manual_nets = ['0/0']
|
||||
else:
|
||||
manual_nets = []
|
||||
noLatencyControl = (server.latencyControl() != models.LAT_INTERACTIVE)
|
||||
conn = Runner(sshuttle_args(host,
|
||||
auto_nets = nets_mode == models.NET_AUTO,
|
||||
auto_hosts = server.autoHosts(),
|
||||
dns = server.useDns(),
|
||||
nets = manual_nets,
|
||||
debug = self.debugField.state(),
|
||||
no_latency_control = noLatencyControl),
|
||||
logfunc=logfunc, promptfunc=promptfunc,
|
||||
serverobj=server)
|
||||
self.conns[host] = conn
|
||||
|
||||
def _disconnect(self, server):
|
||||
host = server.host()
|
||||
print 'disconnecting %r' % host
|
||||
conn = self.conns.get(host)
|
||||
if conn:
|
||||
conn.kill()
|
||||
self.fill_menu()
|
||||
self.logField.textStorage().setAttributedString_(
|
||||
NSAttributedString.alloc().initWithString_(''))
|
||||
|
||||
@objc.IBAction
|
||||
def cmd_connect(self, sender):
|
||||
server = sender.representedObject()
|
||||
server.setWantConnect_(True)
|
||||
|
||||
@objc.IBAction
|
||||
def cmd_disconnect(self, sender):
|
||||
server = sender.representedObject()
|
||||
server.setWantConnect_(False)
|
||||
|
||||
@objc.IBAction
|
||||
def cmd_show(self, sender):
|
||||
self.prefsWindow.makeKeyAndOrderFront_(self)
|
||||
NSApp.activateIgnoringOtherApps_(True)
|
||||
|
||||
@objc.IBAction
|
||||
def cmd_quit(self, sender):
|
||||
NSApp.performSelector_withObject_afterDelay_(NSApp.terminate_,
|
||||
None, 0.0)
|
||||
|
||||
def fill_menu(self):
|
||||
menu = self.menu
|
||||
menu.removeAllItems()
|
||||
|
||||
def additem(name, func, obj):
|
||||
it = menu.addItemWithTitle_action_keyEquivalent_(name, None, "")
|
||||
it.setRepresentedObject_(obj)
|
||||
it.setTarget_(self)
|
||||
it.setAction_(func)
|
||||
def addnote(name):
|
||||
additem(name, None, None)
|
||||
|
||||
any_inprogress = None
|
||||
any_conn = None
|
||||
any_err = None
|
||||
if len(self.servers):
|
||||
for i in self.servers:
|
||||
host = i.host()
|
||||
title = i.title()
|
||||
want = i.wantConnect()
|
||||
connected = i.connected()
|
||||
numnets = len(list(i.nets()))
|
||||
if not host:
|
||||
additem('Connect Untitled', None, i)
|
||||
elif i.autoNets() == models.NET_MANUAL and not numnets:
|
||||
additem('Connect %s (no routes)' % host, None, i)
|
||||
elif want:
|
||||
any_conn = i
|
||||
additem('Disconnect %s' % title, self.cmd_disconnect, i)
|
||||
else:
|
||||
additem('Connect %s' % title, self.cmd_connect, i)
|
||||
if not want:
|
||||
msg = 'Off'
|
||||
elif i.error():
|
||||
msg = 'ERROR - try reconnecting'
|
||||
any_err = i
|
||||
elif connected:
|
||||
msg = 'Connected'
|
||||
else:
|
||||
msg = 'Connecting...'
|
||||
any_inprogress = i
|
||||
addnote(' State: %s' % msg)
|
||||
else:
|
||||
addnote('No servers defined yet')
|
||||
|
||||
menu.addItem_(NSMenuItem.separatorItem())
|
||||
additem('Preferences...', self.cmd_show, None)
|
||||
additem('Quit Sshuttle VPN', self.cmd_quit, None)
|
||||
|
||||
if any_err:
|
||||
self.statusitem.setImage_(self.img_err)
|
||||
self.statusitem.setTitle_('Error!')
|
||||
elif any_conn:
|
||||
self.statusitem.setImage_(self.img_running)
|
||||
if any_inprogress:
|
||||
self.statusitem.setTitle_('Connecting...')
|
||||
else:
|
||||
self.statusitem.setTitle_('')
|
||||
else:
|
||||
self.statusitem.setImage_(self.img_idle)
|
||||
self.statusitem.setTitle_('')
|
||||
|
||||
def load_servers(self):
|
||||
l = my.Defaults().arrayForKey_('servers') or []
|
||||
sl = []
|
||||
for s in l:
|
||||
host = s.get('host', None)
|
||||
if not host: continue
|
||||
|
||||
nets = s.get('nets', [])
|
||||
nl = []
|
||||
for n in nets:
|
||||
subnet = n[0]
|
||||
width = n[1]
|
||||
net = models.SshuttleNet.alloc().init()
|
||||
net.setSubnet_(subnet)
|
||||
net.setWidth_(width)
|
||||
nl.append(net)
|
||||
|
||||
autoNets = s.get('autoNets', models.NET_AUTO)
|
||||
autoHosts = s.get('autoHosts', True)
|
||||
useDns = s.get('useDns', autoNets == models.NET_ALL)
|
||||
latencyControl = s.get('latencyControl', models.LAT_INTERACTIVE)
|
||||
srv = models.SshuttleServer.alloc().init()
|
||||
srv.setHost_(host)
|
||||
srv.setAutoNets_(autoNets)
|
||||
srv.setAutoHosts_(autoHosts)
|
||||
srv.setNets_(nl)
|
||||
srv.setUseDns_(useDns)
|
||||
srv.setLatencyControl_(latencyControl)
|
||||
sl.append(srv)
|
||||
self.serversController.addObjects_(sl)
|
||||
self.serversController.setSelectionIndex_(0)
|
||||
|
||||
def save_servers(self):
|
||||
l = []
|
||||
for s in self.servers:
|
||||
host = s.host()
|
||||
if not host: continue
|
||||
nets = []
|
||||
for n in s.nets():
|
||||
subnet = n.subnet()
|
||||
if not subnet: continue
|
||||
nets.append((subnet, n.width()))
|
||||
d = dict(host=s.host(),
|
||||
nets=nets,
|
||||
autoNets=s.autoNets(),
|
||||
autoHosts=s.autoHosts(),
|
||||
useDns=s.useDns(),
|
||||
latencyControl=s.latencyControl())
|
||||
l.append(d)
|
||||
my.Defaults().setObject_forKey_(l, 'servers')
|
||||
self.fill_menu()
|
||||
|
||||
def awakeFromNib(self):
|
||||
self.routingField.removeAllItems()
|
||||
tf = self.routingField.addItemWithTitle_
|
||||
tf('Send all traffic through this server')
|
||||
tf('Determine automatically')
|
||||
tf('Custom...')
|
||||
|
||||
self.latencyControlField.removeAllItems()
|
||||
tf = self.latencyControlField.addItemWithTitle_
|
||||
tf('Fast transfer')
|
||||
tf('Low latency')
|
||||
|
||||
# Hmm, even when I mark this as !enabled in the .nib, it still comes
|
||||
# through as enabled. So let's just disable it here (since we don't
|
||||
# support this feature yet).
|
||||
self.startAtLoginField.setEnabled_(False)
|
||||
self.startAtLoginField.setState_(False)
|
||||
self.autoReconnectField.setEnabled_(False)
|
||||
self.autoReconnectField.setState_(False)
|
||||
|
||||
self.load_servers()
|
||||
|
||||
# Initialize our menu item
|
||||
self.menu = NSMenu.alloc().initWithTitle_('Sshuttle')
|
||||
bar = NSStatusBar.systemStatusBar()
|
||||
statusitem = bar.statusItemWithLength_(NSVariableStatusItemLength)
|
||||
self.statusitem = statusitem
|
||||
self.img_idle = my.Image('chicken-tiny-bw', 'png')
|
||||
self.img_running = my.Image('chicken-tiny', 'png')
|
||||
self.img_err = my.Image('chicken-tiny-err', 'png')
|
||||
statusitem.setImage_(self.img_idle)
|
||||
statusitem.setHighlightMode_(True)
|
||||
statusitem.setMenu_(self.menu)
|
||||
self.fill_menu()
|
||||
|
||||
models.configchange_callback = my.DelayedCallback(self.save_servers)
|
||||
|
||||
def sc(server):
|
||||
if server.wantConnect():
|
||||
self._connect(server)
|
||||
else:
|
||||
self._disconnect(server)
|
||||
models.setconnect_callback = sc
|
||||
|
||||
|
||||
# Note: NSApplicationMain calls sys.exit(), so this never returns.
|
||||
NSApplicationMain(sys.argv)
|
@@ -1,166 +0,0 @@
|
||||
from AppKit import *
|
||||
import my
|
||||
|
||||
|
||||
configchange_callback = setconnect_callback = None
|
||||
|
||||
|
||||
def config_changed():
|
||||
if configchange_callback:
|
||||
configchange_callback()
|
||||
|
||||
|
||||
def _validate_ip(v):
|
||||
parts = v.split('.')[:4]
|
||||
if len(parts) < 4:
|
||||
parts += ['0'] * (4 - len(parts))
|
||||
for i in range(4):
|
||||
n = my.atoi(parts[i])
|
||||
if n < 0:
|
||||
n = 0
|
||||
elif n > 255:
|
||||
n = 255
|
||||
parts[i] = str(n)
|
||||
return '.'.join(parts)
|
||||
|
||||
|
||||
def _validate_width(v):
|
||||
n = my.atoi(v)
|
||||
if n < 0:
|
||||
n = 0
|
||||
elif n > 32:
|
||||
n = 32
|
||||
return n
|
||||
|
||||
|
||||
class SshuttleNet(NSObject):
|
||||
def subnet(self):
|
||||
return getattr(self, '_k_subnet', None)
|
||||
def setSubnet_(self, v):
|
||||
self._k_subnet = v
|
||||
config_changed()
|
||||
@objc.accessor
|
||||
def validateSubnet_error_(self, value, error):
|
||||
#print 'validateSubnet!'
|
||||
return True, _validate_ip(value), error
|
||||
|
||||
def width(self):
|
||||
return getattr(self, '_k_width', 24)
|
||||
def setWidth_(self, v):
|
||||
self._k_width = v
|
||||
config_changed()
|
||||
@objc.accessor
|
||||
def validateWidth_error_(self, value, error):
|
||||
#print 'validateWidth!'
|
||||
return True, _validate_width(value), error
|
||||
|
||||
NET_ALL = 0
|
||||
NET_AUTO = 1
|
||||
NET_MANUAL = 2
|
||||
|
||||
LAT_BANDWIDTH = 0
|
||||
LAT_INTERACTIVE = 1
|
||||
|
||||
class SshuttleServer(NSObject):
|
||||
def init(self):
|
||||
self = super(SshuttleServer, self).init()
|
||||
config_changed()
|
||||
return self
|
||||
|
||||
def wantConnect(self):
|
||||
return getattr(self, '_k_wantconnect', False)
|
||||
def setWantConnect_(self, v):
|
||||
self._k_wantconnect = v
|
||||
self.setError_(None)
|
||||
config_changed()
|
||||
if setconnect_callback: setconnect_callback(self)
|
||||
|
||||
def connected(self):
|
||||
return getattr(self, '_k_connected', False)
|
||||
def setConnected_(self, v):
|
||||
print 'setConnected of %r to %r' % (self, v)
|
||||
self._k_connected = v
|
||||
if v: self.setError_(None) # connected ok, so no error
|
||||
config_changed()
|
||||
|
||||
def error(self):
|
||||
return getattr(self, '_k_error', None)
|
||||
def setError_(self, v):
|
||||
self._k_error = v
|
||||
config_changed()
|
||||
|
||||
def isValid(self):
|
||||
if not self.host():
|
||||
return False
|
||||
if self.autoNets() == NET_MANUAL and not len(list(self.nets())):
|
||||
return False
|
||||
return True
|
||||
|
||||
def title(self):
|
||||
host = self.host()
|
||||
if not host:
|
||||
return host
|
||||
an = self.autoNets()
|
||||
suffix = ""
|
||||
if an == NET_ALL:
|
||||
suffix = " (all traffic)"
|
||||
elif an == NET_MANUAL:
|
||||
n = self.nets()
|
||||
suffix = ' (%d subnet%s)' % (len(n), len(n)!=1 and 's' or '')
|
||||
return self.host() + suffix
|
||||
def setTitle_(self, v):
|
||||
# title is always auto-generated
|
||||
config_changed()
|
||||
|
||||
def host(self):
|
||||
return getattr(self, '_k_host', None)
|
||||
def setHost_(self, v):
|
||||
self._k_host = v
|
||||
self.setTitle_(None)
|
||||
config_changed()
|
||||
@objc.accessor
|
||||
def validateHost_error_(self, value, error):
|
||||
#print 'validatehost! %r %r %r' % (self, value, error)
|
||||
while value.startswith('-'):
|
||||
value = value[1:]
|
||||
return True, value, error
|
||||
|
||||
def nets(self):
|
||||
return getattr(self, '_k_nets', [])
|
||||
def setNets_(self, v):
|
||||
self._k_nets = v
|
||||
self.setTitle_(None)
|
||||
config_changed()
|
||||
def netsHidden(self):
|
||||
#print 'checking netsHidden'
|
||||
return self.autoNets() != NET_MANUAL
|
||||
def setNetsHidden_(self, v):
|
||||
config_changed()
|
||||
#print 'setting netsHidden to %r' % v
|
||||
|
||||
def autoNets(self):
|
||||
return getattr(self, '_k_autoNets', NET_AUTO)
|
||||
def setAutoNets_(self, v):
|
||||
self._k_autoNets = v
|
||||
self.setNetsHidden_(-1)
|
||||
self.setUseDns_(v == NET_ALL)
|
||||
self.setTitle_(None)
|
||||
config_changed()
|
||||
|
||||
def autoHosts(self):
|
||||
return getattr(self, '_k_autoHosts', True)
|
||||
def setAutoHosts_(self, v):
|
||||
self._k_autoHosts = v
|
||||
config_changed()
|
||||
|
||||
def useDns(self):
|
||||
return getattr(self, '_k_useDns', False)
|
||||
def setUseDns_(self, v):
|
||||
self._k_useDns = v
|
||||
config_changed()
|
||||
|
||||
def latencyControl(self):
|
||||
return getattr(self, '_k_latencyControl', LAT_INTERACTIVE)
|
||||
def setLatencyControl_(self, v):
|
||||
self._k_latencyControl = v
|
||||
config_changed()
|
Binary file not shown.
@@ -1,62 +0,0 @@
import sys, os
from AppKit import *
import PyObjCTools.AppHelper


def bundle_path(name, typ):
    if typ:
        return NSBundle.mainBundle().pathForResource_ofType_(name, typ)
    else:
        return os.path.join(NSBundle.mainBundle().resourcePath(), name)


# Load an NSData using a python string
def Data(s):
    return NSData.alloc().initWithBytes_length_(s, len(s))


# Load a property list from a file in the application bundle.
def PList(name):
    path = bundle_path(name, 'plist')
    return NSDictionary.dictionaryWithContentsOfFile_(path)


# Load an NSImage from a file in the application bundle.
def Image(name, ext):
    bytes = open(bundle_path(name, ext)).read()
    img = NSImage.alloc().initWithData_(Data(bytes))
    return img


# Return the NSUserDefaults shared object.
def Defaults():
    return NSUserDefaults.standardUserDefaults()


# Usage:
#   f = DelayedCallback(func, args...)
# later:
#   f()
#
# When you call f(), it will schedule a call to func() next time the
# ObjC event loop iterates.  Multiple calls to f() in a single iteration
# will only result in one call to func().
#
def DelayedCallback(func, *args, **kwargs):
    flag = [0]
    def _go():
        if flag[0]:
            print 'running %r (flag=%r)' % (func, flag)
            flag[0] = 0
            func(*args, **kwargs)
    def call():
        flag[0] += 1
        PyObjCTools.AppHelper.callAfter(_go)
    return call


def atoi(s):
    try:
        return int(s)
    except ValueError:
        return 0
Binary file not shown.
@@ -1,26 +0,0 @@
import sys, zlib

z = zlib.decompressobj()
mainmod = sys.modules[__name__]
while 1:
    name = sys.stdin.readline().strip()
    if name:
        nbytes = int(sys.stdin.readline())
        if verbosity >= 2:
            sys.stderr.write('server: assembling %r (%d bytes)\n'
                             % (name, nbytes))
        content = z.decompress(sys.stdin.read(nbytes))
        exec compile(content, name, "exec")

        # FIXME: this crushes everything into a single module namespace,
        # then makes each of the module names point at this one.  Gross.
        assert(name.endswith('.py'))
        modname = name[:-3]
        mainmod.__dict__[modname] = mainmod
    else:
        break

verbose = verbosity
sys.stderr.flush()
sys.stdout.flush()
main()
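The bootstrap loop above defines a small wire format: a module name line, a
line giving the size of a zlib chunk, then that many compressed bytes, with
an empty name line ending the stream (it also assumes that `verbosity` and
`main` have been defined by content exec'd earlier in the stream). A sender
for this format, sketched here in Python 3 purely as an illustration (the
real client side is not shown in this diff), could look like:

import zlib


def send_modules(out, modules):
    # modules: iterable of (name, source_bytes) pairs, e.g. ('helpers.py', b'...')
    z = zlib.compressobj(9)
    for name, content in modules:
        # A single shared compressor plus a sync flush keeps the stream
        # compatible with the one streaming decompressobj used by the receiver.
        blob = z.compress(content) + z.flush(zlib.Z_SYNC_FLUSH)
        out.write(name.encode() + b'\n')   # name line
        out.write(b'%d\n' % len(blob))     # size of the compressed chunk
        out.write(blob)                    # chunk payload
    out.write(b'\n')                       # empty name line terminates the loop
    out.flush()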
@@ -1,387 +0,0 @@
|
||||
import struct, socket, select, errno, re, signal, time
|
||||
import compat.ssubprocess as ssubprocess
|
||||
import helpers, ssnet, ssh, ssyslog
|
||||
from ssnet import SockWrapper, Handler, Proxy, Mux, MuxWrapper
|
||||
from helpers import *
|
||||
|
||||
_extra_fd = os.open('/dev/null', os.O_RDONLY)
|
||||
|
||||
def got_signal(signum, frame):
|
||||
log('exiting on signal %d\n' % signum)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
_pidname = None
|
||||
def check_daemon(pidfile):
|
||||
global _pidname
|
||||
_pidname = os.path.abspath(pidfile)
|
||||
try:
|
||||
oldpid = open(_pidname).read(1024)
|
||||
except IOError, e:
|
||||
if e.errno == errno.ENOENT:
|
||||
return # no pidfile, ok
|
||||
else:
|
||||
raise Fatal("can't read %s: %s" % (_pidname, e))
|
||||
if not oldpid:
|
||||
os.unlink(_pidname)
|
||||
return # invalid pidfile, ok
|
||||
oldpid = int(oldpid.strip() or 0)
|
||||
if oldpid <= 0:
|
||||
os.unlink(_pidname)
|
||||
return # invalid pidfile, ok
|
||||
try:
|
||||
os.kill(oldpid, 0)
|
||||
except OSError, e:
|
||||
if e.errno == errno.ESRCH:
|
||||
os.unlink(_pidname)
|
||||
return # outdated pidfile, ok
|
||||
elif e.errno == errno.EPERM:
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
raise Fatal("%s: sshuttle is already running (pid=%d)"
|
||||
% (_pidname, oldpid))
|
||||
|
||||
|
||||
def daemonize():
|
||||
if os.fork():
|
||||
os._exit(0)
|
||||
os.setsid()
|
||||
if os.fork():
|
||||
os._exit(0)
|
||||
|
||||
outfd = os.open(_pidname, os.O_WRONLY|os.O_CREAT|os.O_EXCL, 0666)
|
||||
try:
|
||||
os.write(outfd, '%d\n' % os.getpid())
|
||||
finally:
|
||||
os.close(outfd)
|
||||
os.chdir("/")
|
||||
|
||||
# Normal exit when killed, or try/finally won't work and the pidfile won't
|
||||
# be deleted.
|
||||
signal.signal(signal.SIGTERM, got_signal)
|
||||
|
||||
si = open('/dev/null', 'r+')
|
||||
os.dup2(si.fileno(), 0)
|
||||
os.dup2(si.fileno(), 1)
|
||||
si.close()
|
||||
|
||||
ssyslog.stderr_to_syslog()
|
||||
|
||||
|
||||
def daemon_cleanup():
|
||||
try:
|
||||
os.unlink(_pidname)
|
||||
except OSError, e:
|
||||
if e.errno == errno.ENOENT:
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
def original_dst(sock):
|
||||
try:
|
||||
SO_ORIGINAL_DST = 80
|
||||
SOCKADDR_MIN = 16
|
||||
sockaddr_in = sock.getsockopt(socket.SOL_IP,
|
||||
SO_ORIGINAL_DST, SOCKADDR_MIN)
|
||||
(proto, port, a,b,c,d) = struct.unpack('!HHBBBB', sockaddr_in[:8])
|
||||
assert(socket.htons(proto) == socket.AF_INET)
|
||||
ip = '%d.%d.%d.%d' % (a,b,c,d)
|
||||
return (ip,port)
|
||||
except socket.error, e:
|
||||
if e.args[0] == errno.ENOPROTOOPT:
|
||||
return sock.getsockname()
|
||||
raise
|
||||
|
||||
|
||||
class FirewallClient:
|
||||
def __init__(self, port, subnets_include, subnets_exclude, dnsport):
|
||||
self.port = port
|
||||
self.auto_nets = []
|
||||
self.subnets_include = subnets_include
|
||||
self.subnets_exclude = subnets_exclude
|
||||
self.dnsport = dnsport
|
||||
argvbase = ([sys.argv[0]] +
|
||||
['-v'] * (helpers.verbose or 0) +
|
||||
['--firewall', str(port), str(dnsport)])
|
||||
if ssyslog._p:
|
||||
argvbase += ['--syslog']
|
||||
argv_tries = [
|
||||
['sudo', '-p', '[local sudo] Password: '] + argvbase,
|
||||
['su', '-c', ' '.join(argvbase)],
|
||||
argvbase
|
||||
]
|
||||
|
||||
# we can't use stdin/stdout=subprocess.PIPE here, as we normally would,
|
||||
# because stupid Linux 'su' requires that stdin be attached to a tty.
|
||||
# Instead, attach a *bidirectional* socket to its stdout, and use
|
||||
# that for talking in both directions.
|
||||
(s1,s2) = socket.socketpair()
|
||||
def setup():
|
||||
# run in the child process
|
||||
s2.close()
|
||||
e = None
|
||||
if os.getuid() == 0:
|
||||
argv_tries = argv_tries[-1:] # last entry only
|
||||
for argv in argv_tries:
|
||||
try:
|
||||
if argv[0] == 'su':
|
||||
sys.stderr.write('[local su] ')
|
||||
self.p = ssubprocess.Popen(argv, stdout=s1, preexec_fn=setup)
|
||||
e = None
|
||||
break
|
||||
except OSError, e:
|
||||
pass
|
||||
self.argv = argv
|
||||
s1.close()
|
||||
self.pfile = s2.makefile('wb+')
|
||||
if e:
|
||||
log('Spawning firewall manager: %r\n' % self.argv)
|
||||
raise Fatal(e)
|
||||
line = self.pfile.readline()
|
||||
self.check()
|
||||
if line != 'READY\n':
|
||||
raise Fatal('%r expected READY, got %r' % (self.argv, line))
|
||||
|
||||
def check(self):
|
||||
rv = self.p.poll()
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (self.argv, rv))
|
||||
|
||||
def start(self):
|
||||
self.pfile.write('ROUTES\n')
|
||||
for (ip,width) in self.subnets_include+self.auto_nets:
|
||||
self.pfile.write('%d,0,%s\n' % (width, ip))
|
||||
for (ip,width) in self.subnets_exclude:
|
||||
self.pfile.write('%d,1,%s\n' % (width, ip))
|
||||
self.pfile.write('GO\n')
|
||||
self.pfile.flush()
|
||||
line = self.pfile.readline()
|
||||
self.check()
|
||||
if line != 'STARTED\n':
|
||||
raise Fatal('%r expected STARTED, got %r' % (self.argv, line))
|
||||
|
||||
def sethostip(self, hostname, ip):
|
||||
assert(not re.search(r'[^-\w]', hostname))
|
||||
assert(not re.search(r'[^0-9.]', ip))
|
||||
self.pfile.write('HOST %s,%s\n' % (hostname, ip))
|
||||
self.pfile.flush()
|
||||
|
||||
def done(self):
|
||||
self.pfile.close()
|
||||
rv = self.p.wait()
|
||||
if rv:
|
||||
raise Fatal('cleanup: %r returned %d' % (self.argv, rv))
|
||||
|
||||
|
||||
def _main(listener, fw, ssh_cmd, remotename, python, latency_control,
|
||||
dnslistener, seed_hosts, auto_nets,
|
||||
syslog, daemon):
|
||||
handlers = []
|
||||
if helpers.verbose >= 1:
|
||||
helpers.logprefix = 'c : '
|
||||
else:
|
||||
helpers.logprefix = 'client: '
|
||||
debug1('connecting to server...\n')
|
||||
|
||||
try:
|
||||
(serverproc, serversock) = ssh.connect(ssh_cmd, remotename, python,
|
||||
stderr=ssyslog._p and ssyslog._p.stdin,
|
||||
options=dict(latency_control=latency_control))
|
||||
except socket.error, e:
|
||||
if e.args[0] == errno.EPIPE:
|
||||
raise Fatal("failed to establish ssh session (1)")
|
||||
else:
|
||||
raise
|
||||
mux = Mux(serversock, serversock)
|
||||
handlers.append(mux)
|
||||
|
||||
expected = 'SSHUTTLE0001'
|
||||
try:
|
||||
initstring = serversock.recv(len(expected))
|
||||
except socket.error, e:
|
||||
if e.args[0] == errno.ECONNRESET:
|
||||
raise Fatal("failed to establish ssh session (2)")
|
||||
else:
|
||||
raise
|
||||
|
||||
rv = serverproc.poll()
|
||||
if rv:
|
||||
raise Fatal('server died with error code %d' % rv)
|
||||
|
||||
if initstring != expected:
|
||||
raise Fatal('expected server init string %r; got %r'
|
||||
% (expected, initstring))
|
||||
debug1('connected.\n')
|
||||
print 'Connected.'
|
||||
sys.stdout.flush()
|
||||
if daemon:
|
||||
daemonize()
|
||||
log('daemonizing (%s).\n' % _pidname)
|
||||
elif syslog:
|
||||
debug1('switching to syslog.\n')
|
||||
ssyslog.stderr_to_syslog()
|
||||
|
||||
def onroutes(routestr):
|
||||
if auto_nets:
|
||||
for line in routestr.strip().split('\n'):
|
||||
(ip,width) = line.split(',', 1)
|
||||
fw.auto_nets.append((ip,int(width)))
|
||||
|
||||
# we definitely want to do this *after* starting ssh, or we might end
|
||||
# up intercepting the ssh connection!
|
||||
#
|
||||
# Moreover, now that we have the --auto-nets option, we have to wait
|
||||
# for the server to send us that message anyway. Even if we haven't
|
||||
# set --auto-nets, we might as well wait for the message first, then
|
||||
# ignore its contents.
|
||||
mux.got_routes = None
|
||||
fw.start()
|
||||
mux.got_routes = onroutes
|
||||
|
||||
def onhostlist(hostlist):
|
||||
debug2('got host list: %r\n' % hostlist)
|
||||
for line in hostlist.strip().split():
|
||||
if line:
|
||||
name,ip = line.split(',', 1)
|
||||
fw.sethostip(name, ip)
|
||||
mux.got_host_list = onhostlist
|
||||
|
||||
def onaccept():
|
||||
global _extra_fd
|
||||
try:
|
||||
sock,srcip = listener.accept()
|
||||
except socket.error, e:
|
||||
if e.args[0] in [errno.EMFILE, errno.ENFILE]:
|
||||
debug1('Rejected incoming connection: too many open files!\n')
|
||||
# free up an fd so we can eat the connection
|
||||
os.close(_extra_fd)
|
||||
try:
|
||||
sock,srcip = listener.accept()
|
||||
sock.close()
|
||||
finally:
|
||||
_extra_fd = os.open('/dev/null', os.O_RDONLY)
|
||||
return
|
||||
else:
|
||||
raise
|
||||
dstip = original_dst(sock)
|
||||
debug1('Accept: %s:%r -> %s:%r.\n' % (srcip[0],srcip[1],
|
||||
dstip[0],dstip[1]))
|
||||
if dstip[1] == listener.getsockname()[1] and islocal(dstip[0]):
|
||||
debug1("-- ignored: that's my address!\n")
|
||||
sock.close()
|
||||
return
|
||||
chan = mux.next_channel()
|
||||
if not chan:
|
||||
log('warning: too many open channels. Discarded connection.\n')
|
||||
sock.close()
|
||||
return
|
||||
mux.send(chan, ssnet.CMD_CONNECT, '%s,%s' % dstip)
|
||||
outwrap = MuxWrapper(mux, chan)
|
||||
handlers.append(Proxy(SockWrapper(sock, sock), outwrap))
|
||||
handlers.append(Handler([listener], onaccept))
|
||||
|
||||
dnsreqs = {}
|
||||
def dns_done(chan, data):
|
||||
peer,timeout = dnsreqs.get(chan) or (None,None)
|
||||
debug3('dns_done: channel=%r peer=%r\n' % (chan, peer))
|
||||
if peer:
|
||||
del dnsreqs[chan]
|
||||
debug3('doing sendto %r\n' % (peer,))
|
||||
dnslistener.sendto(data, peer)
|
||||
def ondns():
|
||||
pkt,peer = dnslistener.recvfrom(4096)
|
||||
now = time.time()
|
||||
if pkt:
|
||||
debug1('DNS request from %r: %d bytes\n' % (peer, len(pkt)))
|
||||
chan = mux.next_channel()
|
||||
dnsreqs[chan] = peer,now+30
|
||||
mux.send(chan, ssnet.CMD_DNS_REQ, pkt)
|
||||
mux.channels[chan] = lambda cmd,data: dns_done(chan,data)
|
||||
for chan,(peer,timeout) in dnsreqs.items():
|
||||
if timeout < now:
|
||||
del dnsreqs[chan]
|
||||
debug3('Remaining DNS requests: %d\n' % len(dnsreqs))
|
||||
if dnslistener:
|
||||
handlers.append(Handler([dnslistener], ondns))
|
||||
|
||||
if seed_hosts != None:
|
||||
debug1('seed_hosts: %r\n' % seed_hosts)
|
||||
mux.send(0, ssnet.CMD_HOST_REQ, '\n'.join(seed_hosts))
|
||||
|
||||
while 1:
|
||||
rv = serverproc.poll()
|
||||
if rv:
|
||||
raise Fatal('server died with error code %d' % rv)
|
||||
|
||||
ssnet.runonce(handlers, mux)
|
||||
if latency_control:
|
||||
mux.check_fullness()
|
||||
mux.callback()
|
||||
|
||||
|
||||
def main(listenip, ssh_cmd, remotename, python, latency_control, dns,
|
||||
seed_hosts, auto_nets,
|
||||
subnets_include, subnets_exclude, syslog, daemon, pidfile):
|
||||
if syslog:
|
||||
ssyslog.start_syslog()
|
||||
if daemon:
|
||||
try:
|
||||
check_daemon(pidfile)
|
||||
except Fatal, e:
|
||||
log("%s\n" % e)
|
||||
return 5
|
||||
debug1('Starting sshuttle proxy.\n')
|
||||
|
||||
if listenip[1]:
|
||||
ports = [listenip[1]]
|
||||
else:
|
||||
ports = xrange(12300,9000,-1)
|
||||
last_e = None
|
||||
bound = False
|
||||
debug2('Binding:')
|
||||
for port in ports:
|
||||
debug2(' %d' % port)
|
||||
listener = socket.socket()
|
||||
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
dnslistener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
dnslistener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
try:
|
||||
listener.bind((listenip[0], port))
|
||||
dnslistener.bind((listenip[0], port))
|
||||
bound = True
|
||||
break
|
||||
except socket.error, e:
|
||||
last_e = e
|
||||
debug2('\n')
|
||||
if not bound:
|
||||
assert(last_e)
|
||||
raise last_e
|
||||
listener.listen(10)
|
||||
listenip = listener.getsockname()
|
||||
debug1('Listening on %r.\n' % (listenip,))
|
||||
|
||||
if dns:
|
||||
dnsip = dnslistener.getsockname()
|
||||
debug1('DNS listening on %r.\n' % (dnsip,))
|
||||
dnsport = dnsip[1]
|
||||
else:
|
||||
dnsport = 0
|
||||
dnslistener = None
|
||||
|
||||
fw = FirewallClient(listenip[1], subnets_include, subnets_exclude, dnsport)
|
||||
|
||||
try:
|
||||
return _main(listener, fw, ssh_cmd, remotename,
|
||||
python, latency_control, dnslistener,
|
||||
seed_hosts, auto_nets, syslog, daemon)
|
||||
finally:
|
||||
try:
|
||||
if daemon:
|
||||
# it's not our child anymore; can't waitpid
|
||||
fw.p.returncode = 0
|
||||
fw.done()
|
||||
finally:
|
||||
if daemon:
|
||||
daemon_cleanup()
|
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
Binary file not shown.
@@ -1,441 +0,0 @@
|
||||
import re, errno, socket, select, struct
|
||||
import compat.ssubprocess as ssubprocess
|
||||
import helpers, ssyslog
|
||||
from helpers import *
|
||||
|
||||
# python doesn't have a definition for this
|
||||
IPPROTO_DIVERT = 254
|
||||
|
||||
|
||||
def ipt_chain_exists(name):
|
||||
argv = ['iptables', '-t', 'nat', '-nL']
|
||||
p = ssubprocess.Popen(argv, stdout = ssubprocess.PIPE)
|
||||
for line in p.stdout:
|
||||
if line.startswith('Chain %s ' % name):
|
||||
return True
|
||||
rv = p.wait()
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
|
||||
|
||||
def ipt(*args):
|
||||
argv = ['iptables', '-t', 'nat'] + list(args)
|
||||
debug1('>> %s\n' % ' '.join(argv))
|
||||
rv = ssubprocess.call(argv)
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
|
||||
|
||||
_no_ttl_module = False
|
||||
def ipt_ttl(*args):
|
||||
global _no_ttl_module
|
||||
if not _no_ttl_module:
|
||||
# we avoid infinite loops by generating server-side connections
|
||||
# with ttl 42. This makes the client side not recapture those
|
||||
# connections, in case client == server.
|
||||
try:
|
||||
argsplus = list(args) + ['-m', 'ttl', '!', '--ttl', '42']
|
||||
ipt(*argsplus)
|
||||
except Fatal:
|
||||
ipt(*args)
|
||||
# we only get here if the non-ttl attempt succeeds
|
||||
log('sshuttle: warning: your iptables is missing '
|
||||
'the ttl module.\n')
|
||||
_no_ttl_module = True
|
||||
else:
|
||||
ipt(*args)
|
||||
|
||||
|
||||
|
||||
# We name the chain based on the transproxy port number so that it's possible
|
||||
# to run multiple copies of sshuttle at the same time. Of course, the
|
||||
# multiple copies shouldn't have overlapping subnets, or only the most-
|
||||
# recently-started one will win (because we use "-I OUTPUT 1" instead of
|
||||
# "-A OUTPUT").
|
||||
def do_iptables(port, dnsport, subnets):
|
||||
chain = 'sshuttle-%s' % port
|
||||
|
||||
# basic cleanup/setup of chains
|
||||
if ipt_chain_exists(chain):
|
||||
ipt('-D', 'OUTPUT', '-j', chain)
|
||||
ipt('-D', 'PREROUTING', '-j', chain)
|
||||
ipt('-F', chain)
|
||||
ipt('-X', chain)
|
||||
|
||||
if subnets or dnsport:
|
||||
ipt('-N', chain)
|
||||
ipt('-F', chain)
|
||||
ipt('-I', 'OUTPUT', '1', '-j', chain)
|
||||
ipt('-I', 'PREROUTING', '1', '-j', chain)
|
||||
|
||||
if subnets:
|
||||
# create new subnet entries. Note that we're sorting in a very
|
||||
# particular order: we need to go from most-specific (largest swidth)
|
||||
# to least-specific, and at any given level of specificity, we want
|
||||
# excludes to come first. That's why the columns are in such a non-
|
||||
# intuitive order.
|
||||
for swidth,sexclude,snet in sorted(subnets, reverse=True):
|
||||
if sexclude:
|
||||
ipt('-A', chain, '-j', 'RETURN',
|
||||
'--dest', '%s/%s' % (snet,swidth),
|
||||
'-p', 'tcp')
|
||||
else:
|
||||
ipt_ttl('-A', chain, '-j', 'REDIRECT',
|
||||
'--dest', '%s/%s' % (snet,swidth),
|
||||
'-p', 'tcp',
|
||||
'--to-ports', str(port))
|
||||
|
||||
if dnsport:
|
||||
nslist = resolvconf_nameservers()
|
||||
for ip in nslist:
|
||||
ipt_ttl('-A', chain, '-j', 'REDIRECT',
|
||||
'--dest', '%s/32' % ip,
|
||||
'-p', 'udp',
|
||||
'--dport', '53',
|
||||
'--to-ports', str(dnsport))
|
||||
|
||||
|
||||
def ipfw_rule_exists(n):
|
||||
argv = ['ipfw', 'list']
|
||||
p = ssubprocess.Popen(argv, stdout = ssubprocess.PIPE)
|
||||
found = False
|
||||
for line in p.stdout:
|
||||
if line.startswith('%05d ' % n):
|
||||
if not ('ipttl 42' in line
|
||||
or ('skipto %d' % (n+1)) in line
|
||||
or 'check-state' in line):
|
||||
log('non-sshuttle ipfw rule: %r\n' % line.strip())
|
||||
raise Fatal('non-sshuttle ipfw rule #%d already exists!' % n)
|
||||
found = True
|
||||
rv = p.wait()
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
return found
|
||||
|
||||
|
||||
_oldctls = {}
|
||||
def _fill_oldctls(prefix):
|
||||
argv = ['sysctl', prefix]
|
||||
p = ssubprocess.Popen(argv, stdout = ssubprocess.PIPE)
|
||||
for line in p.stdout:
|
||||
assert(line[-1] == '\n')
|
||||
(k,v) = line[:-1].split(': ', 1)
|
||||
_oldctls[k] = v
|
||||
rv = p.wait()
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
if not line:
|
||||
raise Fatal('%r returned no data' % (argv,))
|
||||
|
||||
|
||||
def _sysctl_set(name, val):
|
||||
argv = ['sysctl', '-w', '%s=%s' % (name, val)]
|
||||
debug1('>> %s\n' % ' '.join(argv))
|
||||
return ssubprocess.call(argv, stdout = open('/dev/null', 'w'))
|
||||
|
||||
|
||||
_changedctls = []
|
||||
def sysctl_set(name, val, permanent=False):
|
||||
PREFIX = 'net.inet.ip'
|
||||
assert(name.startswith(PREFIX + '.'))
|
||||
val = str(val)
|
||||
if not _oldctls:
|
||||
_fill_oldctls(PREFIX)
|
||||
if not (name in _oldctls):
|
||||
debug1('>> No such sysctl: %r\n' % name)
|
||||
return
|
||||
oldval = _oldctls[name]
|
||||
if val != oldval:
|
||||
rv = _sysctl_set(name, val)
|
||||
if rv==0 and permanent:
|
||||
debug1('>> ...saving permanently in /etc/sysctl.conf\n')
|
||||
f = open('/etc/sysctl.conf', 'a')
|
||||
f.write('\n'
|
||||
'# Added by sshuttle\n'
|
||||
'%s=%s\n' % (name, val))
|
||||
f.close()
|
||||
else:
|
||||
_changedctls.append(name)
|
||||
|
||||
|
||||
def _udp_unpack(p):
|
||||
src = (socket.inet_ntoa(p[12:16]), struct.unpack('!H', p[20:22])[0])
|
||||
dst = (socket.inet_ntoa(p[16:20]), struct.unpack('!H', p[22:24])[0])
|
||||
return src, dst
|
||||
|
||||
|
||||
def _udp_repack(p, src, dst):
|
||||
addrs = socket.inet_aton(src[0]) + socket.inet_aton(dst[0])
|
||||
ports = struct.pack('!HH', src[1], dst[1])
|
||||
return p[:12] + addrs + ports + p[24:]
|
||||
|
||||
|
||||
_real_dns_server = [None]
|
||||
def _handle_diversion(divertsock, dnsport):
|
||||
p,tag = divertsock.recvfrom(4096)
|
||||
src,dst = _udp_unpack(p)
|
||||
debug3('got diverted packet from %r to %r\n' % (src, dst))
|
||||
if dst[1] == 53:
|
||||
# outgoing DNS
|
||||
debug3('...packet is a DNS request.\n')
|
||||
_real_dns_server[0] = dst
|
||||
dst = ('127.0.0.1', dnsport)
|
||||
elif src[1] == dnsport:
|
||||
if islocal(src[0]):
|
||||
debug3('...packet is a DNS response.\n')
|
||||
src = _real_dns_server[0]
|
||||
else:
|
||||
log('weird?! unexpected divert from %r to %r\n' % (src, dst))
|
||||
assert(0)
|
||||
newp = _udp_repack(p, src, dst)
|
||||
divertsock.sendto(newp, tag)
|
||||
|
||||
|
||||
def ipfw(*args):
|
||||
argv = ['ipfw', '-q'] + list(args)
|
||||
debug1('>> %s\n' % ' '.join(argv))
|
||||
rv = ssubprocess.call(argv)
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
|
||||
|
||||
def do_ipfw(port, dnsport, subnets):
|
||||
sport = str(port)
|
||||
xsport = str(port+1)
|
||||
|
||||
# cleanup any existing rules
|
||||
if ipfw_rule_exists(port):
|
||||
ipfw('delete', sport)
|
||||
|
||||
while _changedctls:
|
||||
name = _changedctls.pop()
|
||||
oldval = _oldctls[name]
|
||||
_sysctl_set(name, oldval)
|
||||
|
||||
if subnets or dnsport:
|
||||
sysctl_set('net.inet.ip.fw.enable', 1)
|
||||
sysctl_set('net.inet.ip.scopedroute', 0, permanent=True)
|
||||
|
||||
ipfw('add', sport, 'check-state', 'ip',
|
||||
'from', 'any', 'to', 'any')
|
||||
|
||||
if subnets:
|
||||
# create new subnet entries
|
||||
for swidth,sexclude,snet in sorted(subnets, reverse=True):
|
||||
if sexclude:
|
||||
ipfw('add', sport, 'skipto', xsport,
|
||||
'log', 'tcp',
|
||||
'from', 'any', 'to', '%s/%s' % (snet,swidth))
|
||||
else:
|
||||
ipfw('add', sport, 'fwd', '127.0.0.1,%d' % port,
|
||||
'log', 'tcp',
|
||||
'from', 'any', 'to', '%s/%s' % (snet,swidth),
|
||||
'not', 'ipttl', '42', 'keep-state', 'setup')
|
||||
|
||||
# This part is much crazier than it is on Linux, because MacOS (at least
|
||||
# 10.6, and probably other versions, and maybe FreeBSD too) doesn't
|
||||
# correctly fixup the dstip/dstport for UDP packets when it puts them
|
||||
# through a 'fwd' rule. It also doesn't fixup the srcip/srcport in the
|
||||
# response packet. In Linux iptables, all that happens magically for us,
|
||||
# so we just redirect the packets and relax.
|
||||
#
|
||||
# On MacOS, we have to fix the ports ourselves. For that, we use a
|
||||
# 'divert' socket, which receives raw packets and lets us mangle them.
|
||||
#
|
||||
# Here's how it works. Let's say the local DNS server is 1.1.1.1:53,
|
||||
# and the remote DNS server is 2.2.2.2:53, and the local transproxy port
|
||||
# is 10.0.0.1:12300, and a client machine is making a request from
|
||||
# 10.0.0.5:9999. We see a packet like this:
|
||||
# 10.0.0.5:9999 -> 1.1.1.1:53
|
||||
# Since the destip:port matches one of our local nameservers, it will
|
||||
# match a 'fwd' rule, thus grabbing it on the local machine. However,
|
||||
# the local kernel will then see a packet addressed to *:53 and
|
||||
# not know what to do with it; there's nobody listening on port 53. Thus,
|
||||
# we divert it, rewriting it into this:
|
||||
# 10.0.0.5:9999 -> 10.0.0.1:12300
|
||||
# This gets proxied out to the server, which sends it to 2.2.2.2:53,
|
||||
# and the answer comes back, and the proxy sends it back out like this:
|
||||
# 10.0.0.1:12300 -> 10.0.0.5:9999
|
||||
# But that's wrong! The original machine expected an answer from
|
||||
# 1.1.1.1:53, so we have to divert the *answer* and rewrite it:
|
||||
# 1.1.1.1:53 -> 10.0.0.5:9999
|
||||
#
|
||||
# See? Easy stuff.
|
||||
if dnsport:
|
||||
divertsock = socket.socket(socket.AF_INET, socket.SOCK_RAW,
|
||||
IPPROTO_DIVERT)
|
||||
divertsock.bind(('0.0.0.0', port)) # IP field is ignored
|
||||
|
||||
nslist = resolvconf_nameservers()
|
||||
for ip in nslist:
|
||||
# relabel and then catch outgoing DNS requests
|
||||
ipfw('add', sport, 'divert', sport,
|
||||
'log', 'udp',
|
||||
'from', 'any', 'to', '%s/32' % ip, '53',
|
||||
'not', 'ipttl', '42')
|
||||
# relabel DNS responses
|
||||
ipfw('add', sport, 'divert', sport,
|
||||
'log', 'udp',
|
||||
'from', 'any', str(dnsport), 'to', 'any',
|
||||
'not', 'ipttl', '42')
|
||||
|
||||
def do_wait():
|
||||
while 1:
|
||||
r,w,x = select.select([sys.stdin, divertsock], [], [])
|
||||
if divertsock in r:
|
||||
_handle_diversion(divertsock, dnsport)
|
||||
if sys.stdin in r:
|
||||
return
|
||||
else:
|
||||
do_wait = None
|
||||
|
||||
return do_wait
|
||||
|
||||
|
||||
def program_exists(name):
|
||||
paths = (os.getenv('PATH') or os.defpath).split(os.pathsep)
|
||||
for p in paths:
|
||||
fn = '%s/%s' % (p, name)
|
||||
if os.path.exists(fn):
|
||||
return not os.path.isdir(fn) and os.access(fn, os.X_OK)
|
||||
|
||||
|
||||
hostmap = {}
|
||||
def rewrite_etc_hosts(port):
|
||||
HOSTSFILE='/etc/hosts'
|
||||
BAKFILE='%s.sbak' % HOSTSFILE
|
||||
APPEND='# sshuttle-firewall-%d AUTOCREATED' % port
|
||||
old_content = ''
|
||||
st = None
|
||||
try:
|
||||
old_content = open(HOSTSFILE).read()
|
||||
st = os.stat(HOSTSFILE)
|
||||
except IOError, e:
|
||||
if e.errno == errno.ENOENT:
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
if old_content.strip() and not os.path.exists(BAKFILE):
|
||||
os.link(HOSTSFILE, BAKFILE)
|
||||
tmpname = "%s.%d.tmp" % (HOSTSFILE, port)
|
||||
f = open(tmpname, 'w')
|
||||
for line in old_content.rstrip().split('\n'):
|
||||
if line.find(APPEND) >= 0:
|
||||
continue
|
||||
f.write('%s\n' % line)
|
||||
for (name,ip) in sorted(hostmap.items()):
|
||||
f.write('%-30s %s\n' % ('%s %s' % (ip,name), APPEND))
|
||||
f.close()
|
||||
|
||||
if st:
|
||||
os.chown(tmpname, st.st_uid, st.st_gid)
|
||||
os.chmod(tmpname, st.st_mode)
|
||||
else:
|
||||
os.chown(tmpname, 0, 0)
|
||||
os.chmod(tmpname, 0644)
|
||||
os.rename(tmpname, HOSTSFILE)
|
||||
|
||||
|
||||
def restore_etc_hosts(port):
|
||||
global hostmap
|
||||
hostmap = {}
|
||||
rewrite_etc_hosts(port)
|
||||
|
||||
|
||||
# This is some voodoo for setting up the kernel's transparent
|
||||
# proxying stuff. If subnets is empty, we just delete our sshuttle rules;
|
||||
# otherwise we delete it, then make them from scratch.
|
||||
#
|
||||
# This code is supposed to clean up after itself by deleting its rules on
|
||||
# exit. In case that fails, it's not the end of the world; future runs will
|
||||
# supercede it in the transproxy list, at least, so the leftover rules
|
||||
# are hopefully harmless.
|
||||
def main(port, dnsport, syslog):
|
||||
assert(port > 0)
|
||||
assert(port <= 65535)
|
||||
assert(dnsport >= 0)
|
||||
assert(dnsport <= 65535)
|
||||
|
||||
if os.getuid() != 0:
|
||||
raise Fatal('you must be root (or enable su/sudo) to set the firewall')
|
||||
|
||||
if program_exists('ipfw'):
|
||||
do_it = do_ipfw
|
||||
elif program_exists('iptables'):
|
||||
do_it = do_iptables
|
||||
else:
|
||||
raise Fatal("can't find either ipfw or iptables; check your PATH")
|
||||
|
||||
# because of limitations of the 'su' command, the *real* stdin/stdout
|
||||
# are both attached to stdout initially. Clone stdout into stdin so we
|
||||
# can read from it.
|
||||
os.dup2(1, 0)
|
||||
|
||||
if syslog:
|
||||
ssyslog.start_syslog()
|
||||
ssyslog.stderr_to_syslog()
|
||||
|
||||
debug1('firewall manager ready.\n')
|
||||
sys.stdout.write('READY\n')
|
||||
sys.stdout.flush()
|
||||
|
||||
# ctrl-c shouldn't be passed along to me. When the main sshuttle dies,
|
||||
# I'll die automatically.
|
||||
os.setsid()
|
||||
|
||||
# we wait until we get some input before creating the rules. That way,
|
||||
# sshuttle can launch us as early as possible (and get sudo password
|
||||
# authentication as early in the startup process as possible).
|
||||
line = sys.stdin.readline(128)
|
||||
if not line:
|
||||
return # parent died; nothing to do
|
||||
|
||||
subnets = []
|
||||
if line != 'ROUTES\n':
|
||||
raise Fatal('firewall: expected ROUTES but got %r' % line)
|
||||
while 1:
|
||||
line = sys.stdin.readline(128)
|
||||
if not line:
|
||||
raise Fatal('firewall: expected route but got %r' % line)
|
||||
elif line == 'GO\n':
|
||||
break
|
||||
try:
|
||||
(width,exclude,ip) = line.strip().split(',', 2)
|
||||
except:
|
||||
raise Fatal('firewall: expected route or GO but got %r' % line)
|
||||
subnets.append((int(width), bool(int(exclude)), ip))
|
||||
|
||||
try:
|
||||
if line:
|
||||
debug1('firewall manager: starting transproxy.\n')
|
||||
do_wait = do_it(port, dnsport, subnets)
|
||||
sys.stdout.write('STARTED\n')
|
||||
|
||||
try:
|
||||
sys.stdout.flush()
|
||||
except IOError:
|
||||
# the parent process died for some reason; he's surely been loud
|
||||
# enough, so no reason to report another error
|
||||
return
|
||||
|
||||
# Now we wait until EOF or any other kind of exception. We need
|
||||
# to stay running so that we don't need a *second* password
|
||||
# authentication at shutdown time - that cleanup is important!
|
||||
while 1:
|
||||
if do_wait: do_wait()
|
||||
line = sys.stdin.readline(128)
|
||||
if line.startswith('HOST '):
|
||||
(name,ip) = line[5:].strip().split(',', 1)
|
||||
hostmap[name] = ip
|
||||
rewrite_etc_hosts(port)
|
||||
elif line:
|
||||
raise Fatal('expected EOF, got %r' % line)
|
||||
else:
|
||||
break
|
||||
finally:
|
||||
try:
|
||||
debug1('firewall manager: undoing changes.\n')
|
||||
except:
|
||||
pass
|
||||
do_it(port, 0, [])
|
||||
restore_etc_hosts(port)
|
Binary file not shown.
@ -1,75 +0,0 @@
|
||||
import sys, os, socket
|
||||
|
||||
logprefix = ''
|
||||
verbose = 0
|
||||
|
||||
def log(s):
|
||||
try:
|
||||
sys.stdout.flush()
|
||||
sys.stderr.write(logprefix + s)
|
||||
sys.stderr.flush()
|
||||
except IOError:
|
||||
# this could happen if stderr gets forcibly disconnected, eg. because
|
||||
# our tty closes. That sucks, but it's no reason to abort the program.
|
||||
pass
|
||||
|
||||
def debug1(s):
|
||||
if verbose >= 1:
|
||||
log(s)
|
||||
|
||||
def debug2(s):
|
||||
if verbose >= 2:
|
||||
log(s)
|
||||
|
||||
def debug3(s):
|
||||
if verbose >= 3:
|
||||
log(s)
|
||||
|
||||
|
||||
class Fatal(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def list_contains_any(l, sub):
|
||||
for i in sub:
|
||||
if i in l:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def resolvconf_nameservers():
|
||||
l = []
|
||||
for line in open('/etc/resolv.conf'):
|
||||
words = line.lower().split()
|
||||
if len(words) >= 2 and words[0] == 'nameserver':
|
||||
l.append(words[1])
|
||||
return l
|
||||
|
||||
|
||||
def resolvconf_random_nameserver():
|
||||
l = resolvconf_nameservers()
|
||||
if l:
|
||||
if len(l) > 1:
|
||||
# don't import this unless we really need it
|
||||
import random
|
||||
random.shuffle(l)
|
||||
return l[0]
|
||||
else:
|
||||
return '127.0.0.1'
|
||||
|
||||
|
||||
def islocal(ip):
|
||||
sock = socket.socket()
|
||||
try:
|
||||
try:
|
||||
sock.bind((ip, 0))
|
||||
except socket.error, e:
|
||||
if e.args[0] == errno.EADDRNOTAVAIL:
|
||||
return False # not a local IP
|
||||
else:
|
||||
raise
|
||||
finally:
|
||||
sock.close()
|
||||
return True # it's a local IP, or there would have been an error
|
||||
|
||||
|
Binary file not shown.
@@ -1,277 +0,0 @@
|
||||
import time, socket, re, select, errno
|
||||
if not globals().get('skip_imports'):
|
||||
import compat.ssubprocess as ssubprocess
|
||||
import helpers
|
||||
from helpers import *
|
||||
|
||||
POLL_TIME = 60*15
|
||||
NETSTAT_POLL_TIME = 30
|
||||
CACHEFILE=os.path.expanduser('~/.sshuttle.hosts')
|
||||
|
||||
|
||||
_nmb_ok = True
|
||||
_smb_ok = True
|
||||
hostnames = {}
|
||||
queue = {}
|
||||
null = open('/dev/null', 'rb+')
|
||||
|
||||
|
||||
def _is_ip(s):
|
||||
return re.match(r'\d+\.\d+\.\d+\.\d+$', s)
|
||||
|
||||
|
||||
def write_host_cache():
|
||||
tmpname = '%s.%d.tmp' % (CACHEFILE, os.getpid())
|
||||
try:
|
||||
f = open(tmpname, 'wb')
|
||||
for name,ip in sorted(hostnames.items()):
|
||||
f.write('%s,%s\n' % (name, ip))
|
||||
f.close()
|
||||
os.rename(tmpname, CACHEFILE)
|
||||
finally:
|
||||
try:
|
||||
os.unlink(tmpname)
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
def read_host_cache():
|
||||
try:
|
||||
f = open(CACHEFILE)
|
||||
except IOError, e:
|
||||
if e.errno == errno.ENOENT:
|
||||
return
|
||||
else:
|
||||
raise
|
||||
for line in f:
|
||||
words = line.strip().split(',')
|
||||
if len(words) == 2:
|
||||
(name,ip) = words
|
||||
name = re.sub(r'[^-\w]', '-', name).strip()
|
||||
ip = re.sub(r'[^0-9.]', '', ip).strip()
|
||||
if name and ip:
|
||||
found_host(name, ip)
|
||||
|
||||
|
||||
def found_host(hostname, ip):
|
||||
hostname = re.sub(r'\..*', '', hostname)
|
||||
hostname = re.sub(r'[^-\w]', '_', hostname)
|
||||
if (ip.startswith('127.') or ip.startswith('255.')
|
||||
or hostname == 'localhost'):
|
||||
return
|
||||
oldip = hostnames.get(hostname)
|
||||
if oldip != ip:
|
||||
hostnames[hostname] = ip
|
||||
debug1('Found: %s: %s\n' % (hostname, ip))
|
||||
sys.stdout.write('%s,%s\n' % (hostname, ip))
|
||||
write_host_cache()
|
||||
|
||||
|
||||
def _check_etc_hosts():
|
||||
debug2(' > hosts\n')
|
||||
for line in open('/etc/hosts'):
|
||||
line = re.sub(r'#.*', '', line)
|
||||
words = line.strip().split()
|
||||
if not words:
|
||||
continue
|
||||
ip = words[0]
|
||||
names = words[1:]
|
||||
if _is_ip(ip):
|
||||
debug3('< %s %r\n' % (ip, names))
|
||||
for n in names:
|
||||
check_host(n)
|
||||
found_host(n, ip)
|
||||
|
||||
|
||||
def _check_revdns(ip):
|
||||
debug2(' > rev: %s\n' % ip)
|
||||
try:
|
||||
r = socket.gethostbyaddr(ip)
|
||||
debug3('< %s\n' % r[0])
|
||||
check_host(r[0])
|
||||
found_host(r[0], ip)
|
||||
except socket.herror, e:
|
||||
pass
|
||||
|
||||
|
||||
def _check_dns(hostname):
|
||||
debug2(' > dns: %s\n' % hostname)
|
||||
try:
|
||||
ip = socket.gethostbyname(hostname)
|
||||
debug3('< %s\n' % ip)
|
||||
check_host(ip)
|
||||
found_host(hostname, ip)
|
||||
except socket.gaierror, e:
|
||||
pass
|
||||
|
||||
|
||||
def _check_netstat():
|
||||
debug2(' > netstat\n')
|
||||
argv = ['netstat', '-n']
|
||||
try:
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
|
||||
content = p.stdout.read()
|
||||
p.wait()
|
||||
except OSError, e:
|
||||
log('%r failed: %r\n' % (argv, e))
|
||||
return
|
||||
|
||||
for ip in re.findall(r'\d+\.\d+\.\d+\.\d+', content):
|
||||
debug3('< %s\n' % ip)
|
||||
check_host(ip)
|
||||
|
||||
|
||||
def _check_smb(hostname):
|
||||
return
|
||||
global _smb_ok
|
||||
if not _smb_ok:
|
||||
return
|
||||
argv = ['smbclient', '-U', '%', '-L', hostname]
|
||||
debug2(' > smb: %s\n' % hostname)
|
||||
try:
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
|
||||
lines = p.stdout.readlines()
|
||||
p.wait()
|
||||
except OSError, e:
|
||||
log('%r failed: %r\n' % (argv, e))
|
||||
_smb_ok = False
|
||||
return
|
||||
|
||||
lines.reverse()
|
||||
|
||||
# junk at top
|
||||
while lines:
|
||||
line = lines.pop().strip()
|
||||
if re.match(r'Server\s+', line):
|
||||
break
|
||||
|
||||
# server list section:
|
||||
# Server Comment
|
||||
# ------ -------
|
||||
while lines:
|
||||
line = lines.pop().strip()
|
||||
if not line or re.match(r'-+\s+-+', line):
|
||||
continue
|
||||
if re.match(r'Workgroup\s+Master', line):
|
||||
break
|
||||
words = line.split()
|
||||
hostname = words[0].lower()
|
||||
debug3('< %s\n' % hostname)
|
||||
check_host(hostname)
|
||||
|
||||
# workgroup list section:
|
||||
# Workgroup Master
|
||||
# --------- ------
|
||||
while lines:
|
||||
line = lines.pop().strip()
|
||||
if re.match(r'-+\s+', line):
|
||||
continue
|
||||
if not line:
|
||||
break
|
||||
words = line.split()
|
||||
(workgroup, hostname) = (words[0].lower(), words[1].lower())
|
||||
debug3('< group(%s) -> %s\n' % (workgroup, hostname))
|
||||
check_host(hostname)
|
||||
check_workgroup(workgroup)
|
||||
|
||||
if lines:
|
||||
assert(0)
|
||||
|
||||
|
||||
def _check_nmb(hostname, is_workgroup, is_master):
|
||||
return
|
||||
global _nmb_ok
|
||||
if not _nmb_ok:
|
||||
return
|
||||
argv = ['nmblookup'] + ['-M']*is_master + ['--', hostname]
|
||||
debug2(' > n%d%d: %s\n' % (is_workgroup, is_master, hostname))
|
||||
try:
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
|
||||
lines = p.stdout.readlines()
|
||||
rv = p.wait()
|
||||
except OSError, e:
|
||||
log('%r failed: %r\n' % (argv, e))
|
||||
_nmb_ok = False
|
||||
return
|
||||
if rv:
|
||||
log('%r returned %d\n' % (argv, rv))
|
||||
return
|
||||
for line in lines:
|
||||
m = re.match(r'(\d+\.\d+\.\d+\.\d+) (\w+)<\w\w>\n', line)
|
||||
if m:
|
||||
g = m.groups()
|
||||
(ip, name) = (g[0], g[1].lower())
|
||||
debug3('< %s -> %s\n' % (name, ip))
|
||||
if is_workgroup:
|
||||
_enqueue(_check_smb, ip)
|
||||
else:
|
||||
found_host(name, ip)
|
||||
check_host(name)
|
||||
|
||||
|
||||
def check_host(hostname):
|
||||
if _is_ip(hostname):
|
||||
_enqueue(_check_revdns, hostname)
|
||||
else:
|
||||
_enqueue(_check_dns, hostname)
|
||||
_enqueue(_check_smb, hostname)
|
||||
_enqueue(_check_nmb, hostname, False, False)
|
||||
|
||||
|
||||
def check_workgroup(hostname):
|
||||
_enqueue(_check_nmb, hostname, True, False)
|
||||
_enqueue(_check_nmb, hostname, True, True)
|
||||
|
||||
|
||||
def _enqueue(op, *args):
|
||||
t = (op,args)
|
||||
if queue.get(t) == None:
|
||||
queue[t] = 0
|
||||
|
||||
|
||||
def _stdin_still_ok(timeout):
|
||||
r,w,x = select.select([sys.stdin.fileno()], [], [], timeout)
|
||||
if r:
|
||||
b = os.read(sys.stdin.fileno(), 4096)
|
||||
if not b:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def hw_main(seed_hosts):
|
||||
if helpers.verbose >= 2:
|
||||
helpers.logprefix = 'HH: '
|
||||
else:
|
||||
helpers.logprefix = 'hostwatch: '
|
||||
|
||||
read_host_cache()
|
||||
|
||||
_enqueue(_check_etc_hosts)
|
||||
_enqueue(_check_netstat)
|
||||
check_host('localhost')
|
||||
check_host(socket.gethostname())
|
||||
check_workgroup('workgroup')
|
||||
check_workgroup('-')
|
||||
for h in seed_hosts:
|
||||
check_host(h)
|
||||
|
||||
while 1:
|
||||
now = time.time()
|
||||
for t,last_polled in queue.items():
|
||||
(op,args) = t
|
||||
if not _stdin_still_ok(0):
|
||||
break
|
||||
maxtime = POLL_TIME
|
||||
if op == _check_netstat:
|
||||
maxtime = NETSTAT_POLL_TIME
|
||||
if now - last_polled > maxtime:
|
||||
queue[t] = time.time()
|
||||
op(*args)
|
||||
try:
|
||||
sys.stdout.flush()
|
||||
except IOError:
|
||||
break
|
||||
|
||||
# FIXME: use a smarter timeout based on oldest last_polled
|
||||
if not _stdin_still_ok(1):
|
||||
break
|
Binary file not shown.
@@ -1,131 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
import sys, os, re
|
||||
import helpers, options, client, server, firewall, hostwatch
|
||||
import compat.ssubprocess as ssubprocess
|
||||
from helpers import *
|
||||
|
||||
|
||||
# list of:
|
||||
# 1.2.3.4/5 or just 1.2.3.4
|
||||
def parse_subnets(subnets_str):
|
||||
subnets = []
|
||||
for s in subnets_str:
|
||||
m = re.match(r'(\d+)(?:\.(\d+)\.(\d+)\.(\d+))?(?:/(\d+))?$', s)
|
||||
if not m:
|
||||
raise Fatal('%r is not a valid IP subnet format' % s)
|
||||
(a,b,c,d,width) = m.groups()
|
||||
(a,b,c,d) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0))
|
||||
if width == None:
|
||||
width = 32
|
||||
else:
|
||||
width = int(width)
|
||||
if a > 255 or b > 255 or c > 255 or d > 255:
|
||||
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a,b,c,d))
|
||||
if width > 32:
|
||||
raise Fatal('*/%d is greater than the maximum of 32' % width)
|
||||
subnets.append(('%d.%d.%d.%d' % (a,b,c,d), width))
|
||||
return subnets
|
||||
|
||||
|
||||
# 1.2.3.4:567 or just 1.2.3.4 or just 567
|
||||
def parse_ipport(s):
|
||||
s = str(s)
|
||||
m = re.match(r'(?:(\d+)\.(\d+)\.(\d+)\.(\d+))?(?::)?(?:(\d+))?$', s)
|
||||
if not m:
|
||||
raise Fatal('%r is not a valid IP:port format' % s)
|
||||
(a,b,c,d,port) = m.groups()
|
||||
(a,b,c,d,port) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0),
|
||||
int(port or 0))
|
||||
if a > 255 or b > 255 or c > 255 or d > 255:
|
||||
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a,b,c,d))
|
||||
if port > 65535:
|
||||
raise Fatal('*:%d is greater than the maximum of 65535' % port)
|
||||
if a == None:
|
||||
a = b = c = d = 0
|
||||
return ('%d.%d.%d.%d' % (a,b,c,d), port)
|
||||
|
||||
|
||||
optspec = """
|
||||
sshuttle [-l [ip:]port] [-r [username@]sshserver[:port]] <subnets...>
|
||||
sshuttle --server
|
||||
sshuttle --firewall <port> <subnets...>
|
||||
sshuttle --hostwatch
|
||||
--
|
||||
l,listen= transproxy to this ip address and port number [127.0.0.1:0]
|
||||
H,auto-hosts scan for remote hostnames and update local /etc/hosts
|
||||
N,auto-nets automatically determine subnets to route
|
||||
dns capture local DNS requests and forward to the remote DNS server
|
||||
python= path to python interpreter on the remote server [python]
|
||||
r,remote= ssh hostname (and optional username) of remote sshuttle server
|
||||
x,exclude= exclude this subnet (can be used more than once)
|
||||
v,verbose increase debug message verbosity
|
||||
e,ssh-cmd= the command to use to connect to the remote [ssh]
|
||||
seed-hosts= with -H, use these hostnames for initial scan (comma-separated)
|
||||
no-latency-control sacrifice latency to improve bandwidth benchmarks
|
||||
wrap= restart counting channel numbers after this number (for testing)
|
||||
D,daemon run in the background as a daemon
|
||||
syslog send log messages to syslog (default if you use --daemon)
|
||||
pidfile= pidfile name (only if using --daemon) [./sshuttle.pid]
|
||||
server (internal use only)
|
||||
firewall (internal use only)
|
||||
hostwatch (internal use only)
|
||||
"""
|
||||
o = options.Options(optspec)
|
||||
(opt, flags, extra) = o.parse(sys.argv[1:])
|
||||
|
||||
if opt.daemon:
|
||||
opt.syslog = 1
|
||||
if opt.wrap:
|
||||
import ssnet
|
||||
ssnet.MAX_CHANNEL = int(opt.wrap)
|
||||
helpers.verbose = opt.verbose
|
||||
|
||||
try:
|
||||
if opt.server:
|
||||
if len(extra) != 0:
|
||||
o.fatal('no arguments expected')
|
||||
server.latency_control = opt.latency_control
|
||||
sys.exit(server.main())
|
||||
elif opt.firewall:
|
||||
if len(extra) != 2:
|
||||
o.fatal('exactly two arguments expected')
|
||||
sys.exit(firewall.main(int(extra[0]), int(extra[1]), opt.syslog))
|
||||
elif opt.hostwatch:
|
||||
sys.exit(hostwatch.hw_main(extra))
|
||||
else:
|
||||
if len(extra) < 1 and not opt.auto_nets:
|
||||
o.fatal('at least one subnet (or -N) expected')
|
||||
includes = extra
|
||||
excludes = ['127.0.0.0/8']
|
||||
for k,v in flags:
|
||||
if k in ('-x','--exclude'):
|
||||
excludes.append(v)
|
||||
remotename = opt.remote
|
||||
if remotename == '' or remotename == '-':
|
||||
remotename = None
|
||||
if opt.seed_hosts and not opt.auto_hosts:
|
||||
o.fatal('--seed-hosts only works if you also use -H')
|
||||
if opt.seed_hosts:
|
||||
sh = re.split(r'[\s,]+', (opt.seed_hosts or "").strip())
|
||||
elif opt.auto_hosts:
|
||||
sh = []
|
||||
else:
|
||||
sh = None
|
||||
sys.exit(client.main(parse_ipport(opt.listen or '0.0.0.0:0'),
|
||||
opt.ssh_cmd,
|
||||
remotename,
|
||||
opt.python,
|
||||
opt.latency_control,
|
||||
opt.dns,
|
||||
sh,
|
||||
opt.auto_nets,
|
||||
parse_subnets(includes),
|
||||
parse_subnets(excludes),
|
||||
opt.syslog, opt.daemon, opt.pidfile))
|
||||
except Fatal, e:
|
||||
log('fatal: %s\n' % e)
|
||||
sys.exit(99)
|
||||
except KeyboardInterrupt:
|
||||
log('\n')
|
||||
log('Keyboard interrupt: exiting.\n')
|
||||
sys.exit(1)
|
@ -1,200 +0,0 @@
|
||||
"""Command-line options parser.
|
||||
With the help of an options spec string, easily parse command-line options.
|
||||
"""
|
||||
import sys, os, textwrap, getopt, re, struct
|
||||
|
||||
class OptDict:
|
||||
def __init__(self):
|
||||
self._opts = {}
|
||||
|
||||
def __setitem__(self, k, v):
|
||||
if k.startswith('no-') or k.startswith('no_'):
|
||||
k = k[3:]
|
||||
v = not v
|
||||
self._opts[k] = v
|
||||
|
||||
def __getitem__(self, k):
|
||||
if k.startswith('no-') or k.startswith('no_'):
|
||||
return not self._opts[k[3:]]
|
||||
return self._opts[k]
|
||||
|
||||
def __getattr__(self, k):
|
||||
return self[k]
|
||||
|
||||
|
||||
def _default_onabort(msg):
|
||||
sys.exit(97)
|
||||
|
||||
|
||||
def _intify(v):
|
||||
try:
|
||||
vv = int(v or '')
|
||||
if str(vv) == v:
|
||||
return vv
|
||||
except ValueError:
|
||||
pass
|
||||
return v
|
||||
|
||||
|
||||
def _atoi(v):
|
||||
try:
|
||||
return int(v or 0)
|
||||
except ValueError:
|
||||
return 0
|
||||
|
||||
|
||||
def _remove_negative_kv(k, v):
|
||||
if k.startswith('no-') or k.startswith('no_'):
|
||||
return k[3:], not v
|
||||
return k,v
|
||||
|
||||
def _remove_negative_k(k):
|
||||
return _remove_negative_kv(k, None)[0]
|
||||
|
||||
|
||||
def _tty_width():
|
||||
s = struct.pack("HHHH", 0, 0, 0, 0)
|
||||
try:
|
||||
import fcntl, termios
|
||||
s = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, s)
|
||||
except (IOError, ImportError):
|
||||
return _atoi(os.environ.get('WIDTH')) or 70
|
||||
(ysize,xsize,ypix,xpix) = struct.unpack('HHHH', s)
|
||||
return xsize or 70
|
||||
|
||||
|
||||
class Options:
|
||||
"""Option parser.
|
||||
When constructed, two strings are mandatory. The first one is the command
|
||||
name showed before error messages. The second one is a string called an
|
||||
optspec that specifies the synopsis and option flags and their description.
|
||||
For more information about optspecs, consult the bup-options(1) man page.
|
||||
|
||||
Two optional arguments specify an alternative parsing function and an
|
||||
alternative behaviour on abort (after having output the usage string).
|
||||
|
||||
By default, the parser function is getopt.gnu_getopt, and the abort
|
||||
behaviour is to exit the program.
|
||||
"""
|
||||
def __init__(self, optspec, optfunc=getopt.gnu_getopt,
|
||||
onabort=_default_onabort):
|
||||
self.optspec = optspec
|
||||
self._onabort = onabort
|
||||
self.optfunc = optfunc
|
||||
self._aliases = {}
|
||||
self._shortopts = 'h?'
|
||||
self._longopts = ['help']
|
||||
self._hasparms = {}
|
||||
self._defaults = {}
|
||||
self._usagestr = self._gen_usage()
|
||||
|
||||
def _gen_usage(self):
|
||||
out = []
|
||||
lines = self.optspec.strip().split('\n')
|
||||
lines.reverse()
|
||||
first_syn = True
|
||||
while lines:
|
||||
l = lines.pop()
|
||||
if l == '--': break
|
||||
out.append('%s: %s\n' % (first_syn and 'usage' or ' or', l))
|
||||
first_syn = False
|
||||
out.append('\n')
|
||||
last_was_option = False
|
||||
while lines:
|
||||
l = lines.pop()
|
||||
if l.startswith(' '):
|
||||
out.append('%s%s\n' % (last_was_option and '\n' or '',
|
||||
l.lstrip()))
|
||||
last_was_option = False
|
||||
elif l:
|
||||
(flags, extra) = l.split(' ', 1)
|
||||
extra = extra.strip()
|
||||
if flags.endswith('='):
|
||||
flags = flags[:-1]
|
||||
has_parm = 1
|
||||
else:
|
||||
has_parm = 0
|
||||
g = re.search(r'\[([^\]]*)\]$', extra)
|
||||
if g:
|
||||
defval = g.group(1)
|
||||
else:
|
||||
defval = None
|
||||
flagl = flags.split(',')
|
||||
flagl_nice = []
|
||||
for _f in flagl:
|
||||
f,dvi = _remove_negative_kv(_f, _intify(defval))
|
||||
self._aliases[f] = _remove_negative_k(flagl[0])
|
||||
self._hasparms[f] = has_parm
|
||||
self._defaults[f] = dvi
|
||||
if len(f) == 1:
|
||||
self._shortopts += f + (has_parm and ':' or '')
|
||||
flagl_nice.append('-' + f)
|
||||
else:
|
||||
f_nice = re.sub(r'\W', '_', f)
|
||||
self._aliases[f_nice] = _remove_negative_k(flagl[0])
|
||||
self._longopts.append(f + (has_parm and '=' or ''))
|
||||
self._longopts.append('no-' + f)
|
||||
flagl_nice.append('--' + _f)
|
||||
flags_nice = ', '.join(flagl_nice)
|
||||
if has_parm:
|
||||
flags_nice += ' ...'
|
||||
prefix = ' %-20s ' % flags_nice
|
||||
argtext = '\n'.join(textwrap.wrap(extra, width=_tty_width(),
|
||||
initial_indent=prefix,
|
||||
subsequent_indent=' '*28))
|
||||
out.append(argtext + '\n')
|
||||
last_was_option = True
|
||||
else:
|
||||
out.append('\n')
|
||||
last_was_option = False
|
||||
return ''.join(out).rstrip() + '\n'
|
||||
|
||||
def usage(self, msg=""):
|
||||
"""Print usage string to stderr and abort."""
|
||||
sys.stderr.write(self._usagestr)
|
||||
e = self._onabort and self._onabort(msg) or None
|
||||
if e:
|
||||
raise e
|
||||
|
||||
def fatal(self, s):
|
||||
"""Print an error message to stderr and abort with usage string."""
|
||||
msg = 'error: %s\n' % s
|
||||
sys.stderr.write(msg)
|
||||
return self.usage(msg)
|
||||
|
||||
def parse(self, args):
|
||||
"""Parse a list of arguments and return (options, flags, extra).
|
||||
|
||||
In the returned tuple, "options" is an OptDict with known options,
|
||||
"flags" is a list of option flags that were used on the command-line,
|
||||
and "extra" is a list of positional arguments.
|
||||
"""
|
||||
try:
|
||||
(flags,extra) = self.optfunc(args, self._shortopts, self._longopts)
|
||||
except getopt.GetoptError, e:
|
||||
self.fatal(e)
|
||||
|
||||
opt = OptDict()
|
||||
|
||||
for k,v in self._defaults.iteritems():
|
||||
k = self._aliases[k]
|
||||
opt[k] = v
|
||||
|
||||
for (k,v) in flags:
|
||||
k = k.lstrip('-')
|
||||
if k in ('h', '?', 'help'):
|
||||
self.usage()
|
||||
if k.startswith('no-'):
|
||||
k = self._aliases[k[3:]]
|
||||
v = 0
|
||||
else:
|
||||
k = self._aliases[k]
|
||||
if not self._hasparms[k]:
|
||||
assert(v == '')
|
||||
v = (opt._opts.get(k) or 0) + 1
|
||||
else:
|
||||
v = _intify(v)
|
||||
opt[k] = v
|
||||
for (f1,f2) in self._aliases.iteritems():
|
||||
opt[f1] = opt._opts.get(f2)
|
||||
return (opt,flags,extra)
|
Binary file not shown.
@ -1,212 +0,0 @@
|
||||
import re, struct, socket, select, traceback, time
|
||||
if not globals().get('skip_imports'):
|
||||
import ssnet, helpers, hostwatch
|
||||
import compat.ssubprocess as ssubprocess
|
||||
from ssnet import SockWrapper, Handler, Proxy, Mux, MuxWrapper
|
||||
from helpers import *
|
||||
|
||||
|
||||
def _ipmatch(ipstr):
|
||||
if ipstr == 'default':
|
||||
ipstr = '0.0.0.0/0'
|
||||
m = re.match(r'^(\d+(\.\d+(\.\d+(\.\d+)?)?)?)(?:/(\d+))?$', ipstr)
|
||||
if m:
|
||||
g = m.groups()
|
||||
ips = g[0]
|
||||
width = int(g[4] or 32)
|
||||
if g[1] == None:
|
||||
ips += '.0.0.0'
|
||||
width = min(width, 8)
|
||||
elif g[2] == None:
|
||||
ips += '.0.0'
|
||||
width = min(width, 16)
|
||||
elif g[3] == None:
|
||||
ips += '.0'
|
||||
width = min(width, 24)
|
||||
return (struct.unpack('!I', socket.inet_aton(ips))[0], width)
|
||||
|
||||
|
||||
def _ipstr(ip, width):
|
||||
if width >= 32:
|
||||
return ip
|
||||
else:
|
||||
return "%s/%d" % (ip, width)
|
||||
|
||||
|
||||
def _maskbits(netmask):
|
||||
if not netmask:
|
||||
return 32
|
||||
for i in range(32):
|
||||
if netmask[0] & _shl(1, i):
|
||||
return 32-i
|
||||
return 0
|
||||
|
||||
|
||||
def _shl(n, bits):
|
||||
return n * int(2**bits)
|
||||
|
||||
|
||||
def _list_routes():
|
||||
argv = ['netstat', '-rn']
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE)
|
||||
routes = []
|
||||
for line in p.stdout:
|
||||
cols = re.split(r'\s+', line)
|
||||
ipw = _ipmatch(cols[0])
|
||||
if not ipw:
|
||||
continue # some lines won't be parseable; never mind
|
||||
maskw = _ipmatch(cols[2]) # linux only
|
||||
mask = _maskbits(maskw) # returns 32 if maskw is null
|
||||
width = min(ipw[1], mask)
|
||||
ip = ipw[0] & _shl(_shl(1, width) - 1, 32-width)
|
||||
routes.append((socket.inet_ntoa(struct.pack('!I', ip)), width))
|
||||
rv = p.wait()
|
||||
if rv != 0:
|
||||
log('WARNING: %r returned %d\n' % (argv, rv))
|
||||
log('WARNING: That prevents --auto-nets from working.\n')
|
||||
return routes
|
||||
|
||||
|
||||
def list_routes():
|
||||
for (ip,width) in _list_routes():
|
||||
if not ip.startswith('0.') and not ip.startswith('127.'):
|
||||
yield (ip,width)
|
||||
|
||||
|
||||
def _exc_dump():
|
||||
exc_info = sys.exc_info()
|
||||
return ''.join(traceback.format_exception(*exc_info))
|
||||
|
||||
|
||||
def start_hostwatch(seed_hosts):
|
||||
s1,s2 = socket.socketpair()
|
||||
pid = os.fork()
|
||||
if not pid:
|
||||
# child
|
||||
rv = 99
|
||||
try:
|
||||
try:
|
||||
s2.close()
|
||||
os.dup2(s1.fileno(), 1)
|
||||
os.dup2(s1.fileno(), 0)
|
||||
s1.close()
|
||||
rv = hostwatch.hw_main(seed_hosts) or 0
|
||||
except Exception, e:
|
||||
log('%s\n' % _exc_dump())
|
||||
rv = 98
|
||||
finally:
|
||||
os._exit(rv)
|
||||
s1.close()
|
||||
return pid,s2
|
||||
|
||||
|
||||
class Hostwatch:
|
||||
def __init__(self):
|
||||
self.pid = 0
|
||||
self.sock = None
|
||||
|
||||
|
||||
class DnsProxy(Handler):
|
||||
def __init__(self, mux, chan, request):
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
Handler.__init__(self, [sock])
|
||||
self.sock = sock
|
||||
self.timeout = time.time()+30
|
||||
self.mux = mux
|
||||
self.chan = chan
|
||||
self.sock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
|
||||
self.sock.connect((resolvconf_random_nameserver(), 53))
|
||||
self.sock.send(request)
|
||||
|
||||
def callback(self):
|
||||
data = self.sock.recv(4096)
|
||||
debug2('DNS response: %d bytes\n' % len(data))
|
||||
self.mux.send(self.chan, ssnet.CMD_DNS_RESPONSE, data)
|
||||
self.ok = False
|
||||
|
||||
|
||||
def main():
|
||||
if helpers.verbose >= 1:
|
||||
helpers.logprefix = ' s: '
|
||||
else:
|
||||
helpers.logprefix = 'server: '
|
||||
debug1('latency control setting = %r\n' % latency_control)
|
||||
|
||||
routes = list(list_routes())
|
||||
debug1('available routes:\n')
|
||||
for r in routes:
|
||||
debug1(' %s/%d\n' % r)
|
||||
|
||||
# synchronization header
|
||||
sys.stdout.write('SSHUTTLE0001')
|
||||
sys.stdout.flush()
|
||||
|
||||
handlers = []
|
||||
mux = Mux(socket.fromfd(sys.stdin.fileno(),
|
||||
socket.AF_INET, socket.SOCK_STREAM),
|
||||
socket.fromfd(sys.stdout.fileno(),
|
||||
socket.AF_INET, socket.SOCK_STREAM))
|
||||
handlers.append(mux)
|
||||
routepkt = ''
|
||||
for r in routes:
|
||||
routepkt += '%s,%d\n' % r
|
||||
mux.send(0, ssnet.CMD_ROUTES, routepkt)
|
||||
|
||||
hw = Hostwatch()
|
||||
hw.leftover = ''
|
||||
|
||||
def hostwatch_ready():
|
||||
assert(hw.pid)
|
||||
content = hw.sock.recv(4096)
|
||||
if content:
|
||||
lines = (hw.leftover + content).split('\n')
|
||||
if lines[-1]:
|
||||
# no terminating newline: entry isn't complete yet!
|
||||
hw.leftover = lines.pop()
|
||||
lines.append('')
|
||||
else:
|
||||
hw.leftover = ''
|
||||
mux.send(0, ssnet.CMD_HOST_LIST, '\n'.join(lines))
|
||||
else:
|
||||
raise Fatal('hostwatch process died')
|
||||
|
||||
def got_host_req(data):
|
||||
if not hw.pid:
|
||||
(hw.pid,hw.sock) = start_hostwatch(data.strip().split())
|
||||
handlers.append(Handler(socks = [hw.sock],
|
||||
callback = hostwatch_ready))
|
||||
mux.got_host_req = got_host_req
|
||||
|
||||
def new_channel(channel, data):
|
||||
(dstip,dstport) = data.split(',', 1)
|
||||
dstport = int(dstport)
|
||||
outwrap = ssnet.connect_dst(dstip,dstport)
|
||||
handlers.append(Proxy(MuxWrapper(mux, channel), outwrap))
|
||||
mux.new_channel = new_channel
|
||||
|
||||
dnshandlers = {}
|
||||
def dns_req(channel, data):
|
||||
debug2('Incoming DNS request.\n')
|
||||
h = DnsProxy(mux, channel, data)
|
||||
handlers.append(h)
|
||||
dnshandlers[channel] = h
|
||||
mux.got_dns_req = dns_req
|
||||
|
||||
while mux.ok:
|
||||
if hw.pid:
|
||||
assert(hw.pid > 0)
|
||||
(rpid, rv) = os.waitpid(hw.pid, os.WNOHANG)
|
||||
if rpid:
|
||||
raise Fatal('hostwatch exited unexpectedly: code 0x%04x\n' % rv)
|
||||
|
||||
ssnet.runonce(handlers, mux)
|
||||
if latency_control:
|
||||
mux.check_fullness()
|
||||
mux.callback()
|
||||
|
||||
if dnshandlers:
|
||||
now = time.time()
|
||||
for channel,h in dnshandlers.items():
|
||||
if h.timeout < now or not h.ok:
|
||||
del dnshandlers[channel]
|
||||
h.ok = False
|
Binary file not shown.
@ -1,99 +0,0 @@
|
||||
import sys, os, re, socket, zlib
|
||||
import compat.ssubprocess as ssubprocess
|
||||
import helpers
|
||||
from helpers import *
|
||||
|
||||
|
||||
def readfile(name):
|
||||
basedir = os.path.dirname(os.path.abspath(sys.argv[0]))
|
||||
path = [basedir] + sys.path
|
||||
for d in path:
|
||||
fullname = os.path.join(d, name)
|
||||
if os.path.exists(fullname):
|
||||
return open(fullname, 'rb').read()
|
||||
raise Exception("can't find file %r in any of %r" % (name, path))
|
||||
|
||||
|
||||
def empackage(z, filename, data=None):
|
||||
(path,basename) = os.path.split(filename)
|
||||
if not data:
|
||||
data = readfile(filename)
|
||||
content = z.compress(data)
|
||||
content += z.flush(zlib.Z_SYNC_FLUSH)
|
||||
return '%s\n%d\n%s' % (basename, len(content), content)
|
||||
|
||||
|
||||
def connect(ssh_cmd, rhostport, python, stderr, options):
|
||||
main_exe = sys.argv[0]
|
||||
portl = []
|
||||
|
||||
rhostIsIPv6 = False
|
||||
if (rhostport or '').count(':') > 1:
|
||||
rhostIsIPv6 = True
|
||||
if rhostport.count(']') or rhostport.count('['):
|
||||
result = rhostport.split(']')
|
||||
rhost = result[0].strip('[')
|
||||
if len(result) > 1:
|
||||
result[1] = result[1].strip(':')
|
||||
if result[1] is not '':
|
||||
portl = ['-p', str(int(result[1]))]
|
||||
else: # can't disambiguate IPv6 colons and a port number. pass the hostname through.
|
||||
rhost = rhostport
|
||||
else: # IPv4
|
||||
l = (rhostport or '').split(':', 1)
|
||||
rhost = l[0]
|
||||
if len(l) > 1:
|
||||
portl = ['-p', str(int(l[1]))]
|
||||
|
||||
if rhost == '-':
|
||||
rhost = None
|
||||
|
||||
ipv6flag = []
|
||||
if rhostIsIPv6:
|
||||
ipv6flag = ['-6']
|
||||
|
||||
z = zlib.compressobj(1)
|
||||
content = readfile('assembler.py')
|
||||
optdata = ''.join("%s=%r\n" % (k,v) for (k,v) in options.items())
|
||||
content2 = (empackage(z, 'cmdline_options.py', optdata) +
|
||||
empackage(z, 'helpers.py') +
|
||||
empackage(z, 'compat/ssubprocess.py') +
|
||||
empackage(z, 'ssnet.py') +
|
||||
empackage(z, 'hostwatch.py') +
|
||||
empackage(z, 'server.py') +
|
||||
"\n")
|
||||
|
||||
pyscript = r"""
|
||||
import sys;
|
||||
skip_imports=1;
|
||||
verbosity=%d;
|
||||
exec compile(sys.stdin.read(%d), "assembler.py", "exec")
|
||||
""" % (helpers.verbose or 0, len(content))
|
||||
pyscript = re.sub(r'\s+', ' ', pyscript.strip())
|
||||
|
||||
|
||||
if not rhost:
|
||||
argv = [python, '-c', pyscript]
|
||||
else:
|
||||
if ssh_cmd:
|
||||
sshl = ssh_cmd.split(' ')
|
||||
else:
|
||||
sshl = ['ssh']
|
||||
argv = (sshl +
|
||||
portl +
|
||||
ipv6flag +
|
||||
[rhost, '--', "'%s' -c '%s'" % (python, pyscript)])
|
||||
(s1,s2) = socket.socketpair()
|
||||
def setup():
|
||||
# runs in the child process
|
||||
s2.close()
|
||||
s1a,s1b = os.dup(s1.fileno()), os.dup(s1.fileno())
|
||||
s1.close()
|
||||
debug2('executing: %r\n' % argv)
|
||||
p = ssubprocess.Popen(argv, stdin=s1a, stdout=s1b, preexec_fn=setup,
|
||||
close_fds=True, stderr=stderr)
|
||||
os.close(s1a)
|
||||
os.close(s1b)
|
||||
s2.sendall(content)
|
||||
s2.sendall(content2)
|
||||
return p, s2
|
Binary file not shown.
@ -1,131 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
import sys, os, re
|
||||
import helpers, options, client, server, firewall, hostwatch
|
||||
import compat.ssubprocess as ssubprocess
|
||||
from helpers import *
|
||||
|
||||
|
||||
# list of:
|
||||
# 1.2.3.4/5 or just 1.2.3.4
|
||||
def parse_subnets(subnets_str):
|
||||
subnets = []
|
||||
for s in subnets_str:
|
||||
m = re.match(r'(\d+)(?:\.(\d+)\.(\d+)\.(\d+))?(?:/(\d+))?$', s)
|
||||
if not m:
|
||||
raise Fatal('%r is not a valid IP subnet format' % s)
|
||||
(a,b,c,d,width) = m.groups()
|
||||
(a,b,c,d) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0))
|
||||
if width == None:
|
||||
width = 32
|
||||
else:
|
||||
width = int(width)
|
||||
if a > 255 or b > 255 or c > 255 or d > 255:
|
||||
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a,b,c,d))
|
||||
if width > 32:
|
||||
raise Fatal('*/%d is greater than the maximum of 32' % width)
|
||||
subnets.append(('%d.%d.%d.%d' % (a,b,c,d), width))
|
||||
return subnets
|
||||
|
||||
|
||||
# 1.2.3.4:567 or just 1.2.3.4 or just 567
|
||||
def parse_ipport(s):
|
||||
s = str(s)
|
||||
m = re.match(r'(?:(\d+)\.(\d+)\.(\d+)\.(\d+))?(?::)?(?:(\d+))?$', s)
|
||||
if not m:
|
||||
raise Fatal('%r is not a valid IP:port format' % s)
|
||||
(a,b,c,d,port) = m.groups()
|
||||
(a,b,c,d,port) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0),
|
||||
int(port or 0))
|
||||
if a > 255 or b > 255 or c > 255 or d > 255:
|
||||
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a,b,c,d))
|
||||
if port > 65535:
|
||||
raise Fatal('*:%d is greater than the maximum of 65535' % port)
|
||||
if a == None:
|
||||
a = b = c = d = 0
|
||||
return ('%d.%d.%d.%d' % (a,b,c,d), port)
|
||||
|
||||
|
||||
optspec = """
|
||||
sshuttle [-l [ip:]port] [-r [username@]sshserver[:port]] <subnets...>
|
||||
sshuttle --server
|
||||
sshuttle --firewall <port> <subnets...>
|
||||
sshuttle --hostwatch
|
||||
--
|
||||
l,listen= transproxy to this ip address and port number [127.0.0.1:0]
|
||||
H,auto-hosts scan for remote hostnames and update local /etc/hosts
|
||||
N,auto-nets automatically determine subnets to route
|
||||
dns capture local DNS requests and forward to the remote DNS server
|
||||
python= path to python interpreter on the remote server [python]
|
||||
r,remote= ssh hostname (and optional username) of remote sshuttle server
|
||||
x,exclude= exclude this subnet (can be used more than once)
|
||||
v,verbose increase debug message verbosity
|
||||
e,ssh-cmd= the command to use to connect to the remote [ssh]
|
||||
seed-hosts= with -H, use these hostnames for initial scan (comma-separated)
|
||||
no-latency-control sacrifice latency to improve bandwidth benchmarks
|
||||
wrap= restart counting channel numbers after this number (for testing)
|
||||
D,daemon run in the background as a daemon
|
||||
syslog send log messages to syslog (default if you use --daemon)
|
||||
pidfile= pidfile name (only if using --daemon) [./sshuttle.pid]
|
||||
server (internal use only)
|
||||
firewall (internal use only)
|
||||
hostwatch (internal use only)
|
||||
"""
|
||||
o = options.Options(optspec)
|
||||
(opt, flags, extra) = o.parse(sys.argv[1:])
|
||||
|
||||
if opt.daemon:
|
||||
opt.syslog = 1
|
||||
if opt.wrap:
|
||||
import ssnet
|
||||
ssnet.MAX_CHANNEL = int(opt.wrap)
|
||||
helpers.verbose = opt.verbose
|
||||
|
||||
try:
|
||||
if opt.server:
|
||||
if len(extra) != 0:
|
||||
o.fatal('no arguments expected')
|
||||
server.latency_control = opt.latency_control
|
||||
sys.exit(server.main())
|
||||
elif opt.firewall:
|
||||
if len(extra) != 2:
|
||||
o.fatal('exactly two arguments expected')
|
||||
sys.exit(firewall.main(int(extra[0]), int(extra[1]), opt.syslog))
|
||||
elif opt.hostwatch:
|
||||
sys.exit(hostwatch.hw_main(extra))
|
||||
else:
|
||||
if len(extra) < 1 and not opt.auto_nets:
|
||||
o.fatal('at least one subnet (or -N) expected')
|
||||
includes = extra
|
||||
excludes = ['127.0.0.0/8']
|
||||
for k,v in flags:
|
||||
if k in ('-x','--exclude'):
|
||||
excludes.append(v)
|
||||
remotename = opt.remote
|
||||
if remotename == '' or remotename == '-':
|
||||
remotename = None
|
||||
if opt.seed_hosts and not opt.auto_hosts:
|
||||
o.fatal('--seed-hosts only works if you also use -H')
|
||||
if opt.seed_hosts:
|
||||
sh = re.split(r'[\s,]+', (opt.seed_hosts or "").strip())
|
||||
elif opt.auto_hosts:
|
||||
sh = []
|
||||
else:
|
||||
sh = None
|
||||
sys.exit(client.main(parse_ipport(opt.listen or '0.0.0.0:0'),
|
||||
opt.ssh_cmd,
|
||||
remotename,
|
||||
opt.python,
|
||||
opt.latency_control,
|
||||
opt.dns,
|
||||
sh,
|
||||
opt.auto_nets,
|
||||
parse_subnets(includes),
|
||||
parse_subnets(excludes),
|
||||
opt.syslog, opt.daemon, opt.pidfile))
|
||||
except Fatal, e:
|
||||
log('fatal: %s\n' % e)
|
||||
sys.exit(99)
|
||||
except KeyboardInterrupt:
|
||||
log('\n')
|
||||
log('Keyboard interrupt: exiting.\n')
|
||||
sys.exit(1)
|
Binary file not shown.
@ -1,16 +0,0 @@
|
||||
import sys, os
|
||||
from compat import ssubprocess
|
||||
|
||||
|
||||
_p = None
|
||||
def start_syslog():
|
||||
global _p
|
||||
_p = ssubprocess.Popen(['logger',
|
||||
'-p', 'daemon.notice',
|
||||
'-t', 'sshuttle'], stdin=ssubprocess.PIPE)
|
||||
|
||||
|
||||
def stderr_to_syslog():
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
os.dup2(_p.stdin.fileno(), 2)
|
Binary file not shown.
@ -1,86 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
import sys, os, socket, select, struct, time
|
||||
|
||||
listener = socket.socket()
|
||||
listener.bind(('127.0.0.1', 0))
|
||||
listener.listen(500)
|
||||
|
||||
servers = []
|
||||
clients = []
|
||||
remain = {}
|
||||
|
||||
NUMCLIENTS = 50
|
||||
count = 0
|
||||
|
||||
|
||||
while 1:
|
||||
if len(clients) < NUMCLIENTS:
|
||||
c = socket.socket()
|
||||
c.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
c.bind(('0.0.0.0', 0))
|
||||
c.connect(listener.getsockname())
|
||||
count += 1
|
||||
if count >= 16384:
|
||||
count = 1
|
||||
print 'cli CREATING %d' % count
|
||||
b = struct.pack('I', count) + 'x'*count
|
||||
remain[c] = count
|
||||
print 'cli >> %r' % len(b)
|
||||
c.send(b)
|
||||
c.shutdown(socket.SHUT_WR)
|
||||
clients.append(c)
|
||||
r = [listener]
|
||||
time.sleep(0.1)
|
||||
else:
|
||||
r = [listener]+servers+clients
|
||||
print 'select(%d)' % len(r)
|
||||
r,w,x = select.select(r, [], [], 5)
|
||||
assert(r)
|
||||
for i in r:
|
||||
if i == listener:
|
||||
s,addr = listener.accept()
|
||||
servers.append(s)
|
||||
elif i in servers:
|
||||
b = i.recv(4096)
|
||||
print 'srv << %r' % len(b)
|
||||
if not i in remain:
|
||||
assert(len(b) >= 4)
|
||||
want = struct.unpack('I', b[:4])[0]
|
||||
b = b[4:]
|
||||
#i.send('y'*want)
|
||||
else:
|
||||
want = remain[i]
|
||||
if want < len(b):
|
||||
print 'weird wanted %d bytes, got %d: %r' % (want, len(b), b)
|
||||
assert(want >= len(b))
|
||||
want -= len(b)
|
||||
remain[i] = want
|
||||
if not b: # EOF
|
||||
if want:
|
||||
print 'weird: eof but wanted %d more' % want
|
||||
assert(want == 0)
|
||||
i.close()
|
||||
servers.remove(i)
|
||||
del remain[i]
|
||||
else:
|
||||
print 'srv >> %r' % len(b)
|
||||
i.send('y'*len(b))
|
||||
if not want:
|
||||
i.shutdown(socket.SHUT_WR)
|
||||
elif i in clients:
|
||||
b = i.recv(4096)
|
||||
print 'cli << %r' % len(b)
|
||||
want = remain[i]
|
||||
if want < len(b):
|
||||
print 'weird wanted %d bytes, got %d: %r' % (want, len(b), b)
|
||||
assert(want >= len(b))
|
||||
want -= len(b)
|
||||
remain[i] = want
|
||||
if not b: # EOF
|
||||
if want:
|
||||
print 'weird: eof but wanted %d more' % want
|
||||
assert(want == 0)
|
||||
i.close()
|
||||
clients.remove(i)
|
||||
del remain[i]
|
||||
listener.accept()
|
9
bandit.yml
Normal file
@ -0,0 +1,9 @@
exclude_dirs:
  - tests
skips:
  - B101
  - B104
  - B404
  - B603
  - B606
  - B607
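
The keys above are standard Bandit configuration options (``exclude_dirs`` and
``skips``); how this project actually invokes Bandit is not shown in this diff,
but a typical run against this config from the repository root would look
something like::

    bandit -c bandit.yml -r .
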
177
docs/Makefile
Normal file
@ -0,0 +1,177 @@
|
||||
# Makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
PAPER =
|
||||
BUILDDIR = _build
|
||||
|
||||
# User-friendly check for sphinx-build
|
||||
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
|
||||
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
|
||||
endif
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
PAPEROPT_letter = -D latex_paper_size=letter
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
# the i18n builder cannot share the environment and doctrees with the others
|
||||
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
|
||||
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
|
||||
|
||||
help:
|
||||
@echo "Please use \`make <target>' where <target> is one of"
|
||||
@echo " html to make standalone HTML files"
|
||||
@echo " dirhtml to make HTML files named index.html in directories"
|
||||
@echo " singlehtml to make a single large HTML file"
|
||||
@echo " pickle to make pickle files"
|
||||
@echo " json to make JSON files"
|
||||
@echo " htmlhelp to make HTML files and a HTML help project"
|
||||
@echo " qthelp to make HTML files and a qthelp project"
|
||||
@echo " devhelp to make HTML files and a Devhelp project"
|
||||
@echo " epub to make an epub"
|
||||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
||||
@echo " latexpdf to make LaTeX files and run them through pdflatex"
|
||||
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
|
||||
@echo " text to make text files"
|
||||
@echo " man to make manual pages"
|
||||
@echo " texinfo to make Texinfo files"
|
||||
@echo " info to make Texinfo files and run them through makeinfo"
|
||||
@echo " gettext to make PO message catalogs"
|
||||
@echo " changes to make an overview of all changed/added/deprecated items"
|
||||
@echo " xml to make Docutils-native XML files"
|
||||
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
|
||||
@echo " linkcheck to check all external links for integrity"
|
||||
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
||||
|
||||
clean:
|
||||
rm -rf $(BUILDDIR)/*
|
||||
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
dirhtml:
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||
|
||||
singlehtml:
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
pickle:
|
||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||
@echo
|
||||
@echo "Build finished; now you can process the pickle files."
|
||||
|
||||
json:
|
||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||
@echo
|
||||
@echo "Build finished; now you can process the JSON files."
|
||||
|
||||
htmlhelp:
|
||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||
|
||||
qthelp:
|
||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
|
||||
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/sshuttle.qhcp"
|
||||
@echo "To view the help file:"
|
||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/sshuttle.qhc"
|
||||
|
||||
devhelp:
|
||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
||||
@echo
|
||||
@echo "Build finished."
|
||||
@echo "To view the help file:"
|
||||
@echo "# mkdir -p $$HOME/.local/share/devhelp/sshuttle"
|
||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/sshuttle"
|
||||
@echo "# devhelp"
|
||||
|
||||
epub:
|
||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
||||
@echo
|
||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
||||
|
||||
latex:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
latexpdf:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
latexpdfja:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through platex and dvipdfmx..."
|
||||
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
text:
|
||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
||||
@echo
|
||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
||||
|
||||
man:
|
||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
||||
@echo
|
||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
||||
|
||||
texinfo:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo
|
||||
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
|
||||
@echo "Run \`make' in that directory to run these through makeinfo" \
|
||||
"(use \`make info' here to do that automatically)."
|
||||
|
||||
info:
|
||||
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
|
||||
@echo "Running Texinfo files through makeinfo..."
|
||||
make -C $(BUILDDIR)/texinfo info
|
||||
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
|
||||
|
||||
gettext:
|
||||
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
|
||||
@echo
|
||||
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
|
||||
|
||||
changes:
|
||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||
@echo
|
||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||
|
||||
linkcheck:
|
||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||
@echo
|
||||
@echo "Link check complete; look for any errors in the above output " \
|
||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||
|
||||
doctest:
|
||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
||||
|
||||
xml:
|
||||
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
|
||||
@echo
|
||||
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
|
||||
|
||||
pseudoxml:
|
||||
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
|
||||
@echo
|
||||
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
|
1
docs/changes.rst
Normal file
@ -0,0 +1 @@
.. include:: ../CHANGES.rst
11
docs/chromeos.rst
Normal file
@ -0,0 +1,11 @@
Google ChromeOS
===============

Currently there is no built-in support for running sshuttle directly on
Google ChromeOS/Chromebooks.

What you can do instead is create a Linux VM with Crostini. In the default
stretch/Debian 9 VM, you can then install sshuttle as on any Linux box and
it just works, as do xterms and ssvncviewer etc.

https://www.reddit.com/r/Crostini/wiki/getstarted/crostini-setup-guide
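
As a rough sketch (assuming a Debian-based Crostini container with working
networking; the virtualenv path is just a placeholder), installing inside the
VM mirrors the Debian and PyPI steps from the Installation page::

    sudo apt-get install sshuttle
    # or, from PyPI inside a virtualenv:
    python3 -m venv ~/sshuttle-venv
    . ~/sshuttle-venv/bin/activate
    pip install sshuttle
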
261
docs/conf.py
Normal file
@ -0,0 +1,261 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# sshuttle documentation build configuration file, created by
|
||||
# sphinx-quickstart on Sun Jan 17 12:13:47 2016.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys
|
||||
import os
|
||||
sys.path.insert(0, os.path.abspath('..'))
|
||||
import sshuttle # NOQA
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.todo',
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
# source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = 'sshuttle'
|
||||
copyright = '2016, Brian May'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = sshuttle.__version__
|
||||
# The short X.Y version.
|
||||
version = '.'.join(release.split('.')[:2])
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
# language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
# today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
# today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
# default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
# add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
# add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
# show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
# modindex_common_prefix = []
|
||||
|
||||
# If true, keep warnings as "system message" paragraphs in the built documents.
|
||||
# keep_warnings = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'furo'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
# html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
# html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
# html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
# html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
# html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
# html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# Add any extra paths that contain custom files (such as robots.txt or
|
||||
# .htaccess) here, relative to this directory. These files are copied
|
||||
# directly to the root of the documentation.
|
||||
# html_extra_path = []
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
# html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
# html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
# html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
# html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
# html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
# html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
# html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
# html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
# html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
# html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
# html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
# html_file_suffix = None
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'sshuttledoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
# 'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
# 'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
# 'preamble': '',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
('index', 'sshuttle.tex', 'sshuttle documentation', 'Brian May', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
# latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
# latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
# latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
# latex_show_urls = False
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
# latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
# latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('manpage', 'sshuttle', 'sshuttle documentation', ['Brian May'], 1)
|
||||
]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
# man_show_urls = False
|
||||
|
||||
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
('index', 'sshuttle', 'sshuttle documentation',
|
||||
'Brian May', 'sshuttle', 'A transparent proxy-based VPN using ssh',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
# texinfo_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
# texinfo_domain_indices = True
|
||||
|
||||
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
||||
# texinfo_show_urls = 'footnote'
|
||||
|
||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||
# texinfo_no_detailmenu = False
|
36
docs/how-it-works.rst
Normal file
@ -0,0 +1,36 @@
How it works
============
sshuttle is not exactly a VPN, and not exactly port forwarding. It's kind
of both, and kind of neither.

It's like a VPN, since it can forward every port on an entire network, not
just ports you specify. Conveniently, it lets you use the "real" IP
addresses of each host rather than faking port numbers on localhost.

On the other hand, the way it *works* is more like ssh port forwarding than
a VPN. Normally, a VPN forwards your data one packet at a time, and
doesn't care about individual connections; ie. it's "stateless" with respect
to the traffic. sshuttle is the opposite of stateless; it tracks every
single connection.

You could compare sshuttle to something like the old `Slirp
<http://en.wikipedia.org/wiki/Slirp>`_ program, which was a userspace TCP/IP
implementation that did something similar. But it operated on a
packet-by-packet basis on the client side, reassembling the packets on the
server side. That worked okay back in the "real live serial port" days,
because serial ports had predictable latency and buffering.

But you can't safely just forward TCP packets over a TCP session (like ssh),
because TCP's performance depends fundamentally on packet loss; it
*must* experience packet loss in order to know when to slow down! At
the same time, the outer TCP session (ssh, in this case) is a reliable
transport, which means that what you forward through the tunnel *never*
experiences packet loss. The ssh session itself experiences packet loss, of
course, but TCP fixes it up and ssh (and thus you) never know the
difference. But neither does your inner TCP session, and extremely screwy
performance ensues.

sshuttle assembles the TCP stream locally, multiplexes it statefully over
an ssh session, and disassembles it back into packets at the other end. So
it never ends up doing TCP-over-TCP. It's just data-over-TCP, which is
safe.
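
To make that last point concrete, here is a tiny, hypothetical sketch of
stateful multiplexing (this is *not* sshuttle's real wire format, which lives
in ssnet.py; the header layout and channel numbers below are invented purely
for illustration). Each connection gets a channel number, its
already-reassembled stream data is cut into length-prefixed frames, and all
frames share the one reliable ssh pipe::

    import struct

    HEADER = struct.Struct('!HI')  # (channel number, payload length)

    def mux_frame(channel, payload):
        # Wrap one chunk of an already-reassembled stream for the shared pipe.
        return HEADER.pack(channel, len(payload)) + payload

    def demux_frames(buf):
        # Split whatever has arrived so far into (channel, payload) pairs,
        # returning any incomplete trailing frame so it can be retried later.
        frames = []
        while len(buf) >= HEADER.size:
            channel, length = HEADER.unpack_from(buf)
            if len(buf) < HEADER.size + length:
                break  # wait for more bytes from the pipe
            frames.append((channel, buf[HEADER.size:HEADER.size + length]))
            buf = buf[HEADER.size + length:]
        return frames, buf

    if __name__ == '__main__':
        pipe = mux_frame(1, b'GET / HTTP/1.0\r\n') + mux_frame(2, b'hello')
        print(demux_frames(pipe))

Because each frame carries ordered, already-loss-free stream data, only the
outer ssh/TCP session ever has to deal with packet loss.
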
28
docs/index.rst
Normal file
@ -0,0 +1,28 @@
sshuttle: where transparent proxy meets VPN meets ssh
=====================================================

:Date: |today|
:Version: |version|

Contents:

.. toctree::
   :maxdepth: 2

   overview
   requirements
   installation
   usage
   platform
   Man Page <manpage>
   how-it-works
   support
   trivia
   changes


Indices and tables
==================

* :ref:`genindex`
* :ref:`search`
84
docs/installation.rst
Normal file
@ -0,0 +1,84 @@
Installation
============

- Ubuntu 16.04 or later::

      apt-get install sshuttle

- Debian stretch or later::

      apt-get install sshuttle

- Arch Linux::

      pacman -S sshuttle

- Fedora::

      dnf install sshuttle

- openSUSE::

      zypper in sshuttle

- Gentoo::

      emerge -av net-proxy/sshuttle

- NixOS::

      nix-env -iA nixos.sshuttle

- From PyPI::

      sudo pip install sshuttle

- Clone::

      git clone https://github.com/sshuttle/sshuttle.git
      cd sshuttle
      sudo ./setup.py install

- FreeBSD::

      # ports
      cd /usr/ports/net/py-sshuttle && make install clean
      # pkg
      pkg install py39-sshuttle

- OpenBSD::

      pkg_add sshuttle

- macOS, via MacPorts::

      sudo port selfupdate
      sudo port install sshuttle

It is also possible to install into a virtualenv as a non-root user.

- From PyPI::

      python3 -m venv /tmp/sshuttle
      . /tmp/sshuttle/bin/activate
      pip install sshuttle

- Clone::

      git clone https://github.com/sshuttle/sshuttle.git
      cd sshuttle
      python3 -m venv /tmp/sshuttle
      . /tmp/sshuttle/bin/activate
      python -m pip install .

- Homebrew::

      brew install sshuttle

- Nix::

      nix-shell -p sshuttle

- Windows::

      pip install sshuttle
242
docs/make.bat
Normal file
@ -0,0 +1,242 @@
|
||||
@ECHO OFF
|
||||
|
||||
REM Command file for Sphinx documentation
|
||||
|
||||
if "%SPHINXBUILD%" == "" (
|
||||
set SPHINXBUILD=sphinx-build
|
||||
)
|
||||
set BUILDDIR=_build
|
||||
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
|
||||
set I18NSPHINXOPTS=%SPHINXOPTS% .
|
||||
if NOT "%PAPER%" == "" (
|
||||
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
|
||||
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
|
||||
)
|
||||
|
||||
if "%1" == "" goto help
|
||||
|
||||
if "%1" == "help" (
|
||||
:help
|
||||
echo.Please use `make ^<target^>` where ^<target^> is one of
|
||||
echo. html to make standalone HTML files
|
||||
echo. dirhtml to make HTML files named index.html in directories
|
||||
echo. singlehtml to make a single large HTML file
|
||||
echo. pickle to make pickle files
|
||||
echo. json to make JSON files
|
||||
echo. htmlhelp to make HTML files and a HTML help project
|
||||
echo. qthelp to make HTML files and a qthelp project
|
||||
echo. devhelp to make HTML files and a Devhelp project
|
||||
echo. epub to make an epub
|
||||
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
|
||||
echo. text to make text files
|
||||
echo. man to make manual pages
|
||||
echo. texinfo to make Texinfo files
|
||||
echo. gettext to make PO message catalogs
|
||||
echo. changes to make an overview over all changed/added/deprecated items
|
||||
echo. xml to make Docutils-native XML files
|
||||
echo. pseudoxml to make pseudoxml-XML files for display purposes
|
||||
echo. linkcheck to check all external links for integrity
|
||||
echo. doctest to run all doctests embedded in the documentation if enabled
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "clean" (
|
||||
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
|
||||
del /q /s %BUILDDIR%\*
|
||||
goto end
|
||||
)
|
||||
|
||||
|
||||
%SPHINXBUILD% 2> nul
|
||||
if errorlevel 9009 (
|
||||
echo.
|
||||
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
|
||||
echo.installed, then set the SPHINXBUILD environment variable to point
|
||||
echo.to the full path of the 'sphinx-build' executable. Alternatively you
|
||||
echo.may add the Sphinx directory to PATH.
|
||||
echo.
|
||||
echo.If you don't have Sphinx installed, grab it from
|
||||
echo.http://sphinx-doc.org/
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
if "%1" == "html" (
|
||||
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "dirhtml" (
|
||||
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "singlehtml" (
|
||||
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pickle" (
|
||||
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the pickle files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "json" (
|
||||
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can process the JSON files.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "htmlhelp" (
|
||||
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run HTML Help Workshop with the ^
|
||||
.hhp project file in %BUILDDIR%/htmlhelp.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "qthelp" (
|
||||
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; now you can run "qcollectiongenerator" with the ^
|
||||
.qhcp project file in %BUILDDIR%/qthelp, like this:
|
||||
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\sshuttle.qhcp
|
||||
echo.To view the help file:
|
||||
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\sshuttle.ghc
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "devhelp" (
|
||||
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "epub" (
|
||||
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The epub file is in %BUILDDIR%/epub.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latex" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdf" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf
|
||||
cd %BUILDDIR%/..
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "latexpdfja" (
|
||||
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
|
||||
cd %BUILDDIR%/latex
|
||||
make all-pdf-ja
|
||||
cd %BUILDDIR%/..
|
||||
echo.
|
||||
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "text" (
|
||||
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The text files are in %BUILDDIR%/text.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "man" (
|
||||
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The manual pages are in %BUILDDIR%/man.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "texinfo" (
|
||||
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "gettext" (
|
||||
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "changes" (
|
||||
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.The overview file is in %BUILDDIR%/changes.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "linkcheck" (
|
||||
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Link check complete; look for any errors in the above output ^
|
||||
or in %BUILDDIR%/linkcheck/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "doctest" (
|
||||
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Testing of doctests in the sources finished, look at the ^
|
||||
results in %BUILDDIR%/doctest/output.txt.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "xml" (
|
||||
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The XML files are in %BUILDDIR%/xml.
|
||||
goto end
|
||||
)
|
||||
|
||||
if "%1" == "pseudoxml" (
|
||||
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
|
||||
if errorlevel 1 exit /b 1
|
||||
echo.
|
||||
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
|
||||
goto end
|
||||
)
|
||||
|
||||
:end
|
503
docs/manpage.rst
Normal file
@ -0,0 +1,503 @@
|
||||
sshuttle
|
||||
========
|
||||
|
||||
|
||||
Synopsis
|
||||
--------
|
||||
**sshuttle** [*options*] **-r** *[username@]sshserver[:port]* \<*subnets* ...\>
|
||||
|
||||
|
||||
Description
|
||||
-----------
|
||||
:program:`sshuttle` allows you to create a VPN connection from your
|
||||
machine to any remote server that you can connect to via ssh, as long
|
||||
as that server has a sufficiently new Python installation.
|
||||
|
||||
To work, you must have root access on the local machine,
|
||||
but you can have a normal account on the server.
|
||||
|
||||
It's valid to run :program:`sshuttle` more than once simultaneously on
|
||||
a single client machine, connecting to a different server
|
||||
every time, so you can be on more than one VPN at once.
|
||||
|
||||
If run on a router, :program:`sshuttle` can forward traffic for your
|
||||
entire subnet to the VPN.
|
||||
|
||||
|
||||
Options
|
||||
-------
|
||||
.. program:: sshuttle
|
||||
|
||||
.. option:: <subnets>
|
||||
|
||||
A list of subnets to route over the VPN, in the form
|
||||
``a.b.c.d[/width][port[-port]]``. Valid examples are 1.2.3.4 (a
|
||||
single IP address) and 1.2.3.4/32 (equivalent to 1.2.3.4),
|
||||
1.2.3.0/24 (a 24-bit subnet, ie. with a 255.255.255.0 netmask).
|
||||
Specify subnets 0/0 to match all IPv4 addresses and ::/0 to match
|
||||
all IPv6 addresses. Any of the previous examples are also valid if
|
||||
you append a port or a port range, so 1.2.3.4:8000 will only
|
||||
tunnel traffic that has as the destination port 8000 of 1.2.3.4
|
||||
and 1.2.3.0/24:8000-9000 will tunnel traffic going to any port
|
||||
between 8000 and 9000 (inclusive) for all IPs in the 1.2.3.0/24
|
||||
subnet. A hostname can be provided instead of an IP address. If
|
||||
the hostname resolves to multiple IPs, all of the IPs are
|
||||
included. If a width is provided with a hostname, the width is
|
||||
applied to all of the hostname's IPs (if they are all either IPv4
|
||||
or IPv6). Widths cannot be supplied to hostnames that resolve to
|
||||
both IPv4 and IPv6. Valid examples are example.com,
|
||||
example.com:8000, example.com/24, example.com/24:8000 and
|
||||
example.com:8000-9000.
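
For instance, a quick sketch combining several of these forms (the
server name and addresses are placeholders)::

    sshuttle -r user@example.com 1.2.3.0/24:8000-9000 example.com:443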
|
||||
|
||||
.. option:: --method <auto|nat|nft|tproxy|pf|ipfw>
|
||||
|
||||
Which firewall method should sshuttle use? For auto, sshuttle attempts to
|
||||
guess the appropriate method depending on what it can find in PATH. The
|
||||
default value is auto.
|
||||
|
||||
.. option:: -l <[ip:]port>, --listen=<[ip:]port>
|
||||
|
||||
Use this ip address and port number as the transparent
|
||||
proxy port. By default :program:`sshuttle` finds an available
|
||||
port automatically and listens on IP 127.0.0.1
|
||||
(localhost), so you don't need to override it, and
|
||||
connections are only proxied from the local machine,
|
||||
not from outside machines. If you want to accept
|
||||
connections from other machines on your network (ie. to
|
||||
run :program:`sshuttle` on a router) try enabling IP Forwarding in
|
||||
your kernel, then using ``--listen 0.0.0.0:0``.
|
||||
You can use any name resolving to an IP address of the machine running
|
||||
:program:`sshuttle`, e.g. ``--listen localhost``.
|
||||
|
||||
For the nft, tproxy and pf methods this can be an IPv6 address. Use
|
||||
this option with comma separated values if required, to provide both
|
||||
IPv4 and IPv6 addresses, e.g. ``--listen 127.0.0.1:0,[::1]:0``.
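
As a sketch of the router case on a Linux machine (the addresses are
placeholders; how you enable IP forwarding may differ on your system)::

    sysctl -w net.ipv4.ip_forward=1
    sshuttle --listen 0.0.0.0:0 -r user@example.com 0/0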
|
||||
|
||||
.. option:: -H, --auto-hosts
|
||||
|
||||
Scan for remote hostnames and update the local /etc/hosts
|
||||
file with matching entries for as long as the VPN is
|
||||
open. This is nicer than changing your system's DNS
|
||||
(/etc/resolv.conf) settings, for several reasons. First,
|
||||
hostnames are added without domain names attached, so
|
||||
you can ``ssh thatserver`` without worrying if your local
|
||||
domain matches the remote one. Second, if you :program:`sshuttle`
|
||||
into more than one VPN at a time, it's impossible to
|
||||
use more than one DNS server at once anyway, but
|
||||
:program:`sshuttle` correctly merges /etc/hosts entries between
|
||||
all running copies. Third, if you're only routing a
|
||||
few subnets over the VPN, you probably would prefer to
|
||||
keep using your local DNS server for everything else.
|
||||
|
||||
:program:`sshuttle` tries to store a cache of the hostnames in
|
||||
~/.sshuttle.hosts on the remote host. Similarly, it tries to read
|
||||
the file when you later reconnect to the host with --auto-hosts
|
||||
enabled to quickly populate the host list. When troubleshooting
|
||||
this feature, try removing this file on the remote host when
|
||||
sshuttle is not running.
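
For example, one hypothetical way to clear that cache before
troubleshooting (the server name is a placeholder)::

    ssh user@example.com 'rm -f ~/.sshuttle.hosts'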
|
||||
|
||||
.. option:: -N, --auto-nets
|
||||
|
||||
In addition to the subnets provided on the command
|
||||
line, ask the server which subnets it thinks we should
|
||||
route, and route those automatically. The suggestions
|
||||
are taken automatically from the server's routing
|
||||
table.
|
||||
|
||||
This feature does not detect IPv6 routes. Specify IPv6 subnets
|
||||
manually. For example, specify the ``::/0`` subnet on the command
|
||||
line to route all IPv6 traffic.
|
||||
|
||||
.. option:: --dns
|
||||
|
||||
Capture local DNS requests and forward to the remote DNS
|
||||
server. All queries to any of the local system's DNS
|
||||
servers (/etc/resolv.conf and, if it exists,
|
||||
/run/systemd/resolve/resolv.conf) will be intercepted and
|
||||
resolved on the remote side of the tunnel instead, there
|
||||
using the DNS specified via the :option:`--to-ns` option,
|
||||
if specified. Only plain DNS traffic sent to these servers
|
||||
on port 53 is captured.
|
||||
|
||||
.. option:: --ns-hosts=<server1[,server2[,server3[...]]]>
|
||||
|
||||
Capture local DNS requests to the specified server(s)
|
||||
and forward to the remote DNS server. Contrary to the
|
||||
:option:`--dns` option, this flag lets you specify the
DNS server(s) whose queries should be intercepted,
|
||||
instead of intercepting all DNS traffic on the local
|
||||
machine. This can be useful when only certain DNS
|
||||
requests should be resolved on the remote side of the
|
||||
tunnel, e.g. in combination with dnsmasq.
|
||||
|
||||
.. option:: --to-ns=<server>
|
||||
|
||||
The DNS to forward requests to when remote DNS
|
||||
resolution is enabled. If not given, sshuttle will
|
||||
simply resolve using the system configured resolver on
|
||||
the remote side (via /etc/resolv.conf on the remote
|
||||
side).
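
As a sketch combining the DNS options above (the server name and the
resolver address 192.0.2.53 are placeholders)::

    sshuttle --dns --to-ns 192.0.2.53 -r user@example.com 0/0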
|
||||
|
||||
.. option:: --python
|
||||
|
||||
Specify the name/path of the remote python interpreter. The
|
||||
default is to use ``python3`` (or ``python``, if ``python3``
|
||||
fails) in the remote system's PATH.
|
||||
|
||||
.. option:: -r <[username@]sshserver[:port]>, --remote=<[username@]sshserver[:port]>
|
||||
|
||||
The remote hostname and optional username and ssh
|
||||
port number to use for connecting to the remote server.
|
||||
For example, example.com, testuser@example.com,
|
||||
testuser@example.com:2222, or example.com:2244. This
|
||||
hostname is passed to ssh, so it will recognize any
|
||||
aliases and settings you may have configured in
|
||||
~/.ssh/config.
|
||||
|
||||
.. option:: -x <subnet>, --exclude=<subnet>
|
||||
|
||||
Explicitly exclude this subnet from forwarding. The
|
||||
format of this option is the same as the ``<subnets>``
|
||||
option. To exclude more than one subnet, specify the
|
||||
``-x`` option more than once. You can say something like
|
||||
``0/0 -x 1.2.3.0/24`` to forward everything except the
|
||||
local subnet over the VPN, for example.
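
For instance, a sketch with placeholder addresses and server name::

    sshuttle -r user@example.com 0/0 -x 192.168.1.0/24 -x example.com:22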
|
||||
|
||||
.. option:: -X <file>, --exclude-from=<file>
|
||||
|
||||
Exclude the subnets specified in a file, one subnet per
|
||||
line. Useful when you have lots of subnets to exclude.
|
||||
|
||||
.. option:: -v, --verbose
|
||||
|
||||
Print more information about the session. This option
|
||||
can be used more than once for increased verbosity. By
|
||||
default, :program:`sshuttle` prints only error messages.
|
||||
|
||||
.. option:: -e, --ssh-cmd
|
||||
|
||||
The command to use to connect to the remote server. The
|
||||
default is just ``ssh``. Use this if your ssh client is
|
||||
in a non-standard location or you want to provide extra
|
||||
options to the ssh command, for example, ``-e 'ssh -v'``.
|
||||
|
||||
.. option:: --remote-shell
|
||||
|
||||
For Windows targets, specify the remote shell program to use instead of the de facto POSIX shell.
It is normally either ``cmd`` or ``powershell``, unless something like git-bash is in use.
|
||||
|
||||
.. option:: --no-cmd-delimiter
|
||||
|
||||
Do not add a double dash (--) delimiter before invoking Python on
|
||||
the remote host. This option is useful when the ssh command used
|
||||
to connect is a custom command that does not interpret this
|
||||
delimiter correctly.
|
||||
|
||||
.. option:: --seed-hosts
|
||||
|
||||
A comma-separated list of hostnames to use to
|
||||
initialize the :option:`--auto-hosts` scan algorithm.
|
||||
:option:`--auto-hosts` does things like poll netstat output
|
||||
for lists of local hostnames, but you can speed it up
by using this option to give it a few names to start
|
||||
from.
|
||||
|
||||
If this option is used *without* :option:`--auto-hosts`,
|
||||
then the listed hostnames will be scanned and added, but
|
||||
no further hostnames will be added.
|
||||
|
||||
.. option:: --no-latency-control
|
||||
|
||||
Sacrifice latency to improve bandwidth benchmarks. ssh
|
||||
uses really big socket buffers, which can overload the
|
||||
connection if you start doing large file transfers,
|
||||
thus making all your other sessions inside the same
|
||||
tunnel go slowly. Normally, :program:`sshuttle` tries to avoid
|
||||
this problem using a "fullness check" that allows only
|
||||
a certain amount of outstanding data to be buffered at
|
||||
a time. But on high-bandwidth links, this can leave a
|
||||
lot of your bandwidth underutilized. It also makes
|
||||
:program:`sshuttle` seem slow in bandwidth benchmarks (benchmarks
|
||||
rarely test ping latency, which is what :program:`sshuttle` is
|
||||
trying to control). This option disables the latency
|
||||
control feature, maximizing bandwidth usage. Use at
|
||||
your own risk.
|
||||
|
||||
.. option:: --latency-buffer-size
|
||||
|
||||
Set the size of the buffer used in latency control. The
|
||||
default is ``32768``. Changing this option allows a compromise
|
||||
to be made between latency and bandwidth without completely
|
||||
disabling latency control (with :option:`--no-latency-control`).
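
For example, a sketch that doubles the default buffer size (the value
is only illustrative)::

    sshuttle --latency-buffer-size 65536 -r user@example.com 0/0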
|
||||
|
||||
.. option:: -D, --daemon
|
||||
|
||||
Automatically fork into the background after connecting
|
||||
to the remote server. Implies :option:`--syslog`.
|
||||
|
||||
.. option:: -s <file>, --subnets=<file>
|
||||
|
||||
Include the subnets specified in a file instead of on the
|
||||
command line. One subnet per line.
|
||||
|
||||
.. option:: --syslog
|
||||
|
||||
After connecting, send all log messages to the
|
||||
:manpage:`syslog(3)` service instead of stderr. This is
|
||||
implicit if you use :option:`--daemon`.
|
||||
|
||||
.. option:: --pidfile=<pidfilename>
|
||||
|
||||
When using :option:`--daemon`, save :program:`sshuttle`'s pid to
|
||||
*pidfilename*. The default is ``sshuttle.pid`` in the
|
||||
current directory.
|
||||
|
||||
.. option:: --disable-ipv6
|
||||
|
||||
Disable IPv6 support for methods that support it (nat, nft,
|
||||
tproxy, and pf).
|
||||
|
||||
.. option:: --firewall
|
||||
|
||||
(internal use only) run the firewall manager. This is
|
||||
the only part of :program:`sshuttle` that must run as root. If
|
||||
you start :program:`sshuttle` as a non-root user, it will
|
||||
automatically run ``sudo`` or ``su`` to start the firewall
|
||||
manager, but the core of :program:`sshuttle` still runs as a
|
||||
normal user.
|
||||
|
||||
.. option:: --hostwatch
|
||||
|
||||
(internal use only) run the hostwatch daemon. This
|
||||
process runs on the server side and collects hostnames for
|
||||
the :option:`--auto-hosts` option. Using this option by itself
|
||||
makes it a lot easier to debug and test the :option:`--auto-hosts`
|
||||
feature.
|
||||
|
||||
.. option:: --sudoers-no-modify
|
||||
|
||||
sshuttle prints a configuration to stdout which allows a user to
|
||||
run sshuttle without a password. This option is INSECURE because,
|
||||
with some cleverness, it also allows the user to run any command
|
||||
as root without a password. The output also includes a suggested
|
||||
method for you to install the configuration.
|
||||
|
||||
Use --sudoers-user to modify the user that it applies to.
|
||||
|
||||
.. option:: --sudoers-user
|
||||
|
||||
Set the user name or group with %group_name for passwordless
|
||||
operation. Default is the current user. Set to ALL for all users
|
||||
(NOT RECOMMENDED: See note about security in --sudoers-no-modify
|
||||
documentation above). Only works with the --sudoers-no-modify
|
||||
option.
|
||||
|
||||
.. option:: -t <mark>, --tmark=<mark>
|
||||
|
||||
An option used by the tproxy method: Use the specified traffic
|
||||
mark. The mark must be a hexadecimal value. Defaults to 0x01.
|
||||
|
||||
.. option:: --version
|
||||
|
||||
Print program version.
|
||||
|
||||
|
||||
Configuration File
|
||||
------------------
|
||||
All the options described above can optionally be specified in a configuration
|
||||
file.
|
||||
|
||||
To run :program:`sshuttle` with options defined in, e.g., `/etc/sshuttle.conf`
|
||||
just pass the path to the file preceded by the `@` character, e.g.
|
||||
`@/etc/sshuttle.conf`.
|
||||
|
||||
When running :program:`sshuttle` with options defined in a configuration file,
|
||||
options can still be passed via the command line in addition to what is
|
||||
defined in the file. If a given option is defined both in the file and in
|
||||
the command line, the value in the command line will take precedence.
|
||||
|
||||
Arguments read from a file must be one per line, as shown below::
|
||||
|
||||
value
|
||||
--option1
|
||||
value1
|
||||
--option2
|
||||
value2
|
||||
|
||||
The configuration file supports comments for human-readable
|
||||
annotations. For example::
|
||||
|
||||
# company-internal API
|
||||
8.8.8.8/32
|
||||
# home IoT
|
||||
192.168.63.0/24
|
||||
|
||||
|
||||
Environment Variable
|
||||
--------------------
|
||||
|
||||
You can specify command line options with the `SSHUTTLE_ARGS` environment
|
||||
variable. If a given option is defined in both the environment variable and
|
||||
command line, the value on the command line will take precedence.
|
||||
|
||||
For example::
|
||||
|
||||
SSHUTTLE_ARGS="-e 'ssh -v' --dns" sshuttle -r example.com 0/0
|
||||
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
Use the following command to route all IPv4 TCP traffic through remote
|
||||
(-r) host example.com (and possibly other traffic too, depending on
|
||||
the selected --method). The 0/0 subnet, short for 0.0.0.0/0, matches
|
||||
all IPv4 addresses. The ::/0 subnet, matching all IPv6 addresses, could
|
||||
be added to the example. We also exclude (-x) example.com:22 so that
|
||||
we can establish ssh connections from our local machine to the remote
|
||||
host without them being routed through sshuttle. Excluding the remote
|
||||
host may be necessary on some machines for sshuttle to work properly.
|
||||
Press Ctrl+C to exit. To also route DNS queries through sshuttle, try
|
||||
adding --dns. Add or remove -v options to see more or less
|
||||
information::
|
||||
|
||||
$ sshuttle -r example.com -x example.com:22 0/0
|
||||
|
||||
Starting sshuttle proxy (version ...).
|
||||
[local sudo] Password:
|
||||
fw: Starting firewall with Python version 3.9.5
|
||||
fw: ready method name nat.
|
||||
c : IPv6 disabled since it isn't supported by method nat.
|
||||
c : Method: nat
|
||||
c : IPv4: on
|
||||
c : IPv6: off (not available with nat method)
|
||||
c : UDP : off (not available with nat method)
|
||||
c : DNS : off (available)
|
||||
c : User: off (available)
|
||||
c : Subnets to forward through remote host (type, IP, cidr mask width, startPort, endPort):
|
||||
c : (<AddressFamily.AF_INET: 2>, '0.0.0.0', 0, 0, 0)
|
||||
c : Subnets to exclude from forwarding:
|
||||
c : (<AddressFamily.AF_INET: 2>, '...', 32, 22, 22)
|
||||
c : (<AddressFamily.AF_INET: 2>, '127.0.0.1', 32, 0, 0)
|
||||
c : TCP redirector listening on ('127.0.0.1', 12299).
|
||||
c : Starting client with Python version 3.9.5
|
||||
c : Connecting to server...
|
||||
user@example.com's password:
|
||||
s: Starting server with Python version 3.6.8
|
||||
s: latency control setting = True
|
||||
s: auto-nets:False
|
||||
c : Connected to server.
|
||||
fw: setting up.
|
||||
fw: iptables -w -t nat -N sshuttle-12299
|
||||
fw: iptables -w -t nat -F sshuttle-12299
|
||||
...
|
||||
Accept: 192.168.42.121:60554 -> 77.141.99.22:22.
|
||||
^C
|
||||
c : Keyboard interrupt: exiting.
|
||||
c : SW'unknown':Mux#1: deleting (1 remain)
|
||||
c : SW#7:192.168.42.121:60554: deleting (0 remain)
|
||||
|
||||
|
||||
Connect to a remote server, with automatic hostname
|
||||
and subnet guessing::
|
||||
|
||||
$ sshuttle -vNHr example.com -x example.com:22
|
||||
Starting sshuttle proxy (version ...).
|
||||
[local sudo] Password:
|
||||
fw: Starting firewall with Python version 3.9.5
|
||||
fw: ready method name nat.
|
||||
c : IPv6 disabled since it isn't supported by method nat.
|
||||
c : Method: nat
|
||||
c : IPv4: on
|
||||
c : IPv6: off (not available with nat method)
|
||||
c : UDP : off (not available with nat method)
|
||||
c : DNS : off (available)
|
||||
c : User: off (available)
|
||||
c : Subnets to forward through remote host (type, IP, cidr mask width, startPort, endPort):
|
||||
c : NOTE: Additional subnets to forward may be added below by --auto-nets.
|
||||
c : Subnets to exclude from forwarding:
|
||||
c : (<AddressFamily.AF_INET: 2>, '...', 32, 22, 22)
|
||||
c : (<AddressFamily.AF_INET: 2>, '127.0.0.1', 32, 0, 0)
|
||||
c : TCP redirector listening on ('127.0.0.1', 12300).
|
||||
c : Starting client with Python version 3.9.5
|
||||
c : Connecting to server...
|
||||
user@example.com's password:
|
||||
s: Starting server with Python version 3.6.8
|
||||
s: latency control setting = True
|
||||
s: auto-nets:True
|
||||
c : Connected to server.
|
||||
c : seed_hosts: []
|
||||
s: available routes:
|
||||
s: 77.141.99.0/24
|
||||
fw: setting up.
|
||||
fw: iptables -w -t nat -N sshuttle-12300
|
||||
fw: iptables -w -t nat -F sshuttle-12300
|
||||
...
|
||||
c : Accept: 192.168.42.121:60554 -> 77.141.99.22:22.
|
||||
^C
|
||||
c : Keyboard interrupt: exiting.
|
||||
c : SW'unknown':Mux#1: deleting (1 remain)
|
||||
c : SW#7:192.168.42.121:60554: deleting (0 remain)
|
||||
|
||||
Run :program:`sshuttle` with a `/etc/sshuttle.conf` configuration file::
|
||||
|
||||
$ sshuttle @/etc/sshuttle.conf
|
||||
|
||||
Use the options defined in `/etc/sshuttle.conf` but be more verbose::
|
||||
|
||||
$ sshuttle @/etc/sshuttle.conf -vvv
|
||||
|
||||
Override the remote server defined in `/etc/sshuttle.conf`::
|
||||
|
||||
$ sshuttle @/etc/sshuttle.conf -r otheruser@test.example.com
|
||||
|
||||
Example configuration file::
|
||||
|
||||
192.168.0.0/16
|
||||
--remote
|
||||
user@example.com
|
||||
|
||||
|
||||
Discussion
|
||||
----------
|
||||
When it starts, :program:`sshuttle` creates an ssh session to the
|
||||
server specified by the ``-r`` option.
|
||||
|
||||
After connecting to the remote server, :program:`sshuttle` uploads its
|
||||
(python) source code to the remote end and executes it
|
||||
there. Thus, you don't need to install :program:`sshuttle` on the
|
||||
remote server, and there are never :program:`sshuttle` version
|
||||
conflicts between client and server.
|
||||
|
||||
Unlike most VPNs, :program:`sshuttle` forwards sessions, not packets.
|
||||
That is, it uses kernel transparent proxying (`iptables
|
||||
REDIRECT` rules on Linux) to
|
||||
capture outgoing TCP sessions, then creates entirely
|
||||
separate TCP sessions out to the original destination at
|
||||
the other end of the tunnel.
|
||||
|
||||
Packet-level forwarding (eg. using the tun/tap devices on
|
||||
Linux) seems elegant at first, but it results in
|
||||
several problems, notably the 'tcp over tcp' problem. The
|
||||
tcp protocol depends fundamentally on packets being dropped
|
||||
in order to implement its congestion control algorithm; if
|
||||
you pass tcp packets through a tcp-based tunnel (such as
|
||||
ssh), the inner tcp packets will never be dropped, and so
|
||||
the inner tcp stream's congestion control will be
|
||||
completely broken, and performance will be terrible. Thus,
|
||||
packet-based VPNs (such as IPsec and openvpn) cannot use
|
||||
tcp-based encrypted streams like ssh or ssl, and have to
|
||||
implement their own encryption from scratch, which is very
|
||||
complex and error prone.
|
||||
|
||||
:program:`sshuttle`'s simplicity comes from the fact that it can
|
||||
safely use the existing ssh encrypted tunnel without
|
||||
incurring a performance penalty. It does this by letting
|
||||
the client-side kernel manage the incoming tcp stream, and
|
||||
the server-side kernel manage the outgoing tcp stream;
|
||||
there is no need for congestion control to be shared
|
||||
between the two separate streams, so a tcp-based tunnel is
|
||||
fine.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:manpage:`ssh(1)`, :manpage:`python(1)`
|
8
docs/openwrt.rst
Normal file
@ -0,0 +1,8 @@
|
||||
OpenWRT
|
||||
========
|
||||
|
||||
Run::
|
||||
|
||||
opkg install python3 python3-pip iptables-mod-extra iptables-mod-nat-extra iptables-mod-ipopt
|
||||
python3 /usr/bin/pip3 install sshuttle
|
||||
sshuttle -l 0.0.0.0 -r <IP> -x 192.168.1.1 0/0
|
26
docs/overview.rst
Normal file
@ -0,0 +1,26 @@
|
||||
Overview
|
||||
========
|
||||
|
||||
As far as I know, sshuttle is the only program that solves the following
|
||||
common case:
|
||||
|
||||
- Your client machine (or router) is Linux, MacOS, FreeBSD, OpenBSD or pfSense.
|
||||
|
||||
- You have access to a remote network via ssh.
|
||||
|
||||
- You don't necessarily have admin access on the remote network.
|
||||
|
||||
- The remote network has no VPN, or only stupid/complex VPN
|
||||
protocols (IPsec, PPTP, etc). Or maybe you *are* the
|
||||
admin and you just got frustrated with the awful state of
|
||||
VPN tools.
|
||||
|
||||
- You don't want to create an ssh port forward for every
|
||||
single host/port on the remote network.
|
||||
|
||||
- You hate openssh's port forwarding because it's randomly
|
||||
slow and/or stupid.
|
||||
|
||||
- You can't use openssh's PermitTunnel feature because
|
||||
it's disabled by default on openssh servers; plus it does
|
||||
TCP-over-TCP, which has terrible performance (see below).
|
12
docs/platform.rst
Normal file
@ -0,0 +1,12 @@
|
||||
Platform Specific Notes
|
||||
=======================
|
||||
|
||||
Contents:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
chromeos
|
||||
tproxy
|
||||
windows
|
||||
openwrt
|
97
docs/requirements.rst
Normal file
@ -0,0 +1,97 @@
|
||||
Requirements
|
||||
============
|
||||
|
||||
Client side Requirements
|
||||
------------------------
|
||||
|
||||
- sudo, or root access on your client machine.
|
||||
(The server doesn't need admin access.)
|
||||
- Python 3.9 or greater.
|
||||
|
||||
|
||||
Linux with NAT method
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
Supports:
|
||||
|
||||
* IPv4 TCP
|
||||
* IPv4 DNS
|
||||
* IPv6 TCP
|
||||
* IPv6 DNS
|
||||
|
||||
Requires:
|
||||
|
||||
* iptables DNAT and REDIRECT modules. ip6tables for IPv6.
|
||||
|
||||
Linux with nft method
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
Supports
|
||||
|
||||
* IPv4 TCP
|
||||
* IPv4 DNS
|
||||
* IPv6 TCP
|
||||
* IPv6 DNS
|
||||
|
||||
Requires:
|
||||
|
||||
* nftables
|
||||
|
||||
Linux with TPROXY method
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Supports:
|
||||
|
||||
* IPv4 TCP
|
||||
* IPv4 UDP
|
||||
* IPv4 DNS
|
||||
* IPv6 TCP
|
||||
* IPv6 UDP
|
||||
* IPv6 DNS
|
||||
|
||||
|
||||
MacOS / FreeBSD / OpenBSD / pfSense
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Method: pf
|
||||
|
||||
Supports:
|
||||
|
||||
* IPv4 TCP
|
||||
* IPv4 DNS
|
||||
* IPv6 TCP
|
||||
* IPv6 DNS
|
||||
|
||||
Requires:
|
||||
|
||||
* You need to have the pfctl command.
|
||||
|
||||
Windows
|
||||
~~~~~~~
|
||||
|
||||
Experimental built-in support available. See :doc:`windows` for more information.
|
||||
|
||||
|
||||
Server side Requirements
|
||||
------------------------
|
||||
|
||||
- Python 3.9 or greater.
|
||||
|
||||
|
||||
Additional Suggested Software
|
||||
-----------------------------
|
||||
|
||||
- If you are using systemd, sshuttle can notify it when the connection to
|
||||
the remote end is established and the firewall rules are installed. For
|
||||
this feature to work you must configure the process start-up type for the
|
||||
sshuttle service unit to notify, as shown in the example below.
|
||||
|
||||
.. code-block:: ini
|
||||
:emphasize-lines: 6
|
||||
|
||||
[Unit]
|
||||
Description=sshuttle
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
ExecStart=/usr/bin/sshuttle --dns --remote <user>@<server> <subnets...>
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
11
docs/support.rst
Normal file
@ -0,0 +1,11 @@
|
||||
Support
|
||||
=======
|
||||
|
||||
Mailing list:
|
||||
|
||||
* Subscribe by sending a message to <sshuttle+subscribe@googlegroups.com>
|
||||
* List archives are at: http://groups.google.com/group/sshuttle
|
||||
|
||||
Issue tracker and pull requests at github:
|
||||
|
||||
* https://github.com/sshuttle/sshuttle
|
40
docs/tproxy.rst
Normal file
@ -0,0 +1,40 @@
|
||||
TPROXY
|
||||
======
|
||||
TPROXY is the only method that supports UDP.
|
||||
|
||||
There are some things you need to consider for TPROXY to work:
|
||||
|
||||
- The following commands need to be run first as root. This only needs to be
|
||||
done once after booting up::
|
||||
|
||||
ip route add local default dev lo table 100
|
||||
ip rule add fwmark {TMARK} lookup 100
|
||||
ip -6 route add local default dev lo table 100
|
||||
ip -6 rule add fwmark {TMARK} lookup 100
|
||||
|
||||
where {TMARK} is the identifier mark passed with -t or --tmark flag
|
||||
as a hexadecimal string (default value is '0x01').
|
||||
|
||||
- The ``--auto-nets`` feature does not detect IPv6 routes automatically. Add IPv6
|
||||
routes manually. e.g. by adding ``'::/0'`` to the end of the command line.
|
||||
|
||||
- The client needs to be run as root. e.g.::
|
||||
|
||||
sudo SSH_AUTH_SOCK="$SSH_AUTH_SOCK" $HOME/tree/sshuttle.tproxy/sshuttle --method=tproxy ...
|
||||
|
||||
- You may need to exclude the IP address of the server you are connecting to.
|
||||
Otherwise sshuttle may attempt to intercept the ssh packets, which will not
|
||||
work. Use the ``--exclude`` parameter for this.
|
||||
|
||||
- You need the ``--method=tproxy`` parameter, as above.
|
||||
|
||||
- The routes for the outgoing packets must already exist. For example, if your
|
||||
connection does not have IPv6 support, no IPv6 routes will exist, so IPv6
|
||||
packets will not be generated and sshuttle cannot intercept them::
|
||||
|
||||
telnet -6 www.google.com 80
|
||||
Trying 2404:6800:4001:805::1010...
|
||||
telnet: Unable to connect to remote host: Network is unreachable
|
||||
|
||||
Add some dummy routes to external interfaces. Make sure they get removed
|
||||
after sshuttle exits, however.
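
A minimal sketch, assuming a placeholder prefix and interface name;
adapt both to your network, and delete the route again afterwards::

    ip -6 route add 2001:db8::/64 dev eth0
    # ... run sshuttle --method=tproxy as described above ...
    ip -6 route del 2001:db8::/64 dev eth0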
|
35
docs/trivia.rst
Normal file
@ -0,0 +1,35 @@
|
||||
Useless Trivia
|
||||
==============
|
||||
This section written by the original author, Avery Pennarun
|
||||
<apenwarr@gmail.com>.
|
||||
|
||||
Back in 1998, I released the first version of `Tunnel
|
||||
Vision <http://alumnit.ca/wiki/?TunnelVisionReadMe>`_, a semi-intelligent VPN
|
||||
client for Linux. Unfortunately, I made two big mistakes: I implemented the
|
||||
key exchange myself (oops), and I ended up doing TCP-over-TCP (double oops).
|
||||
The resulting program worked okay - and people used it for years - but the
|
||||
performance was always a bit funny. And nobody ever found any security flaws
|
||||
in my key exchange, either, but that doesn't mean anything. :)
|
||||
|
||||
The same year, dcoombs and I also released Fast Forward, a proxy server
|
||||
supporting transparent proxying. Among other things, we used it for
|
||||
automatically splitting traffic across more than one Internet connection (a
|
||||
tool we called "Double Vision").
|
||||
|
||||
I was still in university at the time. A couple years after that, one of my
|
||||
professors was working with some graduate students on the technology that would
|
||||
eventually become `Slipstream Internet Acceleration
|
||||
<http://www.slipstream.com/>`_. He asked me to do a contract for him to build
|
||||
an initial prototype of a transparent proxy server for mobile networks. The
|
||||
idea was similar to sshuttle: if you reassemble and then disassemble the TCP
|
||||
packets, you can reduce latency and improve performance vs. just forwarding
|
||||
the packets over a plain VPN or mobile network. (It's unlikely that any of my
|
||||
code has persisted in the Slipstream product today, but the concept is still
|
||||
pretty cool. I'm still horrified that people use plain TCP on complex mobile
|
||||
networks with crazily variable latency, for which it was never really
|
||||
intended.)
|
||||
|
||||
That project I did for Slipstream was what first gave me the idea to merge
|
||||
the concepts of Fast Forward, Double Vision, and Tunnel Vision into a single
|
||||
program that was the best of all worlds. And here we are, at last.
|
||||
You're welcome.
|
93
docs/usage.rst
Normal file
@ -0,0 +1,93 @@
|
||||
Usage
|
||||
=====
|
||||
|
||||
.. note::
|
||||
|
||||
For information on usage with Windows, see the :doc:`windows` section.
|
||||
For information on using the TProxy method, see the :doc:`tproxy` section.
|
||||
|
||||
Forward all traffic::
|
||||
|
||||
sshuttle -r username@sshserver 0.0.0.0/0
|
||||
|
||||
- Use the :option:`sshuttle -r` parameter to specify a remote server.
|
||||
On some systems, you may also need to use the :option:`sshuttle -x`
|
||||
parameter to exclude sshserver or sshserver:22 so that your local
|
||||
machine can communicate directly to sshserver without it being
|
||||
redirected by sshuttle.
|
||||
|
||||
- By default sshuttle will automatically choose a method to use. Override with
|
||||
the :option:`sshuttle --method` parameter.
|
||||
|
||||
- There is a shortcut for 0.0.0.0/0 for those that value
|
||||
their wrists::
|
||||
|
||||
sshuttle -r username@sshserver 0/0
|
||||
|
||||
|
||||
- For 'My VPN broke and need a temporary solution FAST to access local IPv4 addresses'::
|
||||
|
||||
sshuttle --dns -NHr username@sshserver 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16
|
||||
|
||||
If you would also like your DNS queries to be proxied
|
||||
through the DNS server of the server you are connected to::
|
||||
|
||||
sshuttle --dns -r username@sshserver 0/0
|
||||
|
||||
The above is probably what you want to use to prevent
|
||||
local network attacks such as Firesheep and friends.
|
||||
See the documentation for the :option:`sshuttle --dns` parameter.
|
||||
|
||||
(You may be prompted for one or more passwords; first, the local password to
|
||||
become root using sudo, and then the remote ssh password. Or you might have
|
||||
sudo and ssh set up to not require passwords, in which case you won't be
|
||||
prompted at all.)
|
||||
|
||||
|
||||
Usage Notes
|
||||
-----------
|
||||
That's it! Now your local machine can access the remote network as if you
|
||||
were right there. And if your "client" machine is a router, everyone on
|
||||
your local network can make connections to your remote network.
|
||||
|
||||
You don't need to install sshuttle on the remote server;
|
||||
the remote server just needs to have python available.
|
||||
sshuttle will automatically upload its source code to the remote
python interpreter and run it there.
|
||||
|
||||
This creates a transparent proxy server on your local machine for all IP
|
||||
addresses that match 0.0.0.0/0. (You can use more specific IP addresses if
|
||||
you want; use any number of IP addresses or subnets to change which
|
||||
addresses get proxied. Using 0.0.0.0/0 proxies *everything*, which is
|
||||
interesting if you don't trust the people on your local network.)
|
||||
|
||||
Any TCP session you initiate to one of the proxied IP addresses will be
|
||||
captured by sshuttle and sent over an ssh session to the remote copy of
|
||||
sshuttle, which will then regenerate the connection on that end, and funnel
|
||||
the data back and forth through ssh.
|
||||
|
||||
Fun, right? A poor man's instant VPN, and you don't even have to have
|
||||
admin access on the server.
|
||||
|
||||
Sudoers File
|
||||
------------
|
||||
|
||||
sshuttle can generate a sudoers.d file for Linux and MacOS. This
|
||||
allows one or more users to run sshuttle without entering the
|
||||
local sudo password. **WARNING:** This option is *insecure*
|
||||
because, with some cleverness, it also allows these users to run any
|
||||
command (via the --ssh-cmd option) as root without a password.
|
||||
|
||||
To print a sudo configuration file and see a suggested way to install it, run::
|
||||
|
||||
sshuttle --sudoers-no-modify
|
||||
|
||||
A custom user or group can be set with the
|
||||
:option:`sshuttle --sudoers-no-modify --sudoers-user {user_descriptor}`
|
||||
option. Valid values for this vary based on how your system is configured.
|
||||
Values such as usernames, groups prepended with `%` and sudoers user
|
||||
aliases will work. See the sudoers manual for more information on valid
|
||||
user-specified actions. The option must be used with `--sudoers-no-modify`::
|
||||
|
||||
sshuttle --sudoers-no-modify --sudoers-user mike
|
||||
sshuttle --sudoers-no-modify --sudoers-user %sudo
|
28
docs/windows.rst
Normal file
@ -0,0 +1,28 @@
|
||||
Microsoft Windows
|
||||
=================
|
||||
|
||||
Experimental native support::
|
||||
|
||||
Experimental built-in support for Windows is available through the `windivert` method.
You have to install the https://pypi.org/project/pydivert package. You need Administrator privileges to use the windivert method.
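
For example, a sketch of running it from an Administrator shell,
assuming the method name `windivert` as described above (the server
name is a placeholder)::

    pip install pydivert
    sshuttle -r user@example.com --method=windivert 0.0.0.0/0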
|
||||
|
||||
Notes
|
||||
- sshuttle should be executed from an admin shell (automatic firewall process admin elevation is not available)
|
||||
- TCP/IPv4 supported (IPv6/UDP/DNS are not available)
|
||||
|
||||
Use Linux VM on Windows::
|
||||
|
||||
What we can really do is create a Linux VM with Vagrant (or simply
VirtualBox if you like). In the Vagrant settings, remember to turn on the
bridged NIC. Then, run sshuttle inside the VM as shown below::
|
||||
|
||||
sshuttle -l 0.0.0.0 -x 10.0.0.0/8 -x 192.168.0.0/16 0/0
|
||||
|
||||
10.0.0.0/8 excludes NAT traffic of Vagrant and 192.168.0.0/16 excludes
|
||||
traffic to local area network (assuming that we're using 192.168.0.0 subnet).
|
||||
|
||||
Assuming the VM has the IP 192.168.1.200 obtained on the bridge NIC (we can
|
||||
configure that in Vagrant), we can then ask Windows to route all its traffic
|
||||
via the VM by running the following in cmd.exe with admin right::
|
||||
|
||||
route add 0.0.0.0 mask 0.0.0.0 192.168.1.200
|
133
flake.lock
generated
Normal file
@ -0,0 +1,133 @@
|
||||
{
|
||||
"nodes": {
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731533236,
|
||||
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1740743217,
|
||||
"narHash": "sha256-brsCRzLqimpyhORma84c3W2xPbIidZlIc3JGIuQVSNI=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "b27ba4eb322d9d2bf2dc9ada9fd59442f50c8d7c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-24.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"pyproject-build-systems": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"pyproject-nix": [
|
||||
"pyproject-nix"
|
||||
],
|
||||
"uv2nix": [
|
||||
"uv2nix"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1740362541,
|
||||
"narHash": "sha256-S8Mno07MspggOv/xIz5g8hB2b/C5HPiX8E+rXzKY+5U=",
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "build-system-pkgs",
|
||||
"rev": "e151741c848ba92331af91f4e47640a1fb82be19",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "build-system-pkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"pyproject-nix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1739758351,
|
||||
"narHash": "sha256-Aoa4dEoC7Hf6+gFVk/SDquZTMFlmlfsgdTWuqQxzePs=",
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "pyproject.nix",
|
||||
"rev": "1329712f7f9af3a8b270764ba338a455b7323811",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "pyproject.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"pyproject-build-systems": "pyproject-build-systems",
|
||||
"pyproject-nix": "pyproject-nix",
|
||||
"uv2nix": "uv2nix"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"uv2nix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"pyproject-nix": [
|
||||
"pyproject-nix"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1740497536,
|
||||
"narHash": "sha256-K+8wsVooqhaqyxuvew3+62mgOfRLJ7whv7woqPU3Ypo=",
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "uv2nix",
|
||||
"rev": "d01fd3a141755ad5d5b93dd9fcbd76d6401f5bac",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "pyproject-nix",
|
||||
"repo": "uv2nix",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
117
flake.nix
Normal file
@ -0,0 +1,117 @@
|
||||
{
|
||||
description = "Transparent proxy server that works as a poor man's VPN. Forwards over ssh. Doesn't require admin. Works with Linux and MacOS. Supports DNS tunneling.";
|
||||
|
||||
inputs = {
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
|
||||
pyproject-nix = {
|
||||
url = "github:pyproject-nix/pyproject.nix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
uv2nix = {
|
||||
url = "github:pyproject-nix/uv2nix";
|
||||
inputs.pyproject-nix.follows = "pyproject-nix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
pyproject-build-systems = {
|
||||
url = "github:pyproject-nix/build-system-pkgs";
|
||||
inputs.pyproject-nix.follows = "pyproject-nix";
|
||||
inputs.uv2nix.follows = "uv2nix";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
};
|
||||
|
||||
outputs =
|
||||
{
|
||||
self,
|
||||
nixpkgs,
|
||||
flake-utils,
|
||||
pyproject-nix,
|
||||
uv2nix,
|
||||
pyproject-build-systems,
|
||||
}:
|
||||
flake-utils.lib.eachDefaultSystem (
|
||||
system:
|
||||
let
|
||||
inherit (nixpkgs) lib;
|
||||
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
|
||||
python = pkgs.python312;
|
||||
|
||||
workspace = uv2nix.lib.workspace.loadWorkspace { workspaceRoot = ./.; };
|
||||
|
||||
# Create package overlay from workspace.
|
||||
overlay = workspace.mkPyprojectOverlay {
|
||||
sourcePreference = "sdist";
|
||||
};
|
||||
|
||||
# Extend generated overlay with build fixups
|
||||
#
|
||||
# Uv2nix can only work with what it has, and uv.lock is missing essential metadata to perform some builds.
|
||||
# This is an additional overlay implementing build fixups.
|
||||
# See:
|
||||
# - https://pyproject-nix.github.io/uv2nix/FAQ.html
|
||||
pyprojectOverrides =
|
||||
final: prev:
|
||||
# Implement build fixups here.
|
||||
# Note that uv2nix is _not_ using Nixpkgs buildPythonPackage.
|
||||
# It's using https://pyproject-nix.github.io/pyproject.nix/build.html
|
||||
let
|
||||
inherit (final) resolveBuildSystem;
|
||||
inherit (builtins) mapAttrs;
|
||||
|
||||
# Build system dependencies specified in the shape expected by resolveBuildSystem
|
||||
# The empty lists below are lists of optional dependencies.
|
||||
#
|
||||
# A package `foo` with specification written as:
|
||||
# `setuptools-scm[toml]` in pyproject.toml would be written as
|
||||
# `foo.setuptools-scm = [ "toml" ]` in Nix
|
||||
buildSystemOverrides = {
|
||||
chardet.setuptools = [ ];
|
||||
colorlog.setuptools = [ ];
|
||||
python-debian.setuptools = [ ];
|
||||
pluggy.setuptools = [ ];
|
||||
pathspec.flit-core = [ ];
|
||||
packaging.flit-core = [ ];
|
||||
};
|
||||
|
||||
in
|
||||
mapAttrs (
|
||||
name: spec:
|
||||
prev.${name}.overrideAttrs (old: {
|
||||
nativeBuildInputs = old.nativeBuildInputs ++ resolveBuildSystem spec;
|
||||
})
|
||||
) buildSystemOverrides;
|
||||
|
||||
pythonSet =
|
||||
(pkgs.callPackage pyproject-nix.build.packages {
|
||||
inherit python;
|
||||
}).overrideScope
|
||||
(
|
||||
lib.composeManyExtensions [
|
||||
pyproject-build-systems.overlays.default
|
||||
overlay
|
||||
pyprojectOverrides
|
||||
]
|
||||
);
|
||||
|
||||
inherit (pkgs.callPackages pyproject-nix.build.util { }) mkApplication;
|
||||
package = mkApplication {
|
||||
venv = pythonSet.mkVirtualEnv "sshuttle" workspace.deps.default;
|
||||
package = pythonSet.sshuttle;
|
||||
};
|
||||
in
|
||||
{
|
||||
packages = {
|
||||
sshuttle = package;
|
||||
default = package;
|
||||
};
|
||||
devShells.default = pkgs.mkShell {
|
||||
packages = [
|
||||
pkgs.uv
|
||||
];
|
||||
};
|
||||
}
|
||||
);
|
||||
}
|
57
pyproject.toml
Normal file
@ -0,0 +1,57 @@
|
||||
[project]
|
||||
authors = [
|
||||
{name = "Brian May", email = "brian@linuxpenguins.xyz"},
|
||||
]
|
||||
license = {text = "LGPL-2.1"}
|
||||
requires-python = "<4.0,>=3.9"
|
||||
dependencies = []
|
||||
name = "sshuttle"
|
||||
version = "1.3.1"
|
||||
description = "Transparent proxy server that works as a poor man's VPN. Forwards over ssh. Doesn't require admin. Works with Linux and MacOS. Supports DNS tunneling."
|
||||
readme = "README.rst"
|
||||
classifiers = [
|
||||
"Development Status :: 5 - Production/Stable",
|
||||
"Intended Audience :: Developers",
|
||||
"Intended Audience :: End Users/Desktop",
|
||||
"License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)",
|
||||
"Operating System :: OS Independent",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
"Topic :: System :: Networking",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
sshuttle = "sshuttle.cmdline:main"
|
||||
|
||||
[dependency-groups]
|
||||
dev = [
|
||||
"pytest<9.0.0,>=8.0.1",
|
||||
"pytest-cov<7.0,>=4.1",
|
||||
"flake8<8.0.0,>=7.0.0",
|
||||
"pyflakes<4.0.0,>=3.2.0",
|
||||
"bump2version<2.0.0,>=1.0.1",
|
||||
"twine<7,>=5",
|
||||
"black>=25.1.0",
|
||||
"jedi-language-server>=0.44.0",
|
||||
"pylsp-mypy>=0.7.0",
|
||||
"python-lsp-server>=1.12.2",
|
||||
"ruff>=0.11.2",
|
||||
]
|
||||
docs = [
|
||||
"sphinx==8.1.3; python_version ~= \"3.10\"",
|
||||
"furo==2024.8.6",
|
||||
]
|
||||
|
||||
[tool.uv]
|
||||
default-groups = []
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
[tool.hatch.build.targets.sdist]
|
||||
exclude = [
|
||||
"/.jj"
|
||||
]
|
15
run
Executable file
@ -0,0 +1,15 @@
|
||||
#!/usr/bin/env sh
|
||||
set -e
|
||||
export PYTHONPATH="$(dirname "$0"):$PYTHONPATH"
|
||||
export PATH="$(dirname "$0")/bin:$PATH"
|
||||
|
||||
python_best_version() {
|
||||
if [ -x "$(command -v python3)" ] &&
|
||||
python3 -c "import sys; sys.exit(not sys.version_info > (3, 5))"; then
|
||||
exec python3 "$@"
|
||||
else
|
||||
exec python "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
python_best_version -m "sshuttle" "$@"
|
39
scripts/Containerfile
Normal file
@ -0,0 +1,39 @@
|
||||
# https://hub.docker.com/r/linuxserver/openssh-server/
|
||||
ARG BASE_IMAGE=docker.io/linuxserver/openssh-server:version-9.3_p2-r1
|
||||
|
||||
FROM ${BASE_IMAGE} as pyenv
|
||||
|
||||
# https://github.com/pyenv/pyenv/wiki#suggested-build-environment
|
||||
RUN apk add --no-cache build-base git libffi-dev openssl-dev bzip2-dev zlib-dev readline-dev sqlite-dev
|
||||
ENV PYENV_ROOT=/pyenv
|
||||
RUN curl https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash
|
||||
RUN /pyenv/bin/pyenv install 3.10
|
||||
RUN /pyenv/bin/pyenv install 3.11
|
||||
RUN /pyenv/bin/pyenv install 3.12
|
||||
RUN bash -xc 'rm -rf /pyenv/{.git,plugins} /pyenv/versions/*/lib/*/{test,config,config-*linux-gnu}' && \
|
||||
find /pyenv -type d -name __pycache__ -exec rm -rf {} + && \
|
||||
find /pyenv -type f -name '*.py[co]' -delete
|
||||
|
||||
FROM ${BASE_IMAGE}
|
||||
|
||||
RUN apk add --no-cache bash nginx iperf3
|
||||
|
||||
# pyenv setup
|
||||
ENV PYENV_ROOT=/pyenv
|
||||
ENV PATH=/pyenv/shims:/pyenv/bin:$PATH
|
||||
COPY --from=pyenv /pyenv /pyenv
|
||||
|
||||
# OpenSSH Server variables
|
||||
ENV PUID=1000
|
||||
ENV PGID=1000
|
||||
ENV PASSWORD_ACCESS=true
|
||||
ENV USER_NAME=test
|
||||
ENV USER_PASSWORD=test
|
||||
ENV LOG_STDOUT=true
|
||||
|
||||
# suppress linuxserver.io logo printing, change sshd config
|
||||
RUN sed -i '1 a exec &>/dev/null' /etc/s6-overlay/s6-rc.d/init-adduser/run
|
||||
|
||||
# https://www.linuxserver.io/blog/2019-09-14-customizing-our-containers
|
||||
# To customize the container and start other components
|
||||
COPY container.setup.sh /custom-cont-init.d/setup.sh
|
21
scripts/README.md
Normal file
@ -0,0 +1,21 @@
|
||||
# Container based test bed for sshuttle
|
||||
|
||||
```bash
|
||||
test-bed up -d # start containers
|
||||
|
||||
exec-sshuttle <node-id> [--copy-id] [--server-py=2.7|3.10] [--client-py=2.7|3.10] [--sshuttle-bin=/path/to/sshuttle] [sshuttle-args...]
|
||||
# --copy-id -> optionally do ssh-copy-id to make it passwordless for future runs
|
||||
# --sshuttle-bin -> use another sshuttle binary instead of one from dev setup
|
||||
# --server-py -> Python version to use on the server (managed by pyenv)
|
||||
# --client-py -> Python version to use on the client (managed by pyenv)
|
||||
|
||||
exec-sshuttle node-1 # start sshuttle to connect to node-1
|
||||
|
||||
exec-tool curl node-1 # curl the nginx instance running on node-1 via an IP that is only reachable via sshuttle
|
||||
exec-tool iperf3 node-1 # measure throughput to node-1
|
||||
|
||||
run-benchmark node-1 --client-py=3.10
|
||||
|
||||
```
|
||||
|
||||
<https://learn.microsoft.com/en-us/windows-server/administration/openssh/openssh_server_configuration#configuring-the-default-shell-for-openssh-in-windows>
|
34
scripts/compose.yml
Normal file
@ -0,0 +1,34 @@
|
||||
name: sshuttle-testbed
|
||||
|
||||
services:
|
||||
node-1:
|
||||
image: ghcr.io/sshuttle/sshuttle-testbed
|
||||
container_name: sshuttle-testbed-node-1
|
||||
hostname: node-1
|
||||
cap_add:
|
||||
- "NET_ADMIN"
|
||||
environment:
|
||||
- ADD_IP_ADDRESSES=10.55.1.77/24
|
||||
networks:
|
||||
default:
|
||||
ipv6_address: 2001:0DB8::551
|
||||
node-2:
|
||||
image: ghcr.io/sshuttle/sshuttle-testbed
|
||||
container_name: sshuttle-testbed-node-2
|
||||
hostname: node-2
|
||||
cap_add:
|
||||
- "NET_ADMIN"
|
||||
environment:
|
||||
- ADD_IP_ADDRESSES=10.55.2.77/32
|
||||
networks:
|
||||
default:
|
||||
ipv6_address: 2001:0DB8::552
|
||||
|
||||
networks:
|
||||
default:
|
||||
driver: bridge
|
||||
enable_ipv6: true
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 2001:0DB8::/112
|
||||
# internal: true
|
65
scripts/container.setup.sh
Executable file
@ -0,0 +1,65 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
# shellcheck shell=bash
|
||||
|
||||
set -e
|
||||
|
||||
function with_set_x() {
|
||||
set -x
|
||||
"$@"
|
||||
{
|
||||
ec=$?
|
||||
set +x
|
||||
return $ec
|
||||
} 2>/dev/null
|
||||
}
|
||||
|
||||
|
||||
function log() {
|
||||
echo "$*" >&2
|
||||
}
|
||||
|
||||
log ">>> Setting up $(hostname) | id: $(id)\nIP:\n$(ip a)\nRoutes:\n$(ip r)\npyenv:\n$(pyenv versions)"
|
||||
|
||||
echo "
|
||||
AcceptEnv PYENV_VERSION
|
||||
" >> /etc/ssh/sshd_config
|
||||
|
||||
iface="$(ip route | awk '/default/ { print $5 }')"
|
||||
default_gw="$(ip route | awk '/default/ { print $3 }')"
|
||||
for addr in ${ADD_IP_ADDRESSES//,/ }; do
|
||||
log ">>> Adding $addr to interface $iface"
|
||||
net_addr=$(ipcalc -n "$addr" | awk -F= '{print $2}')
|
||||
with_set_x ip addr add "$addr" dev "$iface"
|
||||
with_set_x ip route add "$net_addr" via "$default_gw" dev "$iface" # so that sshuttle -N can discover routes
|
||||
done
|
||||
|
||||
log ">>> Starting iperf3 server"
|
||||
iperf3 --server --port 5001 &
|
||||
|
||||
mkdir -p /www
|
||||
echo "<h5>Hello from $(hostname)</h5>
|
||||
<pre>
|
||||
<u>ip address</u>
|
||||
$(ip address)
|
||||
<u>ip route</u>
|
||||
$(ip route)
|
||||
</pre>" >/www/index.html
|
||||
echo "
|
||||
daemon off;
|
||||
worker_processes 1;
|
||||
error_log /dev/stdout info;
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
server {
|
||||
access_log /dev/stdout;
|
||||
listen 8080 default_server;
|
||||
listen [::]:8080 default_server;
|
||||
root /www;
|
||||
}
|
||||
}" >/etc/nginx/nginx.conf
|
||||
|
||||
log ">>> Starting nginx"
|
||||
nginx &
|
159
scripts/exec-sshuttle
Executable file
@ -0,0 +1,159 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
export MSYS_NO_PATHCONV=1
|
||||
|
||||
function with_set_x() {
|
||||
set -x
|
||||
"$@"
|
||||
{
|
||||
ec=$?
|
||||
set +x
|
||||
return $ec
|
||||
} 2>/dev/null
|
||||
}
|
||||
|
||||
function log() {
|
||||
echo "$*" >&2
|
||||
}
|
||||
|
||||
ssh_cmd='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
|
||||
ssh_copy_id=false
|
||||
args=()
|
||||
subnet_args=()
|
||||
while [[ $# -gt 0 ]]; do
|
||||
arg=$1
|
||||
shift
|
||||
case "$arg" in
|
||||
-v|-vv*)
|
||||
ssh_cmd+=" -v"
|
||||
args+=("$arg")
|
||||
;;
|
||||
-r)
|
||||
args+=("-r" "$1")
|
||||
shift
|
||||
;;
|
||||
--copy-id)
|
||||
ssh_copy_id=true
|
||||
;;
|
||||
--server-py=*)
|
||||
server_pyenv_ver="${arg#*=}"
|
||||
;;
|
||||
--client-py=*)
|
||||
client_pyenv_ver="${arg#*=}"
|
||||
;;
|
||||
-6)
|
||||
ipv6_only=true
|
||||
;;
|
||||
--sshuttle-bin=*)
|
||||
sshuttle_bin="${arg#*=}"
|
||||
;;
|
||||
-N|*/*)
|
||||
subnet_args+=("$arg")
|
||||
;;
|
||||
-*)
|
||||
args+=("$arg")
|
||||
;;
|
||||
*)
|
||||
if [[ -z "$target" ]]; then
|
||||
target=$arg
|
||||
else
|
||||
args+=("$arg")
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
if [[ ${#subnet_args[@]} -eq 0 ]]; then
|
||||
subnet_args=("-N")
|
||||
fi
|
||||
|
||||
if [[ $target == node-* ]]; then
|
||||
log "Target is a a test-bed node"
|
||||
port="2222"
|
||||
user_part="test:test"
|
||||
host=$("$(dirname "$0")/test-bed" get-ip "$target")
|
||||
index=${target#node-}
|
||||
if [[ $ipv6_only == true ]]; then
|
||||
args+=("2001:0DB8::/112")
|
||||
else
|
||||
args+=("10.55.$index.0/24")
|
||||
fi
|
||||
target="$user_part@$host:$port"
|
||||
if ! command -v sshpass >/dev/null; then
|
||||
log "sshpass is not found. You might have to manually enter ssh password: 'test'"
|
||||
fi
|
||||
if [[ -z $server_pyenv_ver ]]; then
|
||||
log "server-py argumwnt is not specified. Setting it to 3.8"
|
||||
server_pyenv_ver="3.8"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n $server_pyenv_ver ]]; then
|
||||
log "Would pass PYENV_VERRSION=$server_pyenv_ver to server. pyenv is required on server to make it work"
|
||||
pycmd="/pyenv/shims/python"
|
||||
ssh_cmd+=" -o SetEnv=PYENV_VERSION=${server_pyenv_ver:-'3'}"
|
||||
args=("--python=$pycmd" "${args[@]}")
|
||||
fi
|
||||
|
||||
if [[ $ssh_copy_id == true ]]; then
|
||||
log "Trying to make it passwordless"
|
||||
if [[ $target == *@* ]]; then
|
||||
user_part="${target%%@*}"
|
||||
host_part="${target#*@}"
|
||||
else
|
||||
user_part="$(whoami)"
|
||||
host_part="$target"
|
||||
fi
|
||||
if [[ $host_part == *:* ]]; then
|
||||
host="${host_part%:*}"
|
||||
port="${host_part#*:}"
|
||||
else
|
||||
host="$host_part"
|
||||
port="22"
|
||||
fi
|
||||
if [[ $user_part == *:* ]]; then
|
||||
user="${user_part%:*}"
|
||||
password="${user_part#*:}"
|
||||
else
|
||||
user="$user_part"
|
||||
password=""
|
||||
fi
|
||||
cmd=(ssh-copy-id -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p "$port" "$user@$host")
|
||||
if [[ -n $password ]] && command -v sshpass >/dev/null; then
|
||||
cmd=(sshpass -p "$password" "${cmd[@]}")
|
||||
fi
|
||||
with_set_x "${cmd[@]}"
|
||||
fi
|
||||
|
||||
if [[ -z $sshuttle_bin || "$sshuttle_bin" == dev ]]; then
|
||||
cd "$(dirname "$0")/.."
|
||||
export PYTHONPATH="."
|
||||
if [[ -n $client_pyenv_ver ]]; then
|
||||
log "Using pyenv version: $client_pyenv_ver"
|
||||
if ! command -v pyenv &>/dev/null; then log "You have to install pyenv to use --client-py"; exit 1; fi
|
||||
sshuttle_cmd=(/usr/bin/env PYENV_VERSION="$client_pyenv_ver" pyenv exec python -m sshuttle)
|
||||
else
|
||||
log "Using best python version availble"
|
||||
if [ -x "$(command -v python3)" ] &&
|
||||
python3 -c "import sys; sys.exit(not sys.version_info > (3, 5))"; then
|
||||
sshuttle_cmd=(python3 -m sshuttle)
|
||||
else
|
||||
sshuttle_cmd=(python -m sshuttle)
|
||||
fi
|
||||
fi
|
||||
else
|
||||
[[ -n $client_pyenv_ver ]] && log "Can't specify --client-py when --sshuttle-bin is specified" && exit 1
|
||||
sshuttle_cmd=("$sshuttle_bin")
|
||||
fi
|
||||
|
||||
if [[ " ${args[*]} " != *" --ssh-cmd "* ]]; then
|
||||
args=("--ssh-cmd" "$ssh_cmd" "${args[@]}")
|
||||
fi
|
||||
|
||||
if [[ " ${args[*]} " != *" -r "* ]]; then
|
||||
args=("-r" "$target" "${args[@]}")
|
||||
fi
|
||||
|
||||
set -x
|
||||
"${sshuttle_cmd[@]}" --version
|
||||
exec "${sshuttle_cmd[@]}" "${args[@]}" "${subnet_args[@]}"
|
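For reference, a minimal Python sketch of the user[:password]@host[:port] splitting that the --copy-id branch of exec-sshuttle performs with bash parameter expansion; the function name, defaults and sample target below are illustrative only, not part of the repository.

# Illustrative sketch: mirrors the ${target%%@*} / ${target#*@} splitting above.
def split_target(target, default_user="root", default_port="22"):
    user_part, _, host_part = target.rpartition("@")
    user_part = user_part or default_user       # the script uses $(whoami) here
    host, _, port = host_part.partition(":")
    user, _, password = user_part.partition(":")
    return user, password, host, port or default_port

# split_target("test:test@10.55.1.2:2222") -> ('test', 'test', '10.55.1.2', '2222')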
86
scripts/exec-tool
Executable file
@ -0,0 +1,86 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
|
||||
function with_set_x() {
|
||||
set -x
|
||||
"$@"
|
||||
{
|
||||
ec=$?
|
||||
set +x
|
||||
return $ec
|
||||
} 2>/dev/null
|
||||
}
|
||||
|
||||
function log() {
|
||||
echo "$*" >&2
|
||||
}
|
||||
|
||||
|
||||
args=()
|
||||
while [[ $# -gt 0 ]]; do
|
||||
arg=$1
|
||||
shift
|
||||
case "$arg" in
|
||||
-6)
|
||||
ipv6_only=true
|
||||
continue
|
||||
;;
|
||||
-*) ;;
|
||||
*)
|
||||
if [[ -z $tool ]]; then
|
||||
tool=$arg
|
||||
continue
|
||||
elif [[ -z $node ]]; then
|
||||
node=$arg
|
||||
continue
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
args+=("$arg")
|
||||
done
|
||||
|
||||
tool=${tool:?"tool argument missing. should be one of iperf3,ping,curl,ab"}
|
||||
node=${node:?"node argument missing. should be 'node-1', 'node-2' etc"}
|
||||
|
||||
if [[ $node == node-* ]]; then
|
||||
index=${node#node-}
|
||||
if [[ $ipv6_only == true ]]; then
|
||||
host="2001:0DB8::55$index"
|
||||
else
|
||||
host="10.55.$index.77"
|
||||
fi
|
||||
else
|
||||
host=$node
|
||||
fi
|
||||
|
||||
connect_timeout_sec=3
|
||||
|
||||
case "$tool" in
|
||||
ping)
|
||||
with_set_x exec ping -W $connect_timeout_sec "${args[@]}" "$host"
|
||||
;;
|
||||
iperf3)
|
||||
port=5001
|
||||
with_set_x exec iperf3 --client "$host" --port=$port --connect-timeout=$((connect_timeout_sec * 1000)) "${args[@]}"
|
||||
;;
|
||||
curl)
|
||||
port=8080
|
||||
if [[ $host = *:* ]]; then
|
||||
host="[$host]"
|
||||
args+=(--ipv6)
|
||||
fi
|
||||
with_set_x exec curl "http://$host:$port/" -v --connect-timeout $connect_timeout_sec "${args[@]}"
|
||||
;;
|
||||
ab)
|
||||
port=8080
|
||||
if [[ " ${args[*]}" != *" -n "* && " ${args[*]}" != *" -c "* ]]; then
|
||||
args=(-n 500 -c 50 "${args[@]}")
|
||||
fi
|
||||
with_set_x exec ab -s $connect_timeout_sec "${args[@]}" "http://$host:$port/"
|
||||
;;
|
||||
*)
|
||||
log "Unknown tool: $tool"
|
||||
exit 2
|
||||
;;
|
||||
esac
|
40
scripts/run-benchmark
Executable file
@ -0,0 +1,40 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
function with_set_x() {
|
||||
set -x
|
||||
"$@"
|
||||
{
|
||||
ec=$?
|
||||
set +x
|
||||
return $ec
|
||||
} 2>/dev/null
|
||||
}
|
||||
|
||||
function log() {
|
||||
echo "$*" >&2
|
||||
}
|
||||
|
||||
./test-bed up -d
|
||||
|
||||
benchmark() {
|
||||
log -e "\n======== Benchmarking sshuttle | Args: [$*] ========"
|
||||
local node=$1
|
||||
shift
|
||||
with_set_x ./exec-sshuttle "$node" --listen 55771 "$@" &
|
||||
sshuttle_pid=$!
|
||||
trap 'kill -0 $sshuttle_pid &>/dev/null && kill -15 $sshuttle_pid' EXIT
|
||||
while ! nc -z localhost 55771; do sleep 0.1; done
|
||||
sleep 1
|
||||
./exec-tool iperf3 "$node" --time=4
|
||||
with_set_x kill -15 $sshuttle_pid
|
||||
wait $sshuttle_pid || true
|
||||
}
|
||||
|
||||
if [[ $# -gt 0 ]]; then
|
||||
benchmark "${@}"
|
||||
else
|
||||
benchmark node-1 --sshuttle-bin="${SSHUTTLE_BIN:-sshuttle}"
|
||||
benchmark node-1 --sshuttle-bin=dev
|
||||
fi
|
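The `while ! nc -z localhost 55771` loop in run-benchmark above just polls until sshuttle's local listener accepts connections. A rough Python equivalent, for illustration only (host, port and timeout are arbitrary here):

# Rough Python equivalent of the nc -z polling loop above.
import socket
import time

def wait_for_port(host="localhost", port=55771, timeout=10.0):
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            # Succeeds once something is listening on host:port.
            with socket.create_connection((host, port), timeout=0.5):
                return True
        except OSError:
            time.sleep(0.1)
    return False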
9
scripts/run-checks
Executable file
@ -0,0 +1,9 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
export PYTHONPATH=.
|
||||
|
||||
set -x
|
||||
python -m flake8 sshuttle tests
|
||||
python -m pytest .
|
42
scripts/test-bed
Executable file
@ -0,0 +1,42 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
if [[ -z $1 || $1 = -* ]]; then
|
||||
set -- up "$@"
|
||||
fi
|
||||
|
||||
function with_set_x() {
|
||||
set -x
|
||||
"$@"
|
||||
{
|
||||
ec=$?
|
||||
set +x
|
||||
return $ec
|
||||
} 2>/dev/null
|
||||
}
|
||||
|
||||
function build() {
|
||||
# podman build -t ghcr.io/sshuttle/sshuttle-testbed .
|
||||
with_set_x docker build --progress=plain -t ghcr.io/sshuttle/sshuttle-testbed -f Containerfile .
|
||||
}
|
||||
|
||||
function compose() {
|
||||
# podman-compose "$@"
|
||||
with_set_x docker compose "$@"
|
||||
}
|
||||
|
||||
function get-ip() {
|
||||
local container_name=sshuttle-testbed-"$1"
|
||||
docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$container_name"
|
||||
}
|
||||
|
||||
if [[ $1 == get-ip ]]; then
|
||||
shift
|
||||
get-ip "$@"
|
||||
else
|
||||
if [[ $* = *--build* ]]; then
|
||||
build
|
||||
fi
|
||||
compose "$@"
|
||||
fi
|
30
setup.cfg
Normal file
@ -0,0 +1,30 @@
|
||||
[bumpversion]
|
||||
current_version = 1.3.1
|
||||
|
||||
[bumpversion:file:setup.py]
|
||||
|
||||
[bumpversion:file:pyproject.toml]
|
||||
|
||||
[bumpversion:file:sshuttle/version.py]
|
||||
|
||||
[aliases]
|
||||
test = pytest
|
||||
|
||||
[bdist_wheel]
|
||||
universal = 1
|
||||
|
||||
[upload]
|
||||
sign = true
|
||||
identity = 0x1784577F811F6EAC
|
||||
|
||||
[flake8]
|
||||
count = true
|
||||
show-source = true
|
||||
statistics = true
|
||||
max-line-length = 128
|
||||
|
||||
[pycodestyle]
|
||||
max-line-length = 128
|
||||
|
||||
[tool:pytest]
|
||||
addopts = --cov=sshuttle --cov-branch --cov-report=term-missing
|
1
sshuttle/__init__.py
Normal file
@ -0,0 +1 @@
|
||||
__version__ = "1.3.1"
|
10
sshuttle/__main__.py
Normal file
@ -0,0 +1,10 @@
|
||||
"""Coverage.py's main entry point."""
|
||||
import sys
|
||||
import os
|
||||
from sshuttle.cmdline import main
|
||||
from sshuttle.helpers import debug3
|
||||
|
||||
debug3("Start: (pid=%s, ppid=%s) %r" % (os.getpid(), os.getppid(), sys.argv))
|
||||
exit_code = main()
|
||||
debug3("Exit: (pid=%s, ppid=%s, code=%s) cmd %r" % (os.getpid(), os.getppid(), exit_code, sys.argv))
|
||||
sys.exit(exit_code)
|
53
sshuttle/assembler.py
Normal file
@ -0,0 +1,53 @@
|
||||
import sys
|
||||
import zlib
|
||||
import types
|
||||
import platform
|
||||
|
||||
stdin = stdin # type: typing.BinaryIO # noqa: F821 must be a previously defined global
|
||||
verbosity = verbosity # type: int # noqa: F821 must be a previously defined global
|
||||
if verbosity > 0:
|
||||
sys.stderr.write(' s: Running server on remote host with %s (version %s)\n'
|
||||
% (sys.executable, platform.python_version()))
|
||||
|
||||
z = zlib.decompressobj()
|
||||
|
||||
while 1:
|
||||
name = stdin.readline().strip()
|
||||
if name:
|
||||
# python2 compat: in python2 stdin.readline().strip() -> str
|
||||
# in python3 stdin.readline().strip() -> bytes
|
||||
# (see #481)
|
||||
if sys.version_info >= (3, 0):
|
||||
name = name.decode("ASCII")
|
||||
nbytes = int(stdin.readline())
|
||||
if verbosity >= 2:
|
||||
sys.stderr.write(' s: assembling %r (%d bytes)\n'
|
||||
% (name, nbytes))
|
||||
content = z.decompress(stdin.read(nbytes))
|
||||
|
||||
module = types.ModuleType(name)
|
||||
parents = name.rsplit(".", 1)
|
||||
if len(parents) == 2:
|
||||
parent, parent_name = parents
|
||||
setattr(sys.modules[parent], parent_name, module)
|
||||
|
||||
code = compile(content, name, "exec")
|
||||
exec(code, module.__dict__) # nosec
|
||||
sys.modules[name] = module
|
||||
else:
|
||||
break
|
||||
|
||||
sys.stderr.flush()
|
||||
sys.stdout.flush()
|
||||
|
||||
# import can only happen once the code has been transferred to
|
||||
# the server. 'noqa: E402' excludes these lines from QA checks.
|
||||
import sshuttle.helpers # noqa: E402
|
||||
sshuttle.helpers.verbose = verbosity
|
||||
|
||||
import sshuttle.cmdline_options as options # noqa: E402
|
||||
from sshuttle.server import main # noqa: E402
|
||||
|
||||
main(options.latency_control, options.latency_buffer_size,
|
||||
options.auto_hosts, options.to_nameserver,
|
||||
options.auto_nets)
|
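assembler.py above reads a simple framed stream from stdin: a module-name line, a byte-count line, then that many bytes of a single shared zlib stream, terminated by an empty name line. The following is a minimal sketch of a compatible sending side, purely to illustrate the framing; it is not the client's actual sending code.

# Illustrative sender for the framing that assembler.py reads above.
import zlib

def send_modules(wfile, modules):
    # modules: {dotted_module_name: source_bytes}; names/contents hypothetical.
    z = zlib.compressobj()
    for name, source in modules.items():
        # One shared zlib stream; Z_SYNC_FLUSH makes each chunk decodable
        # on its own by the single decompressobj on the receiving side.
        blob = z.compress(source) + z.flush(zlib.Z_SYNC_FLUSH)
        wfile.write(name.encode("ASCII") + b"\n")
        wfile.write(b"%d\n" % len(blob))
        wfile.write(blob)
    wfile.write(b"\n")  # empty name line ends the loop in assembler.py
    wfile.flush()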
1173
sshuttle/client.py
Normal file
File diff suppressed because it is too large
145
sshuttle/cmdline.py
Normal file
@ -0,0 +1,145 @@
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import socket
|
||||
import sys
|
||||
import sshuttle.helpers as helpers
|
||||
import sshuttle.client as client
|
||||
import sshuttle.firewall as firewall
|
||||
import sshuttle.hostwatch as hostwatch
|
||||
import sshuttle.ssyslog as ssyslog
|
||||
from sshuttle.options import parser, parse_ipport
|
||||
from sshuttle.helpers import family_ip_tuple, log, Fatal
|
||||
from sshuttle.sudoers import sudoers
|
||||
from sshuttle.namespace import enter_namespace
|
||||
|
||||
|
||||
def main():
|
||||
if 'SSHUTTLE_ARGS' in os.environ:
|
||||
env_args = shlex.split(os.environ['SSHUTTLE_ARGS'])
|
||||
else:
|
||||
env_args = []
|
||||
args = [*env_args, *sys.argv[1:]]
|
||||
|
||||
opt = parser.parse_args(args)
|
||||
|
||||
if opt.sudoers_no_modify:
|
||||
# sudoers() calls exit() when it completes
|
||||
sudoers(user_name=opt.sudoers_user)
|
||||
|
||||
if opt.daemon:
|
||||
opt.syslog = 1
|
||||
if opt.wrap:
|
||||
import sshuttle.ssnet as ssnet
|
||||
ssnet.MAX_CHANNEL = opt.wrap
|
||||
if opt.latency_buffer_size:
|
||||
import sshuttle.ssnet as ssnet
|
||||
ssnet.LATENCY_BUFFER_SIZE = opt.latency_buffer_size
|
||||
helpers.verbose = opt.verbose
|
||||
|
||||
try:
|
||||
# Since namespace and namespace-pid options are only available
|
||||
# in linux, we must check if it exists with getattr
|
||||
namespace = getattr(opt, 'namespace', None)
|
||||
namespace_pid = getattr(opt, 'namespace_pid', None)
|
||||
if namespace or namespace_pid:
|
||||
prefix = helpers.logprefix
|
||||
helpers.logprefix = 'ns: '
|
||||
enter_namespace(namespace, namespace_pid)
|
||||
helpers.logprefix = prefix
|
||||
|
||||
if opt.firewall:
|
||||
if opt.subnets or opt.subnets_file:
|
||||
parser.error('exactly zero arguments expected')
|
||||
return firewall.main(opt.method, opt.syslog)
|
||||
elif opt.hostwatch:
|
||||
hostwatch.hw_main(opt.subnets, opt.auto_hosts)
|
||||
return 0
|
||||
else:
|
||||
# parse_subnetports() is used to create a list of includes
|
||||
# and excludes. It is called once for each parameter and
|
||||
# returns a list of one or more items for each subnet (it
|
||||
# can return more than one item when a hostname in the
|
||||
# parameter resolves to multiple IP addresses. Here, we
|
||||
# flatten these lists.
|
||||
includes = [item for sublist in opt.subnets+opt.subnets_file
|
||||
for item in sublist]
|
||||
excludes = [item for sublist in opt.exclude for item in sublist]
|
||||
|
||||
if not includes and not opt.auto_nets:
|
||||
parser.error('at least one subnet, subnet file, '
|
||||
'or -N expected')
|
||||
remotename = opt.remote
|
||||
if remotename == '' or remotename == '-':
|
||||
remotename = None
|
||||
nslist = [family_ip_tuple(ns) for ns in opt.ns_hosts]
|
||||
if opt.seed_hosts:
|
||||
sh = re.split(r'[\s,]+', (opt.seed_hosts or "").strip())
|
||||
elif opt.auto_hosts:
|
||||
sh = []
|
||||
else:
|
||||
sh = None
|
||||
if opt.listen:
|
||||
ipport_v6 = None
|
||||
ipport_v4 = None
|
||||
lst = opt.listen.split(",")
|
||||
for ip in lst:
|
||||
family, ip, port = parse_ipport(ip)
|
||||
if family == socket.AF_INET6:
|
||||
ipport_v6 = (ip, port)
|
||||
else:
|
||||
ipport_v4 = (ip, port)
|
||||
else:
|
||||
# parse_ipport4('127.0.0.1:0')
|
||||
ipport_v4 = "auto"
|
||||
# parse_ipport6('[::1]:0')
|
||||
ipport_v6 = "auto" if not opt.disable_ipv6 else None
|
||||
try:
|
||||
int(opt.tmark, 16)
|
||||
except ValueError:
|
||||
parser.error("--tmark must be a hexadecimal value")
|
||||
opt.tmark = opt.tmark.lower() # make 'x' in 0x lowercase
|
||||
if not opt.tmark.startswith("0x"): # accept without 0x prefix
|
||||
opt.tmark = "0x%s" % opt.tmark
|
||||
if opt.syslog:
|
||||
ssyslog.start_syslog()
|
||||
ssyslog.close_stdin()
|
||||
ssyslog.stdout_to_syslog()
|
||||
ssyslog.stderr_to_syslog()
|
||||
return_code = client.main(ipport_v6, ipport_v4,
|
||||
opt.ssh_cmd,
|
||||
remotename,
|
||||
opt.python,
|
||||
opt.latency_control,
|
||||
opt.latency_buffer_size,
|
||||
opt.dns,
|
||||
nslist,
|
||||
opt.method,
|
||||
sh,
|
||||
opt.auto_hosts,
|
||||
opt.auto_nets,
|
||||
includes,
|
||||
excludes,
|
||||
opt.daemon,
|
||||
opt.to_ns,
|
||||
opt.pidfile,
|
||||
opt.user,
|
||||
opt.group,
|
||||
opt.sudo_pythonpath,
|
||||
opt.add_cmd_delimiter,
|
||||
opt.remote_shell,
|
||||
opt.tmark)
|
||||
|
||||
if return_code == 0:
|
||||
log('Normal exit code, exiting...')
|
||||
else:
|
||||
log('Abnormal exit code %d detected, failing...' % return_code)
|
||||
return return_code
|
||||
|
||||
except Fatal as e:
|
||||
log('fatal: %s' % e)
|
||||
return 99
|
||||
except KeyboardInterrupt:
|
||||
log('\n')
|
||||
log('Keyboard interrupt: exiting.')
|
||||
return 1
|
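main() above prepends anything found in the SSHUTTLE_ARGS environment variable to the command-line arguments before parsing. A small illustration (the option values here are made up):

# Illustration of the SSHUTTLE_ARGS merge performed by main() above.
import shlex

env_value = "-v --dns"                     # hypothetical SSHUTTLE_ARGS value
argv_tail = ["-r", "example-host", "0/0"]  # what the user typed
args = [*shlex.split(env_value), *argv_tail]
# -> ['-v', '--dns', '-r', 'example-host', '0/0'],
#    which is then fed to parser.parse_args(args)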
428
sshuttle/firewall.py
Normal file
@ -0,0 +1,428 @@
|
||||
import errno
|
||||
import shutil
|
||||
import socket
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import platform
|
||||
import traceback
|
||||
import subprocess as ssubprocess
|
||||
import base64
|
||||
import io
|
||||
|
||||
import sshuttle.ssyslog as ssyslog
|
||||
import sshuttle.helpers as helpers
|
||||
from sshuttle.helpers import is_admin_user, log, debug1, debug2, debug3, Fatal
|
||||
from sshuttle.methods import get_auto_method, get_method
|
||||
|
||||
if sys.platform == 'win32':
|
||||
HOSTSFILE = r"C:\Windows\System32\drivers\etc\hosts"
|
||||
else:
|
||||
HOSTSFILE = '/etc/hosts'
|
||||
sshuttle_pid = None
|
||||
|
||||
|
||||
def rewrite_etc_hosts(hostmap, port):
|
||||
BAKFILE = '%s.sbak' % HOSTSFILE
|
||||
APPEND = '# sshuttle-firewall-%d AUTOCREATED' % port
|
||||
old_content = ''
|
||||
st = None
|
||||
try:
|
||||
old_content = open(HOSTSFILE).read()
|
||||
st = os.stat(HOSTSFILE)
|
||||
except IOError as e:
|
||||
if e.errno == errno.ENOENT:
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
if old_content.strip() and not os.path.exists(BAKFILE):
|
||||
try:
|
||||
os.link(HOSTSFILE, BAKFILE)
|
||||
except OSError:
|
||||
# file is locked - performing non-atomic copy
|
||||
shutil.copyfile(HOSTSFILE, BAKFILE)
|
||||
tmpname = "%s.%d.tmp" % (HOSTSFILE, port)
|
||||
f = open(tmpname, 'w')
|
||||
for line in old_content.rstrip().split('\n'):
|
||||
if line.find(APPEND) >= 0:
|
||||
continue
|
||||
f.write('%s\n' % line)
|
||||
for (name, ip) in sorted(hostmap.items()):
|
||||
f.write('%-30s %s\n' % ('%s %s' % (ip, name), APPEND))
|
||||
f.close()
|
||||
|
||||
if sys.platform != 'win32':
|
||||
if st is not None:
|
||||
os.chown(tmpname, st.st_uid, st.st_gid)
|
||||
os.chmod(tmpname, st.st_mode)
|
||||
else:
|
||||
os.chown(tmpname, 0, 0)
|
||||
os.chmod(tmpname, 0o644)
|
||||
try:
|
||||
os.rename(tmpname, HOSTSFILE)
|
||||
except OSError:
|
||||
# file is locked - performing non-atomic copy
|
||||
log('Warning: Using a non-atomic way to overwrite %s that can corrupt the file if '
|
||||
'multiple processes write to it simultaneously.' % HOSTSFILE)
|
||||
shutil.move(tmpname, HOSTSFILE)
|
||||
|
||||
|
||||
def restore_etc_hosts(hostmap, port):
|
||||
# Only restore if we added hosts to /etc/hosts previously.
|
||||
if len(hostmap) > 0:
|
||||
debug2('undoing /etc/hosts changes.')
|
||||
rewrite_etc_hosts({}, port)
|
||||
|
||||
|
||||
def firewall_exit(signum, frame):
|
||||
# The typical sshuttle exit is that the main sshuttle process
|
||||
# exits, closes file descriptors it uses, and the firewall process
|
||||
# notices that it can't read from stdin anymore and exits
|
||||
# (cleaning up firewall rules).
|
||||
#
|
||||
# However, in some cases, Ctrl+C might get sent to the firewall
|
||||
# process. This might be caused if someone manually tries to kill the
|
||||
# firewall process, or if sshuttle was started using sudo's use_pty option
|
||||
# and they try to exit by pressing Ctrl+C. Here, we forward the
|
||||
# Ctrl+C/SIGINT to the main sshuttle process which should trigger
|
||||
# the typical exit process as described above.
|
||||
global sshuttle_pid
|
||||
if sshuttle_pid:
|
||||
debug1("Relaying interupt signal to sshuttle process %d" % sshuttle_pid)
|
||||
if sys.platform == 'win32':
|
||||
sig = signal.CTRL_C_EVENT
|
||||
else:
|
||||
sig = signal.SIGINT
|
||||
os.kill(sshuttle_pid, sig)
|
||||
|
||||
|
||||
def _setup_daemon_for_unix_like():
|
||||
if not is_admin_user():
|
||||
raise Fatal('You must have root privileges (or enable su/sudo) to set the firewall')
|
||||
|
||||
# don't disappear if our controlling terminal or stdout/stderr
|
||||
# disappears; we still have to clean up.
|
||||
signal.signal(signal.SIGHUP, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGTERM, firewall_exit)
|
||||
signal.signal(signal.SIGINT, firewall_exit)
|
||||
|
||||
# Calling setsid() here isn't strictly necessary. However, it forces
|
||||
# Ctrl+C to get sent to the main sshuttle process instead of to
|
||||
# the firewall process---which is our preferred way to shutdown.
|
||||
# Nonetheless, if the firewall process receives a SIGTERM/SIGINT
|
||||
# signal, it will relay a SIGINT to the main sshuttle process
|
||||
# automatically.
|
||||
try:
|
||||
os.setsid()
|
||||
except OSError:
|
||||
# setsid() fails if sudo is configured with the use_pty option.
|
||||
pass
|
||||
|
||||
return sys.stdin.buffer, sys.stdout.buffer
|
||||
|
||||
|
||||
def _setup_daemon_for_windows():
|
||||
if not is_admin_user():
|
||||
raise Fatal('You must be administrator to set the firewall')
|
||||
|
||||
signal.signal(signal.SIGTERM, firewall_exit)
|
||||
signal.signal(signal.SIGINT, firewall_exit)
|
||||
|
||||
com_chan = os.environ.get('SSHUTTLE_FW_COM_CHANNEL')
|
||||
if com_chan == 'stdio':
|
||||
debug3('Using inherited stdio for communicating with sshuttle client process')
|
||||
else:
|
||||
debug3('Using shared socket for communicating with sshuttle client process')
|
||||
socket_share_data = base64.b64decode(com_chan)
|
||||
sock = socket.fromshare(socket_share_data) # type: socket.socket
|
||||
sys.stdin = io.TextIOWrapper(sock.makefile('rb', buffering=0))
|
||||
sys.stdout = io.TextIOWrapper(sock.makefile('wb', buffering=0), write_through=True)
|
||||
sock.close()
|
||||
return sys.stdin.buffer, sys.stdout.buffer
|
||||
|
||||
|
||||
# Isolate function that needs to be replaced for tests
|
||||
if sys.platform == 'win32':
|
||||
setup_daemon = _setup_daemon_for_windows
|
||||
else:
|
||||
setup_daemon = _setup_daemon_for_unix_like
|
||||
|
||||
|
||||
# Note that we're sorting in a very particular order:
|
||||
# we need to go from smaller, more specific, port ranges, to larger,
|
||||
# less-specific, port ranges. At each level, we order by subnet
|
||||
# width, from most-specific subnets (largest swidth) to
|
||||
# least-specific. On ties, excludes come first.
|
||||
# s:(inet, subnet width, exclude flag, subnet, first port, last port)
|
||||
def subnet_weight(s):
|
||||
return (-s[-1] + (s[-2] or -65535), s[1], s[2])
|
||||
|
||||
|
||||
def flush_systemd_dns_cache():
|
||||
# If the user is using systemd-resolve for DNS resolution, it is
|
||||
# possible for the request to go through systemd-resolve before we
|
||||
# see it...and it may use a cached result instead of sending a
|
||||
# request that we can intercept. When sshuttle starts and stops,
|
||||
# this means that we should clear the cache!
|
||||
#
|
||||
# The command to do this was named systemd-resolve, but changed to
|
||||
# resolvectl in systemd 239.
|
||||
# https://github.com/systemd/systemd/blob/f8eb41003df1a4eab59ff9bec67b2787c9368dbd/NEWS#L3816
|
||||
|
||||
p = None
|
||||
if helpers.which("resolvectl"):
|
||||
debug2("Flushing systemd's DNS resolver cache: "
|
||||
"resolvectl flush-caches")
|
||||
p = ssubprocess.Popen(["resolvectl", "flush-caches"],
|
||||
stdout=ssubprocess.PIPE, env=helpers.get_env())
|
||||
elif helpers.which("systemd-resolve"):
|
||||
debug2("Flushing systemd's DNS resolver cache: "
|
||||
"systemd-resolve --flush-caches")
|
||||
p = ssubprocess.Popen(["systemd-resolve", "--flush-caches"],
|
||||
stdout=ssubprocess.PIPE, env=helpers.get_env())
|
||||
|
||||
if p:
|
||||
# Wait so flush is finished and process doesn't show up as defunct.
|
||||
rv = p.wait()
|
||||
if rv != 0:
|
||||
log("Received non-zero return code %d when flushing DNS resolver "
|
||||
"cache." % rv)
|
||||
|
||||
|
||||
# This is some voodoo for setting up the kernel's transparent
|
||||
# proxying stuff. If subnets is empty, we just delete our sshuttle rules;
|
||||
# otherwise we delete it, then make them from scratch.
|
||||
#
|
||||
# This code is supposed to clean up after itself by deleting its rules on
|
||||
# exit. In case that fails, it's not the end of the world; future runs will
|
||||
# supersede it in the transproxy list, at least, so the leftover rules
|
||||
# are hopefully harmless.
|
||||
def main(method_name, syslog):
|
||||
helpers.logprefix = 'fw: '
|
||||
stdin, stdout = setup_daemon()
|
||||
hostmap = {}
|
||||
debug1('Starting firewall with Python version %s'
|
||||
% platform.python_version())
|
||||
|
||||
if method_name == "auto":
|
||||
method = get_auto_method()
|
||||
else:
|
||||
method = get_method(method_name)
|
||||
|
||||
if syslog:
|
||||
ssyslog.start_syslog()
|
||||
ssyslog.stderr_to_syslog()
|
||||
|
||||
if not method.is_supported():
|
||||
raise Fatal("The %s method is not supported on this machine. "
|
||||
"Check that the appropriate programs are in your "
|
||||
"PATH." % method_name)
|
||||
|
||||
debug1('ready method name %s.' % method.name)
|
||||
stdout.write(('READY %s\n' % method.name).encode('ASCII'))
|
||||
stdout.flush()
|
||||
|
||||
def _read_next_string_line():
|
||||
try:
|
||||
line = stdin.readline(128)
|
||||
if not line:
|
||||
return # parent probably exited
|
||||
return line.decode('ASCII').strip()
|
||||
except IOError as e:
|
||||
# On Windows, ConnectionResetError is thrown when the parent process closes its end of the socket pair
|
||||
debug3('read from stdin failed: %s' % (e,))
|
||||
return
|
||||
# we wait until we get some input before creating the rules. That way,
|
||||
# sshuttle can launch us as early as possible (and get sudo password
|
||||
# authentication as early in the startup process as possible).
|
||||
try:
|
||||
line = _read_next_string_line()
|
||||
if not line:
|
||||
return # parent probably exited
|
||||
except IOError as e:
|
||||
# On Windows, ConnectionResetError is thrown when the parent process closes its end of the socket pair
|
||||
debug3('read from stdin failed: %s' % (e,))
|
||||
return
|
||||
|
||||
subnets = []
|
||||
if line != 'ROUTES':
|
||||
raise Fatal('expected ROUTES but got %r' % line)
|
||||
while 1:
|
||||
line = _read_next_string_line()
|
||||
if not line:
|
||||
raise Fatal('expected route but got %r' % line)
|
||||
elif line.startswith("NSLIST"):
|
||||
break
|
||||
try:
|
||||
(family, width, exclude, ip, fport, lport) = line.split(',', 5)
|
||||
except Exception:
|
||||
raise Fatal('expected route or NSLIST but got %r' % line)
|
||||
subnets.append((
|
||||
int(family),
|
||||
int(width),
|
||||
bool(int(exclude)),
|
||||
ip,
|
||||
int(fport),
|
||||
int(lport)))
|
||||
debug2('Got subnets: %r' % subnets)
|
||||
|
||||
nslist = []
|
||||
if line != 'NSLIST':
|
||||
raise Fatal('expected NSLIST but got %r' % line)
|
||||
while 1:
|
||||
line = _read_next_string_line()
|
||||
if not line:
|
||||
raise Fatal('expected nslist but got %r' % line)
|
||||
elif line.startswith("PORTS "):
|
||||
break
|
||||
try:
|
||||
(family, ip) = line.split(',', 1)
|
||||
except Exception:
|
||||
raise Fatal('expected nslist or PORTS but got %r' % line)
|
||||
nslist.append((int(family), ip))
|
||||
debug2('Got partial nslist: %r' % nslist)
|
||||
debug2('Got nslist: %r' % nslist)
|
||||
|
||||
if not line.startswith('PORTS '):
|
||||
raise Fatal('expected PORTS but got %r' % line)
|
||||
_, _, ports = line.partition(" ")
|
||||
ports = ports.split(",")
|
||||
if len(ports) != 4:
|
||||
raise Fatal('expected 4 ports but got %d' % len(ports))
|
||||
port_v6 = int(ports[0])
|
||||
port_v4 = int(ports[1])
|
||||
dnsport_v6 = int(ports[2])
|
||||
dnsport_v4 = int(ports[3])
|
||||
|
||||
assert port_v6 >= 0
|
||||
assert port_v6 <= 65535
|
||||
assert port_v4 >= 0
|
||||
assert port_v4 <= 65535
|
||||
assert dnsport_v6 >= 0
|
||||
assert dnsport_v6 <= 65535
|
||||
assert dnsport_v4 >= 0
|
||||
assert dnsport_v4 <= 65535
|
||||
|
||||
debug2('Got ports: %d,%d,%d,%d'
|
||||
% (port_v6, port_v4, dnsport_v6, dnsport_v4))
|
||||
|
||||
line = _read_next_string_line()
|
||||
if not line or not line.startswith("GO "):
|
||||
raise Fatal('expected GO but got %r' % line)
|
||||
|
||||
_, _, args = line.partition(" ")
|
||||
global sshuttle_pid
|
||||
udp, user, group, tmark, sshuttle_pid = args.split(" ", 4)
|
||||
udp = bool(int(udp))
|
||||
sshuttle_pid = int(sshuttle_pid)
|
||||
if user == '-':
|
||||
user = None
|
||||
if group == '-':
|
||||
group = None
|
||||
debug2('Got udp: %r, user: %r, group: %r, tmark: %s, sshuttle_pid: %d' %
|
||||
(udp, user, group, tmark, sshuttle_pid))
|
||||
|
||||
subnets_v6 = [i for i in subnets if i[0] == socket.AF_INET6]
|
||||
nslist_v6 = [i for i in nslist if i[0] == socket.AF_INET6]
|
||||
subnets_v4 = [i for i in subnets if i[0] == socket.AF_INET]
|
||||
nslist_v4 = [i for i in nslist if i[0] == socket.AF_INET]
|
||||
|
||||
try:
|
||||
debug1('setting up.')
|
||||
|
||||
if subnets_v6 or nslist_v6:
|
||||
debug2('setting up IPv6.')
|
||||
method.setup_firewall(
|
||||
port_v6, dnsport_v6, nslist_v6,
|
||||
socket.AF_INET6, subnets_v6, udp,
|
||||
user, group, tmark)
|
||||
|
||||
if subnets_v4 or nslist_v4:
|
||||
debug2('setting up IPv4.')
|
||||
method.setup_firewall(
|
||||
port_v4, dnsport_v4, nslist_v4,
|
||||
socket.AF_INET, subnets_v4, udp,
|
||||
user, group, tmark)
|
||||
|
||||
try:
|
||||
# For some methods (e.g. windivert), firewall setup is deferred / runs asynchronously.
|
||||
# Such methods implement wait_for_firewall_ready() to wait until the firewall is up and running.
|
||||
method.wait_for_firewall_ready(sshuttle_pid)
|
||||
except NotImplementedError:
|
||||
pass
|
||||
|
||||
if sys.platform == 'linux':
|
||||
flush_systemd_dns_cache()
|
||||
|
||||
try:
|
||||
stdout.write(b'STARTED\n')
|
||||
stdout.flush()
|
||||
except IOError as e: # the parent process probably died
|
||||
debug3('write to stdout failed: %s' % (e,))
|
||||
return
|
||||
|
||||
# Now we wait until EOF or any other kind of exception. We need
|
||||
# to stay running so that we don't need a *second* password
|
||||
# authentication at shutdown time - that cleanup is important!
|
||||
while 1:
|
||||
line = _read_next_string_line()
|
||||
if not line:
|
||||
return
|
||||
if line.startswith('HOST '):
|
||||
(name, ip) = line[5:].split(',', 1)
|
||||
hostmap[name] = ip
|
||||
debug2('setting up /etc/hosts.')
|
||||
rewrite_etc_hosts(hostmap, port_v6 or port_v4)
|
||||
elif line:
|
||||
if not method.firewall_command(line):
|
||||
raise Fatal('expected command, got %r' % line)
|
||||
else:
|
||||
break
|
||||
finally:
|
||||
try:
|
||||
debug1('undoing changes.')
|
||||
except Exception:
|
||||
debug2('An error occurred, ignoring it.')
|
||||
|
||||
try:
|
||||
if subnets_v6 or nslist_v6:
|
||||
debug2('undoing IPv6 changes.')
|
||||
method.restore_firewall(port_v6, socket.AF_INET6, udp, user, group)
|
||||
except Exception:
|
||||
try:
|
||||
debug1("Error trying to undo IPv6 firewall.")
|
||||
debug1(traceback.format_exc())
|
||||
except Exception:
|
||||
debug2('An error occurred, ignoring it.')
|
||||
|
||||
try:
|
||||
if subnets_v4 or nslist_v4:
|
||||
debug2('undoing IPv4 changes.')
|
||||
method.restore_firewall(port_v4, socket.AF_INET, udp, user, group)
|
||||
except Exception:
|
||||
try:
|
||||
debug1("Error trying to undo IPv4 firewall.")
|
||||
debug1(traceback.format_exc())
|
||||
except Exception:
|
||||
debug2('An error occurred, ignoring it.')
|
||||
|
||||
try:
|
||||
# debug2() message printed in restore_etc_hosts() function.
|
||||
restore_etc_hosts(hostmap, port_v6 or port_v4)
|
||||
except Exception:
|
||||
try:
|
||||
debug1("Error trying to undo /etc/hosts changes.")
|
||||
debug1(traceback.format_exc())
|
||||
except Exception:
|
||||
debug2('An error occurred, ignoring it.')
|
||||
|
||||
if sys.platform == 'linux':
|
||||
try:
|
||||
flush_systemd_dns_cache()
|
||||
except Exception:
|
||||
try:
|
||||
debug1("Error trying to flush systemd dns cache.")
|
||||
debug1(traceback.format_exc())
|
||||
except Exception:
|
||||
debug2("An error occurred, ignoring it.")
|
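The firewall main() above speaks a small line protocol on stdin: ROUTES, one route per line, NSLIST, one nameserver per line, a PORTS line, a GO line, and then HOST lines as hosts are discovered. The snippet below is only an illustration of that framing; every address, port, tmark and pid value is made up.

# Example of the control stream parsed by firewall.main() above
# (all concrete values are illustrative).
control_stream = (
    b"ROUTES\n"
    b"2,24,0,192.168.42.0,0,0\n"        # family,width,exclude,ip,fport,lport
    b"NSLIST\n"
    b"2,192.168.42.1\n"                 # family,ip
    b"PORTS 12300,12300,12299,12299\n"  # port_v6,port_v4,dnsport_v6,dnsport_v4
    b"GO 0 - - 0x01 12345\n"            # udp user group tmark sshuttle_pid
    b"HOST myhost,192.168.42.7\n"       # sent later as hosts are discovered
)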
349
sshuttle/helpers.py
Normal file
@ -0,0 +1,349 @@
|
||||
import sys
|
||||
import socket
|
||||
import errno
|
||||
import os
|
||||
import threading
|
||||
import subprocess
|
||||
import traceback
|
||||
import re
|
||||
|
||||
if sys.platform != "win32":
|
||||
import fcntl
|
||||
|
||||
logprefix = ''
|
||||
verbose = 0
|
||||
|
||||
|
||||
def b(s):
|
||||
return s.encode("ASCII")
|
||||
|
||||
|
||||
def get_verbose_level():
|
||||
return verbose
|
||||
|
||||
|
||||
def log(s):
|
||||
global logprefix
|
||||
try:
|
||||
sys.stdout.flush()
|
||||
except (IOError, ValueError): # ValueError ~ I/O operation on closed file
|
||||
pass
|
||||
try:
|
||||
# Put newline at end of string if line doesn't have one.
|
||||
if not s.endswith("\n"):
|
||||
s = s+"\n"
|
||||
|
||||
prefix = logprefix
|
||||
s = s.rstrip("\n")
|
||||
for line in s.split("\n"):
|
||||
sys.stderr.write(prefix + line + "\n")
|
||||
prefix = " "
|
||||
sys.stderr.flush()
|
||||
except (IOError, ValueError): # ValueError ~ I/O operation on closed file
|
||||
# this could happen if stderr gets forcibly disconnected, eg. because
|
||||
# our tty closes. That sucks, but it's no reason to abort the program.
|
||||
pass
|
||||
|
||||
|
||||
def debug1(s):
|
||||
if verbose >= 1:
|
||||
log(s)
|
||||
|
||||
|
||||
def debug2(s):
|
||||
if verbose >= 2:
|
||||
log(s)
|
||||
|
||||
|
||||
def debug3(s):
|
||||
if verbose >= 3:
|
||||
log(s)
|
||||
|
||||
|
||||
class Fatal(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def resolvconf_nameservers(systemd_resolved):
|
||||
"""Retrieves a list of tuples (address type, address as a string) of
|
||||
the DNS servers used by the system to resolve hostnames.
|
||||
|
||||
If parameter is False, DNS servers are retrieved from only
|
||||
/etc/resolv.conf. This behavior makes sense for the sshuttle
|
||||
server.
|
||||
|
||||
If parameter is True, we retrieve information from both
|
||||
/etc/resolv.conf and /run/systemd/resolve/resolv.conf (if it
|
||||
exists). This behavior makes sense for the sshuttle client.
|
||||
|
||||
"""
|
||||
|
||||
# Historically, we just needed to read /etc/resolv.conf.
|
||||
#
|
||||
# If systemd-resolved is active, /etc/resolv.conf will point to
|
||||
# localhost and the actual DNS servers that systemd-resolved uses
|
||||
# are stored in /run/systemd/resolve/resolv.conf. For programs
|
||||
# that use the localhost DNS server, having sshuttle read
|
||||
# /etc/resolv.conf is sufficient. However, resolved provides other
|
||||
# ways of resolving hostnames (such as via dbus) that may not
|
||||
# route requests through localhost. So, we retrieve a list of DNS
|
||||
# servers that resolved uses so we can intercept those as well.
|
||||
#
|
||||
# For more information about systemd-resolved, see:
|
||||
# https://www.freedesktop.org/software/systemd/man/systemd-resolved.service.html
|
||||
#
|
||||
# On machines without systemd-resolved, we expect opening the
|
||||
# second file will fail.
|
||||
files = ['/etc/resolv.conf']
|
||||
if systemd_resolved:
|
||||
files += ['/run/systemd/resolve/resolv.conf']
|
||||
|
||||
nsservers = []
|
||||
for f in files:
|
||||
this_file_nsservers = []
|
||||
try:
|
||||
for line in open(f):
|
||||
words = line.lower().split()
|
||||
if len(words) >= 2 and words[0] == 'nameserver':
|
||||
this_file_nsservers.append(family_ip_tuple(words[1]))
|
||||
debug2("Found DNS servers in %s: %s" %
|
||||
(f, [n[1] for n in this_file_nsservers]))
|
||||
nsservers += this_file_nsservers
|
||||
except OSError as e:
|
||||
debug3("Failed to read %s when looking for DNS servers: %s" %
|
||||
(f, e.strerror))
|
||||
|
||||
return nsservers
|
||||
|
||||
|
||||
def windows_nameservers():
|
||||
out = subprocess.check_output(["powershell", "-NonInteractive", "-NoProfile", "-Command", "Get-DnsClientServerAddress"],
|
||||
encoding="utf-8")
|
||||
servers = set()
|
||||
for line in out.splitlines():
|
||||
if line.startswith("Loopback "):
|
||||
continue
|
||||
m = re.search(r'{.+}', line)
|
||||
if not m:
|
||||
continue
|
||||
for s in m.group().strip('{}').split(','):
|
||||
s = s.strip()
|
||||
if s.startswith('fec0:0:0:ffff'):
|
||||
continue
|
||||
servers.add(s)
|
||||
debug2("Found DNS servers: %s" % servers)
|
||||
return [(socket.AF_INET6 if ':' in s else socket.AF_INET, s) for s in servers]
|
||||
|
||||
|
||||
def get_random_nameserver():
|
||||
"""Return a random nameserver selected from servers produced by
|
||||
resolvconf_nameservers()/windows_nameservers()
|
||||
"""
|
||||
if sys.platform == "win32":
|
||||
if globals().get('_nameservers') is None:
|
||||
ns_list = windows_nameservers()
|
||||
globals()['_nameservers'] = ns_list
|
||||
else:
|
||||
ns_list = globals()['_nameservers']
|
||||
else:
|
||||
ns_list = resolvconf_nameservers(systemd_resolved=False)
|
||||
if ns_list:
|
||||
if len(ns_list) > 1:
|
||||
# don't import this unless we really need it
|
||||
import random
|
||||
random.shuffle(ns_list)
|
||||
return ns_list[0]
|
||||
else:
|
||||
return (socket.AF_INET, '127.0.0.1')
|
||||
|
||||
|
||||
def islocal(ip, family):
|
||||
sock = socket.socket(family)
|
||||
try:
|
||||
try:
|
||||
sock.bind((ip, 0))
|
||||
except socket.error:
|
||||
_, e = sys.exc_info()[:2]
|
||||
if e.args[0] == errno.EADDRNOTAVAIL:
|
||||
return False # not a local IP
|
||||
else:
|
||||
raise
|
||||
finally:
|
||||
sock.close()
|
||||
return True # it's a local IP, or there would have been an error
|
||||
|
||||
|
||||
def family_ip_tuple(ip):
|
||||
if ':' in ip:
|
||||
return (socket.AF_INET6, ip)
|
||||
else:
|
||||
return (socket.AF_INET, ip)
|
||||
|
||||
|
||||
def family_to_string(family):
|
||||
if family == socket.AF_INET6:
|
||||
return "AF_INET6"
|
||||
elif family == socket.AF_INET:
|
||||
return "AF_INET"
|
||||
else:
|
||||
return str(family)
|
||||
|
||||
|
||||
def get_env():
|
||||
"""An environment for sshuttle subprocesses. See get_path()."""
|
||||
env = {
|
||||
'PATH': get_path(),
|
||||
'LC_ALL': "C",
|
||||
}
|
||||
return env
|
||||
|
||||
|
||||
def get_path():
|
||||
"""Returns a string of paths separated by os.pathsep.
|
||||
|
||||
Users might not have all of the programs sshuttle needs in their
|
||||
PATH variable (i.e., some programs might be in /sbin). Use PATH
|
||||
and a hardcoded set of paths to search through. This function is
|
||||
used by our which() and get_env() functions. If which() and the
|
||||
subprocess environments differ, programs that which() finds might
|
||||
not be found at run time (or vice versa).
|
||||
"""
|
||||
path = []
|
||||
if "PATH" in os.environ:
|
||||
path += os.environ["PATH"].split(os.pathsep)
|
||||
# Python default paths.
|
||||
path += os.defpath.split(os.pathsep)
|
||||
# /sbin, etc are not in os.defpath and may not be in PATH either.
|
||||
# /bin/ and /usr/bin below are probably redundant.
|
||||
path += ['/bin', '/usr/bin', '/sbin', '/usr/sbin']
|
||||
|
||||
# Remove duplicates. Not strictly necessary.
|
||||
path_dedup = []
|
||||
for i in path:
|
||||
if i not in path_dedup:
|
||||
path_dedup.append(i)
|
||||
|
||||
return os.pathsep.join(path_dedup)
|
||||
|
||||
|
||||
if sys.version_info >= (3, 3):
|
||||
from shutil import which as _which
|
||||
else:
|
||||
# Although sshuttle does not officially support older versions of
|
||||
# Python, some still run the sshuttle server on remote machines
|
||||
# with old versions of python.
|
||||
def _which(file, mode=os.F_OK | os.X_OK, path=None):
|
||||
if path is not None:
|
||||
search_paths = path.split(os.pathsep)
|
||||
elif "PATH" in os.environ:
|
||||
search_paths = os.environ["PATH"].split(os.pathsep)
|
||||
else:
|
||||
search_paths = os.defpath.split(os.pathsep)
|
||||
|
||||
for p in search_paths:
|
||||
filepath = os.path.join(p, file)
|
||||
if os.path.exists(filepath) and os.access(filepath, mode):
|
||||
return filepath
|
||||
return None
|
||||
|
||||
|
||||
def which(file, mode=os.F_OK | os.X_OK):
|
||||
"""A wrapper around shutil.which() that searches a predictable set of
|
||||
paths and is more verbose about what is happening. See get_path()
|
||||
for more information.
|
||||
"""
|
||||
path = get_path()
|
||||
rv = _which(file, mode, path)
|
||||
if rv:
|
||||
debug2("which() found '%s' at %s" % (file, rv))
|
||||
else:
|
||||
debug2("which() could not find '%s' in %s" % (file, path))
|
||||
return rv
|
||||
|
||||
|
||||
def is_admin_user():
|
||||
if sys.platform == 'win32':
|
||||
# https://stackoverflow.com/questions/130763/request-uac-elevation-from-within-a-python-script/41930586#41930586
|
||||
import ctypes
|
||||
try:
|
||||
return ctypes.windll.shell32.IsUserAnAdmin()
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
# TODO(nom3ad): for sys.platform == 'linux', check capabilities for non-root users. (CAP_NET_ADMIN might be enough?)
|
||||
return os.getuid() == 0
|
||||
|
||||
|
||||
def set_non_blocking_io(fd):
|
||||
if sys.platform != "win32":
|
||||
try:
|
||||
os.set_blocking(fd, False)
|
||||
except AttributeError:
|
||||
# python < 3.5
|
||||
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
|
||||
flags |= os.O_NONBLOCK
|
||||
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
|
||||
else:
|
||||
_sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
|
||||
_sock.setblocking(False)
|
||||
|
||||
|
||||
class RWPair:
|
||||
def __init__(self, r, w):
|
||||
self.r = r
|
||||
self.w = w
|
||||
self.read = r.read
|
||||
self.readline = r.readline
|
||||
self.write = w.write
|
||||
self.flush = w.flush
|
||||
|
||||
def close(self):
|
||||
for f in self.r, self.w:
|
||||
try:
|
||||
f.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
class SocketRWShim:
|
||||
__slots__ = ('_r', '_w', '_on_end', '_s1', '_s2', '_t1', '_t2')
|
||||
|
||||
def __init__(self, r, w, on_end=None):
|
||||
self._r = r
|
||||
self._w = w
|
||||
self._on_end = on_end
|
||||
|
||||
self._s1, self._s2 = socket.socketpair()
|
||||
debug3("[SocketShim] r=%r w=%r | s1=%r s2=%r" % (self._r, self._w, self._s1, self._s2))
|
||||
|
||||
def stream_reader_to_sock():
|
||||
try:
|
||||
for data in iter(lambda: self._r.read(16384), b''):
|
||||
self._s1.sendall(data)
|
||||
# debug3("[SocketRWShim] <<<<< r.read() %d %r..." % (len(data), data[:min(32, len(data))]))
|
||||
except Exception:
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
finally:
|
||||
debug2("[SocketRWShim] Thread 'stream_reader_to_sock' exiting")
|
||||
self._s1.close()
|
||||
self._on_end and self._on_end()
|
||||
|
||||
def stream_sock_to_writer():
|
||||
try:
|
||||
for data in iter(lambda: self._s1.recv(16384), b''):
|
||||
while data:
|
||||
n = self._w.write(data)
|
||||
data = data[n:]
|
||||
# debug3("[SocketRWShim] <<<<< w.write() %d %r..." % (len(data), data[:min(32, len(data))]))
|
||||
except Exception:
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
finally:
|
||||
debug2("[SocketRWShim] Thread 'stream_sock_to_writer' exiting")
|
||||
self._s1.close()
|
||||
self._on_end and self._on_end()
|
||||
|
||||
self._t1 = threading.Thread(target=stream_reader_to_sock, name='stream_reader_to_sock', daemon=True).start()
|
||||
self._t2 = threading.Thread(target=stream_sock_to_writer, name='stream_sock_to_writer', daemon=True).start()
|
||||
|
||||
def makefiles(self):
|
||||
return self._s2.makefile("rb", buffering=0), self._s2.makefile("wb", buffering=0)
|
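SocketRWShim above pumps an arbitrary (reader, writer) pair through a socketpair so callers get real socket-backed file objects. A small usage sketch, assuming an unbuffered `cat` subprocess as a stand-in byte echo (the subprocess and prints are illustrative, not how sshuttle uses the shim):

# Usage sketch for SocketRWShim (defined above); the subprocess is illustrative.
import subprocess

p = subprocess.Popen(["cat"], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                     bufsize=0)
shim = SocketRWShim(p.stdout, p.stdin, on_end=lambda: print("stream ended"))
rfile, wfile = shim.makefiles()   # unbuffered, socket-backed binary files
wfile.write(b"ping\n")
print(rfile.readline())           # b'ping\n', relayed through the socketpair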
250
sshuttle/hostwatch.py
Normal file
@ -0,0 +1,250 @@
|
||||
import time
|
||||
import socket
|
||||
import re
|
||||
import select
|
||||
import errno
|
||||
import os
|
||||
import sys
|
||||
import platform
|
||||
|
||||
import subprocess as ssubprocess
|
||||
import sshuttle.helpers as helpers
|
||||
from sshuttle.helpers import log, debug1, debug2, debug3, get_env
|
||||
|
||||
POLL_TIME = 60 * 15
|
||||
NETSTAT_POLL_TIME = 30
|
||||
CACHEFILE = os.path.expanduser('~/.sshuttle.hosts')
|
||||
|
||||
# Have we already failed to write CACHEFILE?
|
||||
CACHE_WRITE_FAILED = False
|
||||
|
||||
SHOULD_WRITE_CACHE = False
|
||||
|
||||
hostnames = {}
|
||||
queue = {}
|
||||
try:
|
||||
null = open(os.devnull, 'wb')
|
||||
except IOError:
|
||||
_, e = sys.exc_info()[:2]
|
||||
log('warning: %s' % e)
|
||||
null = os.popen("sh -c 'while read x; do :; done'", 'wb', 4096)
|
||||
|
||||
|
||||
def _is_ip(s):
|
||||
return re.match(r'\d+\.\d+\.\d+\.\d+$', s)
|
||||
|
||||
|
||||
def write_host_cache():
|
||||
"""If possible, write our hosts file to disk so future connections
|
||||
can reuse the hosts that we already found."""
|
||||
tmpname = '%s.%d.tmp' % (CACHEFILE, os.getpid())
|
||||
global CACHE_WRITE_FAILED
|
||||
try:
|
||||
f = open(tmpname, 'wb')
|
||||
for name, ip in sorted(hostnames.items()):
|
||||
f.write(('%s,%s\n' % (name, ip)).encode("ASCII"))
|
||||
f.close()
|
||||
os.chmod(tmpname, 384) # 600 in octal, 'rw-------'
|
||||
os.rename(tmpname, CACHEFILE)
|
||||
CACHE_WRITE_FAILED = False
|
||||
except (OSError, IOError):
|
||||
# Write message if we haven't yet or if we get a failure after
|
||||
# a previous success.
|
||||
if not CACHE_WRITE_FAILED:
|
||||
log("Failed to write host cache to temporary file "
|
||||
"%s and rename it to %s" % (tmpname, CACHEFILE))
|
||||
CACHE_WRITE_FAILED = True
|
||||
|
||||
try:
|
||||
os.unlink(tmpname)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def read_host_cache():
|
||||
"""If possible, read the cache file from disk to populate hosts that
|
||||
were found in a previous sshuttle run."""
|
||||
try:
|
||||
f = open(CACHEFILE)
|
||||
except (OSError, IOError):
|
||||
_, e = sys.exc_info()[:2]
|
||||
if e.errno == errno.ENOENT:
|
||||
return
|
||||
else:
|
||||
log("Failed to read existing host cache file %s on remote host"
|
||||
% CACHEFILE)
|
||||
return
|
||||
for line in f:
|
||||
words = line.strip().split(',')
|
||||
if len(words) == 2:
|
||||
(name, ip) = words
|
||||
name = re.sub(r'[^-\w\.]', '-', name).strip()
|
||||
# Remove characters that shouldn't be in IP
|
||||
ip = re.sub(r'[^0-9.]', '', ip).strip()
|
||||
if name and ip:
|
||||
found_host(name, ip)
|
||||
f.close()
|
||||
global SHOULD_WRITE_CACHE
|
||||
if SHOULD_WRITE_CACHE:
|
||||
write_host_cache()
|
||||
SHOULD_WRITE_CACHE = False
|
||||
|
||||
|
||||
def found_host(name, ip):
|
||||
"""The provided name maps to the given IP. Add the host to the
|
||||
hostnames list, send the host to the sshuttle client via
|
||||
stdout, and write the host to the cache file.
|
||||
"""
|
||||
hostname = re.sub(r'\..*', '', name)
|
||||
hostname = re.sub(r'[^-\w\.]', '_', hostname)
|
||||
if (ip.startswith('127.') or ip.startswith('255.') or
|
||||
hostname == 'localhost'):
|
||||
return
|
||||
|
||||
if hostname != name:
|
||||
found_host(hostname, ip)
|
||||
|
||||
global SHOULD_WRITE_CACHE
|
||||
oldip = hostnames.get(name)
|
||||
if oldip != ip:
|
||||
hostnames[name] = ip
|
||||
debug1('Found: %s: %s' % (name, ip))
|
||||
sys.stdout.write('%s,%s\n' % (name, ip))
|
||||
SHOULD_WRITE_CACHE = True
|
||||
|
||||
|
||||
def _check_etc_hosts():
|
||||
"""If possible, read /etc/hosts to find hosts."""
|
||||
filename = '/etc/hosts'
|
||||
debug2(' > Reading %s on remote host' % filename)
|
||||
try:
|
||||
for line in open(filename):
|
||||
line = re.sub(r'#.*', '', line) # remove comments
|
||||
words = line.strip().split()
|
||||
if not words:
|
||||
continue
|
||||
ip = words[0]
|
||||
if _is_ip(ip):
|
||||
names = words[1:]
|
||||
debug3('< %s %r' % (ip, names))
|
||||
for n in names:
|
||||
check_host(n)
|
||||
found_host(n, ip)
|
||||
except (OSError, IOError):
|
||||
debug1("Failed to read %s on remote host" % filename)
|
||||
|
||||
|
||||
def _check_revdns(ip):
|
||||
"""Use reverse DNS to try to get hostnames from an IP addresses."""
|
||||
debug2(' > rev: %s' % ip)
|
||||
try:
|
||||
r = socket.gethostbyaddr(ip)
|
||||
debug3('< %s' % r[0])
|
||||
check_host(r[0])
|
||||
found_host(r[0], ip)
|
||||
except (OSError, socket.error, UnicodeError):
|
||||
# This case is expected to occur regularly.
|
||||
# debug3('< %s gethostbyaddr failed on remote host' % ip)
|
||||
pass
|
||||
|
||||
|
||||
def _check_dns(hostname):
|
||||
debug2(' > dns: %s' % hostname)
|
||||
try:
|
||||
ip = socket.gethostbyname(hostname)
|
||||
debug3('< %s' % ip)
|
||||
check_host(ip)
|
||||
found_host(hostname, ip)
|
||||
except (socket.gaierror, UnicodeError):
|
||||
pass
|
||||
|
||||
|
||||
def _check_netstat():
|
||||
debug2(' > netstat')
|
||||
argv = ['netstat', '-n']
|
||||
try:
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null,
|
||||
env=get_env())
|
||||
content = p.stdout.read().decode("ASCII")
|
||||
p.wait()
|
||||
except OSError:
|
||||
_, e = sys.exc_info()[:2]
|
||||
log('%r failed: %r' % (argv, e))
|
||||
return
|
||||
|
||||
# The same IPs may appear multiple times. Consolidate them so the
|
||||
# debug message doesn't print the same IP repeatedly.
|
||||
ip_list = []
|
||||
for ip in re.findall(r'\d+\.\d+\.\d+\.\d+', content):
|
||||
if ip not in ip_list:
|
||||
ip_list.append(ip)
|
||||
|
||||
for ip in sorted(ip_list):
|
||||
debug3('< %s' % ip)
|
||||
check_host(ip)
|
||||
|
||||
|
||||
def check_host(hostname):
|
||||
if _is_ip(hostname):
|
||||
_enqueue(_check_revdns, hostname)
|
||||
else:
|
||||
_enqueue(_check_dns, hostname)
|
||||
|
||||
|
||||
def _enqueue(op, *args):
|
||||
t = (op, args)
|
||||
if queue.get(t) is None:
|
||||
queue[t] = 0
|
||||
|
||||
|
||||
def _stdin_still_ok(timeout):
|
||||
r, _, _ = select.select([sys.stdin.fileno()], [], [], timeout)
|
||||
if r:
|
||||
b = os.read(sys.stdin.fileno(), 4096)
|
||||
if not b:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def hw_main(seed_hosts, auto_hosts):
|
||||
helpers.logprefix = 'HH: '
|
||||
|
||||
debug1('Starting hostwatch with Python version %s'
|
||||
% platform.python_version())
|
||||
|
||||
for h in seed_hosts:
|
||||
check_host(h)
|
||||
|
||||
if auto_hosts:
|
||||
read_host_cache()
|
||||
_enqueue(_check_etc_hosts)
|
||||
_enqueue(_check_netstat)
|
||||
check_host('localhost')
|
||||
check_host(socket.gethostname())
|
||||
|
||||
while 1:
|
||||
now = time.time()
|
||||
# For each item in the queue
|
||||
for t, last_polled in list(queue.items()):
|
||||
(op, args) = t
|
||||
if not _stdin_still_ok(0):
|
||||
break
|
||||
|
||||
# Determine if we need to run.
|
||||
maxtime = POLL_TIME
|
||||
# netstat runs more often than other jobs
|
||||
if op == _check_netstat:
|
||||
maxtime = NETSTAT_POLL_TIME
|
||||
|
||||
# Check if this job needs to run.
|
||||
if now - last_polled > maxtime:
|
||||
queue[t] = time.time()
|
||||
op(*args)
|
||||
try:
|
||||
sys.stdout.flush()
|
||||
except IOError:
|
||||
break
|
||||
|
||||
# FIXME: use a smarter timeout based on oldest last_polled
|
||||
if not _stdin_still_ok(1): # sleeps for up to 1 second
|
||||
break
|
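hostwatch reports each discovered mapping by writing a single "name,ip" line to stdout (found_host() above); these eventually reach the firewall process as the HOST lines handled in firewall.py earlier. Parsing such output is trivial; the sample lines below are made up.

# Illustrative parsing of hostwatch output lines ("name,ip" per found_host()).
sample_output = "fileserver,10.1.2.3\nbuildbox,10.1.2.4\n"
hosts = dict(line.split(",", 1) for line in sample_output.splitlines() if line)
# -> {'fileserver': '10.1.2.3', 'buildbox': '10.1.2.4'}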
51
sshuttle/linux.py
Normal file
@ -0,0 +1,51 @@
|
||||
import socket
|
||||
import subprocess as ssubprocess
|
||||
from sshuttle.helpers import log, debug1, Fatal, family_to_string, get_env
|
||||
|
||||
|
||||
def nonfatal(func, *args):
|
||||
try:
|
||||
func(*args)
|
||||
except Fatal as e:
|
||||
log('error: %s' % e)
|
||||
|
||||
|
||||
def ipt_chain_exists(family, table, name):
|
||||
if family == socket.AF_INET6:
|
||||
cmd = 'ip6tables'
|
||||
elif family == socket.AF_INET:
|
||||
cmd = 'iptables'
|
||||
else:
|
||||
raise Exception('Unsupported family "%s"' % family_to_string(family))
|
||||
argv = [cmd, '-w', '-t', table, '-nL']
|
||||
try:
|
||||
output = ssubprocess.check_output(argv, env=get_env())
|
||||
for line in output.decode('ASCII', errors='replace').split('\n'):
|
||||
if line.startswith('Chain %s ' % name):
|
||||
return True
|
||||
except ssubprocess.CalledProcessError as e:
|
||||
raise Fatal('%r returned %d' % (argv, e.returncode))
|
||||
|
||||
|
||||
def ipt(family, table, *args):
|
||||
if family == socket.AF_INET6:
|
||||
argv = ['ip6tables', '-w', '-t', table] + list(args)
|
||||
elif family == socket.AF_INET:
|
||||
argv = ['iptables', '-w', '-t', table] + list(args)
|
||||
else:
|
||||
raise Exception('Unsupported family "%s"' % family_to_string(family))
|
||||
debug1('%s' % ' '.join(argv))
|
||||
rv = ssubprocess.call(argv, env=get_env())
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
|
||||
|
||||
def nft(family, table, action, *args):
|
||||
if family in (socket.AF_INET, socket.AF_INET6):
|
||||
argv = ['nft', action, 'inet', table] + list(args)
|
||||
else:
|
||||
raise Exception('Unsupported family "%s"' % family_to_string(family))
|
||||
debug1('%s' % ' '.join(argv))
|
||||
rv = ssubprocess.call(argv, env=get_env())
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
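The ipt()/ipt_chain_exists() helpers above are thin wrappers around iptables/ip6tables that raise Fatal on a non-zero exit. A hedged example of how a caller might combine them; the chain name here is hypothetical, not necessarily one sshuttle actually creates.

# Illustrative use of the wrappers above; chain/table names are hypothetical.
import socket

chain = 'sshuttle-12300'
if not ipt_chain_exists(socket.AF_INET, 'nat', chain):
    ipt(socket.AF_INET, 'nat', '-N', chain)   # create the chain
ipt(socket.AF_INET, 'nat', '-F', chain)       # then flush it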
126
sshuttle/methods/__init__.py
Normal file
@ -0,0 +1,126 @@
|
||||
import importlib
|
||||
import socket
|
||||
import struct
|
||||
import sys
|
||||
import errno
|
||||
import ipaddress
|
||||
from sshuttle.helpers import Fatal, debug3
|
||||
|
||||
|
||||
def original_dst(sock):
|
||||
try:
|
||||
family = sock.family
|
||||
SO_ORIGINAL_DST = 80
|
||||
|
||||
if family == socket.AF_INET:
|
||||
SOCKADDR_MIN = 16
|
||||
sockaddr_in = sock.getsockopt(socket.SOL_IP,
|
||||
SO_ORIGINAL_DST, SOCKADDR_MIN)
|
||||
port, raw_ip = struct.unpack_from('!2xH4s', sockaddr_in[:8])
|
||||
ip = str(ipaddress.IPv4Address(raw_ip))
|
||||
elif family == socket.AF_INET6:
|
||||
sockaddr_in = sock.getsockopt(41, SO_ORIGINAL_DST, 64)
|
||||
port, raw_ip = struct.unpack_from("!2xH4x16s", sockaddr_in)
|
||||
ip = str(ipaddress.IPv6Address(raw_ip))
|
||||
else:
|
||||
raise Fatal("fw: Unknown family type.")
|
||||
except socket.error as e:
|
||||
if e.args[0] == errno.ENOPROTOOPT:
|
||||
return sock.getsockname()
|
||||
raise
|
||||
return (ip, port)
|
||||
|
||||
|
||||
class Features(object):
|
||||
pass
|
||||
|
||||
|
||||
class BaseMethod(object):
|
||||
def __init__(self, name):
|
||||
self.firewall = None
|
||||
self.name = name
|
||||
|
||||
def set_firewall(self, firewall):
|
||||
self.firewall = firewall
|
||||
|
||||
@staticmethod
|
||||
def get_supported_features():
|
||||
result = Features()
|
||||
result.loopback_proxy_port = True
|
||||
result.ipv4 = True
|
||||
result.ipv6 = False
|
||||
result.udp = False
|
||||
result.dns = True
|
||||
result.user = False
|
||||
result.group = False
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def is_supported():
|
||||
"""Returns true if it appears that this method will work on this
|
||||
machine."""
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def get_tcp_dstip(sock):
|
||||
return original_dst(sock)
|
||||
|
||||
@staticmethod
|
||||
def recv_udp(udp_listener, bufsize):
|
||||
debug3('Accept UDP using recvfrom.')
|
||||
data, srcip = udp_listener.recvfrom(bufsize)
|
||||
return (srcip, None, data)
|
||||
|
||||
def send_udp(self, sock, srcip, dstip, data):
|
||||
if srcip is not None:
|
||||
raise Fatal("Method %s send_udp does not support setting srcip to %r"
|
||||
% (self.name, srcip))
|
||||
sock.sendto(data, dstip)
|
||||
|
||||
def setup_tcp_listener(self, tcp_listener):
|
||||
pass
|
||||
|
||||
def setup_udp_listener(self, udp_listener):
|
||||
pass
|
||||
|
||||
def assert_features(self, features):
|
||||
avail = self.get_supported_features()
|
||||
for key in ["udp", "dns", "ipv6", "ipv4", "user"]:
|
||||
if getattr(features, key) and not getattr(avail, key):
|
||||
raise Fatal(
|
||||
"Feature %s not supported with method %s." %
|
||||
(key, self.name))
|
||||
|
||||
def setup_firewall(self, port, dnsport, nslist, family, subnets, udp,
|
||||
user, group, tmark):
|
||||
raise NotImplementedError()
|
||||
|
||||
def restore_firewall(self, port, family, udp, user, group):
|
||||
raise NotImplementedError()
|
||||
|
||||
def wait_for_firewall_ready(self, sshuttle_pid):
|
||||
raise NotImplementedError()
|
||||
|
||||
@staticmethod
|
||||
def firewall_command(line):
|
||||
return False
|
||||
|
||||
|
||||
def get_method(method_name):
|
||||
module = importlib.import_module("sshuttle.methods.%s" % method_name)
|
||||
return module.Method(method_name)
|
||||
|
||||
|
||||
def get_auto_method():
|
||||
debug3("Selecting a method automatically...")
|
||||
# Try these methods, in order:
|
||||
methods_to_try = ["nat", "nft", "pf", "ipfw"] if sys.platform != "win32" else ["windivert"]
|
||||
for m in methods_to_try:
|
||||
method = get_method(m)
|
||||
if method.is_supported():
|
||||
debug3("Method '%s' was automatically selected." % m)
|
||||
return method
|
||||
|
||||
raise Fatal("Unable to automatically find a supported method. Check that "
|
||||
"the appropriate programs are in your PATH. We tried "
|
||||
"methods: %s" % str(methods_to_try))
|
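get_method()/get_auto_method() above are how the firewall process picks a backend. A short usage sketch mirroring what firewall.main() does; the printed fields are just for illustration.

# Sketch of method selection as done by firewall.main() above.
method = get_auto_method()            # or get_method("nat"), get_method("pf"), ...
features = method.get_supported_features()
print(method.name, "ipv6:", features.ipv6, "udp:", features.udp)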
226
sshuttle/methods/ipfw.py
Normal file
@ -0,0 +1,226 @@
|
||||
import os
|
||||
import subprocess as ssubprocess
|
||||
from sshuttle.methods import BaseMethod
|
||||
from sshuttle.helpers import log, debug1, debug2, debug3, \
|
||||
Fatal, family_to_string, get_env, which
|
||||
|
||||
import socket
|
||||
|
||||
IP_BINDANY = 24
|
||||
IP_RECVDSTADDR = 7
|
||||
SOL_IPV6 = 41
|
||||
IPV6_RECVDSTADDR = 74
|
||||
|
||||
|
||||
def recv_udp(listener, bufsize):
|
||||
debug3('Accept UDP python using recvmsg.')
|
||||
data, ancdata, _, srcip = listener.recvmsg(4096,
|
||||
socket.CMSG_SPACE(4))
|
||||
dstip = None
|
||||
for cmsg_level, cmsg_type, cmsg_data in ancdata:
|
||||
if cmsg_level == socket.SOL_IP and cmsg_type == IP_RECVDSTADDR:
|
||||
port = 53
|
||||
ip = socket.inet_ntop(socket.AF_INET, cmsg_data[0:4])
|
||||
dstip = (ip, port)
|
||||
break
|
||||
return (srcip, dstip, data)
|
||||
|
||||
|
||||
def ipfw_rule_exists(n):
|
||||
argv = ['ipfw', 'list', '%d' % n]
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, env=get_env())
|
||||
|
||||
found = False
|
||||
for line in p.stdout:
|
||||
if line.startswith(b'%05d ' % n):
|
||||
if b'check-state :sshuttle' not in line:
|
||||
log('non-sshuttle ipfw rule: %r' % line.strip())
|
||||
raise Fatal('non-sshuttle ipfw rule #%d already exists!' % n)
|
||||
found = True
|
||||
break
|
||||
rv = p.wait()
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
return found
|
||||
|
||||
|
||||
_oldctls = {}
|
||||
|
||||
|
||||
def _fill_oldctls(prefix):
|
||||
argv = ['sysctl', prefix]
|
||||
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, env=get_env())
|
||||
for line in p.stdout:
|
||||
line = line.decode()
|
||||
assert line[-1] == '\n'
|
||||
(k, v) = line[:-1].split(': ', 1)
|
||||
_oldctls[k] = v.strip()
|
||||
rv = p.wait()
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
if not line:
|
||||
raise Fatal('%r returned no data' % (argv,))
|
||||
|
||||
|
||||
def _sysctl_set(name, val):
|
||||
argv = ['sysctl', '-w', '%s=%s' % (name, val)]
|
||||
debug1('>> %s' % ' '.join(argv))
|
||||
return ssubprocess.call(argv, stdout=open(os.devnull, 'w'), env=get_env())
|
||||
# No env: No output. (Or error that won't be parsed.)
|
||||
|
||||
|
||||
_changedctls = []
|
||||
|
||||
|
||||
def sysctl_set(name, val, permanent=False):
|
||||
PREFIX = 'net.inet.ip'
|
||||
assert name.startswith(PREFIX + '.')
|
||||
val = str(val)
|
||||
if not _oldctls:
|
||||
_fill_oldctls(PREFIX)
|
||||
if not (name in _oldctls):
|
||||
debug1('>> No such sysctl: %r' % name)
|
||||
return False
|
||||
oldval = _oldctls[name]
|
||||
if val != oldval:
|
||||
rv = _sysctl_set(name, val)
|
||||
if rv == 0 and permanent:
|
||||
debug1('>> ...saving permanently in /etc/sysctl.conf')
|
||||
f = open('/etc/sysctl.conf', 'a')
|
||||
f.write('\n'
|
||||
'# Added by sshuttle\n'
|
||||
'%s=%s\n' % (name, val))
|
||||
f.close()
|
||||
else:
|
||||
_changedctls.append(name)
|
||||
return True
|
||||
|
||||
|
||||
def ipfw(*args):
|
||||
argv = ['ipfw', '-q'] + list(args)
|
||||
debug1('>> %s' % ' '.join(argv))
|
||||
rv = ssubprocess.call(argv, env=get_env())
|
||||
# No env: No output. (Or error that won't be parsed.)
|
||||
if rv:
|
||||
raise Fatal('%r returned %d' % (argv, rv))
|
||||
|
||||
|
||||
def ipfw_noexit(*args):
|
||||
argv = ['ipfw', '-q'] + list(args)
|
||||
debug1('>> %s' % ' '.join(argv))
|
||||
ssubprocess.call(argv, env=get_env())
|
||||
# No env: No output. (Or error that won't be parsed.)
|
||||
|
||||
|
||||
class Method(BaseMethod):
|
||||
|
||||
def get_supported_features(self):
|
||||
result = super(Method, self).get_supported_features()
|
||||
result.ipv6 = False
|
||||
result.udp = False # NOTE: Almost there, kernel patch needed
|
||||
result.dns = True
|
||||
return result
|
||||
|
||||
def get_tcp_dstip(self, sock):
|
||||
return sock.getsockname()
|
||||
|
||||
def recv_udp(self, udp_listener, bufsize):
|
||||
srcip, dstip, data = recv_udp(udp_listener, bufsize)
|
||||
if not dstip:
|
||||
debug1(
|
||||
"-- ignored UDP from %r: "
|
||||
"couldn't determine destination IP address" % (srcip,))
|
||||
return None
|
||||
return srcip, dstip, data
|
||||
|
||||
def send_udp(self, sock, srcip, dstip, data):
|
||||
if not srcip:
|
||||
debug1(
|
||||
"-- ignored UDP to %r: "
|
||||
"couldn't determine source IP address" % (dstip,))
|
||||
return
|
||||
|
||||
# debug3('Sending SRC: %r DST: %r' % (srcip, dstip))
|
||||
sender = socket.socket(sock.family, socket.SOCK_DGRAM)
|
||||
sender.setsockopt(socket.SOL_IP, IP_BINDANY, 1)
|
||||
sender.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
sender.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
|
||||
sender.bind(srcip)
|
||||
sender.sendto(data, dstip)
|
||||
sender.close()
|
||||
|
||||
def setup_udp_listener(self, udp_listener):
|
||||
if udp_listener.v4 is not None:
|
||||
udp_listener.v4.setsockopt(socket.SOL_IP, IP_RECVDSTADDR, 1)
|
||||
# if udp_listener.v6 is not None:
|
||||
# udp_listener.v6.setsockopt(SOL_IPV6, IPV6_RECVDSTADDR, 1)
|
||||
|
||||
def setup_firewall(self, port, dnsport, nslist, family, subnets, udp,
|
||||
user, group, tmark):
|
||||
# IPv6 not supported
|
||||
if family not in [socket.AF_INET]:
|
||||
raise Exception(
|
||||
'Address family "%s" unsupported by ipfw method_name'
|
||||
% family_to_string(family))
|
||||
|
||||
# XXX: Any risk from this?
|
||||
ipfw_noexit('delete', '1')
|
||||
|
||||
while _changedctls:
|
||||
name = _changedctls.pop()
|
||||
oldval = _oldctls[name]
|
||||
_sysctl_set(name, oldval)
|
||||
|
||||
if subnets or dnsport:
|
||||
sysctl_set('net.inet.ip.fw.enable', 1)
|
||||
|
||||
ipfw('add', '1', 'check-state', ':sshuttle')
|
||||
|
||||
ipfw('add', '1', 'skipto', '2',
|
||||
'tcp',
|
||||
'from', 'any', 'to', 'table(125)')
|
||||
ipfw('add', '1', 'fwd', '127.0.0.1,%d' % port,
|
||||
'tcp',
|
||||
'from', 'any', 'to', 'table(126)',
|
||||
'setup', 'keep-state', ':sshuttle')
|
||||
|
||||
ipfw_noexit('table', '124', 'flush')
|
||||
dnscount = 0
|
||||
for _, ip in [i for i in nslist if i[0] == family]:
|
||||
ipfw('table', '124', 'add', '%s' % (ip))
|
||||
dnscount += 1
|
||||
if dnscount > 0:
|
||||
ipfw('add', '1', 'fwd', '127.0.0.1,%d' % dnsport,
|
||||
'udp',
|
||||
'from', 'any', 'to', 'table(124)',
|
||||
'keep-state', ':sshuttle')
|
||||
ipfw('add', '1', 'allow',
|
||||
'udp',
|
||||
'from', 'any', 'to', 'any')
|
||||
|
||||
if subnets:
|
||||
# create new subnet entries
|
||||
for _, swidth, sexclude, snet, fport, lport \
|
||||
in sorted(subnets, key=lambda s: s[1], reverse=True):
|
||||
if sexclude:
|
||||
ipfw('table', '125', 'add', '%s/%s' % (snet, swidth))
|
||||
else:
|
||||
ipfw('table', '126', 'add', '%s/%s' % (snet, swidth))
|
||||
|
||||
def restore_firewall(self, port, family, udp, user, group):
|
||||
if family not in [socket.AF_INET]:
|
||||
raise Exception(
|
||||
'Address family "%s" unsupported by ipfw method'
|
||||
% family_to_string(family))
|
||||
|
||||
ipfw_noexit('delete', '1')
|
||||
ipfw_noexit('table', '124', 'flush')
|
||||
ipfw_noexit('table', '125', 'flush')
|
||||
ipfw_noexit('table', '126', 'flush')
|
||||
|
||||
def is_supported(self):
|
||||
if which("ipfw"):
|
||||
return True
|
||||
debug2("ipfw method not supported because 'ipfw' command is "
|
||||
"missing.")
|
||||
return False
|
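The ipfw method's UDP handling hinges on IP_RECVDSTADDR ancillary data: setup_udp_listener() enables the option and recv_udp() pulls the original destination address out of recvmsg(). The standalone sketch below restates that mechanism outside the class; it is BSD-specific, reuses the constant value hard-coded in the diff (Python's socket module does not define IP_RECVDSTADDR), assumes socket.SOL_IP is available on the platform as the diff does, and the port value is invented for illustration.

# Standalone sketch of the IP_RECVDSTADDR technique used above (BSD only;
# illustration, not a drop-in replacement for the Method class).
import socket

IP_RECVDSTADDR = 7  # same value the diff hard-codes


def make_udp_listener(port=12300):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Ask the kernel to attach the original destination IP to each datagram.
    s.setsockopt(socket.SOL_IP, IP_RECVDSTADDR, 1)
    s.bind(('127.0.0.1', port))
    return s


def recv_with_dst(s):
    # recvmsg() returns (data, ancillary data, flags, source address); the
    # destination IP arrives as a 4-byte cmsg payload.
    data, ancdata, _, srcip = s.recvmsg(4096, socket.CMSG_SPACE(4))
    dstip = None
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        if cmsg_level == socket.SOL_IP and cmsg_type == IP_RECVDSTADDR:
            dstip = socket.inet_ntop(socket.AF_INET, cmsg_data[:4])
            break
    return srcip, dstip, data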
133
sshuttle/methods/nat.py
Normal file
@ -0,0 +1,133 @@
import socket
from sshuttle.firewall import subnet_weight
from sshuttle.helpers import family_to_string, which, debug2
from sshuttle.linux import ipt, ipt_chain_exists, nonfatal
from sshuttle.methods import BaseMethod


class Method(BaseMethod):

    # We name the chain based on the transproxy port number so that it's
    # possible to run multiple copies of sshuttle at the same time. Of course,
    # the multiple copies shouldn't have overlapping subnets, or only the most-
    # recently-started one will win (because we use "-I OUTPUT 1" instead of
    # "-A OUTPUT").
    def setup_firewall(self, port, dnsport, nslist, family, subnets, udp,
                       user, group, tmark):
        if family != socket.AF_INET and family != socket.AF_INET6:
            raise Exception(
                'Address family "%s" unsupported by nat method'
                % family_to_string(family))
        if udp:
            raise Exception("UDP not supported by nat method")
        table = "nat"

        def _ipt(*args):
            return ipt(family, table, *args)

        def _ipm(*args):
            return ipt(family, "mangle", *args)

        chain = 'sshuttle-%s' % port

        # basic cleanup/setup of chains
        self.restore_firewall(port, family, udp, user, group)

        _ipt('-N', chain)
        _ipt('-F', chain)
        if user is not None or group is not None:
            margs = ['-I', 'OUTPUT', '1', '-m', 'owner']
            if user is not None:
                margs += ['--uid-owner', str(user)]
            if group is not None:
                margs += ['--gid-owner', str(group)]
            margs += ['-j', 'MARK', '--set-mark', str(port)]
            nonfatal(_ipm, *margs)
            args = '-m', 'mark', '--mark', str(port), '-j', chain
        else:
            args = '-j', chain

        _ipt('-I', 'OUTPUT', '1', *args)
        _ipt('-I', 'PREROUTING', '1', *args)

        # Redirect DNS traffic as requested. This includes routing traffic
        # to localhost DNS servers through sshuttle.
        for _, ip in [i for i in nslist if i[0] == family]:
            _ipt('-A', chain, '-j', 'REDIRECT',
                 '--dest', '%s' % ip,
                 '-p', 'udp',
                 '--dport', '53',
                 '--to-ports', str(dnsport))

        # create new subnet entries.
        for _, swidth, sexclude, snet, fport, lport \
                in sorted(subnets, key=subnet_weight, reverse=True):
            tcp_ports = ('-p', 'tcp')
            if fport:
                tcp_ports = tcp_ports + ('--dport', '%d:%d' % (fport, lport))

            if sexclude:
                _ipt('-A', chain, '-j', 'RETURN',
                     '--dest', '%s/%s' % (snet, swidth),
                     *tcp_ports)
            else:
                _ipt('-A', chain, '-j', 'REDIRECT',
                     '--dest', '%s/%s' % (snet, swidth),
                     *(tcp_ports + ('--to-ports', str(port))))

        # Don't route any remaining local traffic through sshuttle.
        _ipt('-A', chain, '-j', 'RETURN',
             '-m', 'addrtype',
             '--dst-type', 'LOCAL')

    def restore_firewall(self, port, family, udp, user, group):
        # IPv4 and IPv6 are both handled by the nat method.
        if family != socket.AF_INET and family != socket.AF_INET6:
            raise Exception(
                'Address family "%s" unsupported by nat method'
                % family_to_string(family))
        if udp:
            raise Exception("UDP not supported by nat method")

        table = "nat"

        def _ipt(*args):
            return ipt(family, table, *args)

        def _ipm(*args):
            return ipt(family, "mangle", *args)

        chain = 'sshuttle-%s' % port

        # basic cleanup/setup of chains
        if ipt_chain_exists(family, table, chain):
            if user is not None or group is not None:
                margs = ['-D', 'OUTPUT', '-m', 'owner']
                if user is not None:
                    margs += ['--uid-owner', str(user)]
                if group is not None:
                    margs += ['--gid-owner', str(group)]
                margs += ['-j', 'MARK', '--set-mark', str(port)]
                nonfatal(_ipm, *margs)

                args = '-m', 'mark', '--mark', str(port), '-j', chain
            else:
                args = '-j', chain
            nonfatal(_ipt, '-D', 'OUTPUT', *args)
            nonfatal(_ipt, '-D', 'PREROUTING', *args)
            nonfatal(_ipt, '-F', chain)
            _ipt('-X', chain)

    def get_supported_features(self):
        result = super(Method, self).get_supported_features()
        result.user = True
        result.ipv6 = True
        result.group = True
        return result

    def is_supported(self):
        if which("iptables"):
            return True
        debug2("nat method not supported because 'iptables' command "
               "is missing.")
        return False
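Because setup_firewall() unpacks each subnet as a (family, width, exclude, network, first_port, last_port) tuple and each nameserver entry as (family, ip), a call into the nat method looks roughly like the sketch below. It is an illustration only: the addresses, ports and tmark value are invented, the tuple layout is inferred from the loops above, and actually running it needs root plus a working iptables.

# Illustrative call shape for the nat method (invented values; needs root).
import socket
from sshuttle.methods import get_method

method = get_method("nat")

# (family, width, exclude?, network, first_port, last_port); 0, 0 = all ports
subnets = [
    (socket.AF_INET, 24, False, '10.1.2.0', 0, 0),   # redirect this subnet
    (socket.AF_INET, 32, True, '10.1.2.1', 0, 0),    # ...but skip this host
]
nslist = [(socket.AF_INET, '10.1.2.53')]             # (family, nameserver)

method.setup_firewall(12300, 12300, nslist, socket.AF_INET, subnets,
                      udp=False, user=None, group=None, tmark='1')

# Later, on shutdown, the per-port chain "sshuttle-12300" is torn down:
method.restore_firewall(12300, socket.AF_INET, udp=False, user=None,
                        group=None)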
114
sshuttle/methods/nft.py
Normal file
@ -0,0 +1,114 @@
import socket
from sshuttle.firewall import subnet_weight
from sshuttle.linux import nft, nonfatal
from sshuttle.methods import BaseMethod
from sshuttle.helpers import debug2, which


class Method(BaseMethod):

    # We name the chain based on the transproxy port number so that it's
    # possible to run multiple copies of sshuttle at the same time. Of course,
    # the multiple copies shouldn't have overlapping subnets, or only the most-
    # recently-started one will win (because we use "-I OUTPUT 1" instead of
    # "-A OUTPUT").
    def setup_firewall(self, port, dnsport, nslist, family, subnets, udp,
                       user, group, tmark):
        if udp:
            raise Exception("UDP not supported by nft")

        if family == socket.AF_INET:
            table = 'sshuttle-ipv4-%s' % port
        if family == socket.AF_INET6:
            table = 'sshuttle-ipv6-%s' % port

        def _nft(action, *args):
            return nft(family, table, action, *args)

        chain = table

        # basic cleanup/setup of chains
        _nft('add table', '')
        _nft('add chain', 'prerouting',
             '{ type nat hook prerouting priority -100; policy accept; }')
        _nft('add chain', 'output',
             '{ type nat hook output priority -100; policy accept; }')
        _nft('add chain', chain)
        _nft('flush chain', chain)
        _nft('add rule', 'output jump %s' % chain)
        _nft('add rule', 'prerouting jump %s' % chain)

        # setup_firewall() gets called separately for ipv4 and ipv6. Make sure
        # we only handle the version that we expect to.
        if family == socket.AF_INET:
            _nft('add rule', chain, 'meta', 'nfproto', '!=', 'ipv4', 'return')
        else:
            _nft('add rule', chain, 'meta', 'nfproto', '!=', 'ipv6', 'return')

        # Strings to use below to simplify our code
        if family == socket.AF_INET:
            ip_version_l = 'ipv4'
            ip_version = 'ip'
        elif family == socket.AF_INET6:
            ip_version_l = 'ipv6'
            ip_version = 'ip6'

        # Redirect DNS traffic as requested. This includes routing traffic
        # to localhost DNS servers through sshuttle.
        for _, ip in [i for i in nslist if i[0] == family]:
            _nft('add rule', chain, ip_version,
                 'daddr %s' % ip, 'udp dport 53',
                 ('redirect to :' + str(dnsport)))

        # Don't route any remaining local traffic through sshuttle
        _nft('add rule', chain, 'fib daddr type local return')

        # create new subnet entries.
        for _, swidth, sexclude, snet, fport, lport \
                in sorted(subnets, key=subnet_weight, reverse=True):

            # match using nfproto as described at
            # https://superuser.com/questions/1560376/match-ipv6-protocol-using-nftables
            if fport and fport != lport:
                tcp_ports = ('meta', 'nfproto', ip_version_l, 'tcp',
                             'dport', '{ %d-%d }' % (fport, lport))
            elif fport and fport == lport:
                tcp_ports = ('meta', 'nfproto', ip_version_l, 'tcp',
                             'dport', '%d' % (fport))
            else:
                tcp_ports = ('meta', 'nfproto', ip_version_l,
                             'meta', 'l4proto', 'tcp')

            if sexclude:
                _nft('add rule', chain, *(tcp_ports + (
                    ip_version, 'daddr %s/%s' % (snet, swidth), 'return')))
            else:
                _nft('add rule', chain, *(tcp_ports + (
                    ip_version, 'daddr %s/%s' % (snet, swidth),
                    ('redirect to :' + str(port)))))

    def restore_firewall(self, port, family, udp, user, group):
        if udp:
            raise Exception("UDP not supported by nft method")

        if family == socket.AF_INET:
            table = 'sshuttle-ipv4-%s' % port
        if family == socket.AF_INET6:
            table = 'sshuttle-ipv6-%s' % port

        def _nft(action, *args):
            return nft(family, table, action, *args)

        # basic cleanup/setup of chains
        nonfatal(_nft, 'delete table', '')

    def get_supported_features(self):
        result = super(Method, self).get_supported_features()
        result.ipv6 = True
        return result

    def is_supported(self):
        if which("nft"):
            return True
        debug2("nft method not supported because 'nft' command is missing.")
        return False
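As with the nat method, the nft tables are named per transproxy port ("sshuttle-ipv4-<port>" / "sshuttle-ipv6-<port>"), so independent sshuttle instances can add and delete their rules without touching each other. The sketch below walks that lifecycle; it is illustrative only, with invented ports and subnets, and running it for real requires root and the nft binary.

# Sketch of the per-port table lifecycle for the nft method (invented values).
import socket
from sshuttle.methods import get_method

method = get_method("nft")

# Two instances on different transproxy ports get disjoint tables,
# e.g. "sshuttle-ipv4-12300" and "sshuttle-ipv4-12301".
runs = [
    (12300, [(socket.AF_INET, 24, False, '10.1.2.0', 0, 0)]),
    (12301, [(socket.AF_INET, 16, False, '192.168.0.0', 0, 0)]),
]

for port, subnets in runs:
    method.setup_firewall(port, port, [], socket.AF_INET, subnets,
                          udp=False, user=None, group=None, tmark='1')

# Teardown deletes each table wholesale; nonfatal() turns a missing table
# into a log message instead of a fatal error.
for port, _ in runs:
    method.restore_firewall(port, socket.AF_INET, udp=False, user=None,
                            group=None)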
Some files were not shown because too many files have changed in this diff.