Compare commits


257 Commits
dns ... v0.77.1

Author SHA1 Message Date
8bdefcd10d Release 0.77.1 2016-03-07 09:46:01 +11:00
29b6e8301f Update GPL2 license text
Closes #73.
2016-03-06 17:27:02 +11:00
083293ea0d Version 0.77 2016-03-03 10:38:33 +11:00
2c07985924 Prepare documentation for release 2016-03-03 10:35:45 +11:00
756025b1bc Add date and version to docs 2016-03-03 10:30:20 +11:00
cedc8dc146 Add support for OpenBSD 2016-03-02 18:50:37 +11:00
e8047ce3a9 Fixed Python 3 issue 2016-03-02 18:38:43 +11:00
fae4cb1dbf Override the skip on lo that ends up in the chain
In some cases (see #43) it seems that some network configurations may
end up setting a skip on lo. As sshuttle adds rules that rely on
filtering/translating packets on lo, this causes problems. This fix
overrides the skip so that the rules are applied again.
Should fix at least some of the problems reported on #43.
2016-03-02 18:36:14 +11:00
7d8309ef05 Refactor OS specific portions of PF
This will make it easier to support other platforms/versions in the
future, e.g., OpenBSD.
2016-03-02 18:04:43 +11:00
b7d37e44fb Remove legacy file 2016-03-02 12:47:45 +11:00
4a954c547a fix byte/string bug introduced in 1c46f25e
This is the error message that this commit fixes:
TypeError: sequence item 142: expected a bytes-like object, str found

Complete what 1c46f25e started, more or less.
2016-01-31 16:26:21 -05:00
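As an aside, a minimal sketch of how this class of Python 3 error arises and the usual fix; this is illustrative only, not the sshuttle hostwatch code:

    # One item is accidentally a str; joining on bytes then raises the
    # TypeError quoted above under Python 3.
    lines = [b"10.0.0.1 host1", "10.0.0.2 host2"]
    try:
        data = b"\n".join(lines)
    except TypeError as e:
        print(e)  # sequence item 1: expected a bytes-like object, str found

    # Fix: ensure every item is bytes before joining.
    data = b"\n".join(l if isinstance(l, bytes) else l.encode("ASCII")
                      for l in lines)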
4fcf7c73da Fix regression: ensure we do bind
Closes: #68
2016-01-31 19:15:02 +11:00
ba8e948c0d Don't allocate socket until we need it
We were trying to allocate an IPv6 socket even though we weren't using
IPv6, causing failures on systems without IPv6 support available.

This change means a number of methods on MultiListener, e.g. setsockopt,
should not be called until after the bind call.

Closes #68
2016-01-30 11:28:59 +11:00
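A hedged sketch of the idea behind this change (the real MultiListener differs in detail): create the IPv6 socket lazily, so hosts without IPv6 support never try to allocate one, and only call socket options through the listener after bind():

    import socket

    class LazyListener:
        def __init__(self):
            self.v4 = None
            self.v6 = None

        def bind(self, v4_addr=None, v6_addr=None):
            if v4_addr is not None:
                self.v4 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.v4.bind(v4_addr)
            if v6_addr is not None:
                # Only allocated when an IPv6 address is actually requested.
                self.v6 = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                self.v6.bind(v6_addr)

        def setsockopt(self, *args):
            # As noted above, must not be called before bind().
            for sock in (self.v4, self.v6):
                if sock is not None:
                    sock.setsockopt(*args)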
e06f0240cb Make sure we use Python 3.5 2016-01-30 11:27:37 +11:00
517fc2c930 Remove references to number of years
Closes: #65
2016-01-21 08:42:34 +11:00
11533869a8 Fix description of excludes in Windows setup
And some subtle grammar.

Closes: #66
2016-01-21 08:38:38 +11:00
0392a779a2 Update usage documentation 2016-01-20 21:19:44 +11:00
ee26157faa Add Windows documentation
Copied from https://coderwall.com/p/adfxgw/sshuttle-on-windows

Closes #64
2016-01-20 20:55:10 +11:00
0bdfb883aa Don't distribute sshuttle/version.py
It is autogenerated.
2016-01-18 09:00:00 +11:00
ff9756f290 Release version 0.76 2016-01-17 18:38:43 +11:00
59865269ac Update documentation
Closes #60.
2016-01-17 18:34:10 +11:00
28017303f2 Add link to documentation 2016-01-17 18:20:42 +11:00
c5af6fef8c Remove table.
Suspect it is causing sphinx to crash on readthedocs. See
https://github.com/sphinx-doc/sphinx/issues/1871
2016-01-17 18:07:52 +11:00
6835183b37 Attempt work around of sphinx bug 2016-01-17 17:58:48 +11:00
242c266e7d Move recvmsg to requirements 2016-01-17 17:58:36 +11:00
7408ab3c53 Remove coverage
Not required as we are not documenting the source code.
2016-01-17 17:24:13 +11:00
ea10ff1305 Fix broken link. 2016-01-17 16:43:25 +11:00
d9939b8460 Add changelog to documentation 2016-01-17 16:37:47 +11:00
80f363842d Add requirements.txt for readthedocs 2016-01-17 16:32:53 +11:00
262affe94f Use Sphinx for documentation
See #60
2016-01-17 16:19:13 +11:00
d80b590a71 Fix joining of seed hosts to be compatible with python 3.5
This should also be backwards compatible with Python 2.
2016-01-17 12:05:23 +11:00
7f0b5c698b Fix installation from wheel
Fix the following error. Looks like we have to have a function to call
for the entrypoint.

$ pip install dist/sshuttle-0.76.dev8_ngf59508f-py2-none-any.whl
Processing ./dist/sshuttle-0.76.dev8_ngf59508f-py2-none-any.whl
Installing collected packages: sshuttle
Exception:
Traceback (most recent call last):
  File "/tmp/ddd/local/lib/python2.7/site-packages/pip/basecommand.py", line 211, in main
    status = self.run(options, args)
  File "/tmp/ddd/local/lib/python2.7/site-packages/pip/commands/install.py", line 311, in run
    root=options.root_path,
  File "/tmp/ddd/local/lib/python2.7/site-packages/pip/req/req_set.py", line 646, in install
    **kwargs
  File "/tmp/ddd/local/lib/python2.7/site-packages/pip/req/req_install.py", line 803, in install
    self.move_wheel_files(self.source_dir, root=root)
  File "/tmp/ddd/local/lib/python2.7/site-packages/pip/req/req_install.py", line 998, in move_wheel_files
    isolated=self.isolated,
  File "/tmp/ddd/local/lib/python2.7/site-packages/pip/wheel.py", line 479, in move_wheel_files
    maker.make_multiple(['%s = %s' % kv for kv in console.items()])
  File "/tmp/ddd/local/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py", line 364, in make_multiple
    filenames.extend(self.make(specification, options))
  File "/tmp/ddd/local/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py", line 353, in make
    self._make_script(entry, filenames, options=options)
  File "/tmp/ddd/local/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py", line 244, in _make_script
    script = self._get_script_text(entry).encode('utf-8')
  File "/tmp/ddd/local/lib/python2.7/site-packages/pip/wheel.py", line 396, in _get_script_text
    "import_name": entry.suffix.split(".")[0],
AttributeError: 'NoneType' object has no attribute 'split'
2016-01-17 10:21:21 +11:00
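For reference, a hedged setup.py sketch of what the traceback above points at: a console_scripts entry point must name a callable ("module:function"), not a bare module. The module and function names below are illustrative, not taken from this commit:

    from setuptools import setup

    setup(
        name="sshuttle",
        entry_points={
            "console_scripts": [
                # broken form that triggers the AttributeError above:
                #     "sshuttle = sshuttle.cmdline",
                # working form, naming a function to call:
                "sshuttle = sshuttle.cmdline:main",
            ],
        },
    )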
f59508f41b Be explicit
These files were already included; this is just to make sure.
2016-01-13 20:51:09 +11:00
329867a090 Move pyXAPI requirement 2016-01-13 19:34:20 +11:00
1e82571b3d Experimental: Use setuptools-scm
Closes: #58
2016-01-13 19:00:08 +11:00
aab973f12e Small grammar changes 2016-01-13 19:00:08 +11:00
d982b36521 Remove verbose debugging from example usage 2016-01-13 19:00:08 +11:00
a90877b49b Use correct rst formatting for inline monospace 2016-01-13 19:00:08 +11:00
fc5545ccde Remove a print from tproxy used for debug
Closes: #61
2016-01-13 19:00:08 +11:00
629c386dc5 Add the option to disable sshuttle ipv6 support
Using --disable-ipv6 will now force sshuttle not to capture
IPv6 traffic, even if the client supports IPv6.
2016-01-12 13:07:07 +11:00
45f572f7a8 Version 0.75 2016-01-12 12:29:08 +11:00
e7fe040e10 Revert "fixes the sshuttle entry-point in setup.py"
See #57 for details.

This reverts commit b4b283b214.
2016-01-12 12:25:04 +11:00
2e237b8fbe Remember to increment version. 2016-01-10 10:05:50 +11:00
098916a8de Version 0.74 2016-01-10 10:02:14 +11:00
d3624332dc Fix documentation.
Should work even with different Python versions on client and server.
2016-01-10 10:01:47 +11:00
b4b283b214 fixes the sshuttle entry-point in setup.py
This fixes the following error:

    "import_name": entry.suffix.split(".")[0],
    AttributeError: 'NoneType' object has no attribute 'split'

See
https://pythonhosted.org/setuptools/setuptools.html#automatic-script-creation
2016-01-09 20:04:58 +00:00
1c46f25e13 Fixed str being used as bytes in hostwatch
This should solve the TypeError reported in #53 and some others I found
while testing the fix.

Closes: #53
2016-01-07 14:16:03 +11:00
11838d65c2 Adds support for FreeBSD PF
The PF firewall that is included in the FreeBSD base system does not
have exactly the same data structures as the OSX version. This commit
fixes the offsets and some field types that are also different. Tested
with FreeBSD 10.2 and OSX 10.11.2.
2016-01-05 18:00:57 +11:00
e433c599e4 IPv6 routes must be added manually 2015-12-15 14:26:39 +11:00
ba60d22478 Add another test. 2015-12-15 14:23:42 +11:00
3db38c992a Replace numbered points with dot points. 2015-12-15 14:23:19 +11:00
1e81bf3dfc Mirror setup/restore logic 2015-12-15 13:39:00 +11:00
7362ba9f52 If listenip_v6 we should declare ipv6 required 2015-12-15 13:31:03 +11:00
b207d1d0d6 Fixes for --auto-nets 2015-12-15 13:30:34 +11:00
56e3b22820 Add FIXME comment. 2015-12-15 13:29:04 +11:00
02fa49627f Fix server side Python3 issues.
Closes: #49.
2015-12-15 12:51:29 +11:00
ce5187100c Add to TPROXY documentation 2015-12-15 11:48:34 +11:00
bdc7d3a97c Fix UDP Python 3.5 issues.
Closes: #48
2015-12-15 11:41:48 +11:00
90654b4fb9 Simplify selection of features 2015-12-15 11:40:55 +11:00
6b4e36c528 Declare DNS support as feature 2015-12-14 21:00:31 +11:00
eed917f062 Don't declare udp feature without recvmsg 2015-12-14 20:59:26 +11:00
74f2d9ca7e Ensure Fatal errors are really Fatal 2015-12-14 20:51:49 +11:00
1e04eb1616 Updates to TPROXY docs. 2015-12-14 20:27:47 +11:00
117afc7a68 Fixed dictionary changed size during iteration
The removal loop should probably be outside the iteration loop.
2015-12-14 16:46:11 +11:00
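A generic illustration of the fix described above (not the sshuttle code): collect the keys while iterating, delete them afterwards:

    channels = {1: "idle", 2: "active", 3: "idle"}

    # Deleting inside the loop raises "RuntimeError: dictionary changed size
    # during iteration" on Python 3, so gather the keys first...
    to_remove = [chan for chan, state in channels.items() if state == "idle"]

    # ...and do the removal outside the iteration loop.
    for chan in to_remove:
        del channels[chan]

    print(channels)  # {2: 'active'}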
c61984088b Test PF on non-darwin. 2015-12-14 09:28:43 +11:00
e63e121354 Print PF rules used.
Also support multiline debug output better.
2015-12-14 09:21:15 +11:00
2b235331d0 Split setup_firewall method.
* setup_firewall sets the firewall up.
* restore_firewall restores the firewall to initial state.
2015-12-13 11:56:18 +11:00
2eeea9536a Fixed str being used as bytes in daemonize 2015-12-09 16:32:39 +11:00
9a77d03edf Respect --syslog as soon as possible
When executed with the --syslog option, start redirecting to
syslog immediately after the command-line options are validated.
This way, when running under an init daemon such as upstart, all the
relevant information (connection failures, etc.) can be retrieved from
the log instead of being lost on stdout or stderr.
2015-12-09 14:46:11 +11:00
4fdd715bc1 Don't change object while iterating
Closes: #40
2015-12-09 10:29:40 +11:00
bea723c598 Add tox.ini file. 2015-12-07 13:17:09 +11:00
1ae4fce6b3 Fix logging with pf method and Python 3.5 2015-12-07 13:16:47 +11:00
118171af7f Fix get_tcp_dstip with MacOSX/Python3.5 2015-12-07 07:14:26 +11:00
3367124e6b Fix more brokenness. 2015-12-06 11:45:49 +11:00
aaa6062329 Remove IPFW support.
This is no longer used by modern MacOSX and is not being tested.

It also required a do_wait() function which was a complication for
sshuttle as a whole.

Can get resurrected if required.
2015-12-06 11:33:52 +11:00
da4ce19121 Fix MacOSX tests. 2015-12-06 11:24:38 +11:00
12d4b304c3 Fix another MacOSX/Python3.5 issue. 2015-12-06 11:24:11 +11:00
bd97506f7d Fixup firewall tests. 2015-12-06 11:02:31 +11:00
53c07f7d90 hostmap shouldn't be global. 2015-12-06 11:00:12 +11:00
7e0c1534df Be more explicit 2015-12-06 10:58:51 +11:00
a3fbf860ff Fix more MacOSX/Python3.5 issues. 2015-12-05 20:21:36 +11:00
7a9e36d211 Fix MacOSX/Python3.5 issues.
Closes: #36.
2015-12-05 16:41:33 +11:00
65e81d51c6 Try Python3.5 by default.
Python 3.0, 3.1, 3.2, and 3.4 are not supported, however.
2015-12-05 14:41:22 +11:00
43084eb49a Fix typo. 2015-12-05 14:40:33 +11:00
bbb4d31c3f Add accidentally removed line. 2015-12-05 14:39:07 +11:00
f7682d4c33 Make firewall messages consistent 2015-12-05 14:26:20 +11:00
d07a775d50 Don't fail if can't revert errors
We will log the errors, but there is no point in failing: not only can
failing hide errors that occurred while setting up the firewall, it is
also pointless because we can't handle these errors in a good way anyway.
2015-12-05 14:14:01 +11:00
50a6e87237 Don't use Xtoken if not set 2015-12-05 14:12:57 +11:00
ed0a92e714 Remove reference to obsolete global 2015-12-05 14:12:24 +11:00
36a1d7ead9 Python 3.5 fix. 2015-12-01 10:29:24 +11:00
43d6ad6a51 Print Python version used for the various stages. 2015-12-01 10:03:24 +11:00
5ab76a6ba9 Merge pull request #33 from felixonmars/master
Fix bug reported by @matiwinnetou in #31
2015-12-01 09:47:41 +11:00
61f9ae6fb4 Fix bug reported by @matiwinnetou in #31 2015-11-30 23:45:24 +08:00
191df92824 Ensure tempfiles are chmod 600 2015-11-28 16:13:56 +11:00
6dfbc467c0 Ensure verbose is never None.
None >= 1 is not valid under Python 3.

Fixes #31.
2015-11-28 16:03:01 +11:00
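A minimal illustration of the Python 3 behaviour this works around (not the actual option-parsing code):

    verbose = None
    try:
        if verbose >= 1:
            print("debug output")
    except TypeError as e:
        print(e)  # '>=' not supported between instances of 'NoneType' and 'int'

    # Fix: give the option an integer default so comparisons always work.
    verbose = verbose or 0
    if verbose >= 1:
        print("debug output")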
c06c972039 Prefer Python3 by default. 2015-11-28 16:02:47 +11:00
da62fe5b80 Merge pull request #30 from felixonmars/master
Add tests_require into setup.py
2015-11-27 20:01:30 +11:00
698351cf44 Add tests_require into setup.py
pytest and mock are needed for running tests.
2015-11-27 12:52:03 +08:00
13457c773b Improve summary line. 2015-11-27 14:28:52 +11:00
780997e8a7 New release 0.73. 2015-11-27 14:22:09 +11:00
d41579c265 Add comment about IPFW support. 2015-11-27 14:18:16 +11:00
974f9aee81 Remove legacy Debian packaging.
This needs to be redone; will do so at a later stage.
2015-11-27 14:13:45 +11:00
4252e81fb0 Update documentation. 2015-11-27 14:13:18 +11:00
7e10fc0756 Add to debugging messages. 2015-11-25 13:06:43 +11:00
2c2ee12e58 Formatting change. 2015-11-25 12:59:48 +11:00
256ed7d244 Fix reversed debug messages. 2015-11-25 12:59:17 +11:00
151634cd8c Fix typo setting up UDP. 2015-11-25 12:58:39 +11:00
c0748c2388 Support IPV6 DNS servers.
Closes #28.
2015-11-24 12:23:17 +11:00
71d46d77bf Add sock parameter to Handler callbacks
As Handler objects can have multiple sockets, we need to know which one
was involved in the incoming event.
2015-11-24 12:19:31 +11:00
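A hedged sketch of the callback signature change described above; the real Handler class in sshuttle differs:

    class Handler:
        def __init__(self, socks, callback):
            self.socks = socks          # a Handler may watch several sockets
            self.callback = callback

        def ready(self, sock):
            # Pass the socket the event arrived on, so the callback knows
            # which member of self.socks to read from.
            self.callback(sock)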
c1083e983f Pass correct method back from firewall.
Don't pass auto back.
2015-11-24 12:08:12 +11:00
9944b97629 Remove legacy MACOSX files.
Broken and not maintained in some time. See #21.
2015-11-24 07:17:19 +11:00
eaad54f68b Add FIXME comment. 2015-11-18 20:08:15 +11:00
6ebf76a5d8 Avoid hardcoding packed address lengths. 2015-11-18 20:07:41 +11:00
51eb7862c4 Fix tests under PyPy. 2015-11-18 20:07:03 +11:00
75b6865a1d Tests for pf method. 2015-11-17 20:52:31 +11:00
e3a1c56e54 Add more methods tests.
Fix bug in tproxy recv_udp() method.
2015-11-17 17:55:30 +11:00
99050aacb3 Fix for Python3.5. 2015-11-17 13:14:28 +11:00
021e6f57af Add more tests. 2015-11-17 13:08:12 +11:00
9cc6d63684 Fix firewall tests. 2015-11-17 12:46:35 +11:00
43566ebda6 Remove unused import. 2015-11-17 10:58:44 +11:00
537899c1df Remove unused function. 2015-11-17 10:58:29 +11:00
641a193d3d Use readline instead of next. 2015-11-17 09:39:53 +11:00
71d17e449e Disable Python 2.6 tests
importlib is not available before Python 2.7.
2015-11-17 09:33:46 +11:00
9d443e4155 Don't use nested.
It is Python 2.x only; not supported under Python 3.x.
2015-11-17 09:32:40 +11:00
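A sketch, assuming "nested" refers to contextlib.nested (present in Python 2, removed in Python 3); the portable replacement is to give the with statement several context managers directly (or use contextlib.ExitStack for a variable number of them):

    # Python 2 only:
    #     from contextlib import nested
    #     with nested(open("a.txt"), open("b.txt")) as (fa, fb):
    #         ...
    #
    # Works on both Python 2.7 and Python 3:
    with open("a.txt", "w") as fa, open("b.txt", "w") as fb:
        fa.write("a")
        fb.write("b")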
cf0aaa7134 Fix PYTHONPATH for tests. 2015-11-17 09:28:58 +11:00
54de23aae3 Add firewall tests. 2015-11-17 09:19:20 +11:00
ac723694bf Restructure code
Pull out firewall methods code into separate files.

Fix problems starting with method=='auto'; we were making decisions
based on the method, before the method had been finalized by the
firewall.

Only very basic testing so far. What could go wrong?
2015-11-16 18:55:56 +11:00
bcd3205db1 Fix passing latency_control to server. 2015-11-16 11:32:17 +11:00
a651d748cd Remove unused code. 2015-11-16 09:23:24 +11:00
fe48c7c026 Fix PEP8 issues. 2015-11-16 09:10:02 +11:00
4bd6ec8f01 Remove broken su fallback.
Was broken by passing the environment variable PYTHONPATH to the process.
Will fix this if there is a use case for it.
2015-11-16 09:09:02 +11:00
ba1cf58a6c Add Python 3.5 support. 2015-11-16 09:09:01 +11:00
dd8e68b6dc More formatting fixes. 2015-11-15 17:17:16 +11:00
1f2117917f Fix up formatting. 2015-11-15 17:10:04 +11:00
e6f2395dac Fixup PEP8 issues. 2015-11-15 16:49:20 +11:00
d4f10b232a Restructure code
* Make compatible with setuptools.
* Load modules via ssh into separate modules, not into one single namespace.
2015-11-15 16:45:26 +11:00
41b8ad4c97 Merge pull request #25 from vieira/ns-hosts
Import resolvconf_nameservers, fix wrong types
2015-11-11 13:09:59 +11:00
a82224c141 Import resolvconf_nameservers, fix wrong types
Add resolvconf_nameservers to the list of functions imported from
helpers.
Fixed an instance where the method client.main was being called with
ns_hosts (string obtained from optional argument --ns-hosts) instead of
nslist (list of tuples that was already being passed to other methods).
Should fix issue #24.
2015-11-08 01:27:10 +00:00
0fb714893a Merge pull request #23 from vieira/ns-hosts
dns: Added --ns-hosts to tunnel only some requests
2015-10-29 13:13:59 +11:00
28be71ef9a Removed commented out code 2015-10-27 17:53:35 +00:00
d2ee34d71c dns: Added --ns-hosts to tunnel only some requests
By default, the --dns flag configures the firewall to only intercept
queries made to the nameservers defined in resolvconf. This flag enables
the user to explicitly specify the nameservers to which queries will be
redirected. This can be useful when the local nameserver forwards
queries for some domains to a nameserver on the remote side of the
tunnel.
2015-10-27 17:28:52 +00:00
3cf5002b62 Merge pull request #19 from naclander/patch-1
Remove no-latency-control assertion
2015-09-16 13:08:23 +10:00
f71704f54d Remove no-latency-control assertion
Remove an assertion that would fail when --no-latency-control is set.
2015-09-15 19:30:34 -07:00
ad83059da8 Merge pull request #17 from elasticdog/master
Update ui-macos sources.list with new icon names
2015-09-08 12:00:50 +10:00
d211fc28ee Update ui-macos sources.list with new icon names
It looks like building the app UI for OS X has been broken since
9eced8d049
due to the sources.list.do file still referencing the old .png images.

Without this fix the build will stop at:

    do            chicken-tiny.png
    do: Users/elasticdog/sshuttle/src/ui-macos/chicken-tiny.png: no .do file
    do:         Sshuttle VPN.app: got exit code 1
    do:       Sshuttle VPN.app.zip: got exit code 1
    do:     dist: got exit code 1
    do:   ui-macos/all: got exit code 1
    do: all: got exit code 1
2015-09-07 18:22:30 -07:00
f4dac68dc0 Merge pull request #16 from prutschman/localhost_fix
Don't redirect excluded subsets of included subnets
2015-09-04 17:30:26 +10:00
3a73520310 Don't redirect excluded subsets of included subnets 2015-09-03 21:25:23 -07:00
e127aab776 Merge pull request #14 from reactormonk/patch-1
Switched the ./sshuttle to src/sshuttle
2015-07-26 11:15:56 +10:00
5f90ee1f04 Switched the ./sshuttle to src/sshuttle 2015-07-25 17:38:42 +02:00
d70b5f2b89 Merge pull request #13 from shaiay/master
Fixed issue #12
2015-07-23 08:08:09 +10:00
3f2de26f67 Fixed issue #12
family should be an integer. Fixed parsing of routes.
2015-07-22 22:52:25 +03:00
53d5260f8f Merge pull request #11 from douglas/master
Fix the excludes rule for OS X Yosemite and OS X El Capitan
2015-06-19 17:34:14 +10:00
f870ceba00 Fix the excludes rule for OS X Yosemite and OS X El Capitan
Without this fix, the rdr rule is executed, sending the packets that
should be excluded to the ssh tunnel.

What I did was make sure that the packets that are going to the
excluded subnets are processed first, and only after that the remaining
packets are sent to the ssh tunnel.

Thanks to Warr1024 on the #openssh channel on freenode for telling me about
the quick keyword, and to the rest of the guys in the channel who tried to help.
2015-06-18 18:09:18 -03:00
a38963301e Merge pull request #10 from jbd/patch-1
Check for fileno attribute in _tty_width function
2015-05-13 11:01:05 +10:00
bbd54e150d Check for fileno attribute in _tty_width function
When using the Options parser within a unittest.TextTestRunner with buffering enabled (buffer=True), it fails with:
AttributeError: StringIO instance has no attribute 'fileno'

This change will prevent this kind of error.
2015-05-12 16:43:38 +02:00
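A hedged sketch of the guard this commit adds; the real _tty_width in the options code may differ in detail:

    import sys

    def _tty_width(stream=None):
        s = stream or sys.stderr
        if not hasattr(s, "fileno"):
            # e.g. the StringIO that unittest installs with buffer=True
            return 70
        try:
            import fcntl, struct, termios
            winsz = fcntl.ioctl(s.fileno(), termios.TIOCGWINSZ, b"\0" * 8)
            rows, cols = struct.unpack("hhhh", winsz)[:2]
            return cols or 70
        except (OSError, IOError, ImportError):
            return 70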
00f20657e3 Merge pull request #9 from scommab/patch-1
Make firewall.py use the right params for islocal
2015-05-03 14:42:23 +10:00
84b30be904 Make firewall.py use the right params for islocal 2015-05-01 23:36:08 -07:00
5825dddb02 Merge pull request #8 from nanoant/patch/osx-improvements
Patch/osx improvements
2015-04-21 09:49:18 +10:00
9eced8d049 OSX: New Retina compatible menu & app icon
These icons use scale-independent PDF template images, which make the menu
item icon look great on both the Yosemite light & dark themes. Also adds a
new, flatter, higher-resolution app icon.
2015-04-20 20:32:02 +02:00
fecb53413d OSX: Remove status item on application quit
This ensures the application does not leave an empty status item behind.
2015-04-20 20:23:20 +02:00
1b1ed4d495 OSX: Improve app startup time
Importing everything (*) from AppKit takes a while; since we have 3 scripts
doing that, startup could take up to a few seconds. This change makes the
scripts import only what they need, reducing startup time to a fraction of a second.
2015-04-20 18:53:06 +02:00
b19272a67a Merge pull request #7 from xtaran/master
Fix the most blatant issues of the generated .deb
2015-04-20 10:39:55 +10:00
bc2a0b7fbc Fix path to main.py in .deb 2015-04-18 14:46:55 +02:00
6a96ace497 autossh is not required but nice to have 2015-04-18 14:40:00 +02:00
163aab2ca1 Fix typo in long description of .deb 2015-04-18 14:31:30 +02:00
964977220e Change .deb section from utils to net 2015-04-18 14:30:35 +02:00
db67834164 Add missing dependency on iptables in the .deb 2015-04-18 14:30:30 +02:00
1bc2f84d16 Use a less confusing version for .deb packages built from git 2015-04-18 14:30:30 +02:00
a229fc020c Properly separate short and long description in .deb 2015-04-18 14:30:30 +02:00
d6e7a9b6ad Update homepage header of .deb 2015-04-18 14:30:30 +02:00
e6ca7148fa Fix formatting of versioned dependency in the .deb 2015-04-18 14:30:21 +02:00
95529a5137 Don't include MacOS X stuff in .deb 2015-04-18 14:30:21 +02:00
93c4af6fc8 There's no need to have a hard dependency on an init system in the .deb
Otherwise the package is only installable on current Ubuntu releases,
and not on future Ubuntu releases (which will use systemd) nor on
Debian and other Debian derivatives (where the administrator can
decide which init system is used).
2015-04-18 14:30:16 +02:00
2ca9aaa450 The .deb is and needs to be architecture-independent 2015-04-18 14:14:33 +02:00
2cfc39fac8 Fix UDP channel expiration. 2015-04-12 09:59:49 +10:00
29819ea0af Merge pull request #6 from lkorth/patch-1
Fix clone url in README
2015-04-11 10:15:20 +10:00
e43a40565b Fix clone url in README 2015-04-09 09:22:31 -07:00
57d1cb1e11 Merge pull request #5 from seanzxx/yosemite_support_sudo_fix
fix sudo issue in yosemite
2015-03-23 14:11:16 +11:00
6e32d1445a add -e/-d support 2015-03-21 22:43:12 -07:00
bdad253ef5 fix mistake 2015-03-21 15:36:42 -07:00
49c55f6825 use -E/-X to enable/disable pf on yosemite 2015-03-21 15:28:17 -07:00
1874aaceb4 refine firewall initialization 2015-03-21 00:00:15 -07:00
4c31bc02a4 add anchor rule directly 2015-03-20 18:21:00 -07:00
84047089a9 fix sudo issue 2015-03-19 02:43:11 -07:00
8be9270fdb Merge pull request #4 from seanzxx/yosemite_support
Yosemite support
2015-03-19 09:55:39 +11:00
10dc229125 fix bootstrapping issue when pf was started beforehand 2015-03-18 09:25:41 -07:00
cd77ad5e7b refine error message 2015-03-15 22:53:08 -07:00
c13cb9b8ca optimize the ctypes import 2015-03-15 22:45:32 -07:00
0fe48a4682 initial support for pf in yosemite 2015-03-15 22:34:40 -07:00
6121a6dca3 sshuttle.md: fix whitespace issues. 2014-12-16 14:06:13 +11:00
c576682caf sshuttle.md: document Internet Sharing incompatibility 2014-12-16 14:04:25 +11:00
343905784b Added --exclude-from feature.
(Slightly modified by apenwarr)
2014-10-06 13:04:33 +11:00
91d705c24f Document missing --dns option in sshuttle manpage 2014-10-06 13:01:31 +11:00
e5251969b0 firewall.py: catch SIGINT and SIGTERM too.
There were still a few conditions under some OSes that would cause
firewall.py to terminate without cleaning up the firewall settings.  'pkill
sshuttle' was one of them.  Ignore a couple more signals to further ensure a
correct cleanup.

(This only affects sshuttle --firewall, which is a subprocess of the main
sshuttle process.  The firewall is supposed to exit automatically whenever
the client exits, and so far that part seems to work reliably.)
2014-10-06 13:00:57 +11:00
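A hedged sketch of the intent (the real firewall.py differs): the privileged firewall subprocess ignores the signals that 'pkill sshuttle' and friends deliver, so it only exits, and cleans up, when the parent sshuttle client goes away:

    import signal

    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGHUP, signal.SIG_IGN)
    signal.signal(signal.SIGPIPE, signal.SIG_IGN)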
b8e150fc4d Use python-config to compile with latest Python version.
For OS X systems without Python 2.5, runpython.c does not compile.
Use python-config to get the paths for the latest version.
2014-10-03 14:58:26 -07:00
36378efe5e Revert Debian package specific change.
This is required so sshuttle can be run from the git repository.

The way the Debian package is created is non-standard, and probably
needs redoing anyway.
2014-09-23 11:14:56 +10:00
cba8b261c6 Use the new arguments from redo v0.10.
(apenwarr: also updates to the matching, latest minimal/do)
2014-09-23 10:14:59 +10:00
39425a03c5 firewall: catch SIGHUP and SIGPIPE.
Not sure if this will fix anything, but it might stop the problem reported
on some MacOS versions where the firewall doesn't get cleaned up correctly.
2014-09-23 10:14:27 +10:00
5a39341d50 ui-macos/main.py: fix wait() to avoid deadlock.
If the subprocess was trying to write to its stdout/stderr, its process
would never actually finish because it was blocked waiting for us to read
it, but we were blocked on waitpid().  Instead, use waitpid(WNOHANG) and
continually read from the subprocess (which should be a blocking operation)
until it exits.
2014-09-23 10:11:13 +10:00
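A hedged sketch of the pattern described above, not the ui-macos code itself: keep draining the child's output with a blocking read and poll with waitpid(WNOHANG), instead of blocking in waitpid() while the child blocks writing to us:

    import os
    import subprocess

    p = subprocess.Popen(["sh", "-c", "echo hello; sleep 1"],
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    status = None
    while status is None:
        line = p.stdout.readline()       # blocking read frees the child to run
        if line:
            print(line.decode(), end="")
        pid, st = os.waitpid(p.pid, os.WNOHANG)
        if pid == p.pid:
            status = st
    print("child exited with status", status)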
3eef3635ac ipfw: don't use 'log' parameter.
I guess we were causing the kernel to syslog on every single packet on
MacOS.  Oops.
2014-09-23 10:09:16 +10:00
f1c79c7e92 PEP8 fixes. 2014-09-16 10:24:16 +10:00
5529a04cc9 Fix whitespace. 2014-09-15 14:46:45 +10:00
035c5ad7a6 Fix: Use sock for consistency. 2014-09-15 14:44:07 +10:00
c013386ecb If IPv4 bind fails but IPv6 succeeds, don't error. 2014-09-15 14:32:59 +10:00
a33f6199c4 Remove broken IPv6 code. 2014-09-15 14:23:09 +10:00
0f2c249e4d Remove dodgy code. 2014-09-15 14:14:52 +10:00
192e5b36e8 Added some Ubuntu notes 2014-09-15 14:14:52 +10:00
4036b7dfcf Added some requirements 2014-09-15 14:14:52 +10:00
8ec6daf02a Added a shell script to make a .deb package 2014-09-15 14:14:52 +10:00
e2507f86d5 Added a control file for the Debian package 2014-09-15 14:14:52 +10:00
e4fe62de3c Added a sample prefixes file 2014-09-15 14:14:52 +10:00
734f32d112 Sample tunnel configuration 2014-09-15 14:14:52 +10:00
a34e106b55 Changed the sshuttle binary to point to install 2014-09-15 14:14:52 +10:00
e6e80f1f04 Changed the file to be more "canonical" 2014-09-15 14:14:52 +10:00
32865bd2dd Added the PyXAPI requirement to the readme 2014-09-15 14:14:52 +10:00
2f11f50bc2 Adding more robust exit codes 2014-09-15 14:14:52 +10:00
a95491765d Added -s to accept subnets from a config file 2014-09-15 14:14:52 +10:00
d8754dc3a0 First version; still has debugging 2014-09-15 14:14:52 +10:00
3956a5df94 Moved docs out of the src directory 2014-09-15 14:14:52 +10:00
7442eb61e9 Mass relocation of files to their own subdirectory 2014-09-15 14:14:51 +10:00
6107abf10f Fixed a bug where lack of IPv6 destination = fatal
There was a problem where trying to bind .v4 and .v6 listeners would set them
to None if there was nothing to bind (if, say, you weren't binding an IPv6
listener).  Later, the code then would try to call a member function of the
listener.  The member function would not do anything if there was no listener,
but trying to dereference None yielded the broken behavior.
2014-09-15 14:14:51 +10:00
5e8ad544ee TProxy UDP support, including DNS. 2011-08-26 09:53:59 +10:00
20254bab57 TProxy IPv6 support. 2011-07-11 11:20:52 +10:00
f41c6b62e5 TProxy support as well as NAT support. 2011-07-11 11:20:51 +10:00
9a7412c08f More changes to simplify the upcoming IPv6 patch. 2011-07-11 11:20:50 +10:00
c6200eecdc Choose which method to use for intercepting traffic. 2011-07-11 11:16:51 +10:00
55f86a8b3f Rewrite binding code. DNS port may now be different from TCP port. 2011-07-11 11:16:50 +10:00
e7caae8126 Make it clear ports are for IPv4. 2011-07-11 11:16:49 +10:00
4db9b372c2 Make iptables functions work with any table, not just nat. 2011-07-11 11:16:48 +10:00
061e6a0933 Keep track of the address family an address belongs to. 2011-06-16 14:51:34 +10:00
50849b86b0 This hack is IPv4-specific; ensure it doesn't get used for other
address families.
2011-06-16 14:51:34 +10:00
6b7cf770f5 Improve debugging. 2011-06-16 14:42:15 +10:00
b26e1ee652 Introduce independent_listener, will be used for both IPv4 and IPv6
connections.
2011-06-06 11:14:28 +10:00
6500067905 When a DNS response is received, the MUX channel is no longer required. Delete it. 2011-06-06 11:12:23 +10:00
50c2b86f15 Rename onaccept to onaccept_tcp as it is tcp specific. 2011-06-06 10:54:57 +10:00
97dca42291 Rename dnslistener to dns_listener for consistency with tcp_listener. 2011-06-06 10:44:38 +10:00
6e53b07002 Rename listener to tcp_listener, as it is TCP specific. 2011-06-06 10:43:39 +10:00
08bd1dca46 Rename TCP specific commands to clarify they are TCP specific. 2011-06-06 10:39:50 +10:00
94566b5efc Split expiration handling into another function. 2011-06-06 10:23:04 +10:00
a8b71f6387 Move nested functions to top level. 2011-05-31 00:42:48 -04:00
4bfcd7091d Send DNS request back on same sock we received it on. 2011-05-31 00:39:17 -04:00
bd489b3319 Pass socket through to handlers. Required for IPv6 support. 2011-05-31 00:39:17 -04:00
8ab5ef283d ssnet.py: deal with a possible connect/getsockopt(SO_ERROR) race.
Seems to affect Linux servers.  Ed Maste says the patch fixes it for him.
2011-05-29 22:42:16 -04:00
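A generic sketch of the non-blocking connect / getsockopt(SO_ERROR) pattern the commit above is about; the actual race handling in ssnet.py is not reproduced here:

    import errno
    import select
    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setblocking(False)
    err = s.connect_ex(("127.0.0.1", 9))       # discard port, likely refused
    if err in (errno.EINPROGRESS, errno.EWOULDBLOCK):
        select.select([], [s], [], 5)          # wait until writable
        err = s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
    print("connect result:", errno.errorcode.get(err, "OK"))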
e67208a294 helpers.py: errno is used by this module, but not imported. 2011-05-15 17:35:53 -04:00
7859be13c2 ui-macos/bits/runpython.do: skip ppc64 architecture.
I don't have a Mac that can build it.  Hopefully ppc will run fine on ppc64.
2011-05-07 23:19:52 -04:00
f313d50690 ui-macos/bits/runpython.do: report which platforms we're compiling for.
Just as a quick reminder, in case you're building a fat binary and you don't
have all the architectures actually installed.
2011-05-07 23:16:42 -04:00
15e26d2e0e README.md: fix little bug
The ssh hostname should immediately follow the -r parameter.
2011-05-07 23:16:42 -04:00
e2ec475de3 ui-macos/models.py: fix a compatibility problem on MacOS for PPC.
@objc.accessor isn't the right thing to use for a Core Data Validation
function.  Yowee, PyObjc sure is non-obvious.
2011-05-07 23:16:42 -04:00
57e744dadd ./do: use the latest minimal/do from the redo project. 2011-05-03 14:19:45 -07:00
c13be0b817 ui-macos/bits/runpython.do: auto-determine arches to build for.
Some people don't have all of them installed, so auto-detect them by
looking at the available arches in /usr/libexec.
2011-05-03 14:18:37 -07:00
da2c6273f6 Add some friendly info to the README 2011-05-03 14:03:19 -07:00
7712c60c36 Insert two binary NUL bytes (\0) before SSHUTTLE0001 sync string.
...and search for those null bytes before looking for the sync string.

This helps when people have misconfigured .bashrc to print messages even in
non-interactive mode.  (On my Debian Lenny system, .bashrc doesn't seem to
run when you do 'ssh localhost ls', but on MacOS servers, it does.  Hmm...)
2011-05-03 13:59:25 -07:00
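A hedged sketch of the idea (not the actual ssh.py code): scan the server's initial output for the two NUL bytes plus the sync string and discard anything a chatty .bashrc printed before it:

    SYNC = b"\0\0SSHUTTLE0001"

    def strip_preamble(initial_output):
        pos = initial_output.find(SYNC)
        if pos < 0:
            raise IOError("server did not send the expected sync string")
        # Everything before the marker is .bashrc noise; drop it.
        return initial_output[pos + len(SYNC):]

    print(strip_preamble(b"motd: welcome!\n\0\0SSHUTTLE0001<protocol bytes>"))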
65b0390fe9 ssh.py: use 'exec python -c' instead of just 'python -c'.
This gets rid of an extra intermediate sh process on the server that we were
keeping for no good reason, since it would exit as soon as python exited
anyway.
2011-05-03 13:51:09 -07:00
c5834a9773 Handle EHOSTDOWN, ENETDOWN.
Someone on the mailing list reported getting these.

Also centralize the list of these errors, so we don't have different parts
of the code supporting a different subset of them.  Now just use
ssnet.NET_ERRS.
2011-05-03 13:32:25 -07:00
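A hedged sketch of centralising the "expected" network errors as described above; the exact membership of ssnet.NET_ERRS is not reproduced here:

    import errno
    import socket

    NET_ERRS = (errno.ECONNREFUSED, errno.ETIMEDOUT,
                errno.EHOSTUNREACH, errno.ENETUNREACH,
                errno.EHOSTDOWN, errno.ENETDOWN)

    def try_connect(addr):
        s = socket.socket()
        try:
            s.connect(addr)
        except OSError as e:
            if e.errno in NET_ERRS:
                print("connect failed (non-fatal):", e)   # keep the tunnel alive
                return None
            raise
        return s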
e2474543fc runpython.do: also compile for ppc architecture. 2011-04-24 22:51:27 -04:00
8636378870 Dereference symlink for sshuttle launch script
(Modified slightly by apenwarr)
2011-04-24 22:42:50 -04:00
f5eed4c809 Don't try to connect to remote IPs that start with zero.
For some reason, on Linux servers this returns EINVAL.  I don't like just
treating EINVAL as non-fatal in general, so let's catch this specific case
and ignore it.

Reported by Reza Mohammadi on the mailing list.  Interestingly, it's kind of
hard to trigger this crash since the client would have to request the
connection, and that connection shouldn't exist because the original client
program would have already gotten EINVAL.  But my MacOS machine can generate
such a connection, so a MacOS->Linux sshuttle could trigger this.
2011-04-24 22:15:20 -04:00
92 changed files with 5681 additions and 7101 deletions

1
.gitignore

@@ -1,3 +1,4 @@
sshuttle/version.py
*.pyc
*~
*.8

11
.travis.yml Normal file

@@ -0,0 +1,11 @@
language: python
python:
- 2.7
- 3.5
- pypy
install:
- travis_retry pip install -q pytest mock
script:
- PYTHONPATH=. py.test

39
CHANGES.rst Normal file

@@ -0,0 +1,39 @@
Release 0.77.1 (Mar 7, 2016)
============================
* Use semantic versioning. http://semver.org/
* Update GPL 2 license text.
* New release to fix PyPI.

Release 0.77 (Mar 3, 2016)
==========================
* Various bug fixes.
* Fix Documentation.
* Add fix for MacOS X issue.
* Add support for OpenBSD.

Release 0.76 (Jan 17, 2016)
===========================
* Add option to disable IPv6 support.
* Update documentation.
* Move documentation, including man page, to Sphinx.
* Use setuptools-scm for automatic versioning.

Release 0.75 (Jan 12, 2016)
===========================
* Revert change that broke sshuttle entry point.

Release 0.74 (Jan 10, 2016)
===========================
* Add CHANGES.rst file.
* Numerous bug fixes.
* Python 3.5 fixes.
* PF fixes, especially for BSD.

598
LICENSE

@@ -1,25 +1,22 @@
[LICENSE diff: the GNU Library General Public License, Version 2 (June 1991) text is replaced by the GNU General Public License, Version 2 (June 1991) text; the diff lost its +/- markers, so the interleaved old/new license wording is not reproduced here.]
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
@ -381,101 +226,114 @@ impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
12. If the distribution and/or use of the Library is restricted in
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
13. The Free Software Foundation may publish revised and/or new
versions of the Library General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
NO WARRANTY
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
NO WARRANTY
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
END OF TERMS AND CONDITIONS
Appendix: How to Apply These Terms to Your New Libraries
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that
everyone can redistribute and change. You can do so by permitting
redistribution under these terms (or, alternatively, under the terms of the
ordinary General Public License).
END OF TERMS AND CONDITIONS
To apply these terms, attach the following notices to the library. It is
safest to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least the
"copyright" line and a pointer to where the full notice is found.
How to Apply These Terms to Your New Programs
<one line to give the library's name and a brief idea of what it does.>
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This library is distributed in the hope that it will be useful,
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the library, if
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the
library `Frob' (a library for tweaking knobs) written by James Random Hacker.
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1990
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
That's all there is to it!
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

13
MANIFEST.in Normal file

@ -0,0 +1,13 @@
include *.txt
include *.rst
include *.py
include MANIFEST.in
include LICENSE
include run
include tox.ini
exclude sshuttle/version.py
recursive-include docs *.bat
recursive-include docs *.py
recursive-include docs *.rst
recursive-include docs Makefile
recursive-include sshuttle *.py


@ -1,10 +0,0 @@
all:
Makefile:
@
%: FORCE
+./do $@
.PHONY: FORCE

177
README.md

@ -1,177 +0,0 @@
WARNING:
On MacOS 10.6 (at least up to 10.6.6), your network will
stop responding about 10 minutes after the first time you
start sshuttle, because of a MacOS kernel bug relating to
arp and the net.inet.ip.scopedroute sysctl. To fix it,
just switch your wireless off and on. Sshuttle makes the
kernel setting it changes permanent, so this won't happen
again, even after a reboot.
sshuttle: where transparent proxy meets VPN meets ssh
=====================================================
As far as I know, sshuttle is the only program that solves the following
common case:
- Your client machine (or router) is Linux, FreeBSD, or MacOS.
- You have access to a remote network via ssh.
- You don't necessarily have admin access on the remote network.
- The remote network has no VPN, or only stupid/complex VPN
protocols (IPsec, PPTP, etc). Or maybe you <i>are</i> the
admin and you just got frustrated with the awful state of
VPN tools.
- You don't want to create an ssh port forward for every
single host/port on the remote network.
- You hate openssh's port forwarding because it's randomly
slow and/or stupid.
- You can't use openssh's PermitTunnel feature because
it's disabled by default on openssh servers; plus it does
TCP-over-TCP, which has terrible performance (see below).
Prerequisites
-------------
- sudo, su, or logged in as root on your client machine.
(The server doesn't need admin access.)
- If you use Linux on your client machine:
iptables installed on the client, including at
least the iptables DNAT, REDIRECT, and ttl modules.
These are installed by default on most Linux distributions.
(The server doesn't need iptables and doesn't need to be
Linux.)
- If you use MacOS or BSD on your client machine:
Your kernel needs to be compiled with IPFIREWALL_FORWARD
(MacOS has this by default) and you need to have ipfw
available. (The server doesn't need to be MacOS or BSD.)
This is how you use it:
-----------------------
- <tt>git clone git://github.com/apenwarr/sshuttle</tt>
on your client machine. You'll need root or sudo
access, and python needs to be installed.
- <tt>./sshuttle -r username@sshserver 0.0.0.0/0 -vv</tt>
(You may be prompted for one or more passwords; first, the
local password to become root using either sudo or su, and
then the remote ssh password. Or you might have sudo and ssh set
up to not require passwords, in which case you won't be
prompted at all.)
That's it! Now your local machine can access the remote network as if you
were right there. And if your "client" machine is a router, everyone on
your local network can make connections to your remote network.
You don't need to install sshuttle on the remote server;
the remote server just needs to have python available.
sshuttle will automatically upload and run its source code
to the remote python interpreter.
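As a rough sketch of what that upload amounts to (an illustration only, not
sshuttle's exact protocol; send_modules and receive_modules are made-up
names): the client streams each source file as a name line, a byte count and
a zlib-compressed body down the ssh pipe, and the remote python rebuilds and
executes each module from the same framing:

    import zlib

    # Illustration only -- not sshuttle's actual upload protocol.

    def send_modules(out, filenames):
        # Stream each file as: name, compressed byte count, compressed bytes.
        # A blank name line ends the stream.
        comp = zlib.compressobj()
        for name in filenames:
            with open(name, 'rb') as f:
                data = comp.compress(f.read()) + comp.flush(zlib.Z_SYNC_FLUSH)
            out.write(name.encode() + b'\n')
            out.write(str(len(data)).encode() + b'\n')
            out.write(data)
        out.write(b'\n')
        out.flush()

    def receive_modules(inp):
        # Counterpart run by the remote python: rebuild and exec each module.
        decomp = zlib.decompressobj()
        while True:
            name = inp.readline().strip().decode()
            if not name:
                break
            nbytes = int(inp.readline())
            source = decomp.decompress(inp.read(nbytes))
            exec(compile(source, name, 'exec'), {})

In this hypothetical usage, out would be the stdin of the ssh subprocess and
inp the binary stdin of the remote interpreter.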
This creates a transparent proxy server on your local machine for all IP
addresses that match 0.0.0.0/0. (You can use more specific IP addresses if
you want; use any number of IP addresses or subnets to change which
addresses get proxied. Using 0.0.0.0/0 proxies <i>everything</i>, which is
interesting if you don't trust the people on your local network.)
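For example, <tt>./sshuttle -r username@sshserver 10.0.0.0/8 192.168.0.0/16</tt>
would proxy only those two subnets instead of everything.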
Any TCP session you initiate to one of the proxied IP addresses will be
captured by sshuttle and sent over an ssh session to the remote copy of
sshuttle, which will then regenerate the connection on that end, and funnel
the data back and forth through ssh.
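On Linux, that capture works by using an iptables REDIRECT rule to steer
matching connections into sshuttle's local listener, which then asks the
kernel where each connection was originally headed. A minimal sketch of that
recovery step (IPv4 only; SO_ORIGINAL_DST is hard-coded because the socket
module does not export it, and original_dst is just an illustrative helper):

    import socket
    import struct

    SO_ORIGINAL_DST = 80   # from linux/netfilter_ipv4.h

    def original_dst(sock):
        # Ask the kernel for the pre-REDIRECT destination of this connection.
        buf = sock.getsockopt(socket.SOL_IP, SO_ORIGINAL_DST, 16)
        port, raw_ip = struct.unpack_from('!2xH4s', buf)
        return socket.inet_ntoa(raw_ip), port

    # Hypothetical use: accept() on the local listener that REDIRECT points
    # at, then ask the server end to dial (ip, port) as a normal connection.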
Fun, right? A poor man's instant VPN, and you don't even have to have
admin access on the server.
Theory of Operation
-------------------
sshuttle is not exactly a VPN, and not exactly port forwarding. It's kind
of both, and kind of neither.
It's like a VPN, since it can forward every port on an entire network, not
just ports you specify. Conveniently, it lets you use the "real" IP
addresses of each host rather than faking port numbers on localhost.
On the other hand, the way it *works* is more like ssh port forwarding than
a VPN. Normally, a VPN forwards your data one packet at a time, and
doesn't care about individual connections; ie. it's "stateless" with respect
to the traffic. sshuttle is the opposite of stateless; it tracks every
single connection.
You could compare sshuttle to something like the old <a
href="http://en.wikipedia.org/wiki/Slirp">Slirp</a> program, which was a
userspace TCP/IP implementation that did something similar. But it
operated on a packet-by-packet basis on the client side, reassembling the
packets on the server side. That worked okay back in the "real live serial
port" days, because serial ports had predictable latency and buffering.
But you can't safely just forward TCP packets over a TCP session (like ssh),
because TCP's performance depends fundamentally on packet loss; it
<i>must</i> experience packet loss in order to know when to slow down! At
the same time, the outer TCP session (ssh, in this case) is a reliable
transport, which means that what you forward through the tunnel <i>never</i>
experiences packet loss. The ssh session itself experiences packet loss, of
course, but TCP fixes it up and ssh (and thus you) never know the
difference. But neither does your inner TCP session, and extremely screwy
performance ensues.
sshuttle assembles the TCP stream locally, multiplexes it statefully over
an ssh session, and disassembles it back into packets at the other end. So
it never ends up doing TCP-over-TCP. It's just data-over-TCP, which is
safe.
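The multiplexing itself can be pictured as nothing more than tagging each
chunk of stream data with a channel id and a length before writing it to the
single ssh pipe. A toy sketch of the idea (not sshuttle's actual wire format;
mux_send and mux_recv are invented names):

    import struct

    HEADER = struct.Struct('!HI')   # channel id, payload length

    def mux_send(pipe, channel, data):
        # Tag one chunk of a single TCP stream and write it to the shared pipe.
        pipe.write(HEADER.pack(channel, len(data)) + data)
        pipe.flush()

    def mux_recv(pipe):
        # Read back the next (channel, payload) frame from the shared pipe.
        header = pipe.read(HEADER.size)
        if len(header) < HEADER.size:
            return None, b''
        channel, length = HEADER.unpack(header)
        return channel, pipe.read(length)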
Useless Trivia
--------------
Back in 1998 (12 years ago! Yikes!), I released the first version of <a
href="http://alumnit.ca/wiki/?TunnelVisionReadMe">Tunnel Vision</a>, a
semi-intelligent VPN client for Linux. Unfortunately, I made two big mistakes:
I implemented the key exchange myself (oops), and I ended up doing
TCP-over-TCP (double oops). The resulting program worked okay - and people
used it for years - but the performance was always a bit funny. And nobody
ever found any security flaws in my key exchange, either, but that doesn't
mean anything. :)
The same year, dcoombs and I also released Fast Forward, a proxy server
supporting transparent proxying. Among other things, we used it for
automatically splitting traffic across more than one Internet connection (a
tool we called "Double Vision").
I was still in university at the time. A couple years after that, one of my
professors was working with some graduate students on the technology that
would eventually become <a href="http://www.slipstream.com/">Slipstream
Internet Acceleration</a>. He asked me to do a contract for him to build an
initial prototype of a transparent proxy server for mobile networks. The
idea was similar to sshuttle: if you reassemble and then disassemble the TCP
packets, you can reduce latency and improve performance vs. just forwarding
the packets over a plain VPN or mobile network. (It's unlikely that any of
my code has persisted in the Slipstream product today, but the concept is
still pretty cool. I'm still horrified that people use plain TCP on
complex mobile networks with crazily variable latency, for which it was
never really intended.)
That project I did for Slipstream was what first gave me the idea to merge
the concepts of Fast Forward, Double Vision, and Tunnel Vision into a single
program that was the best of all worlds. And here we are, at last, 10 years
later. You're welcome.
--
Avery Pennarun <apenwarr@gmail.com>
Mailing list:
Subscribe by sending a message to <sshuttle+subscribe@googlegroups.com>
List archives are at: http://groups.google.com/group/sshuttle

47
README.rst Normal file

@ -0,0 +1,47 @@
sshuttle: where transparent proxy meets VPN meets ssh
=====================================================
As far as I know, sshuttle is the only program that solves the following
common case:
- Your client machine (or router) is Linux, FreeBSD, or MacOS.
- You have access to a remote network via ssh.
- You don't necessarily have admin access on the remote network.
- The remote network has no VPN, or only stupid/complex VPN
protocols (IPsec, PPTP, etc). Or maybe you *are* the
admin and you just got frustrated with the awful state of
VPN tools.
- You don't want to create an ssh port forward for every
single host/port on the remote network.
- You hate openssh's port forwarding because it's randomly
slow and/or stupid.
- You can't use openssh's PermitTunnel feature because
it's disabled by default on openssh servers; plus it does
TCP-over-TCP, which has terrible performance (see below).
Obtaining sshuttle
------------------
- From PyPI::
pip install sshuttle
- Clone::
git clone https://github.com/sshuttle/sshuttle.git
./setup.py install
Documentation
-------------
The documentation for the stable version is available at:
http://sshuttle.readthedocs.org/
The documentation for the latest development version is available at:
http://sshuttle.readthedocs.org/en/latest/

11
all.do

@ -1,11 +0,0 @@
exec >&2
UI=
[ "$(uname)" = "Darwin" ] && UI=ui-macos/all
redo-ifchange sshuttle.8 $UI
echo
echo "What now?"
[ -z "$UI" ] || echo "- Try the MacOS GUI: open ui-macos/Sshuttle*.app"
echo "- Run sshuttle: ./sshuttle --dns -r HOSTNAME 0/0"
echo "- Read the README: less README.md"
echo "- Read the man page: less sshuttle.md"


@ -1,26 +0,0 @@
import sys, zlib
z = zlib.decompressobj()
mainmod = sys.modules[__name__]
while 1:
name = sys.stdin.readline().strip()
if name:
nbytes = int(sys.stdin.readline())
if verbosity >= 2:
sys.stderr.write('server: assembling %r (%d bytes)\n'
% (name, nbytes))
content = z.decompress(sys.stdin.read(nbytes))
exec compile(content, name, "exec")
# FIXME: this crushes everything into a single module namespace,
# then makes each of the module names point at this one. Gross.
assert(name.endswith('.py'))
modname = name[:-3]
mainmod.__dict__[modname] = mainmod
else:
break
verbose = verbosity
sys.stderr.flush()
sys.stdout.flush()
main()


@ -1,2 +0,0 @@
redo ui-macos/clean
rm -f *~ */*~ .*~ */.*~ *.8 *.tmp */*.tmp *.pyc */*.pyc

387
client.py

@ -1,387 +0,0 @@
import struct, socket, select, errno, re, signal, time
import compat.ssubprocess as ssubprocess
import helpers, ssnet, ssh, ssyslog
from ssnet import SockWrapper, Handler, Proxy, Mux, MuxWrapper
from helpers import *
_extra_fd = os.open('/dev/null', os.O_RDONLY)
def got_signal(signum, frame):
log('exiting on signal %d\n' % signum)
sys.exit(1)
_pidname = None
def check_daemon(pidfile):
global _pidname
_pidname = os.path.abspath(pidfile)
try:
oldpid = open(_pidname).read(1024)
except IOError, e:
if e.errno == errno.ENOENT:
return # no pidfile, ok
else:
raise Fatal("can't read %s: %s" % (_pidname, e))
if not oldpid:
os.unlink(_pidname)
return # invalid pidfile, ok
oldpid = int(oldpid.strip() or 0)
if oldpid <= 0:
os.unlink(_pidname)
return # invalid pidfile, ok
try:
os.kill(oldpid, 0)
except OSError, e:
if e.errno == errno.ESRCH:
os.unlink(_pidname)
return # outdated pidfile, ok
elif e.errno == errno.EPERM:
pass
else:
raise
raise Fatal("%s: sshuttle is already running (pid=%d)"
% (_pidname, oldpid))
def daemonize():
if os.fork():
os._exit(0)
os.setsid()
if os.fork():
os._exit(0)
outfd = os.open(_pidname, os.O_WRONLY|os.O_CREAT|os.O_EXCL, 0666)
try:
os.write(outfd, '%d\n' % os.getpid())
finally:
os.close(outfd)
os.chdir("/")
# Normal exit when killed, or try/finally won't work and the pidfile won't
# be deleted.
signal.signal(signal.SIGTERM, got_signal)
si = open('/dev/null', 'r+')
os.dup2(si.fileno(), 0)
os.dup2(si.fileno(), 1)
si.close()
ssyslog.stderr_to_syslog()
def daemon_cleanup():
try:
os.unlink(_pidname)
except OSError, e:
if e.errno == errno.ENOENT:
pass
else:
raise
def original_dst(sock):
try:
SO_ORIGINAL_DST = 80
SOCKADDR_MIN = 16
sockaddr_in = sock.getsockopt(socket.SOL_IP,
SO_ORIGINAL_DST, SOCKADDR_MIN)
(proto, port, a,b,c,d) = struct.unpack('!HHBBBB', sockaddr_in[:8])
assert(socket.htons(proto) == socket.AF_INET)
ip = '%d.%d.%d.%d' % (a,b,c,d)
return (ip,port)
except socket.error, e:
if e.args[0] == errno.ENOPROTOOPT:
return sock.getsockname()
raise
class FirewallClient:
def __init__(self, port, subnets_include, subnets_exclude, dnsport):
self.port = port
self.auto_nets = []
self.subnets_include = subnets_include
self.subnets_exclude = subnets_exclude
self.dnsport = dnsport
argvbase = ([sys.argv[1], sys.argv[0], sys.argv[1]] +
['-v'] * (helpers.verbose or 0) +
['--firewall', str(port), str(dnsport)])
if ssyslog._p:
argvbase += ['--syslog']
argv_tries = [
['sudo', '-p', '[local sudo] Password: '] + argvbase,
['su', '-c', ' '.join(argvbase)],
argvbase
]
# we can't use stdin/stdout=subprocess.PIPE here, as we normally would,
# because stupid Linux 'su' requires that stdin be attached to a tty.
# Instead, attach a *bidirectional* socket to its stdout, and use
# that for talking in both directions.
(s1,s2) = socket.socketpair()
def setup():
# run in the child process
s2.close()
e = None
if os.getuid() == 0:
argv_tries = argv_tries[-1:] # last entry only
for argv in argv_tries:
try:
if argv[0] == 'su':
sys.stderr.write('[local su] ')
self.p = ssubprocess.Popen(argv, stdout=s1, preexec_fn=setup)
e = None
break
except OSError, e:
pass
self.argv = argv
s1.close()
self.pfile = s2.makefile('wb+')
if e:
log('Spawning firewall manager: %r\n' % self.argv)
raise Fatal(e)
line = self.pfile.readline()
self.check()
if line != 'READY\n':
raise Fatal('%r expected READY, got %r' % (self.argv, line))
def check(self):
rv = self.p.poll()
if rv:
raise Fatal('%r returned %d' % (self.argv, rv))
def start(self):
self.pfile.write('ROUTES\n')
for (ip,width) in self.subnets_include+self.auto_nets:
self.pfile.write('%d,0,%s\n' % (width, ip))
for (ip,width) in self.subnets_exclude:
self.pfile.write('%d,1,%s\n' % (width, ip))
self.pfile.write('GO\n')
self.pfile.flush()
line = self.pfile.readline()
self.check()
if line != 'STARTED\n':
raise Fatal('%r expected STARTED, got %r' % (self.argv, line))
def sethostip(self, hostname, ip):
assert(not re.search(r'[^-\w]', hostname))
assert(not re.search(r'[^0-9.]', ip))
self.pfile.write('HOST %s,%s\n' % (hostname, ip))
self.pfile.flush()
def done(self):
self.pfile.close()
rv = self.p.wait()
if rv:
raise Fatal('cleanup: %r returned %d' % (self.argv, rv))
def _main(listener, fw, ssh_cmd, remotename, python, latency_control,
dnslistener, seed_hosts, auto_nets,
syslog, daemon):
handlers = []
if helpers.verbose >= 1:
helpers.logprefix = 'c : '
else:
helpers.logprefix = 'client: '
debug1('connecting to server...\n')
try:
(serverproc, serversock) = ssh.connect(ssh_cmd, remotename, python,
stderr=ssyslog._p and ssyslog._p.stdin,
options=dict(latency_control=latency_control))
except socket.error, e:
if e.args[0] == errno.EPIPE:
raise Fatal("failed to establish ssh session (1)")
else:
raise
mux = Mux(serversock, serversock)
handlers.append(mux)
expected = 'SSHUTTLE0001'
try:
initstring = serversock.recv(len(expected))
except socket.error, e:
if e.args[0] == errno.ECONNRESET:
raise Fatal("failed to establish ssh session (2)")
else:
raise
rv = serverproc.poll()
if rv:
raise Fatal('server died with error code %d' % rv)
if initstring != expected:
raise Fatal('expected server init string %r; got %r'
% (expected, initstring))
debug1('connected.\n')
print 'Connected.'
sys.stdout.flush()
if daemon:
daemonize()
log('daemonizing (%s).\n' % _pidname)
elif syslog:
debug1('switching to syslog.\n')
ssyslog.stderr_to_syslog()
def onroutes(routestr):
if auto_nets:
for line in routestr.strip().split('\n'):
(ip,width) = line.split(',', 1)
fw.auto_nets.append((ip,int(width)))
# we definitely want to do this *after* starting ssh, or we might end
# up intercepting the ssh connection!
#
# Moreover, now that we have the --auto-nets option, we have to wait
# for the server to send us that message anyway. Even if we haven't
# set --auto-nets, we might as well wait for the message first, then
# ignore its contents.
mux.got_routes = None
fw.start()
mux.got_routes = onroutes
def onhostlist(hostlist):
debug2('got host list: %r\n' % hostlist)
for line in hostlist.strip().split():
if line:
name,ip = line.split(',', 1)
fw.sethostip(name, ip)
mux.got_host_list = onhostlist
def onaccept():
global _extra_fd
try:
sock,srcip = listener.accept()
except socket.error, e:
if e.args[0] in [errno.EMFILE, errno.ENFILE]:
debug1('Rejected incoming connection: too many open files!\n')
# free up an fd so we can eat the connection
os.close(_extra_fd)
try:
sock,srcip = listener.accept()
sock.close()
finally:
_extra_fd = os.open('/dev/null', os.O_RDONLY)
return
else:
raise
dstip = original_dst(sock)
debug1('Accept: %s:%r -> %s:%r.\n' % (srcip[0],srcip[1],
dstip[0],dstip[1]))
if dstip[1] == listener.getsockname()[1] and islocal(dstip[0]):
debug1("-- ignored: that's my address!\n")
sock.close()
return
chan = mux.next_channel()
if not chan:
log('warning: too many open channels. Discarded connection.\n')
sock.close()
return
mux.send(chan, ssnet.CMD_CONNECT, '%s,%s' % dstip)
outwrap = MuxWrapper(mux, chan)
handlers.append(Proxy(SockWrapper(sock, sock), outwrap))
handlers.append(Handler([listener], onaccept))
dnsreqs = {}
def dns_done(chan, data):
peer,timeout = dnsreqs.get(chan) or (None,None)
debug3('dns_done: channel=%r peer=%r\n' % (chan, peer))
if peer:
del dnsreqs[chan]
debug3('doing sendto %r\n' % (peer,))
dnslistener.sendto(data, peer)
def ondns():
pkt,peer = dnslistener.recvfrom(4096)
now = time.time()
if pkt:
debug1('DNS request from %r: %d bytes\n' % (peer, len(pkt)))
chan = mux.next_channel()
dnsreqs[chan] = peer,now+30
mux.send(chan, ssnet.CMD_DNS_REQ, pkt)
mux.channels[chan] = lambda cmd,data: dns_done(chan,data)
for chan,(peer,timeout) in dnsreqs.items():
if timeout < now:
del dnsreqs[chan]
debug3('Remaining DNS requests: %d\n' % len(dnsreqs))
if dnslistener:
handlers.append(Handler([dnslistener], ondns))
if seed_hosts != None:
debug1('seed_hosts: %r\n' % seed_hosts)
mux.send(0, ssnet.CMD_HOST_REQ, '\n'.join(seed_hosts))
while 1:
rv = serverproc.poll()
if rv:
raise Fatal('server died with error code %d' % rv)
ssnet.runonce(handlers, mux)
if latency_control:
mux.check_fullness()
mux.callback()
def main(listenip, ssh_cmd, remotename, python, latency_control, dns,
seed_hosts, auto_nets,
subnets_include, subnets_exclude, syslog, daemon, pidfile):
if syslog:
ssyslog.start_syslog()
if daemon:
try:
check_daemon(pidfile)
except Fatal, e:
log("%s\n" % e)
return 5
debug1('Starting sshuttle proxy.\n')
if listenip[1]:
ports = [listenip[1]]
else:
ports = xrange(12300,9000,-1)
last_e = None
bound = False
debug2('Binding:')
for port in ports:
debug2(' %d' % port)
listener = socket.socket()
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
dnslistener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
dnslistener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
listener.bind((listenip[0], port))
dnslistener.bind((listenip[0], port))
bound = True
break
except socket.error, e:
last_e = e
debug2('\n')
if not bound:
assert(last_e)
raise last_e
listener.listen(10)
listenip = listener.getsockname()
debug1('Listening on %r.\n' % (listenip,))
if dns:
dnsip = dnslistener.getsockname()
debug1('DNS listening on %r.\n' % (dnsip,))
dnsport = dnsip[1]
else:
dnsport = 0
dnslistener = None
fw = FirewallClient(listenip[1], subnets_include, subnets_exclude, dnsport)
try:
return _main(listener, fw, ssh_cmd, remotename,
python, latency_control, dnslistener,
seed_hosts, auto_nets, syslog, daemon)
finally:
try:
if daemon:
# it's not our child anymore; can't waitpid
fw.p.returncode = 0
fw.done()
finally:
if daemon:
daemon_cleanup()

File diff suppressed because it is too large


@ -1,7 +0,0 @@
exec >&2
if pandoc </dev/null 2>/dev/null; then
pandoc -s -r markdown -w man -o $3 $1.md
else
echo "Warning: pandoc not installed; can't generate manpages."
redo-always
fi

150
do

@ -1,150 +0,0 @@
#!/bin/sh
#
# A minimal alternative to djb redo that doesn't support incremental builds.
# For the full version, visit http://github.com/apenwarr/redo
#
# The author disclaims copyright to this source file and hereby places it in
# the public domain. (2010 12 14)
#
# By default, no output coloring.
GREEN=""
BOLD=""
PLAIN=""
if [ -n "$TERM" -a "$TERM" != "dumb" ] && tty <&2 >/dev/null 2>&1; then
GREEN="$(printf '\033[32m')"
BOLD="$(printf '\033[1m')"
PLAIN="$(printf '\033[m')"
fi
_dirsplit()
{
base=${1##*/}
dir=${1%$base}
}
_dirsplit "$0"
export REDO=$(cd "${dir:-.}" && echo "$PWD/$base")
DO_TOP=
if [ -z "$DO_BUILT" ]; then
DO_TOP=1
[ -n "$*" ] || set all # only toplevel redo has a default target
export DO_BUILT=$PWD/.do_built
: >>"$DO_BUILT"
echo "Removing previously built files..." >&2
sort -u "$DO_BUILT" | tee "$DO_BUILT.new" |
while read f; do printf "%s\0%s.did\0" "$f" "$f"; done |
xargs -0 rm -f 2>/dev/null
mv "$DO_BUILT.new" "$DO_BUILT"
DO_PATH=$DO_BUILT.dir
export PATH=$DO_PATH:$PATH
rm -rf "$DO_PATH"
mkdir "$DO_PATH"
for d in redo redo-ifchange; do
ln -s "$REDO" "$DO_PATH/$d";
done
[ -e /bin/true ] && TRUE=/bin/true || TRUE=/usr/bin/true
for d in redo-ifcreate redo-stamp redo-always; do
ln -s $TRUE "$DO_PATH/$d";
done
fi
_find_dofile_pwd()
{
DOFILE=default.$1.do
while :; do
DOFILE=default.${DOFILE#default.*.}
[ -e "$DOFILE" -o "$DOFILE" = default.do ] && break
done
EXT=${DOFILE#default}
EXT=${EXT%.do}
BASE=${1%$EXT}
}
_find_dofile()
{
PREFIX=
while :; do
_find_dofile_pwd "$1"
[ -e "$DOFILE" ] && break
[ "$PWD" = "/" ] && break
TARGET=${PWD##*/}/$TARGET
PREFIX=${PWD##*/}/$PREFIX
cd ..
done
BASE=$PREFIX$BASE
}
_run_dofile()
{
export DO_DEPTH="$DO_DEPTH "
export REDO_TARGET=$PWD/$TARGET
set -e
read line1 <"$PWD/$DOFILE"
cmd=${line1#"#!/"}
if [ "$cmd" != "$line1" ]; then
/$cmd "$PWD/$DOFILE" "$@" >"$TARGET.tmp2"
else
. "$PWD/$DOFILE" >"$TARGET.tmp2"
fi
}
_do()
{
DIR=$1
TARGET=$2
if [ ! -e "$TARGET" ] || [ -e "$TARGET/." -a ! -e "$TARGET.did" ]; then
printf '%sdo %s%s%s%s\n' \
"$GREEN" "$DO_DEPTH" "$BOLD" "$DIR$TARGET" "$PLAIN" >&2
echo "$PWD/$TARGET" >>"$DO_BUILT"
DOFILE=$TARGET.do
BASE=$TARGET
EXT=
[ -e "$TARGET.do" ] || _find_dofile "$TARGET"
if [ ! -e "$DOFILE" ]; then
echo "do: $TARGET: no .do file" >&2
return 1
fi
[ ! -e "$DO_BUILD" ] || : >>"$TARGET.did"
( _run_dofile "$BASE" "$EXT" "$TARGET.tmp" )
RV=$?
if [ $RV != 0 ]; then
printf "do: %s%s\n" "$DO_DEPTH" \
"$DIR$TARGET: got exit code $RV" >&2
rm -f "$TARGET.tmp" "$TARGET.tmp2"
return $RV
fi
mv "$TARGET.tmp" "$TARGET" 2>/dev/null ||
! test -s "$TARGET.tmp2" ||
mv "$TARGET.tmp2" "$TARGET" 2>/dev/null
rm -f "$TARGET.tmp2"
else
echo "do $DO_DEPTH$TARGET exists." >&2
fi
}
redo()
{
for i in "$@"; do
_dirsplit "$i"
( cd "$dir" && _do "$dir" "$base" ) || return 1
done
}
set -e
redo "$@"
if [ -n "$DO_TOP" ]; then
echo "Removing stamp files..." >&2
[ ! -e "$DO_BUILT" ] ||
while read f; do printf "%s.did\0" "$f"; done <"$DO_BUILT" |
xargs -0 rm -f 2>/dev/null
fi

177
docs/Makefile Normal file

@ -0,0 +1,177 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/sshuttle.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/sshuttle.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/sshuttle"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/sshuttle"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

4
docs/changes.rst Normal file

@ -0,0 +1,4 @@
Changelog
---------
.. include:: ../CHANGES.rst

260
docs/conf.py Normal file

@ -0,0 +1,260 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sshuttle documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 17 12:13:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
# import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sshuttle'
copyright = '2016, Brian May'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from setuptools_scm import get_version
version = get_version(root="..")
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sshuttledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'sshuttle.tex', 'sshuttle documentation', 'Brian May', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('manpage', 'sshuttle', 'sshuttle documentation', ['Brian May'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sshuttle', 'sshuttle documentation',
'Brian May', 'sshuttle', 'A transparent proxy-based VPN using ssh',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

37
docs/how-it-works.rst Normal file

@ -0,0 +1,37 @@
How it works
============
sshuttle is not exactly a VPN, and not exactly port forwarding. It's kind
of both, and kind of neither.
It's like a VPN, since it can forward every port on an entire network, not
just ports you specify. Conveniently, it lets you use the "real" IP
addresses of each host rather than faking port numbers on localhost.
On the other hand, the way it *works* is more like ssh port forwarding than
a VPN. Normally, a VPN forwards your data one packet at a time, and
doesn't care about individual connections; ie. it's "stateless" with respect
to the traffic. sshuttle is the opposite of stateless; it tracks every
single connection.
You could compare sshuttle to something like the old `Slirp
<http://en.wikipedia.org/wiki/Slirp>`_ program, which was a userspace TCP/IP
implementation that did something similar. But it operated on a
packet-by-packet basis on the client side, reassembling the packets on the
server side. That worked okay back in the "real live serial port" days,
because serial ports had predictable latency and buffering.
But you can't safely just forward TCP packets over a TCP session (like ssh),
because TCP's performance depends fundamentally on packet loss; it
*must* experience packet loss in order to know when to slow down! At
the same time, the outer TCP session (ssh, in this case) is a reliable
transport, which means that what you forward through the tunnel *never*
experiences packet loss. The ssh session itself experiences packet loss, of
course, but TCP fixes it up and ssh (and thus you) never know the
difference. But neither does your inner TCP session, and extremely screwy
performance ensues.
sshuttle assembles the TCP stream locally, multiplexes it statefully over
an ssh session, and disassembles it back into packets at the other end. So
it never ends up doing TCP-over-TCP. It's just data-over-TCP, which is
safe.
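On the server side, the per-connection state is essentially a map from
channel id to an ordinary outbound socket: data arriving on a channel is
written into that socket as fresh, independent TCP traffic. A toy sketch of
the concept (not the real implementation; handle_frame, and the
empty-payload-means-close convention, are inventions for illustration)::

    import socket

    channels = {}   # channel id -> outbound socket on the server side

    def handle_frame(channel, dst, payload):
        # Feed one demultiplexed chunk into the connection it belongs to.
        sock = channels.get(channel)
        if sock is None:
            # First data for this channel: dial the destination the client
            # originally asked for, as a brand-new TCP connection.
            sock = socket.create_connection(dst)
            channels[channel] = sock
        if payload:
            sock.sendall(payload)
        else:
            sock.close()
            del channels[channel]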

29
docs/index.rst Normal file

@ -0,0 +1,29 @@
sshuttle: where transparent proxy meets VPN meets ssh
=====================================================
:Date: |today|
:Version: |version|
Contents:
.. toctree::
:maxdepth: 2
overview
requirements
installation
usage
platform
Man Page <manpage>
how-it-works
support
trivia
changes
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`

11
docs/installation.rst Normal file

@ -0,0 +1,11 @@
Installation
============
- From PyPI::
pip install sshuttle
- Clone::
git clone https://github.com/sshuttle/sshuttle.git
./setup.py install

242
docs/make.bat Normal file

@ -0,0 +1,242 @@
@ECHO OFF
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
set I18NSPHINXOPTS=%SPHINXOPTS% .
if NOT "%PAPER%" == "" (
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)
if "%1" == "" goto help
if "%1" == "help" (
:help
echo.Please use `make ^<target^>` where ^<target^> is one of
echo. html to make standalone HTML files
echo. dirhtml to make HTML files named index.html in directories
echo. singlehtml to make a single large HTML file
echo. pickle to make pickle files
echo. json to make JSON files
echo. htmlhelp to make HTML files and a HTML help project
echo. qthelp to make HTML files and a qthelp project
echo. devhelp to make HTML files and a Devhelp project
echo. epub to make an epub
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
echo. text to make text files
echo. man to make manual pages
echo. texinfo to make Texinfo files
echo. gettext to make PO message catalogs
echo. changes to make an overview over all changed/added/deprecated items
echo. xml to make Docutils-native XML files
echo. pseudoxml to make pseudoxml-XML files for display purposes
echo. linkcheck to check all external links for integrity
echo. doctest to run all doctests embedded in the documentation if enabled
goto end
)
if "%1" == "clean" (
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
del /q /s %BUILDDIR%\*
goto end
)
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
if "%1" == "html" (
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
goto end
)
if "%1" == "dirhtml" (
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
goto end
)
if "%1" == "singlehtml" (
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
goto end
)
if "%1" == "pickle" (
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the pickle files.
goto end
)
if "%1" == "json" (
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the JSON files.
goto end
)
if "%1" == "htmlhelp" (
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
goto end
)
if "%1" == "qthelp" (
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\sshuttle.qhcp
echo.To view the help file:
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\sshuttle.qhc
goto end
)
if "%1" == "devhelp" (
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished.
goto end
)
if "%1" == "epub" (
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The epub file is in %BUILDDIR%/epub.
goto end
)
if "%1" == "latex" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
if errorlevel 1 exit /b 1
echo.
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdf" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf
cd %BUILDDIR%/..
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdfja" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf-ja
cd %BUILDDIR%/..
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "text" (
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The text files are in %BUILDDIR%/text.
goto end
)
if "%1" == "man" (
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The manual pages are in %BUILDDIR%/man.
goto end
)
if "%1" == "texinfo" (
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
goto end
)
if "%1" == "gettext" (
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
goto end
)
if "%1" == "changes" (
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
if errorlevel 1 exit /b 1
echo.
echo.The overview file is in %BUILDDIR%/changes.
goto end
)
if "%1" == "linkcheck" (
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
if errorlevel 1 exit /b 1
echo.
echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
goto end
)
if "%1" == "doctest" (
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
if errorlevel 1 exit /b 1
echo.
echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
goto end
)
if "%1" == "xml" (
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The XML files are in %BUILDDIR%/xml.
goto end
)
if "%1" == "pseudoxml" (
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
goto end
)
:end

View File

@ -1,174 +1,206 @@
% sshuttle(8) Sshuttle 0.46
% Avery Pennarun <apenwarr@gmail.com>
% 2011-01-25
# NAME
sshuttle - a transparent proxy-based VPN using ssh
# SYNOPSIS
sshuttle [options...] [-r [username@]sshserver[:port]] \<subnets...\>
sshuttle
========
# DESCRIPTION
Synopsis
--------
**sshuttle** [*options*] [**-r** *[username@]sshserver[:port]*] \<*subnets* ...\>
sshuttle allows you to create a VPN connection from your
Description
-----------
:program:`sshuttle` allows you to create a VPN connection from your
machine to any remote server that you can connect to via
ssh, as long as that server has python 2.3 or higher.
To work, you must have root access on the local machine,
but you can have a normal account on the server.
It's valid to run sshuttle more than once simultaneously on
It's valid to run :program:`sshuttle` more than once simultaneously on
a single client machine, connecting to a different server
every time, so you can be on more than one VPN at once.
If run on a router, sshuttle can forward traffic for your
If run on a router, :program:`sshuttle` can forward traffic for your
entire subnet to the VPN.
# OPTIONS
Options
-------
.. program:: sshuttle
\<subnets...\>
: a list of subnets to route over the VPN, in the form
`a.b.c.d[/width]`. Valid examples are 1.2.3.4 (a
.. option:: subnets
A list of subnets to route over the VPN, in the form
``a.b.c.d[/width]``. Valid examples are 1.2.3.4 (a
single IP address), 1.2.3.4/32 (equivalent to 1.2.3.4),
1.2.3.0/24 (a 24-bit subnet, ie. with a 255.255.255.0
netmask), and 0/0 ('just route everything through the
VPN').
-l, --listen=*[ip:]port*
: use this ip address and port number as the transparent
proxy port. By default sshuttle finds an available
.. option:: --method [auto|nat|tproxy|pf]
Which firewall method should sshuttle use? For auto, sshuttle attempts to
guess the appropriate method depending on what it can find in PATH. The
default value is auto.
.. option:: -l, --listen=[ip:]port
Use this ip address and port number as the transparent
proxy port. By default :program:`sshuttle` finds an available
port automatically and listens on IP 127.0.0.1
(localhost), so you don't need to override it, and
connections are only proxied from the local machine,
not from outside machines. If you want to accept
connections from other machines on your network (ie. to
run sshuttle on a router) try enabling IP Forwarding in
your kernel, then using `--listen 0.0.0.0:0`.
run :program:`sshuttle` on a router) try enabling IP Forwarding in
your kernel, then using ``--listen 0.0.0.0:0``.
-H, --auto-hosts
: scan for remote hostnames and update the local /etc/hosts
For the tproxy method this can be an IPv6 address. Use this option twice if
required, to provide both IPv4 and IPv6 addresses.
.. option:: -H, --auto-hosts
Scan for remote hostnames and update the local /etc/hosts
file with matching entries for as long as the VPN is
open. This is nicer than changing your system's DNS
(/etc/resolv.conf) settings, for several reasons. First,
hostnames are added without domain names attached, so
you can `ssh thatserver` without worrying if your local
domain matches the remote one. Second, if you sshuttle
you can ``ssh thatserver`` without worrying if your local
domain matches the remote one. Second, if you :program:`sshuttle`
into more than one VPN at a time, it's impossible to
use more than one DNS server at once anyway, but
sshuttle correctly merges /etc/hosts entries between
:program:`sshuttle` correctly merges /etc/hosts entries between
all running copies. Third, if you're only routing a
few subnets over the VPN, you probably would prefer to
keep using your local DNS server for everything else.
-N, --auto-nets
: in addition to the subnets provided on the command
.. option:: -N, --auto-nets
In addition to the subnets provided on the command
line, ask the server which subnets it thinks we should
route, and route those automatically. The suggestions
are taken automatically from the server's routing
table.
--python
: specify the name/path of the remote python interpreter.
The default is just `python`, which means to use the
.. option:: --dns
Capture local DNS requests and forward to the remote DNS
server.
.. option:: --python
Specify the name/path of the remote python interpreter.
The default is just ``python``, which means to use the
default python interpreter on the remote system's PATH.
-r, --remote=*[username@]sshserver[:port]*
: the remote hostname and optional username and ssh
port number to use for connecting to the remote server.
.. option:: -r, --remote=[username@]sshserver[:port]
The remote hostname and optional username and ssh
port number to use for connecting to the remote server.
For example, example.com, testuser@example.com,
testuser@example.com:2222, or example.com:2244.
-x, --exclude=*subnet*
: explicitly exclude this subnet from forwarding. The
format of this option is the same as the `<subnets>`
.. option:: -x, --exclude=subnet
Explicitly exclude this subnet from forwarding. The
format of this option is the same as the ``<subnets>``
option. To exclude more than one subnet, specify the
`-x` option more than once. You can say something like
`0/0 -x 1.2.3.0/24` to forward everything except the
``-x`` option more than once. You can say something like
``0/0 -x 1.2.3.0/24`` to forward everything except the
local subnet over the VPN, for example.
-v, --verbose
: print more information about the session. This option
can be used more than once for increased verbosity. By
default, sshuttle prints only error messages.
-e, --ssh-cmd
: the command to use to connect to the remote server. The
default is just `ssh`. Use this if your ssh client is
in a non-standard location or you want to provide extra
options to the ssh command, for example, `-e 'ssh -v'`.
.. option:: -X, --exclude-from=file
--seed-hosts
: a comma-separated list of hostnames to use to
initialize the `--auto-hosts` scan algorithm.
`--auto-hosts` does things like poll local SMB servers
Exclude the subnets specified in a file, one subnet per
line. Useful when you have lots of subnets to exclude.
.. option:: -v, --verbose
Print more information about the session. This option
can be used more than once for increased verbosity. By
default, :program:`sshuttle` prints only error messages.
.. option:: -e, --ssh-cmd
The command to use to connect to the remote server. The
default is just ``ssh``. Use this if your ssh client is
in a non-standard location or you want to provide extra
options to the ssh command, for example, ``-e 'ssh -v'``.
.. option:: --seed-hosts
A comma-separated list of hostnames to use to
initialize the :option:`--auto-hosts` scan algorithm.
:option:`--auto-hosts` does things like poll local SMB servers
for lists of local hostnames, but can speed things up
if you use this option to give it a few names to start
from.
--no-latency-control
: sacrifice latency to improve bandwidth benchmarks. ssh
.. option:: --no-latency-control
Sacrifice latency to improve bandwidth benchmarks. ssh
uses really big socket buffers, which can overload the
connection if you start doing large file transfers,
thus making all your other sessions inside the same
tunnel go slowly. Normally, sshuttle tries to avoid
tunnel go slowly. Normally, :program:`sshuttle` tries to avoid
this problem using a "fullness check" that allows only
a certain amount of outstanding data to be buffered at
a time. But on high-bandwidth links, this can leave a
lot of your bandwidth underutilized. It also makes
sshuttle seem slow in bandwidth benchmarks (benchmarks
rarely test ping latency, which is what sshuttle is
:program:`sshuttle` seem slow in bandwidth benchmarks (benchmarks
rarely test ping latency, which is what :program:`sshuttle` is
trying to control). This option disables the latency
control feature, maximizing bandwidth usage. Use at
your own risk.
-D, --daemon
: automatically fork into the background after connecting
to the remote server. Implies `--syslog`.
--syslog
: after connecting, send all log messages to the
`syslog`(3) service instead of stderr. This is
implicit if you use `--daemon`.
--pidfile=*pidfilename*
: when using `--daemon`, save sshuttle's pid to
*pidfilename*. The default is `sshuttle.pid` in the
.. option:: -D, --daemon
Automatically fork into the background after connecting
to the remote server. Implies :option:`--syslog`.
.. option:: --syslog
After connecting, send all log messages to the
:manpage:`syslog(3)` service instead of stderr. This is
implicit if you use :option:`--daemon`.
.. option:: --pidfile=pidfilename
When using :option:`--daemon`, save :program:`sshuttle`'s pid to
*pidfilename*. The default is ``sshuttle.pid`` in the
current directory.
--server
: (internal use only) run the sshuttle server on
stdin/stdout. This is what the client runs on
the remote end.
.. option:: --disable-ipv6
--firewall
: (internal use only) run the firewall manager. This is
the only part of sshuttle that must run as root. If
you start sshuttle as a non-root user, it will
automatically run `sudo` or `su` to start the firewall
manager, but the core of sshuttle still runs as a
If using the tproxy method, this will disable IPv6 support.
.. option:: --firewall
(internal use only) run the firewall manager. This is
the only part of :program:`sshuttle` that must run as root. If
you start :program:`sshuttle` as a non-root user, it will
automatically run ``sudo`` or ``su`` to start the firewall
manager, but the core of :program:`sshuttle` still runs as a
normal user.
--hostwatch
: (internal use only) run the hostwatch daemon. This
.. option:: --hostwatch
(internal use only) run the hostwatch daemon. This
process runs on the server side and collects hostnames for
the `--auto-hosts` option. Using this option by itself
makes it a lot easier to debug and test the `--auto-hosts`
the :option:`--auto-hosts` option. Using this option by itself
makes it a lot easier to debug and test the :option:`--auto-hosts`
feature.
# EXAMPLES
Test locally by proxying all local connections, without using ssh:
Examples
--------
Test locally by proxying all local connections, without using ssh::
$ sshuttle -v 0/0
Starting sshuttle proxy.
Listening on ('0.0.0.0', 12300).
[local sudo] Password:
[local sudo] Password:
firewall manager ready.
c : connecting to server...
s: available routes:
@ -186,7 +218,7 @@ Test locally by proxying all local connections, without using ssh:
c : SW#6:192.168.42.106:50035: deleting
Test connection to a remote server, with automatic hostname
and subnet guessing:
and subnet guessing::
$ sshuttle -vNHr example.org
@ -209,22 +241,22 @@ and subnet guessing:
c : SW#6:192.168.42.121:60554: deleting
# DISCUSSION
When it starts, sshuttle creates an ssh session to the
server specified by the `-r` option. If `-r` is omitted,
Discussion
----------
When it starts, :program:`sshuttle` creates an ssh session to the
server specified by the ``-r`` option. If ``-r`` is omitted,
it will start both its client and server locally, which is
sometimes useful for testing.
After connecting to the remote server, sshuttle uploads its
After connecting to the remote server, :program:`sshuttle` uploads its
(python) source code to the remote end and executes it
there. Thus, you don't need to install sshuttle on the
remote server, and there are never sshuttle version
there. Thus, you don't need to install :program:`sshuttle` on the
remote server, and there are never :program:`sshuttle` version
conflicts between client and server.
Unlike most VPNs, sshuttle forwards sessions, not packets.
Unlike most VPNs, :program:`sshuttle` forwards sessions, not packets.
That is, it uses kernel transparent proxying (`iptables
REDIRECT` rules on Linux, or `ipfw fwd` rules on BSD) to
REDIRECT` rules on Linux) to
capture outgoing TCP sessions, then creates entirely
separate TCP sessions out to the original destination at
the other end of the tunnel.
@ -243,7 +275,7 @@ tcp-based encrypted streams like ssh or ssl, and have to
implement their own encryption from scratch, which is very
complex and error prone.
sshuttle's simplicity comes from the fact that it can
:program:`sshuttle`'s simplicity comes from the fact that it can
safely use the existing ssh encrypted tunnel without
incurring a performance penalty. It does this by letting
the client-side kernel manage the incoming tcp stream, and
@ -252,19 +284,6 @@ there is no need for congestion control to be shared
between the two separate streams, so a tcp-based tunnel is
fine.
.. seealso::
# BUGS
On MacOS 10.6 (at least up to 10.6.6), your network will
stop responding about 10 minutes after the first time you
start sshuttle, because of a MacOS kernel bug relating to
arp and the net.inet.ip.scopedroute sysctl. To fix it,
just switch your wireless off and on. Sshuttle makes the
kernel setting it changes permanent, so this won't happen
again, even after a reboot.
# SEE ALSO
`ssh`(1), `python`(1)
:manpage:`ssh(1)`, :manpage:`python(1)`

26
docs/overview.rst Normal file
View File

@ -0,0 +1,26 @@
Overview
========
As far as I know, sshuttle is the only program that solves the following
common case:
- Your client machine (or router) is Linux, FreeBSD, or MacOS.
- You have access to a remote network via ssh.
- You don't necessarily have admin access on the remote network.
- The remote network has no VPN, or only stupid/complex VPN
protocols (IPsec, PPTP, etc). Or maybe you *are* the
admin and you just got frustrated with the awful state of
VPN tools.
- You don't want to create an ssh port forward for every
single host/port on the remote network.
- You hate openssh's port forwarding because it's randomly
slow and/or stupid.
- You can't use openssh's PermitTunnel feature because
it's disabled by default on openssh servers; plus it does
TCP-over-TCP, which has terrible performance (see below).

10
docs/platform.rst Normal file
View File

@ -0,0 +1,10 @@
Platform Specific Notes
=======================
Contents:
.. toctree::
:maxdepth: 2
tproxy
windows

72
docs/requirements.rst Normal file
View File

@ -0,0 +1,72 @@
Requirements
============
Client side Requirements
------------------------
- sudo, or root access on your client machine.
(The server doesn't need admin access.)
- Python 2.7 or Python 3.5.
Linux with NAT method
~~~~~~~~~~~~~~~~~~~~~
Supports:
* IPv4 TCP
* IPv4 DNS
Requires:
* iptables DNAT, REDIRECT, and ttl modules.
Linux with TPROXY method
~~~~~~~~~~~~~~~~~~~~~~~~
Supports:
* IPv4 TCP
* IPv4 UDP (requires ``recvmsg`` - see below)
* IPv4 DNS (requires ``recvmsg`` - see below)
* IPv6 TCP
* IPv6 UDP (requires ``recvmsg`` - see below)
* IPv6 DNS (requires ``recvmsg`` - see below)
.. _PyXAPI: http://www.pps.univ-paris-diderot.fr/~ylg/PyXAPI/
Full UDP or DNS support with the TPROXY method requires the ``recvmsg()``
syscall. This is not available in Python 2, but it is available in Python 3.5
and later. Under Python 2 you might find it sufficient to install PyXAPI_ to
get the ``recvmsg()`` function. See :doc:`tproxy` for more information.
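A quick way to check whether a given interpreter exposes it (an illustrative
snippet, not part of sshuttle)::

import socket
# socket.recvmsg() is available on Python 3.3 and later; if this prints False,
# full UDP and DNS support will not work with this interpreter.
print(hasattr(socket.socket, "recvmsg"))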
MacOS / FreeBSD / OpenBSD
~~~~~~~~~~~~~~~~~~~~~~~~~
Method: pf
Supports:
* IPv4 TCP
* IPv4 DNS
Requires:
* You need to have the pfctl command.
Windows
~~~~~~~
Not officially supported; however, it can be made to work with Vagrant. Requires
cmd.exe with Administrator access. See :doc:`windows` for more information.
Server side Requirements
------------------------
Python 2.7 or Python 3.5.
Additional Suggested Software
-----------------------------
- You may want to use autossh, available in various package management
systems

11
docs/support.rst Normal file
View File

@ -0,0 +1,11 @@
Support
=======
Mailing list:
* Subscribe by sending a message to <sshuttle+subscribe@googlegroups.com>
* List archives are at: http://groups.google.com/group/sshuttle
Issue tracker and pull requests at github:
* https://github.com/sshuttle/sshuttle

42
docs/tproxy.rst Normal file
View File

@ -0,0 +1,42 @@
TPROXY
======
TPROXY is the only method that has full support of IPv6 and UDP.
There are some things you need to consider for TPROXY to work:
- The following commands need to be run first as root. This only needs to be
done once after booting up::
ip route add local default dev lo table 100
ip rule add fwmark 1 lookup 100
ip -6 route add local default dev lo table 100
ip -6 rule add fwmark 1 lookup 100
- The ``--auto-nets`` feature does not detect IPv6 routes automatically. Add IPv6
routes manually, e.g. by adding ``'::/0'`` to the end of the command line.
- The client needs to be run as root. e.g.::
sudo SSH_AUTH_SOCK="$SSH_AUTH_SOCK" $HOME/tree/sshuttle.tproxy/sshuttle --method=tproxy ...
- You may need to exclude the IP address of the server you are connecting to.
Otherwise sshuttle may attempt to intercept the ssh packets, which will not
work. Use the ``--exclude`` parameter for this.
- Similarly, UDP return packets (including DNS) could get intercepted and
bounced back. This is the case if you have a broad subnet such as
``0.0.0.0/0`` or ``::/0`` that includes the IP address of the client. Use the
``--exclude`` parameter for this.
- You need the ``--method=tproxy`` parameter, as above.
- The routes for the outgoing packets must already exist. For example, if your
connection does not have IPv6 support, no IPv6 routes will exist, no IPv6
packets will be generated, and sshuttle cannot intercept them::
telnet -6 www.google.com 80
Trying 2404:6800:4001:805::1010...
telnet: Unable to connect to remote host: Network is unreachable
Add some dummy routes to external interfaces. Make sure, however, that they
get removed again after sshuttle exits.

36
docs/trivia.rst Normal file
View File

@ -0,0 +1,36 @@
Useless Trivia
==============
This section written by the original author, Avery Pennarun
<apenwarr@gmail.com>.
Back in 1998, I released the first version of `Tunnel
Vision <http://alumnit.ca/wiki/?TunnelVisionReadMe>`_, a semi-intelligent VPN
client for Linux. Unfortunately, I made two big mistakes: I implemented the
key exchange myself (oops), and I ended up doing TCP-over-TCP (double oops).
The resulting program worked okay - and people used it for years - but the
performance was always a bit funny. And nobody ever found any security flaws
in my key exchange, either, but that doesn't mean anything. :)
The same year, dcoombs and I also released Fast Forward, a proxy server
supporting transparent proxying. Among other things, we used it for
automatically splitting traffic across more than one Internet connection (a
tool we called "Double Vision").
I was still in university at the time. A couple years after that, one of my
professors was working with some graduate students on the technology that would
eventually become `Slipstream Internet Acceleration
<http://www.slipstream.com/>`_. He asked me to do a contract for him to build
an initial prototype of a transparent proxy server for mobile networks. The
idea was similar to sshuttle: if you reassemble and then disassemble the TCP
packets, you can reduce latency and improve performance vs. just forwarding
the packets over a plain VPN or mobile network. (It's unlikely that any of my
code has persisted in the Slipstream product today, but the concept is still
pretty cool. I'm still horrified that people use plain TCP on complex mobile
networks with crazily variable latency, for which it was never really
intended.)
That project I did for Slipstream was what first gave me the idea to merge
the concepts of Fast Forward, Double Vision, and Tunnel Vision into a single
program that was the best of all worlds. And here we are, at last.
You're welcome.

62
docs/usage.rst Normal file
View File

@ -0,0 +1,62 @@
Usage
=====
.. note::
For information on usage with Windows, see the :doc:`windows` section.
For information on using the TProxy method, see the :doc:`tproxy` section.
Forward all traffic::
sshuttle -r username@sshserver 0.0.0.0/0
- Use the :option:`sshuttle -r` parameter to specify a remote server.
- By default sshuttle will automatically choose a method to use. Override with
the :option:`sshuttle --method` parameter.
- There is a shortcut for 0.0.0.0/0 for those that value
their wrists::
sshuttle -r username@sshserver 0/0
If you would also like your DNS queries to be proxied
through the DNS server of the server you are connected to::
sshuttle --dns -r username@sshserver 0/0
The above is probably what you want to use to prevent
local network attacks such as Firesheep and friends.
See the documentation for the :option:`sshuttle --dns` parameter.
(You may be prompted for one or more passwords; first, the local password to
become root using sudo, and then the remote ssh password. Or you might have
sudo and ssh set up to not require passwords, in which case you won't be
prompted at all.)
Usage Notes
-----------
That's it! Now your local machine can access the remote network as if you
were right there. And if your "client" machine is a router, everyone on
your local network can make connections to your remote network.
You don't need to install sshuttle on the remote server;
the remote server just needs to have python available.
sshuttle will automatically upload its source code to the
remote python interpreter and run it there.
This creates a transparent proxy server on your local machine for all IP
addresses that match 0.0.0.0/0. (You can use more specific IP addresses if
you want; use any number of IP addresses or subnets to change which
addresses get proxied. Using 0.0.0.0/0 proxies *everything*, which is
interesting if you don't trust the people on your local network.)
Any TCP session you initiate to one of the proxied IP addresses will be
captured by sshuttle and sent over an ssh session to the remote copy of
sshuttle, which will then regenerate the connection on that end, and funnel
the data back and forth through ssh.
Fun, right? A poor man's instant VPN, and you don't even have to have
admin access on the server.

19
docs/windows.rst Normal file
View File

@ -0,0 +1,19 @@
Microsoft Windows
=================
Currently there is no built in support for running sshuttle directly on
Microsoft Windows.
What we can do instead is create a Linux VM with Vagrant (or simply
VirtualBox if you like). In the Vagrant settings, remember to turn on the
bridged NIC. Then run sshuttle inside the VM as below::
sshuttle -l 0.0.0.0 -x 10.0.0.0/8 -x 192.168.0.0/16 0/0
10.0.0.0/8 excludes Vagrant's NAT traffic and 192.168.0.0/16 excludes
traffic to the local area network (assuming that we're using the 192.168.0.0 subnet).
Assuming the VM has obtained the IP 192.168.1.200 on the bridged NIC (we can
configure that in Vagrant), we can then ask Windows to route all its traffic
via the VM by running the following in cmd.exe with admin rights::
route add 0.0.0.0 mask 0.0.0.0 192.168.1.200

View File

@ -1,460 +0,0 @@
import re, errno, socket, select, struct
import compat.ssubprocess as ssubprocess
import helpers, ssyslog
from helpers import *
# python doesn't have a definition for this
IPPROTO_DIVERT = 254
def nonfatal(func, *args):
try:
func(*args)
except Fatal, e:
log('error: %s\n' % e)
def ipt_chain_exists(name):
argv = ['iptables', '-t', 'nat', '-nL']
p = ssubprocess.Popen(argv, stdout = ssubprocess.PIPE)
for line in p.stdout:
if line.startswith('Chain %s ' % name):
return True
rv = p.wait()
if rv:
raise Fatal('%r returned %d' % (argv, rv))
def ipt(*args):
argv = ['iptables', '-t', 'nat'] + list(args)
debug1('>> %s\n' % ' '.join(argv))
rv = ssubprocess.call(argv)
if rv:
raise Fatal('%r returned %d' % (argv, rv))
_no_ttl_module = False
def ipt_ttl(*args):
global _no_ttl_module
if not _no_ttl_module:
# we avoid infinite loops by generating server-side connections
# with ttl 42. This makes the client side not recapture those
# connections, in case client == server.
try:
argsplus = list(args) + ['-m', 'ttl', '!', '--ttl', '42']
ipt(*argsplus)
except Fatal:
ipt(*args)
# we only get here if the non-ttl attempt succeeds
log('sshuttle: warning: your iptables is missing '
'the ttl module.\n')
_no_ttl_module = True
else:
ipt(*args)
# We name the chain based on the transproxy port number so that it's possible
# to run multiple copies of sshuttle at the same time. Of course, the
# multiple copies shouldn't have overlapping subnets, or only the most-
# recently-started one will win (because we use "-I OUTPUT 1" instead of
# "-A OUTPUT").
def do_iptables(port, dnsport, subnets):
chain = 'sshuttle-%s' % port
# basic cleanup/setup of chains
if ipt_chain_exists(chain):
nonfatal(ipt, '-D', 'OUTPUT', '-j', chain)
nonfatal(ipt, '-D', 'PREROUTING', '-j', chain)
nonfatal(ipt, '-F', chain)
ipt('-X', chain)
if subnets or dnsport:
ipt('-N', chain)
ipt('-F', chain)
ipt('-I', 'OUTPUT', '1', '-j', chain)
ipt('-I', 'PREROUTING', '1', '-j', chain)
if subnets:
# create new subnet entries. Note that we're sorting in a very
# particular order: we need to go from most-specific (largest swidth)
# to least-specific, and at any given level of specificity, we want
# excludes to come first. That's why the columns are in such a non-
# intuitive order.
for swidth,sexclude,snet in sorted(subnets, reverse=True):
if sexclude:
ipt('-A', chain, '-j', 'RETURN',
'--dest', '%s/%s' % (snet,swidth),
'-p', 'tcp')
else:
ipt_ttl('-A', chain, '-j', 'REDIRECT',
'--dest', '%s/%s' % (snet,swidth),
'-p', 'tcp',
'--to-ports', str(port))
if dnsport:
nslist = resolvconf_nameservers()
for ip in nslist:
ipt_ttl('-A', chain, '-j', 'REDIRECT',
'--dest', '%s/32' % ip,
'-p', 'udp',
'--dport', '53',
'--to-ports', str(dnsport))
def ipfw_rule_exists(n):
argv = ['ipfw', 'list']
p = ssubprocess.Popen(argv, stdout = ssubprocess.PIPE)
found = False
for line in p.stdout:
if line.startswith('%05d ' % n):
if not ('ipttl 42' in line
or ('skipto %d' % (n+1)) in line
or 'check-state' in line):
log('non-sshuttle ipfw rule: %r\n' % line.strip())
raise Fatal('non-sshuttle ipfw rule #%d already exists!' % n)
found = True
rv = p.wait()
if rv:
raise Fatal('%r returned %d' % (argv, rv))
return found
_oldctls = {}
def _fill_oldctls(prefix):
argv = ['sysctl', prefix]
p = ssubprocess.Popen(argv, stdout = ssubprocess.PIPE)
for line in p.stdout:
assert(line[-1] == '\n')
(k,v) = line[:-1].split(': ', 1)
_oldctls[k] = v
rv = p.wait()
if rv:
raise Fatal('%r returned %d' % (argv, rv))
if not line:
raise Fatal('%r returned no data' % (argv,))
def _sysctl_set(name, val):
argv = ['sysctl', '-w', '%s=%s' % (name, val)]
debug1('>> %s\n' % ' '.join(argv))
return ssubprocess.call(argv, stdout = open('/dev/null', 'w'))
_changedctls = []
def sysctl_set(name, val, permanent=False):
PREFIX = 'net.inet.ip'
assert(name.startswith(PREFIX + '.'))
val = str(val)
if not _oldctls:
_fill_oldctls(PREFIX)
if not (name in _oldctls):
debug1('>> No such sysctl: %r\n' % name)
return False
oldval = _oldctls[name]
if val != oldval:
rv = _sysctl_set(name, val)
if rv==0 and permanent:
debug1('>> ...saving permanently in /etc/sysctl.conf\n')
f = open('/etc/sysctl.conf', 'a')
f.write('\n'
'# Added by sshuttle\n'
'%s=%s\n' % (name, val))
f.close()
else:
_changedctls.append(name)
return True
def _udp_unpack(p):
src = (socket.inet_ntoa(p[12:16]), struct.unpack('!H', p[20:22])[0])
dst = (socket.inet_ntoa(p[16:20]), struct.unpack('!H', p[22:24])[0])
return src, dst
def _udp_repack(p, src, dst):
addrs = socket.inet_aton(src[0]) + socket.inet_aton(dst[0])
ports = struct.pack('!HH', src[1], dst[1])
return p[:12] + addrs + ports + p[24:]
_real_dns_server = [None]
def _handle_diversion(divertsock, dnsport):
p,tag = divertsock.recvfrom(4096)
src,dst = _udp_unpack(p)
debug3('got diverted packet from %r to %r\n' % (src, dst))
if dst[1] == 53:
# outgoing DNS
debug3('...packet is a DNS request.\n')
_real_dns_server[0] = dst
dst = ('127.0.0.1', dnsport)
elif src[1] == dnsport:
if islocal(src[0]):
debug3('...packet is a DNS response.\n')
src = _real_dns_server[0]
else:
log('weird?! unexpected divert from %r to %r\n' % (src, dst))
assert(0)
newp = _udp_repack(p, src, dst)
divertsock.sendto(newp, tag)
def ipfw(*args):
argv = ['ipfw', '-q'] + list(args)
debug1('>> %s\n' % ' '.join(argv))
rv = ssubprocess.call(argv)
if rv:
raise Fatal('%r returned %d' % (argv, rv))
def do_ipfw(port, dnsport, subnets):
sport = str(port)
xsport = str(port+1)
# cleanup any existing rules
if ipfw_rule_exists(port):
ipfw('delete', sport)
while _changedctls:
name = _changedctls.pop()
oldval = _oldctls[name]
_sysctl_set(name, oldval)
if subnets or dnsport:
sysctl_set('net.inet.ip.fw.enable', 1)
changed = sysctl_set('net.inet.ip.scopedroute', 0, permanent=True)
if changed:
log("\n"
" WARNING: ONE-TIME NETWORK DISRUPTION:\n"
" =====================================\n"
"sshuttle has changed a MacOS kernel setting to work around\n"
"a bug in MacOS 10.6. This will cause your network to drop\n"
"within 5-10 minutes unless you restart your network\n"
"interface (change wireless networks or unplug/plug the\n"
"ethernet port) NOW, then restart sshuttle. The fix is\n"
"permanent; you only have to do this once.\n\n")
sys.exit(1)
ipfw('add', sport, 'check-state', 'ip',
'from', 'any', 'to', 'any')
if subnets:
# create new subnet entries
for swidth,sexclude,snet in sorted(subnets, reverse=True):
if sexclude:
ipfw('add', sport, 'skipto', xsport,
'log', 'tcp',
'from', 'any', 'to', '%s/%s' % (snet,swidth))
else:
ipfw('add', sport, 'fwd', '127.0.0.1,%d' % port,
'log', 'tcp',
'from', 'any', 'to', '%s/%s' % (snet,swidth),
'not', 'ipttl', '42', 'keep-state', 'setup')
# This part is much crazier than it is on Linux, because MacOS (at least
# 10.6, and probably other versions, and maybe FreeBSD too) doesn't
# correctly fixup the dstip/dstport for UDP packets when it puts them
# through a 'fwd' rule. It also doesn't fixup the srcip/srcport in the
# response packet. In Linux iptables, all that happens magically for us,
# so we just redirect the packets and relax.
#
# On MacOS, we have to fix the ports ourselves. For that, we use a
# 'divert' socket, which receives raw packets and lets us mangle them.
#
# Here's how it works. Let's say the local DNS server is 1.1.1.1:53,
# and the remote DNS server is 2.2.2.2:53, and the local transproxy port
# is 10.0.0.1:12300, and a client machine is making a request from
# 10.0.0.5:9999. We see a packet like this:
# 10.0.0.5:9999 -> 1.1.1.1:53
# Since the destip:port matches one of our local nameservers, it will
# match a 'fwd' rule, thus grabbing it on the local machine. However,
# the local kernel will then see a packet addressed to *:53 and
# not know what to do with it; there's nobody listening on port 53. Thus,
# we divert it, rewriting it into this:
# 10.0.0.5:9999 -> 10.0.0.1:12300
# This gets proxied out to the server, which sends it to 2.2.2.2:53,
# and the answer comes back, and the proxy sends it back out like this:
# 10.0.0.1:12300 -> 10.0.0.5:9999
# But that's wrong! The original machine expected an answer from
# 1.1.1.1:53, so we have to divert the *answer* and rewrite it:
# 1.1.1.1:53 -> 10.0.0.5:9999
#
# See? Easy stuff.
if dnsport:
divertsock = socket.socket(socket.AF_INET, socket.SOCK_RAW,
IPPROTO_DIVERT)
divertsock.bind(('0.0.0.0', port)) # IP field is ignored
nslist = resolvconf_nameservers()
for ip in nslist:
# relabel and then catch outgoing DNS requests
ipfw('add', sport, 'divert', sport,
'log', 'udp',
'from', 'any', 'to', '%s/32' % ip, '53',
'not', 'ipttl', '42')
# relabel DNS responses
ipfw('add', sport, 'divert', sport,
'log', 'udp',
'from', 'any', str(dnsport), 'to', 'any',
'not', 'ipttl', '42')
def do_wait():
while 1:
r,w,x = select.select([sys.stdin, divertsock], [], [])
if divertsock in r:
_handle_diversion(divertsock, dnsport)
if sys.stdin in r:
return
else:
do_wait = None
return do_wait
def program_exists(name):
paths = (os.getenv('PATH') or os.defpath).split(os.pathsep)
for p in paths:
fn = '%s/%s' % (p, name)
if os.path.exists(fn):
return not os.path.isdir(fn) and os.access(fn, os.X_OK)
hostmap = {}
def rewrite_etc_hosts(port):
HOSTSFILE='/etc/hosts'
BAKFILE='%s.sbak' % HOSTSFILE
APPEND='# sshuttle-firewall-%d AUTOCREATED' % port
old_content = ''
st = None
try:
old_content = open(HOSTSFILE).read()
st = os.stat(HOSTSFILE)
except IOError, e:
if e.errno == errno.ENOENT:
pass
else:
raise
if old_content.strip() and not os.path.exists(BAKFILE):
os.link(HOSTSFILE, BAKFILE)
tmpname = "%s.%d.tmp" % (HOSTSFILE, port)
f = open(tmpname, 'w')
for line in old_content.rstrip().split('\n'):
if line.find(APPEND) >= 0:
continue
f.write('%s\n' % line)
for (name,ip) in sorted(hostmap.items()):
f.write('%-30s %s\n' % ('%s %s' % (ip,name), APPEND))
f.close()
if st:
os.chown(tmpname, st.st_uid, st.st_gid)
os.chmod(tmpname, st.st_mode)
else:
os.chown(tmpname, 0, 0)
os.chmod(tmpname, 0644)
os.rename(tmpname, HOSTSFILE)
def restore_etc_hosts(port):
global hostmap
hostmap = {}
rewrite_etc_hosts(port)
# This is some voodoo for setting up the kernel's transparent
# proxying stuff. If subnets is empty, we just delete our sshuttle rules;
# otherwise we delete it, then make them from scratch.
#
# This code is supposed to clean up after itself by deleting its rules on
# exit. In case that fails, it's not the end of the world; future runs will
# supercede it in the transproxy list, at least, so the leftover rules
# are hopefully harmless.
def main(port, dnsport, syslog):
assert(port > 0)
assert(port <= 65535)
assert(dnsport >= 0)
assert(dnsport <= 65535)
if os.getuid() != 0:
raise Fatal('you must be root (or enable su/sudo) to set the firewall')
if program_exists('ipfw'):
do_it = do_ipfw
elif program_exists('iptables'):
do_it = do_iptables
else:
raise Fatal("can't find either ipfw or iptables; check your PATH")
# because of limitations of the 'su' command, the *real* stdin/stdout
# are both attached to stdout initially. Clone stdout into stdin so we
# can read from it.
os.dup2(1, 0)
if syslog:
ssyslog.start_syslog()
ssyslog.stderr_to_syslog()
debug1('firewall manager ready.\n')
sys.stdout.write('READY\n')
sys.stdout.flush()
# ctrl-c shouldn't be passed along to me. When the main sshuttle dies,
# I'll die automatically.
os.setsid()
# we wait until we get some input before creating the rules. That way,
# sshuttle can launch us as early as possible (and get sudo password
# authentication as early in the startup process as possible).
line = sys.stdin.readline(128)
if not line:
return # parent died; nothing to do
subnets = []
if line != 'ROUTES\n':
raise Fatal('firewall: expected ROUTES but got %r' % line)
while 1:
line = sys.stdin.readline(128)
if not line:
raise Fatal('firewall: expected route but got %r' % line)
elif line == 'GO\n':
break
try:
(width,exclude,ip) = line.strip().split(',', 2)
except:
raise Fatal('firewall: expected route or GO but got %r' % line)
subnets.append((int(width), bool(int(exclude)), ip))
try:
if line:
debug1('firewall manager: starting transproxy.\n')
do_wait = do_it(port, dnsport, subnets)
sys.stdout.write('STARTED\n')
try:
sys.stdout.flush()
except IOError:
# the parent process died for some reason; he's surely been loud
# enough, so no reason to report another error
return
# Now we wait until EOF or any other kind of exception. We need
# to stay running so that we don't need a *second* password
# authentication at shutdown time - that cleanup is important!
while 1:
if do_wait: do_wait()
line = sys.stdin.readline(128)
if line.startswith('HOST '):
(name,ip) = line[5:].strip().split(',', 1)
hostmap[name] = ip
rewrite_etc_hosts(port)
elif line:
raise Fatal('expected EOF, got %r' % line)
else:
break
finally:
try:
debug1('firewall manager: undoing changes.\n')
except:
pass
do_it(port, 0, [])
restore_etc_hosts(port)

130
main.py
View File

@ -1,130 +0,0 @@
import sys, os, re
import helpers, options, client, server, firewall, hostwatch
import compat.ssubprocess as ssubprocess
from helpers import *
# list of:
# 1.2.3.4/5 or just 1.2.3.4
def parse_subnets(subnets_str):
subnets = []
for s in subnets_str:
m = re.match(r'(\d+)(?:\.(\d+)\.(\d+)\.(\d+))?(?:/(\d+))?$', s)
if not m:
raise Fatal('%r is not a valid IP subnet format' % s)
(a,b,c,d,width) = m.groups()
(a,b,c,d) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0))
if width == None:
width = 32
else:
width = int(width)
if a > 255 or b > 255 or c > 255 or d > 255:
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a,b,c,d))
if width > 32:
raise Fatal('*/%d is greater than the maximum of 32' % width)
subnets.append(('%d.%d.%d.%d' % (a,b,c,d), width))
return subnets
# 1.2.3.4:567 or just 1.2.3.4 or just 567
def parse_ipport(s):
s = str(s)
m = re.match(r'(?:(\d+)\.(\d+)\.(\d+)\.(\d+))?(?::)?(?:(\d+))?$', s)
if not m:
raise Fatal('%r is not a valid IP:port format' % s)
(a,b,c,d,port) = m.groups()
(a,b,c,d,port) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0),
int(port or 0))
if a > 255 or b > 255 or c > 255 or d > 255:
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a,b,c,d))
if port > 65535:
raise Fatal('*:%d is greater than the maximum of 65535' % port)
if a == None:
a = b = c = d = 0
return ('%d.%d.%d.%d' % (a,b,c,d), port)
optspec = """
sshuttle [-l [ip:]port] [-r [username@]sshserver[:port]] <subnets...>
sshuttle --server
sshuttle --firewall <port> <subnets...>
sshuttle --hostwatch
--
l,listen= transproxy to this ip address and port number [127.0.0.1:0]
H,auto-hosts scan for remote hostnames and update local /etc/hosts
N,auto-nets automatically determine subnets to route
dns capture local DNS requests and forward to the remote DNS server
python= path to python interpreter on the remote server
r,remote= ssh hostname (and optional username) of remote sshuttle server
x,exclude= exclude this subnet (can be used more than once)
v,verbose increase debug message verbosity
e,ssh-cmd= the command to use to connect to the remote [ssh]
seed-hosts= with -H, use these hostnames for initial scan (comma-separated)
no-latency-control sacrifice latency to improve bandwidth benchmarks
wrap= restart counting channel numbers after this number (for testing)
D,daemon run in the background as a daemon
syslog send log messages to syslog (default if you use --daemon)
pidfile= pidfile name (only if using --daemon) [./sshuttle.pid]
server (internal use only)
firewall (internal use only)
hostwatch (internal use only)
"""
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[2:])
if opt.daemon:
opt.syslog = 1
if opt.wrap:
import ssnet
ssnet.MAX_CHANNEL = int(opt.wrap)
helpers.verbose = opt.verbose
try:
if opt.server:
if len(extra) != 0:
o.fatal('no arguments expected')
server.latency_control = opt.latency_control
sys.exit(server.main())
elif opt.firewall:
if len(extra) != 2:
o.fatal('exactly two arguments expected')
sys.exit(firewall.main(int(extra[0]), int(extra[1]), opt.syslog))
elif opt.hostwatch:
sys.exit(hostwatch.hw_main(extra))
else:
if len(extra) < 1 and not opt.auto_nets:
o.fatal('at least one subnet (or -N) expected')
includes = extra
excludes = ['127.0.0.0/8']
for k,v in flags:
if k in ('-x','--exclude'):
excludes.append(v)
remotename = opt.remote
if remotename == '' or remotename == '-':
remotename = None
if opt.seed_hosts and not opt.auto_hosts:
o.fatal('--seed-hosts only works if you also use -H')
if opt.seed_hosts:
sh = re.split(r'[\s,]+', (opt.seed_hosts or "").strip())
elif opt.auto_hosts:
sh = []
else:
sh = None
sys.exit(client.main(parse_ipport(opt.listen or '0.0.0.0:0'),
opt.ssh_cmd,
remotename,
opt.python,
opt.latency_control,
opt.dns,
sh,
opt.auto_nets,
parse_subnets(includes),
parse_subnets(excludes),
opt.syslog, opt.daemon, opt.pidfile))
except Fatal, e:
log('fatal: %s\n' % e)
sys.exit(99)
except KeyboardInterrupt:
log('\n')
log('Keyboard interrupt: exiting.\n')
sys.exit(1)

1
requirements.txt Normal file
View File

@ -0,0 +1 @@
setuptools_scm

6
run Executable file
View File

@ -0,0 +1,6 @@
#!/bin/sh
if python3.5 -V >/dev/null 2>&1; then
exec python3.5 -m "sshuttle" "$@"
else
exec python -m "sshuttle" "$@"
fi

247
server.py
View File

@ -1,247 +0,0 @@
import re, struct, socket, select, traceback, time
if not globals().get('skip_imports'):
import ssnet, helpers, hostwatch
import compat.ssubprocess as ssubprocess
from ssnet import SockWrapper, Handler, Proxy, Mux, MuxWrapper
from helpers import *
def _ipmatch(ipstr):
if ipstr == 'default':
ipstr = '0.0.0.0/0'
m = re.match(r'^(\d+(\.\d+(\.\d+(\.\d+)?)?)?)(?:/(\d+))?$', ipstr)
if m:
g = m.groups()
ips = g[0]
width = int(g[4] or 32)
if g[1] == None:
ips += '.0.0.0'
width = min(width, 8)
elif g[2] == None:
ips += '.0.0'
width = min(width, 16)
elif g[3] == None:
ips += '.0'
width = min(width, 24)
return (struct.unpack('!I', socket.inet_aton(ips))[0], width)
def _ipstr(ip, width):
if width >= 32:
return ip
else:
return "%s/%d" % (ip, width)
def _maskbits(netmask):
if not netmask:
return 32
for i in range(32):
if netmask[0] & _shl(1, i):
return 32-i
return 0
def _shl(n, bits):
return n * int(2**bits)
def _list_routes():
argv = ['netstat', '-rn']
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE)
routes = []
for line in p.stdout:
cols = re.split(r'\s+', line)
ipw = _ipmatch(cols[0])
if not ipw:
continue # some lines won't be parseable; never mind
maskw = _ipmatch(cols[2]) # linux only
mask = _maskbits(maskw) # returns 32 if maskw is null
width = min(ipw[1], mask)
ip = ipw[0] & _shl(_shl(1, width) - 1, 32-width)
routes.append((socket.inet_ntoa(struct.pack('!I', ip)), width))
rv = p.wait()
if rv != 0:
log('WARNING: %r returned %d\n' % (argv, rv))
log('WARNING: That prevents --auto-nets from working.\n')
return routes
def list_routes():
for (ip,width) in _list_routes():
if not ip.startswith('0.') and not ip.startswith('127.'):
yield (ip,width)
def _exc_dump():
exc_info = sys.exc_info()
return ''.join(traceback.format_exception(*exc_info))
def start_hostwatch(seed_hosts):
s1,s2 = socket.socketpair()
pid = os.fork()
if not pid:
# child
rv = 99
try:
try:
s2.close()
os.dup2(s1.fileno(), 1)
os.dup2(s1.fileno(), 0)
s1.close()
rv = hostwatch.hw_main(seed_hosts) or 0
except Exception, e:
log('%s\n' % _exc_dump())
rv = 98
finally:
os._exit(rv)
s1.close()
return pid,s2
class Hostwatch:
def __init__(self):
self.pid = 0
self.sock = None
class DnsProxy(Handler):
def __init__(self, mux, chan, request):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
Handler.__init__(self, [sock])
self.timeout = time.time()+30
self.mux = mux
self.chan = chan
self.tries = 0
self.peer = None
self.request = request
self.sock = sock
self.sock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
self.try_send()
def try_send(self):
if self.tries >= 3:
return
self.tries += 1
self.peer = resolvconf_random_nameserver()
self.sock.connect((self.peer, 53))
debug2('DNS: sending to %r\n' % self.peer)
try:
self.sock.send(self.request)
except socket.error, e:
if e.args[0] in [errno.ECONNREFUSED, errno.EHOSTUNREACH]:
# might have been spurious; try again.
# Note: these errors sometimes are reported by recv(),
# and sometimes by send(). We have to catch both.
debug2('DNS send to %r: %s\n' % (self.peer, e))
self.try_send()
return
else:
log('DNS send to %r: %s\n' % (self.peer, e))
return
def callback(self):
try:
data = self.sock.recv(4096)
except socket.error, e:
if e.args[0] in [errno.ECONNREFUSED, errno.EHOSTUNREACH]:
# might have been spurious; try again.
# Note: these errors sometimes are reported by recv(),
# and sometimes by send(). We have to catch both.
debug2('DNS recv from %r: %s\n' % (self.peer, e))
self.try_send()
return
else:
log('DNS recv from %r: %s\n' % (self.peer, e))
return
debug2('DNS response: %d bytes\n' % len(data))
self.mux.send(self.chan, ssnet.CMD_DNS_RESPONSE, data)
self.ok = False
def main():
if helpers.verbose >= 1:
helpers.logprefix = ' s: '
else:
helpers.logprefix = 'server: '
debug1('latency control setting = %r\n' % latency_control)
routes = list(list_routes())
debug1('available routes:\n')
for r in routes:
debug1(' %s/%d\n' % r)
# synchronization header
sys.stdout.write('SSHUTTLE0001')
sys.stdout.flush()
handlers = []
mux = Mux(socket.fromfd(sys.stdin.fileno(),
socket.AF_INET, socket.SOCK_STREAM),
socket.fromfd(sys.stdout.fileno(),
socket.AF_INET, socket.SOCK_STREAM))
handlers.append(mux)
routepkt = ''
for r in routes:
routepkt += '%s,%d\n' % r
mux.send(0, ssnet.CMD_ROUTES, routepkt)
hw = Hostwatch()
hw.leftover = ''
def hostwatch_ready():
assert(hw.pid)
content = hw.sock.recv(4096)
if content:
lines = (hw.leftover + content).split('\n')
if lines[-1]:
# no terminating newline: entry isn't complete yet!
hw.leftover = lines.pop()
lines.append('')
else:
hw.leftover = ''
mux.send(0, ssnet.CMD_HOST_LIST, '\n'.join(lines))
else:
raise Fatal('hostwatch process died')
def got_host_req(data):
if not hw.pid:
(hw.pid,hw.sock) = start_hostwatch(data.strip().split())
handlers.append(Handler(socks = [hw.sock],
callback = hostwatch_ready))
mux.got_host_req = got_host_req
def new_channel(channel, data):
(dstip,dstport) = data.split(',', 1)
dstport = int(dstport)
outwrap = ssnet.connect_dst(dstip,dstport)
handlers.append(Proxy(MuxWrapper(mux, channel), outwrap))
mux.new_channel = new_channel
dnshandlers = {}
def dns_req(channel, data):
debug2('Incoming DNS request.\n')
h = DnsProxy(mux, channel, data)
handlers.append(h)
dnshandlers[channel] = h
mux.got_dns_req = dns_req
while mux.ok:
if hw.pid:
assert(hw.pid > 0)
(rpid, rv) = os.waitpid(hw.pid, os.WNOHANG)
if rpid:
raise Fatal('hostwatch exited unexpectedly: code 0x%04x\n' % rv)
ssnet.runonce(handlers, mux)
if latency_control:
mux.check_fullness()
mux.callback()
if dnshandlers:
now = time.time()
for channel,h in dnshandlers.items():
if h.timeout < now or not h.ok:
del dnshandlers[channel]
h.ok = False

61
setup.py Executable file
View File

@ -0,0 +1,61 @@
#!/usr/bin/env python
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
def version_scheme(version):
from setuptools_scm.version import guess_next_dev_version
version = guess_next_dev_version(version)
return version.lstrip("v")
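# Illustrative intent (not exhaustive): a release tag such as "v0.77.1" should
# publish as "0.77.1"; between tags, setuptools_scm's guess_next_dev_version()
# produces a ".devN"-style pre-release, and any leading "v" is stripped the
# same way.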
setup(
name="sshuttle",
use_scm_version={
'write_to': "sshuttle/version.py",
'version_scheme': version_scheme,
},
setup_requires=['setuptools_scm'],
# version=version,
url='https://github.com/sshuttle/sshuttle',
author='Brian May',
author_email='brian@linuxpenguins.xyz',
description='Full-featured VPN over an SSH tunnel',
packages=find_packages(),
license="GPL2+",
long_description=open('README.rst').read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: "
"GNU General Public License v2 or later (GPLv2+)",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Topic :: System :: Networking",
],
entry_points={
'console_scripts': [
'sshuttle = sshuttle.cmdline:main',
],
},
tests_require=['pytest', 'mock'],
keywords="ssh vpn",
)

106
ssh.py
View File

@ -1,106 +0,0 @@
import sys, os, re, socket, zlib
import compat.ssubprocess as ssubprocess
import helpers
from helpers import *
def readfile(name):
basedir = os.path.dirname(os.path.abspath(sys.argv[0]))
path = [basedir] + sys.path
for d in path:
fullname = os.path.join(d, name)
if os.path.exists(fullname):
return open(fullname, 'rb').read()
raise Exception("can't find file %r in any of %r" % (name, path))
def empackage(z, filename, data=None):
(path,basename) = os.path.split(filename)
if not data:
data = readfile(filename)
content = z.compress(data)
content += z.flush(zlib.Z_SYNC_FLUSH)
return '%s\n%d\n%s' % (basename, len(content), content)
def connect(ssh_cmd, rhostport, python, stderr, options):
main_exe = sys.argv[0]
portl = []
rhostIsIPv6 = False
if (rhostport or '').count(':') > 1:
rhostIsIPv6 = True
if rhostport.count(']') or rhostport.count('['):
result = rhostport.split(']')
rhost = result[0].strip('[')
if len(result) > 1:
result[1] = result[1].strip(':')
if result[1] is not '':
portl = ['-p', str(int(result[1]))]
else: # can't disambiguate IPv6 colons and a port number. pass the hostname through.
rhost = rhostport
else: # IPv4
l = (rhostport or '').split(':', 1)
rhost = l[0]
if len(l) > 1:
portl = ['-p', str(int(l[1]))]
if rhost == '-':
rhost = None
ipv6flag = []
if rhostIsIPv6:
ipv6flag = ['-6']
z = zlib.compressobj(1)
content = readfile('assembler.py')
optdata = ''.join("%s=%r\n" % (k,v) for (k,v) in options.items())
content2 = (empackage(z, 'cmdline_options.py', optdata) +
empackage(z, 'helpers.py') +
empackage(z, 'compat/ssubprocess.py') +
empackage(z, 'ssnet.py') +
empackage(z, 'hostwatch.py') +
empackage(z, 'server.py') +
"\n")
pyscript = r"""
import sys;
skip_imports=1;
verbosity=%d;
exec compile(sys.stdin.read(%d), "assembler.py", "exec")
""" % (helpers.verbose or 0, len(content))
pyscript = re.sub(r'\s+', ' ', pyscript.strip())
if not rhost:
# ignore the --python argument when running locally; we already know
# which python version works.
argv = [sys.argv[1], '-c', pyscript]
else:
if ssh_cmd:
sshl = ssh_cmd.split(' ')
else:
sshl = ['ssh']
if python:
pycmd = "'%s' -c '%s'" % (python, pyscript)
else:
pycmd = ("P=python2; $P -V 2>/dev/null || P=python; "
"\"$P\" -c '%s'") % pyscript
argv = (sshl +
portl +
ipv6flag +
[rhost, '--', pycmd])
(s1,s2) = socket.socketpair()
def setup():
# runs in the child process
s2.close()
s1a,s1b = os.dup(s1.fileno()), os.dup(s1.fileno())
s1.close()
debug2('executing: %r\n' % argv)
p = ssubprocess.Popen(argv, stdin=s1a, stdout=s1b, preexec_fn=setup,
close_fds=True, stderr=stderr)
os.close(s1a)
os.close(s1b)
s2.sendall(content)
s2.sendall(content2)
return p, s2

View File

@ -1,7 +0,0 @@
#!/bin/sh
DIR=$(dirname "$0")
if python2 -V 2>/dev/null; then
exec python2 "$DIR/main.py" python2 "$@"
else
exec python "$DIR/main.py" python "$@"
fi

4
sshuttle/__main__.py Normal file
View File

@ -0,0 +1,4 @@
"""Coverage.py's main entry point."""
import sys
from sshuttle.cmdline import main
sys.exit(main())

36
sshuttle/assembler.py Normal file
View File

@ -0,0 +1,36 @@
import sys
import zlib
import imp
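# ``stdin`` and ``verbosity`` are not defined in this file; they are expected
# to be provided by the small bootstrap snippet the client sends over ssh
# before exec'ing this module's source. Each module then arrives framed as a
# name line, a byte-count line, and that many bytes of zlib-compressed source,
# which the loop below decompresses, compiles, and registers in sys.modules.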
z = zlib.decompressobj()
while 1:
name = stdin.readline().strip()
if name:
name = name.decode("ASCII")
nbytes = int(stdin.readline())
if verbosity >= 2:
sys.stderr.write('server: assembling %r (%d bytes)\n'
% (name, nbytes))
content = z.decompress(stdin.read(nbytes))
module = imp.new_module(name)
parent, _, parent_name = name.rpartition(".")
if parent != "":
setattr(sys.modules[parent], parent_name, module)
code = compile(content, name, "exec")
exec(code, module.__dict__)
sys.modules[name] = module
else:
break
sys.stderr.flush()
sys.stdout.flush()
import sshuttle.helpers
sshuttle.helpers.verbose = verbosity
import sshuttle.cmdline_options as options
from sshuttle.server import main
main(options.latency_control)

721
sshuttle/client.py Normal file
View File

@ -0,0 +1,721 @@
import socket
import errno
import re
import signal
import time
import subprocess as ssubprocess
import sshuttle.helpers as helpers
import os
import sshuttle.ssnet as ssnet
import sshuttle.ssh as ssh
import sshuttle.ssyslog as ssyslog
import sys
import platform
from sshuttle.ssnet import SockWrapper, Handler, Proxy, Mux, MuxWrapper
from sshuttle.helpers import log, debug1, debug2, debug3, Fatal, islocal, \
resolvconf_nameservers
from sshuttle.methods import get_method, Features
_extra_fd = os.open('/dev/null', os.O_RDONLY)
def got_signal(signum, frame):
log('exiting on signal %d\n' % signum)
sys.exit(1)
_pidname = None
def check_daemon(pidfile):
global _pidname
_pidname = os.path.abspath(pidfile)
try:
oldpid = open(_pidname).read(1024)
except IOError as e:
if e.errno == errno.ENOENT:
return # no pidfile, ok
else:
raise Fatal("can't read %s: %s" % (_pidname, e))
if not oldpid:
os.unlink(_pidname)
return # invalid pidfile, ok
oldpid = int(oldpid.strip() or 0)
if oldpid <= 0:
os.unlink(_pidname)
return # invalid pidfile, ok
try:
os.kill(oldpid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
os.unlink(_pidname)
return # outdated pidfile, ok
elif e.errno == errno.EPERM:
pass
else:
raise
raise Fatal("%s: sshuttle is already running (pid=%d)"
% (_pidname, oldpid))
def daemonize():
if os.fork():
os._exit(0)
os.setsid()
if os.fork():
os._exit(0)
outfd = os.open(_pidname, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o666)
try:
os.write(outfd, b'%d\n' % os.getpid())
finally:
os.close(outfd)
os.chdir("/")
# Normal exit when killed, or try/finally won't work and the pidfile won't
# be deleted.
signal.signal(signal.SIGTERM, got_signal)
si = open('/dev/null', 'r+')
os.dup2(si.fileno(), 0)
os.dup2(si.fileno(), 1)
si.close()
def daemon_cleanup():
try:
os.unlink(_pidname)
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
class MultiListener:
def __init__(self, type=socket.SOCK_STREAM, proto=0):
self.type = type
self.proto = proto
self.v6 = None
self.v4 = None
self.bind_called = False
def setsockopt(self, level, optname, value):
assert(self.bind_called)
if self.v6:
self.v6.setsockopt(level, optname, value)
if self.v4:
self.v4.setsockopt(level, optname, value)
def add_handler(self, handlers, callback, method, mux):
assert(self.bind_called)
socks = []
if self.v6:
socks.append(self.v6)
if self.v4:
socks.append(self.v4)
handlers.append(
Handler(
socks,
lambda sock: callback(sock, method, mux, handlers)
)
)
def listen(self, backlog):
assert(self.bind_called)
if self.v6:
self.v6.listen(backlog)
if self.v4:
try:
self.v4.listen(backlog)
except socket.error as e:
# on some systems v4 bind will fail if the v6 succeeded,
# in this case the v6 socket will receive v4 too.
if e.errno == errno.EADDRINUSE and self.v6:
self.v4 = None
else:
raise e
def bind(self, address_v6, address_v4):
assert(not self.bind_called)
self.bind_called = True
if address_v6 is not None:
self.v6 = socket.socket(socket.AF_INET6, self.type, self.proto)
self.v6.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.v6.bind(address_v6)
else:
self.v6 = None
if address_v4 is not None:
self.v4 = socket.socket(socket.AF_INET, self.type, self.proto)
self.v4.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.v4.bind(address_v4)
else:
self.v4 = None
def print_listening(self, what):
assert(self.bind_called)
if self.v6:
listenip = self.v6.getsockname()
debug1('%s listening on %r.\n' % (what, listenip))
debug2('%s listening with %r.\n' % (what, self.v6))
if self.v4:
listenip = self.v4.getsockname()
debug1('%s listening on %r.\n' % (what, listenip))
debug2('%s listening with %r.\n' % (what, self.v4))
class FirewallClient:
def __init__(self, method_name):
self.auto_nets = []
python_path = os.path.dirname(os.path.dirname(__file__))
argvbase = ([sys.executable, sys.argv[0]] +
['-v'] * (helpers.verbose or 0) +
['--method', method_name] +
['--firewall'])
if ssyslog._p:
argvbase += ['--syslog']
argv_tries = [
['sudo', '-p', '[local sudo] Password: ',
('PYTHONPATH=%s' % python_path), '--'] + argvbase,
argvbase
]
# we can't use stdin/stdout=subprocess.PIPE here, as we normally would,
# because stupid Linux 'su' requires that stdin be attached to a tty.
# Instead, attach a *bidirectional* socket to its stdout, and use
# that for talking in both directions.
(s1, s2) = socket.socketpair()
def setup():
# run in the child process
s2.close()
e = None
if os.getuid() == 0:
argv_tries = argv_tries[-1:] # last entry only
for argv in argv_tries:
try:
if argv[0] == 'su':
sys.stderr.write('[local su] ')
self.p = ssubprocess.Popen(argv, stdout=s1, preexec_fn=setup)
e = None
break
except OSError as e:
pass
self.argv = argv
s1.close()
if sys.version_info < (3, 0):
# python 2.7
self.pfile = s2.makefile('wb+')
else:
# python 3.5
self.pfile = s2.makefile('rwb')
if e:
log('Spawning firewall manager: %r\n' % self.argv)
raise Fatal(e)
line = self.pfile.readline()
self.check()
if line[0:5] != b'READY':
raise Fatal('%r expected READY, got %r' % (self.argv, line))
method_name = line[6:-1]
self.method = get_method(method_name.decode("ASCII"))
self.method.set_firewall(self)
def setup(self, subnets_include, subnets_exclude, nslist,
redirectport_v6, redirectport_v4, dnsport_v6, dnsport_v4, udp):
self.subnets_include = subnets_include
self.subnets_exclude = subnets_exclude
self.nslist = nslist
self.redirectport_v6 = redirectport_v6
self.redirectport_v4 = redirectport_v4
self.dnsport_v6 = dnsport_v6
self.dnsport_v4 = dnsport_v4
self.udp = udp
def check(self):
rv = self.p.poll()
if rv:
raise Fatal('%r returned %d' % (self.argv, rv))
def start(self):
self.pfile.write(b'ROUTES\n')
for (family, ip, width) in self.subnets_include + self.auto_nets:
self.pfile.write(b'%d,%d,0,%s\n'
% (family, width, ip.encode("ASCII")))
for (family, ip, width) in self.subnets_exclude:
self.pfile.write(b'%d,%d,1,%s\n'
% (family, width, ip.encode("ASCII")))
self.pfile.write(b'NSLIST\n')
for (family, ip) in self.nslist:
self.pfile.write(b'%d,%s\n'
% (family, ip.encode("ASCII")))
self.pfile.write(
b'PORTS %d,%d,%d,%d\n'
% (self.redirectport_v6, self.redirectport_v4,
self.dnsport_v6, self.dnsport_v4))
udp = 0
if self.udp:
udp = 1
self.pfile.write(b'GO %d\n' % udp)
self.pfile.flush()
line = self.pfile.readline()
self.check()
if line != b'STARTED\n':
raise Fatal('%r expected STARTED, got %r' % (self.argv, line))
def sethostip(self, hostname, ip):
assert(not re.search(b'[^-\w]', hostname))
assert(not re.search(b'[^0-9.]', ip))
self.pfile.write(b'HOST %s,%s\n' % (hostname, ip))
self.pfile.flush()
def done(self):
self.pfile.close()
rv = self.p.wait()
if rv:
raise Fatal('cleanup: %r returned %d' % (self.argv, rv))
dnsreqs = {}
udp_by_src = {}
def expire_connections(now, mux):
remove = []
for chan, timeout in dnsreqs.items():
if timeout < now:
debug3('expiring dnsreqs channel=%d\n' % chan)
remove.append(chan)
del mux.channels[chan]
for chan in remove:
del dnsreqs[chan]
debug3('Remaining DNS requests: %d\n' % len(dnsreqs))
remove = []
for peer, (chan, timeout) in udp_by_src.items():
if timeout < now:
debug3('expiring UDP channel channel=%d peer=%r\n' % (chan, peer))
mux.send(chan, ssnet.CMD_UDP_CLOSE, b'')
remove.append(peer)
del mux.channels[chan]
for peer in remove:
del udp_by_src[peer]
debug3('Remaining UDP channels: %d\n' % len(udp_by_src))
def onaccept_tcp(listener, method, mux, handlers):
global _extra_fd
try:
sock, srcip = listener.accept()
except socket.error as e:
if e.args[0] in [errno.EMFILE, errno.ENFILE]:
debug1('Rejected incoming connection: too many open files!\n')
# free up an fd so we can eat the connection
os.close(_extra_fd)
try:
sock, srcip = listener.accept()
sock.close()
finally:
_extra_fd = os.open('/dev/null', os.O_RDONLY)
return
else:
raise
dstip = method.get_tcp_dstip(sock)
debug1('Accept TCP: %s:%r -> %s:%r.\n' % (srcip[0], srcip[1],
dstip[0], dstip[1]))
if dstip[1] == sock.getsockname()[1] and islocal(dstip[0], sock.family):
debug1("-- ignored: that's my address!\n")
sock.close()
return
chan = mux.next_channel()
if not chan:
log('warning: too many open channels. Discarded connection.\n')
sock.close()
return
mux.send(chan, ssnet.CMD_TCP_CONNECT, b'%d,%s,%d' %
(sock.family, dstip[0].encode("ASCII"), dstip[1]))
outwrap = MuxWrapper(mux, chan)
handlers.append(Proxy(SockWrapper(sock, sock), outwrap))
expire_connections(time.time(), mux)
def udp_done(chan, data, method, sock, dstip):
(src, srcport, data) = data.split(b",", 2)
srcip = (src, int(srcport))
debug3('doing send from %r to %r\n' % (srcip, dstip,))
method.send_udp(sock, srcip, dstip, data)
def onaccept_udp(listener, method, mux, handlers):
now = time.time()
t = method.recv_udp(listener, 4096)
if t is None:
return
srcip, dstip, data = t
debug1('Accept UDP: %r -> %r.\n' % (srcip, dstip,))
if srcip in udp_by_src:
chan, timeout = udp_by_src[srcip]
else:
chan = mux.next_channel()
mux.channels[chan] = lambda cmd, data: udp_done(
chan, data, method, listener, dstip=srcip)
mux.send(chan, ssnet.CMD_UDP_OPEN, b"%d" % listener.family)
udp_by_src[srcip] = chan, now + 30
hdr = b"%s,%d," % (dstip[0].encode("ASCII"), dstip[1])
mux.send(chan, ssnet.CMD_UDP_DATA, hdr + data)
expire_connections(now, mux)
def dns_done(chan, data, method, sock, srcip, dstip, mux):
debug3('dns_done: channel=%d src=%r dst=%r\n' % (chan, srcip, dstip))
del mux.channels[chan]
del dnsreqs[chan]
method.send_udp(sock, srcip, dstip, data)
def ondns(listener, method, mux, handlers):
now = time.time()
t = method.recv_udp(listener, 4096)
if t is None:
return
srcip, dstip, data = t
debug1('DNS request from %r to %r: %d bytes\n' % (srcip, dstip, len(data)))
chan = mux.next_channel()
dnsreqs[chan] = now + 30
mux.send(chan, ssnet.CMD_DNS_REQ, data)
mux.channels[chan] = lambda cmd, data: dns_done(
chan, data, method, listener, srcip=dstip, dstip=srcip, mux=mux)
expire_connections(now, mux)
def _main(tcp_listener, udp_listener, fw, ssh_cmd, remotename,
python, latency_control,
dns_listener, seed_hosts, auto_nets, daemon):
debug1('Starting client with Python version %s\n'
% platform.python_version())
method = fw.method
handlers = []
if helpers.verbose >= 1:
helpers.logprefix = 'c : '
else:
helpers.logprefix = 'client: '
debug1('connecting to server...\n')
try:
(serverproc, serversock) = ssh.connect(
ssh_cmd, remotename, python,
stderr=ssyslog._p and ssyslog._p.stdin,
options=dict(latency_control=latency_control))
except socket.error as e:
if e.args[0] == errno.EPIPE:
raise Fatal("failed to establish ssh session (1)")
else:
raise
mux = Mux(serversock, serversock)
handlers.append(mux)
expected = b'SSHUTTLE0001'
try:
v = 'x'
while v and v != b'\0':
v = serversock.recv(1)
v = 'x'
while v and v != b'\0':
v = serversock.recv(1)
initstring = serversock.recv(len(expected))
except socket.error as e:
if e.args[0] == errno.ECONNRESET:
raise Fatal("failed to establish ssh session (2)")
else:
raise
rv = serverproc.poll()
if rv:
raise Fatal('server died with error code %d' % rv)
if initstring != expected:
raise Fatal('expected server init string %r; got %r'
% (expected, initstring))
log('Connected.\n')
sys.stdout.flush()
if daemon:
daemonize()
log('daemonizing (%s).\n' % _pidname)
def onroutes(routestr):
if auto_nets:
for line in routestr.strip().split(b'\n'):
(family, ip, width) = line.split(b',', 2)
family = int(family)
width = int(width)
ip = ip.decode("ASCII")
if family == socket.AF_INET6 and tcp_listener.v6 is None:
debug2("Ignored auto net %d/%s/%d\n" % (family, ip, width))
elif family == socket.AF_INET and tcp_listener.v4 is None:
debug2("Ignored auto net %d/%s/%d\n" % (family, ip, width))
else:
debug2("Adding auto net %d/%s/%d\n" % (family, ip, width))
fw.auto_nets.append((family, ip, width))
# we definitely want to do this *after* starting ssh, or we might end
# up intercepting the ssh connection!
#
# Moreover, now that we have the --auto-nets option, we have to wait
# for the server to send us that message anyway. Even if we haven't
# set --auto-nets, we might as well wait for the message first, then
# ignore its contents.
mux.got_routes = None
fw.start()
mux.got_routes = onroutes
def onhostlist(hostlist):
debug2('got host list: %r\n' % hostlist)
for line in hostlist.strip().split():
if line:
name, ip = line.split(b',', 1)
fw.sethostip(name, ip)
mux.got_host_list = onhostlist
tcp_listener.add_handler(handlers, onaccept_tcp, method, mux)
if udp_listener:
udp_listener.add_handler(handlers, onaccept_udp, method, mux)
if dns_listener:
dns_listener.add_handler(handlers, ondns, method, mux)
if seed_hosts is not None:
debug1('seed_hosts: %r\n' % seed_hosts)
mux.send(0, ssnet.CMD_HOST_REQ, str.encode('\n'.join(seed_hosts)))
while 1:
rv = serverproc.poll()
if rv:
raise Fatal('server died with error code %d' % rv)
ssnet.runonce(handlers, mux)
if latency_control:
mux.check_fullness()
def main(listenip_v6, listenip_v4,
ssh_cmd, remotename, python, latency_control, dns, nslist,
method_name, seed_hosts, auto_nets,
subnets_include, subnets_exclude, daemon, pidfile):
if daemon:
try:
check_daemon(pidfile)
except Fatal as e:
log("%s\n" % e)
return 5
debug1('Starting sshuttle proxy.\n')
fw = FirewallClient(method_name)
# Get family specific subnet lists
if dns:
nslist += resolvconf_nameservers()
subnets = subnets_include + subnets_exclude # we don't care here
subnets_v6 = [i for i in subnets if i[0] == socket.AF_INET6]
nslist_v6 = [i for i in nslist if i[0] == socket.AF_INET6]
subnets_v4 = [i for i in subnets if i[0] == socket.AF_INET]
nslist_v4 = [i for i in nslist if i[0] == socket.AF_INET]
# Check features available
avail = fw.method.get_supported_features()
required = Features()
if listenip_v6 == "auto":
if avail.ipv6:
listenip_v6 = ('::1', 0)
else:
listenip_v6 = None
required.ipv6 = len(subnets_v6) > 0 or len(nslist_v6) > 0 \
or listenip_v6 is not None
required.udp = avail.udp
required.dns = len(nslist) > 0
fw.method.assert_features(required)
if required.ipv6 and listenip_v6 is None:
raise Fatal("IPv6 required but not listening.")
# display features enabled
debug1("IPv6 enabled: %r\n" % required.ipv6)
debug1("UDP enabled: %r\n" % required.udp)
debug1("DNS enabled: %r\n" % required.dns)
# bind to required ports
if listenip_v4 == "auto":
listenip_v4 = ('127.0.0.1', 0)
if listenip_v6 and listenip_v6[1] and listenip_v4 and listenip_v4[1]:
# if both ports given, no need to search for a spare port
ports = [0, ]
else:
# if at least one port missing, we have to search
ports = range(12300, 9000, -1)
# search for free ports and try to bind
last_e = None
redirectport_v6 = 0
redirectport_v4 = 0
bound = False
debug2('Binding redirector:')
for port in ports:
debug2(' %d' % port)
tcp_listener = MultiListener()
if required.udp:
udp_listener = MultiListener(socket.SOCK_DGRAM)
else:
udp_listener = None
if listenip_v6 and listenip_v6[1]:
lv6 = listenip_v6
redirectport_v6 = lv6[1]
elif listenip_v6:
lv6 = (listenip_v6[0], port)
redirectport_v6 = port
else:
lv6 = None
redirectport_v6 = 0
if listenip_v4 and listenip_v4[1]:
lv4 = listenip_v4
redirectport_v4 = lv4[1]
elif listenip_v4:
lv4 = (listenip_v4[0], port)
redirectport_v4 = port
else:
lv4 = None
redirectport_v4 = 0
try:
tcp_listener.bind(lv6, lv4)
if udp_listener:
udp_listener.bind(lv6, lv4)
bound = True
break
except socket.error as e:
if e.errno == errno.EADDRINUSE:
last_e = e
else:
raise e
debug2('\n')
if not bound:
assert(last_e)
raise last_e
tcp_listener.listen(10)
tcp_listener.print_listening("TCP redirector")
if udp_listener:
udp_listener.print_listening("UDP redirector")
bound = False
if required.dns:
# search for spare port for DNS
debug2('Binding DNS:')
ports = range(12300, 9000, -1)
for port in ports:
debug2(' %d' % port)
dns_listener = MultiListener(socket.SOCK_DGRAM)
if listenip_v6:
lv6 = (listenip_v6[0], port)
dnsport_v6 = port
else:
lv6 = None
dnsport_v6 = 0
if listenip_v4:
lv4 = (listenip_v4[0], port)
dnsport_v4 = port
else:
lv4 = None
dnsport_v4 = 0
try:
dns_listener.bind(lv6, lv4)
bound = True
break
except socket.error as e:
if e.errno == errno.EADDRINUSE:
last_e = e
else:
raise e
debug2('\n')
dns_listener.print_listening("DNS")
if not bound:
assert(last_e)
raise last_e
else:
dnsport_v6 = 0
dnsport_v4 = 0
dns_listener = None
# Last minute sanity checks.
# These should never fail.
# If these do fail, something is broken above.
if len(subnets_v6) > 0:
assert required.ipv6
if redirectport_v6 == 0:
raise Fatal("IPv6 subnets defined but not listening")
if len(nslist_v6) > 0:
assert required.dns
assert required.ipv6
if dnsport_v6 == 0:
raise Fatal("IPv6 ns servers defined but not listening")
if len(subnets_v4) > 0:
if redirectport_v4 == 0:
raise Fatal("IPv4 subnets defined but not listening")
if len(nslist_v4) > 0:
if dnsport_v4 == 0:
raise Fatal("IPv4 ns servers defined but not listening")
# setup method specific stuff on listeners
fw.method.setup_tcp_listener(tcp_listener)
if udp_listener:
fw.method.setup_udp_listener(udp_listener)
if dns_listener:
fw.method.setup_udp_listener(dns_listener)
# start the firewall
fw.setup(subnets_include, subnets_exclude, nslist,
redirectport_v6, redirectport_v4, dnsport_v6, dnsport_v4,
required.udp)
# start the client process
try:
return _main(tcp_listener, udp_listener, fw, ssh_cmd, remotename,
python, latency_control, dns_listener,
seed_hosts, auto_nets, daemon)
finally:
try:
if daemon:
# it's not our child anymore; can't waitpid
fw.p.returncode = 0
fw.done()
finally:
if daemon:
daemon_cleanup()
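For reference, the control stream FirewallClient.start() writes to the privileged helper is line-oriented: a ROUTES block, an NSLIST block, a PORTS line and a GO line. A sketch of what that stream looks like for one included subnet, one excluded host and one DNS server (all values made up):

    import socket

    subnets_include = [(socket.AF_INET, '10.0.0.0', 8)]
    subnets_exclude = [(socket.AF_INET, '10.1.2.3', 32)]
    nslist = [(socket.AF_INET, '10.0.0.53')]

    lines = [b'ROUTES\n']
    for family, ip, width in subnets_include:
        lines.append(b'%d,%d,0,%s\n' % (family, width, ip.encode('ascii')))
    for family, ip, width in subnets_exclude:
        lines.append(b'%d,%d,1,%s\n' % (family, width, ip.encode('ascii')))
    lines.append(b'NSLIST\n')
    for family, ip in nslist:
        lines.append(b'%d,%s\n' % (family, ip.encode('ascii')))
    lines.append(b'PORTS 0,12300,0,12299\n')  # redirect v6/v4, DNS v6/v4
    lines.append(b'GO 0\n')                   # 0 = UDP disabled
    print(b''.join(lines).decode('ascii'))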

240
sshuttle/cmdline.py Normal file

@ -0,0 +1,240 @@
import sys
import re
import socket
import sshuttle.helpers as helpers
import sshuttle.options as options
import sshuttle.client as client
import sshuttle.firewall as firewall
import sshuttle.hostwatch as hostwatch
import sshuttle.ssyslog as ssyslog
from sshuttle.helpers import family_ip_tuple, log, Fatal
# 1.2.3.4/5 or just 1.2.3.4
def parse_subnet4(s):
m = re.match(r'(\d+)(?:\.(\d+)\.(\d+)\.(\d+))?(?:/(\d+))?$', s)
if not m:
raise Fatal('%r is not a valid IP subnet format' % s)
(a, b, c, d, width) = m.groups()
(a, b, c, d) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0))
if width is None:
width = 32
else:
width = int(width)
if a > 255 or b > 255 or c > 255 or d > 255:
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a, b, c, d))
if width > 32:
raise Fatal('*/%d is greater than the maximum of 32' % width)
return(socket.AF_INET, '%d.%d.%d.%d' % (a, b, c, d), width)
# 1:2::3/64 or just 1:2::3
def parse_subnet6(s):
m = re.match(r'(?:([a-fA-F\d:]+))?(?:/(\d+))?$', s)
if not m:
raise Fatal('%r is not a valid IP subnet format' % s)
(net, width) = m.groups()
if width is None:
width = 128
else:
width = int(width)
if width > 128:
raise Fatal('*/%d is greater than the maximum of 128' % width)
return(socket.AF_INET6, net, width)
# Subnet file, supporting empty lines and hash-started comment lines
def parse_subnet_file(s):
try:
handle = open(s, 'r')
except OSError:
raise Fatal('Unable to open subnet file: %s' % s)
raw_config_lines = handle.readlines()
config_lines = []
for line_no, line in enumerate(raw_config_lines):
line = line.strip()
if len(line) == 0:
continue
if line[0] == '#':
continue
config_lines.append(line)
return config_lines
# list of:
# 1.2.3.4/5 or just 1.2.3.4
# 1:2::3/64 or just 1:2::3
def parse_subnets(subnets_str):
subnets = []
for s in subnets_str:
if ':' in s:
subnet = parse_subnet6(s)
else:
subnet = parse_subnet4(s)
subnets.append(subnet)
return subnets
# 1.2.3.4:567 or just 1.2.3.4 or just 567
def parse_ipport4(s):
s = str(s)
m = re.match(r'(?:(\d+)\.(\d+)\.(\d+)\.(\d+))?(?::)?(?:(\d+))?$', s)
if not m:
raise Fatal('%r is not a valid IP:port format' % s)
(a, b, c, d, port) = m.groups()
(a, b, c, d, port) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0),
int(port or 0))
if a > 255 or b > 255 or c > 255 or d > 255:
raise Fatal('%d.%d.%d.%d has numbers > 255' % (a, b, c, d))
if port > 65535:
raise Fatal('*:%d is greater than the maximum of 65535' % port)
if a is None:
a = b = c = d = 0
return ('%d.%d.%d.%d' % (a, b, c, d), port)
# [1:2::3]:456 or [1:2::3] or 456
def parse_ipport6(s):
s = str(s)
m = re.match(r'(?:\[([^]]*)])?(?::)?(?:(\d+))?$', s)
if not m:
raise Fatal('%s is not a valid IP:port format' % s)
(ip, port) = m.groups()
(ip, port) = (ip or '::', int(port or 0))
return (ip, port)
def parse_list(list):
return re.split(r'[\s,]+', list.strip()) if list else []
optspec = """
sshuttle [-l [ip:]port] [-r [username@]sshserver[:port]] <subnets...>
sshuttle --firewall <port> <subnets...>
sshuttle --hostwatch
--
l,listen= transproxy to this ip address and port number
H,auto-hosts scan for remote hostnames and update local /etc/hosts
N,auto-nets automatically determine subnets to route
dns capture local DNS requests and forward to the remote DNS server
ns-hosts= capture and forward remote DNS requests to the following servers
method= auto, nat, tproxy or pf
python= path to python interpreter on the remote server
r,remote= ssh hostname (and optional username) of remote sshuttle server
x,exclude= exclude this subnet (can be used more than once)
X,exclude-from= exclude the subnets in a file (whitespace separated)
v,verbose increase debug message verbosity
V,version print the sshuttle version number and exit
e,ssh-cmd= the command to use to connect to the remote [ssh]
seed-hosts= with -H, use these hostnames for initial scan (comma-separated)
no-latency-control sacrifice latency to improve bandwidth benchmarks
wrap= restart counting channel numbers after this number (for testing)
disable-ipv6 disables ipv6 support
D,daemon run in the background as a daemon
s,subnets= file where the subnets are stored, instead of on the command line
syslog send log messages to syslog (default if you use --daemon)
pidfile= pidfile name (only if using --daemon) [./sshuttle.pid]
server (internal use only)
firewall (internal use only)
hostwatch (internal use only)
"""
def main():
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])
if opt.version:
from sshuttle.version import version
print(version)
return 0
if opt.daemon:
opt.syslog = 1
if opt.wrap:
import sshuttle.ssnet as ssnet
ssnet.MAX_CHANNEL = int(opt.wrap)
helpers.verbose = opt.verbose or 0
try:
if opt.firewall:
if len(extra) != 0:
o.fatal('exactly zero arguments expected')
return firewall.main(opt.method, opt.syslog)
elif opt.hostwatch:
return hostwatch.hw_main(extra)
else:
if len(extra) < 1 and not opt.auto_nets and not opt.subnets:
o.fatal('at least one subnet, subnet file, or -N expected')
includes = extra
excludes = ['127.0.0.0/8']
for k, v in flags:
if k in ('-x', '--exclude'):
excludes.append(v)
if k in ('-X', '--exclude-from'):
excludes += open(v).read().split()
remotename = opt.remote
if remotename == '' or remotename == '-':
remotename = None
nslist = [family_ip_tuple(ns) for ns in parse_list(opt.ns_hosts)]
if opt.seed_hosts and not opt.auto_hosts:
o.fatal('--seed-hosts only works if you also use -H')
if opt.seed_hosts:
sh = re.split(r'[\s,]+', (opt.seed_hosts or "").strip())
elif opt.auto_hosts:
sh = []
else:
sh = None
if opt.subnets:
includes = parse_subnet_file(opt.subnets)
if not opt.method:
method_name = "auto"
elif opt.method in ["auto", "nat", "tproxy", "pf"]:
method_name = opt.method
else:
o.fatal("method_name %s not supported" % opt.method)
if opt.listen:
ipport_v6 = None
ipport_v4 = None
list = opt.listen.split(",")
for ip in list:
if '[' in ip and ']' in ip:
ipport_v6 = parse_ipport6(ip)
else:
ipport_v4 = parse_ipport4(ip)
else:
# parse_ipport4('127.0.0.1:0')
ipport_v4 = "auto"
# parse_ipport6('[::1]:0')
ipport_v6 = "auto" if not opt.disable_ipv6 else None
if opt.syslog:
ssyslog.start_syslog()
ssyslog.stderr_to_syslog()
return_code = client.main(ipport_v6, ipport_v4,
opt.ssh_cmd,
remotename,
opt.python,
opt.latency_control,
opt.dns,
nslist,
method_name,
sh,
opt.auto_nets,
parse_subnets(includes),
parse_subnets(excludes),
opt.daemon, opt.pidfile)
if return_code == 0:
log('Normal exit code, exiting...')
else:
log('Abnormal exit code %d detected, failing...' % return_code)
return return_code
except Fatal as e:
log('fatal: %s\n' % e)
return 99
except KeyboardInterrupt:
log('\n')
log('Keyboard interrupt: exiting.\n')
return 1
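The subnet parsers above normalise every command-line subnet into a (family, ip, width) tuple. A quick illustration, assuming the sshuttle package from this tree is importable:

    import socket
    from sshuttle.cmdline import parse_subnet4, parse_subnet6, parse_subnets

    assert parse_subnet4('10.0.0.0/8') == (socket.AF_INET, '10.0.0.0', 8)
    assert parse_subnet4('192.168.1.1') == (socket.AF_INET, '192.168.1.1', 32)
    assert parse_subnet6('2001:db8::/32') == (socket.AF_INET6, '2001:db8::', 32)
    # parse_subnets() picks a parser per entry based on whether ':' is present
    assert parse_subnets(['10.0.0.0/8', '2001:db8::/32']) == [
        (socket.AF_INET, '10.0.0.0', 8),
        (socket.AF_INET6, '2001:db8::', 32),
    ]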

264
sshuttle/firewall.py Normal file

@ -0,0 +1,264 @@
import errno
import socket
import signal
import sshuttle.ssyslog as ssyslog
import sys
import os
import platform
import traceback
from sshuttle.helpers import debug1, debug2, Fatal
from sshuttle.methods import get_auto_method, get_method
HOSTSFILE = '/etc/hosts'
def rewrite_etc_hosts(hostmap, port):
BAKFILE = '%s.sbak' % HOSTSFILE
APPEND = '# sshuttle-firewall-%d AUTOCREATED' % port
old_content = ''
st = None
try:
old_content = open(HOSTSFILE).read()
st = os.stat(HOSTSFILE)
except IOError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
if old_content.strip() and not os.path.exists(BAKFILE):
os.link(HOSTSFILE, BAKFILE)
tmpname = "%s.%d.tmp" % (HOSTSFILE, port)
f = open(tmpname, 'w')
for line in old_content.rstrip().split('\n'):
if line.find(APPEND) >= 0:
continue
f.write('%s\n' % line)
for (name, ip) in sorted(hostmap.items()):
f.write('%-30s %s\n' % ('%s %s' % (ip, name), APPEND))
f.close()
if st is not None:
os.chown(tmpname, st.st_uid, st.st_gid)
os.chmod(tmpname, st.st_mode)
else:
os.chown(tmpname, 0, 0)
os.chmod(tmpname, 0o600)
os.rename(tmpname, HOSTSFILE)
def restore_etc_hosts(port):
rewrite_etc_hosts({}, port)
# Isolate function that needs to be replaced for tests
def setup_daemon():
if os.getuid() != 0:
raise Fatal('you must be root (or enable su/sudo) to set the firewall')
# don't disappear if our controlling terminal or stdout/stderr
# disappears; we still have to clean up.
signal.signal(signal.SIGHUP, signal.SIG_IGN)
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
# ctrl-c shouldn't be passed along to me. When the main sshuttle dies,
# I'll die automatically.
os.setsid()
# because of limitations of the 'su' command, the *real* stdin/stdout
# are both attached to stdout initially. Clone stdout into stdin so we
# can read from it.
os.dup2(1, 0)
return sys.stdin, sys.stdout
# This is some voodoo for setting up the kernel's transparent
# proxying stuff. If subnets is empty, we just delete our sshuttle rules;
# otherwise we delete them, then make them from scratch.
#
# This code is supposed to clean up after itself by deleting its rules on
# exit. In case that fails, it's not the end of the world; future runs will
# supersede it in the transproxy list, at least, so the leftover rules
# are hopefully harmless.
def main(method_name, syslog):
stdin, stdout = setup_daemon()
hostmap = {}
debug1('firewall manager: Starting firewall with Python version %s\n'
% platform.python_version())
if method_name == "auto":
method = get_auto_method()
else:
method = get_method(method_name)
if syslog:
ssyslog.start_syslog()
ssyslog.stderr_to_syslog()
debug1('firewall manager: ready method name %s.\n' % method.name)
stdout.write('READY %s\n' % method.name)
stdout.flush()
# we wait until we get some input before creating the rules. That way,
# sshuttle can launch us as early as possible (and get sudo password
# authentication as early in the startup process as possible).
line = stdin.readline(128)
if not line:
return # parent died; nothing to do
subnets = []
if line != 'ROUTES\n':
raise Fatal('firewall: expected ROUTES but got %r' % line)
while 1:
line = stdin.readline(128)
if not line:
raise Fatal('firewall: expected route but got %r' % line)
elif line.startswith("NSLIST\n"):
break
try:
(family, width, exclude, ip) = line.strip().split(',', 3)
except:
raise Fatal('firewall: expected route or NSLIST but got %r' % line)
subnets.append((int(family), int(width), bool(int(exclude)), ip))
debug2('firewall manager: Got subnets: %r\n' % subnets)
nslist = []
if line != 'NSLIST\n':
raise Fatal('firewall: expected NSLIST but got %r' % line)
while 1:
line = stdin.readline(128)
if not line:
raise Fatal('firewall: expected nslist but got %r' % line)
elif line.startswith("PORTS "):
break
try:
(family, ip) = line.strip().split(',', 1)
except:
raise Fatal('firewall: expected nslist or PORTS but got %r' % line)
nslist.append((int(family), ip))
debug2('firewall manager: Got partial nslist: %r\n' % nslist)
debug2('firewall manager: Got nslist: %r\n' % nslist)
if not line.startswith('PORTS '):
raise Fatal('firewall: expected PORTS but got %r' % line)
_, _, ports = line.partition(" ")
ports = ports.split(",")
if len(ports) != 4:
raise Fatal('firewall: expected 4 ports but got %d' % len(ports))
port_v6 = int(ports[0])
port_v4 = int(ports[1])
dnsport_v6 = int(ports[2])
dnsport_v4 = int(ports[3])
assert(port_v6 >= 0)
assert(port_v6 <= 65535)
assert(port_v4 >= 0)
assert(port_v4 <= 65535)
assert(dnsport_v6 >= 0)
assert(dnsport_v6 <= 65535)
assert(dnsport_v4 >= 0)
assert(dnsport_v4 <= 65535)
debug2('firewall manager: Got ports: %d,%d,%d,%d\n'
% (port_v6, port_v4, dnsport_v6, dnsport_v4))
line = stdin.readline(128)
if not line:
raise Fatal('firewall: expected GO but got %r' % line)
elif not line.startswith("GO "):
raise Fatal('firewall: expected GO but got %r' % line)
_, _, udp = line.partition(" ")
udp = bool(int(udp))
debug2('firewall manager: Got udp: %r\n' % udp)
subnets_v6 = [i for i in subnets if i[0] == socket.AF_INET6]
nslist_v6 = [i for i in nslist if i[0] == socket.AF_INET6]
subnets_v4 = [i for i in subnets if i[0] == socket.AF_INET]
nslist_v4 = [i for i in nslist if i[0] == socket.AF_INET]
try:
debug1('firewall manager: setting up.\n')
if len(subnets_v6) > 0 or len(nslist_v6) > 0:
debug2('firewall manager: setting up IPv6.\n')
method.setup_firewall(
port_v6, dnsport_v6, nslist_v6,
socket.AF_INET6, subnets_v6, udp)
if len(subnets_v4) > 0 or len(nslist_v4) > 0:
debug2('firewall manager: setting up IPv4.\n')
method.setup_firewall(
port_v4, dnsport_v4, nslist_v4,
socket.AF_INET, subnets_v4, udp)
stdout.write('STARTED\n')
try:
stdout.flush()
except IOError:
# the parent process died for some reason; he's surely been loud
# enough, so no reason to report another error
return
# Now we wait until EOF or any other kind of exception. We need
# to stay running so that we don't need a *second* password
# authentication at shutdown time - that cleanup is important!
while 1:
line = stdin.readline(128)
if line.startswith('HOST '):
(name, ip) = line[5:].strip().split(',', 1)
hostmap[name] = ip
debug2('firewall manager: setting up /etc/hosts.\n')
rewrite_etc_hosts(hostmap, port_v6 or port_v4)
elif line:
if not method.firewall_command(line):
raise Fatal('firewall: expected command, got %r' % line)
else:
break
finally:
try:
debug1('firewall manager: undoing changes.\n')
except:
pass
try:
if len(subnets_v6) > 0 or len(nslist_v6) > 0:
debug2('firewall manager: undoing IPv6 changes.\n')
method.restore_firewall(port_v6, socket.AF_INET6, udp)
except:
try:
debug1("firewall manager: "
"Error trying to undo IPv6 firewall.\n")
for line in traceback.format_exc().splitlines():
debug1("---> %s\n" % line)
except:
pass
try:
if len(subnets_v4) > 0 or len(nslist_v4) > 0:
debug2('firewall manager: undoing IPv4 changes.\n')
method.restore_firewall(port_v4, socket.AF_INET, udp)
except:
try:
debug1("firewall manager: "
"Error trying to undo IPv4 firewall.\n")
for line in traceback.format_exc().splitlines():
debug1("firewall manager: ---> %s\n" % line)
except:
pass
try:
debug2('firewall manager: undoing /etc/hosts changes.\n')
restore_etc_hosts(port_v6 or port_v4)
except:
try:
debug1("firewall manager: "
"Error trying to undo /etc/hosts changes.\n")
for line in traceback.format_exc().splitlines():
debug1("firewall manager: ---> %s\n" % line)
except:
pass
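rewrite_etc_hosts() above tags every line it adds with a per-port marker so it can later remove exactly those lines. A small sketch of the entries it appends, with a made-up host map and port:

    hostmap = {'fileserver': '10.0.0.5', 'wiki': '10.0.0.7'}
    port = 12300
    APPEND = '# sshuttle-firewall-%d AUTOCREATED' % port
    for name, ip in sorted(hostmap.items()):
        print('%-30s %s' % ('%s %s' % (ip, name), APPEND))
    # prints lines like:
    #   10.0.0.5 fileserver            # sshuttle-firewall-12300 AUTOCREATED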

sshuttle/helpers.py

@ -1,26 +1,40 @@
import sys, os, socket
import sys
import socket
import errno
logprefix = ''
verbose = 0
def log(s):
global logprefix
try:
sys.stdout.flush()
sys.stderr.write(logprefix + s)
if s.find("\n") != -1:
prefix = logprefix
s = s.rstrip("\n")
for line in s.split("\n"):
sys.stderr.write(prefix + line + "\n")
prefix = "---> "
else:
sys.stderr.write(logprefix + s)
sys.stderr.flush()
except IOError:
# this could happen if stderr gets forcibly disconnected, eg. because
# our tty closes. That sucks, but it's no reason to abort the program.
pass
def debug1(s):
if verbose >= 1:
log(s)
def debug2(s):
if verbose >= 2:
log(s)
def debug3(s):
if verbose >= 3:
log(s)
@ -30,19 +44,12 @@ class Fatal(Exception):
pass
def list_contains_any(l, sub):
for i in sub:
if i in l:
return True
return False
def resolvconf_nameservers():
l = []
for line in open('/etc/resolv.conf'):
words = line.lower().split()
if len(words) >= 2 and words[0] == 'nameserver':
l.append(words[1])
l.append(family_ip_tuple(words[1]))
return l
@ -55,15 +62,15 @@ def resolvconf_random_nameserver():
random.shuffle(l)
return l[0]
else:
return '127.0.0.1'
return (socket.AF_INET, '127.0.0.1')
def islocal(ip):
sock = socket.socket()
def islocal(ip, family):
sock = socket.socket(family)
try:
try:
sock.bind((ip, 0))
except socket.error, e:
except socket.error as e:
if e.args[0] == errno.EADDRNOTAVAIL:
return False # not a local IP
else:
@ -73,3 +80,17 @@ def islocal(ip):
return True # it's a local IP, or there would have been an error
def family_ip_tuple(ip):
if ':' in ip:
return (socket.AF_INET6, ip)
else:
return (socket.AF_INET, ip)
def family_to_string(family):
if family == socket.AF_INET6:
return "AF_INET6"
elif family == socket.AF_INET:
return "AF_INET"
else:
return str(family)

sshuttle/hostwatch.py

@ -1,12 +1,19 @@
import time, socket, re, select, errno
if not globals().get('skip_imports'):
import compat.ssubprocess as ssubprocess
import helpers
from helpers import *
import time
import socket
import re
import select
import errno
import os
import sys
import platform
POLL_TIME = 60*15
import subprocess as ssubprocess
import sshuttle.helpers as helpers
from sshuttle.helpers import log, debug1, debug2, debug3
POLL_TIME = 60 * 15
NETSTAT_POLL_TIME = 30
CACHEFILE=os.path.expanduser('~/.sshuttle.hosts')
CACHEFILE = os.path.expanduser('~/.sshuttle.hosts')
_nmb_ok = True
@ -15,7 +22,7 @@ hostnames = {}
queue = {}
try:
null = open('/dev/null', 'wb')
except IOError, e:
except IOError as e:
log('warning: %s\n' % e)
null = os.popen("sh -c 'while read x; do :; done'", 'wb', 4096)
@ -28,9 +35,10 @@ def write_host_cache():
tmpname = '%s.%d.tmp' % (CACHEFILE, os.getpid())
try:
f = open(tmpname, 'wb')
for name,ip in sorted(hostnames.items()):
f.write('%s,%s\n' % (name, ip))
for name, ip in sorted(hostnames.items()):
f.write(('%s,%s\n' % (name, ip)).encode("ASCII"))
f.close()
os.chmod(tmpname, 0o600)
os.rename(tmpname, CACHEFILE)
finally:
try:
@ -42,7 +50,7 @@ def write_host_cache():
def read_host_cache():
try:
f = open(CACHEFILE)
except IOError, e:
except IOError as e:
if e.errno == errno.ENOENT:
return
else:
@ -50,18 +58,18 @@ def read_host_cache():
for line in f:
words = line.strip().split(',')
if len(words) == 2:
(name,ip) = words
(name, ip) = words
name = re.sub(r'[^-\w]', '-', name).strip()
ip = re.sub(r'[^0-9.]', '', ip).strip()
if name and ip:
found_host(name, ip)
def found_host(hostname, ip):
hostname = re.sub(r'\..*', '', hostname)
hostname = re.sub(r'[^-\w]', '_', hostname)
if (ip.startswith('127.') or ip.startswith('255.')
or hostname == 'localhost'):
if (ip.startswith('127.') or ip.startswith('255.')
or hostname == 'localhost'):
return
oldip = hostnames.get(hostname)
if oldip != ip:
@ -94,7 +102,7 @@ def _check_revdns(ip):
debug3('< %s\n' % r[0])
check_host(r[0])
found_host(r[0], ip)
except socket.herror, e:
except socket.herror:
pass
@ -105,7 +113,7 @@ def _check_dns(hostname):
debug3('< %s\n' % ip)
check_host(ip)
found_host(hostname, ip)
except socket.gaierror, e:
except socket.gaierror:
pass
@ -114,16 +122,16 @@ def _check_netstat():
argv = ['netstat', '-n']
try:
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
content = p.stdout.read()
content = p.stdout.read().decode("ASCII")
p.wait()
except OSError, e:
except OSError as e:
log('%r failed: %r\n' % (argv, e))
return
for ip in re.findall(r'\d+\.\d+\.\d+\.\d+', content):
debug3('< %s\n' % ip)
check_host(ip)
def _check_smb(hostname):
return
@ -136,7 +144,7 @@ def _check_smb(hostname):
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
lines = p.stdout.readlines()
p.wait()
except OSError, e:
except OSError as e:
log('%r failed: %r\n' % (argv, e))
_smb_ok = False
return
@ -187,13 +195,13 @@ def _check_nmb(hostname, is_workgroup, is_master):
global _nmb_ok
if not _nmb_ok:
return
argv = ['nmblookup'] + ['-M']*is_master + ['--', hostname]
argv = ['nmblookup'] + ['-M'] * is_master + ['--', hostname]
debug2(' > n%d%d: %s\n' % (is_workgroup, is_master, hostname))
try:
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE, stderr=null)
lines = p.stdout.readlines()
rv = p.wait()
except OSError, e:
except OSError as e:
log('%r failed: %r\n' % (argv, e))
_nmb_ok = False
return
@ -228,13 +236,13 @@ def check_workgroup(hostname):
def _enqueue(op, *args):
t = (op,args)
if queue.get(t) == None:
t = (op, args)
if queue.get(t) is None:
queue[t] = 0
def _stdin_still_ok(timeout):
r,w,x = select.select([sys.stdin.fileno()], [], [], timeout)
r, w, x = select.select([sys.stdin.fileno()], [], [], timeout)
if r:
b = os.read(sys.stdin.fileno(), 4096)
if not b:
@ -248,8 +256,11 @@ def hw_main(seed_hosts):
else:
helpers.logprefix = 'hostwatch: '
debug1('Starting hostwatch with Python version %s\n'
% platform.python_version())
read_host_cache()
_enqueue(_check_etc_hosts)
_enqueue(_check_netstat)
check_host('localhost')
@ -261,8 +272,8 @@ def hw_main(seed_hosts):
while 1:
now = time.time()
for t,last_polled in queue.items():
(op,args) = t
for t, last_polled in list(queue.items()):
(op, args) = t
if not _stdin_still_ok(0):
break
maxtime = POLL_TIME
@ -275,7 +286,7 @@ def hw_main(seed_hosts):
sys.stdout.flush()
except IOError:
break
# FIXME: use a smarter timeout based on oldest last_polled
if not _stdin_still_ok(1):
break

62
sshuttle/linux.py Normal file

@ -0,0 +1,62 @@
import socket
import subprocess as ssubprocess
from sshuttle.helpers import log, debug1, Fatal, family_to_string
def nonfatal(func, *args):
try:
func(*args)
except Fatal as e:
log('error: %s\n' % e)
def ipt_chain_exists(family, table, name):
if family == socket.AF_INET6:
cmd = 'ip6tables'
elif family == socket.AF_INET:
cmd = 'iptables'
else:
raise Exception('Unsupported family "%s"' % family_to_string(family))
argv = [cmd, '-t', table, '-nL']
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE)
for line in p.stdout:
if line.startswith(b'Chain %s ' % name.encode("ASCII")):
return True
rv = p.wait()
if rv:
raise Fatal('%r returned %d' % (argv, rv))
def ipt(family, table, *args):
if family == socket.AF_INET6:
argv = ['ip6tables', '-t', table] + list(args)
elif family == socket.AF_INET:
argv = ['iptables', '-t', table] + list(args)
else:
raise Exception('Unsupported family "%s"' % family_to_string(family))
debug1('>> %s\n' % ' '.join(argv))
rv = ssubprocess.call(argv)
if rv:
raise Fatal('%r returned %d' % (argv, rv))
_no_ttl_module = False
def ipt_ttl(family, *args):
global _no_ttl_module
if not _no_ttl_module:
# we avoid infinite loops by generating server-side connections
# with ttl 42. This makes the client side not recapture those
# connections, in case client == server.
try:
argsplus = list(args) + ['-m', 'ttl', '!', '--ttl', '42']
ipt(family, *argsplus)
except Fatal:
ipt(family, *args)
# we only get here if the non-ttl attempt succeeds
log('sshuttle: warning: your iptables is missing '
'the ttl module.\n')
_no_ttl_module = True
else:
ipt(family, *args)
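The ttl-42 trick in ipt_ttl() above exists so that traffic the server re-emits (when client and server are the same host) is not captured again by the client's rules. A sketch of the two argv shapes it can emit for one REDIRECT rule; chain name, subnet and port are examples only:

    ttl_suffix = ['-m', 'ttl', '!', '--ttl', '42']
    rule = ['iptables', '-t', 'nat', '-A', 'sshuttle-12300', '-j', 'REDIRECT',
            '--dest', '10.0.0.0/8', '-p', 'tcp', '--to-ports', '12300']
    print(' '.join(rule + ttl_suffix))  # first attempt: skip ttl-42 packets
    print(' '.join(rule))               # fallback if the ttl match module is missing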

105
sshuttle/methods/__init__.py Normal file

@ -0,0 +1,105 @@
import os
import importlib
import socket
import struct
import errno
from sshuttle.helpers import Fatal, debug3
def original_dst(sock):
try:
SO_ORIGINAL_DST = 80
SOCKADDR_MIN = 16
sockaddr_in = sock.getsockopt(socket.SOL_IP,
SO_ORIGINAL_DST, SOCKADDR_MIN)
(proto, port, a, b, c, d) = struct.unpack('!HHBBBB', sockaddr_in[:8])
# FIXME: decoding is IPv4 only.
assert(socket.htons(proto) == socket.AF_INET)
ip = '%d.%d.%d.%d' % (a, b, c, d)
return (ip, port)
except socket.error as e:
if e.args[0] == errno.ENOPROTOOPT:
return sock.getsockname()
raise
class Features(object):
pass
class BaseMethod(object):
def __init__(self, name):
self.firewall = None
self.name = name
def set_firewall(self, firewall):
self.firewall = firewall
def get_supported_features(self):
result = Features()
result.ipv6 = False
result.udp = False
result.dns = True
return result
def get_tcp_dstip(self, sock):
return original_dst(sock)
def recv_udp(self, udp_listener, bufsize):
debug3('Accept UDP using recvfrom.\n')
data, srcip = udp_listener.recvfrom(bufsize)
return (srcip, None, data)
def send_udp(self, sock, srcip, dstip, data):
if srcip is not None:
Fatal("Method %s send_udp does not support setting srcip to %r"
% (self.name, srcip))
sock.sendto(data, dstip)
def setup_tcp_listener(self, tcp_listener):
pass
def setup_udp_listener(self, udp_listener):
pass
def assert_features(self, features):
avail = self.get_supported_features()
for key in ["udp", "dns", "ipv6"]:
if getattr(features, key) and not getattr(avail, key):
raise Fatal(
"Feature %s not supported with method %s.\n" %
(key, self.name))
def setup_firewall(self, port, dnsport, nslist, family, subnets, udp):
raise NotImplementedError()
def restore_firewall(self, port, family, udp):
raise NotImplementedError()
def firewall_command(self, line):
return False
def _program_exists(name):
paths = (os.getenv('PATH') or os.defpath).split(os.pathsep)
for p in paths:
fn = '%s/%s' % (p, name)
if os.path.exists(fn):
return not os.path.isdir(fn) and os.access(fn, os.X_OK)
def get_method(method_name):
module = importlib.import_module("sshuttle.methods.%s" % method_name)
return module.Method(method_name)
def get_auto_method():
if _program_exists('iptables'):
method_name = "nat"
elif _program_exists('pfctl'):
method_name = "pf"
else:
raise Fatal(
"can't find either iptables or pfctl; check your PATH")
return get_method(method_name)
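get_method()/get_auto_method() above are the dispatch point the client and firewall manager use to pick a backend. A tiny usage illustration, assuming the package is importable (no root needed just to query features):

    from sshuttle.methods import get_method

    nat = get_method('nat')  # imports sshuttle.methods.nat and instantiates Method
    features = nat.get_supported_features()
    print(nat.name, features.ipv6, features.udp, features.dns)
    # -> nat False False True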

89
sshuttle/methods/nat.py Normal file

@ -0,0 +1,89 @@
import socket
from sshuttle.helpers import family_to_string
from sshuttle.linux import ipt, ipt_ttl, ipt_chain_exists, nonfatal
from sshuttle.methods import BaseMethod
class Method(BaseMethod):
# We name the chain based on the transproxy port number so that it's
# possible to run multiple copies of sshuttle at the same time. Of course,
# the multiple copies shouldn't have overlapping subnets, or only the most-
# recently-started one will win (because we use "-I OUTPUT 1" instead of
# "-A OUTPUT").
def setup_firewall(self, port, dnsport, nslist, family, subnets, udp):
# only ipv4 supported with NAT
if family != socket.AF_INET:
raise Exception(
'Address family "%s" unsupported by nat method_name'
% family_to_string(family))
if udp:
raise Exception("UDP not supported by nat method_name")
table = "nat"
def _ipt(*args):
return ipt(family, table, *args)
def _ipt_ttl(*args):
return ipt_ttl(family, table, *args)
chain = 'sshuttle-%s' % port
# basic cleanup/setup of chains
self.restore_firewall(port, family, udp)
_ipt('-N', chain)
_ipt('-F', chain)
_ipt('-I', 'OUTPUT', '1', '-j', chain)
_ipt('-I', 'PREROUTING', '1', '-j', chain)
# create new subnet entries. Note that we're sorting in a very
# particular order: we need to go from most-specific (largest
# swidth) to least-specific, and at any given level of specificity,
# we want excludes to come first. That's why the columns are in
# such a non-intuitive order.
for f, swidth, sexclude, snet \
in sorted(subnets, key=lambda s: s[1], reverse=True):
if sexclude:
_ipt('-A', chain, '-j', 'RETURN',
'--dest', '%s/%s' % (snet, swidth),
'-p', 'tcp')
else:
_ipt_ttl('-A', chain, '-j', 'REDIRECT',
'--dest', '%s/%s' % (snet, swidth),
'-p', 'tcp',
'--to-ports', str(port))
for f, ip in [i for i in nslist if i[0] == family]:
_ipt_ttl('-A', chain, '-j', 'REDIRECT',
'--dest', '%s/32' % ip,
'-p', 'udp',
'--dport', '53',
'--to-ports', str(dnsport))
def restore_firewall(self, port, family, udp):
# only ipv4 supported with NAT
if family != socket.AF_INET:
raise Exception(
'Address family "%s" unsupported by nat method_name'
% family_to_string(family))
if udp:
raise Exception("UDP not supported by nat method_name")
table = "nat"
def _ipt(*args):
return ipt(family, table, *args)
def _ipt_ttl(*args):
return ipt_ttl(family, table, *args)
chain = 'sshuttle-%s' % port
# basic cleanup/setup of chains
if ipt_chain_exists(family, table, chain):
nonfatal(_ipt, '-D', 'OUTPUT', '-j', chain)
nonfatal(_ipt, '-D', 'PREROUTING', '-j', chain)
nonfatal(_ipt, '-F', chain)
_ipt('-X', chain)
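The ordering comment in setup_firewall() above matters because iptables evaluates the chain top-down: sorting by prefix width, most specific first, ensures a narrow exclude is hit before the broader include that contains it. A toy illustration with made-up subnets (tuples are (family, width, exclude, ip), as produced by the firewall manager):

    import socket

    subnets = [
        (socket.AF_INET, 8,  False, '10.0.0.0'),     # include 10.0.0.0/8
        (socket.AF_INET, 24, True,  '10.1.2.0'),     # exclude 10.1.2.0/24
        (socket.AF_INET, 16, False, '192.168.0.0'),  # include 192.168.0.0/16
    ]
    for f, swidth, sexclude, snet in sorted(subnets,
                                            key=lambda s: s[1], reverse=True):
        print(('RETURN' if sexclude else 'REDIRECT'), '%s/%d' % (snet, swidth))
    # -> RETURN 10.1.2.0/24, then REDIRECT 192.168.0.0/16, then REDIRECT 10.0.0.0/8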

420
sshuttle/methods/pf.py Normal file

@ -0,0 +1,420 @@
import os
import sys
import re
import socket
import struct
import subprocess as ssubprocess
from fcntl import ioctl
from ctypes import c_char, c_uint8, c_uint16, c_uint32, Union, Structure, \
sizeof, addressof, memmove
from sshuttle.helpers import debug1, debug2, debug3, Fatal, family_to_string
from sshuttle.methods import BaseMethod
_pf_context = {'started_by_sshuttle': False, 'Xtoken': None}
_pf_fd = None
class Generic(object):
MAXPATHLEN = 1024
PF_CHANGE_ADD_TAIL = 2
PF_CHANGE_GET_TICKET = 6
PF_PASS = 0
PF_RDR = 8
PF_OUT = 2
ACTION_OFFSET = 0
POOL_TICKET_OFFSET = 8
ANCHOR_CALL_OFFSET = 1040
class pf_addr(Structure):
class _pfa(Union):
_fields_ = [("v4", c_uint32), # struct in_addr
("v6", c_uint32 * 4), # struct in6_addr
("addr8", c_uint8 * 16),
("addr16", c_uint16 * 8),
("addr32", c_uint32 * 4)]
_fields_ = [("pfa", _pfa)]
_anonymous_ = ("pfa",)
def __init__(self):
self.status = b''
self.pfioc_pooladdr = c_char * 1136
self.DIOCNATLOOK = (
(0x40000000 | 0x80000000) |
((sizeof(self.pfioc_natlook) & 0x1fff) << 16) |
((ord('D')) << 8) | (23))
self.DIOCCHANGERULE = (
(0x40000000 | 0x80000000) |
((sizeof(self.pfioc_rule) & 0x1fff) << 16) |
((ord('D')) << 8) | (26))
self.DIOCBEGINADDRS = (
(0x40000000 | 0x80000000) |
((sizeof(self.pfioc_pooladdr) & 0x1fff) << 16) |
((ord('D')) << 8) | (51))
def enable(self):
if b'INFO:\nStatus: Disabled' in self.status:
pfctl('-e')
_pf_context['started_by_sshuttle'] = True
def disable(self):
if _pf_context['started_by_sshuttle']:
pfctl('-d')
def query_nat(self, family, proto, src_ip, src_port, dst_ip, dst_port):
[proto, family, src_port, dst_port] = [
int(v) for v in [proto, family, src_port, dst_port]]
packed_src_ip = socket.inet_pton(family, src_ip)
packed_dst_ip = socket.inet_pton(family, dst_ip)
assert len(packed_src_ip) == len(packed_dst_ip)
length = len(packed_src_ip)
pnl = self.pfioc_natlook()
pnl.proto = proto
pnl.direction = self.PF_OUT
pnl.af = family
memmove(addressof(pnl.saddr), packed_src_ip, length)
memmove(addressof(pnl.daddr), packed_dst_ip, length)
self._add_natlook_ports(pnl, src_port, dst_port)
ioctl(pf_get_dev(), self.DIOCNATLOOK,
(c_char * sizeof(pnl)).from_address(addressof(pnl)))
ip = socket.inet_ntop(
pnl.af, (c_char * length).from_address(addressof(pnl.rdaddr)).raw)
port = socket.ntohs(self._get_natlook_port(pnl.rdxport))
return (ip, port)
def _add_natlook_ports(self, pnl, src_port, dst_port):
pnl.sxport = socket.htons(src_port)
pnl.dxport = socket.htons(dst_port)
def _get_natlook_port(self, xport):
return xport
def add_anchors(self, status=None):
if status is None:
status = pfctl('-s all')[0]
self.status = status
if b'\nanchor "sshuttle"' not in status:
self._add_anchor_rule(self.PF_PASS, b"sshuttle")
def _add_anchor_rule(self, type, name, pr=None):
if pr is None:
pr = self.pfioc_rule()
memmove(addressof(pr) + self.ANCHOR_CALL_OFFSET, name,
min(self.MAXPATHLEN, len(name))) # anchor_call = name
memmove(addressof(pr) + self.RULE_ACTION_OFFSET,
struct.pack('I', type), 4) # rule.action = type
memmove(addressof(pr) + self.ACTION_OFFSET, struct.pack(
'I', self.PF_CHANGE_GET_TICKET), 4) # action = PF_CHANGE_GET_TICKET
ioctl(pf_get_dev(), pf.DIOCCHANGERULE, pr)
memmove(addressof(pr) + self.ACTION_OFFSET, struct.pack(
'I', self.PF_CHANGE_ADD_TAIL), 4) # action = PF_CHANGE_ADD_TAIL
ioctl(pf_get_dev(), pf.DIOCCHANGERULE, pr)
def add_rules(self, rules):
assert isinstance(rules, bytes)
debug3("rules:\n" + rules.decode("ASCII"))
pfctl('-a sshuttle -f /dev/stdin', rules)
class FreeBsd(Generic):
RULE_ACTION_OFFSET = 2968
def __new__(cls):
class pfioc_natlook(Structure):
pf_addr = Generic.pf_addr
_fields_ = [("saddr", pf_addr),
("daddr", pf_addr),
("rsaddr", pf_addr),
("rdaddr", pf_addr),
("sxport", c_uint16),
("dxport", c_uint16),
("rsxport", c_uint16),
("rdxport", c_uint16),
("af", c_uint8), # sa_family_t
("proto", c_uint8),
("proto_variant", c_uint8),
("direction", c_uint8)]
freebsd = Generic.__new__(cls)
freebsd.pfioc_rule = c_char * 3040
freebsd.pfioc_natlook = pfioc_natlook
return freebsd
def __init__(self):
super(FreeBsd, self).__init__()
def add_anchors(self):
status = pfctl('-s all')[0]
if b'\nrdr-anchor "sshuttle"' not in status:
self._add_anchor_rule(self.PF_RDR, b'sshuttle')
super(FreeBsd, self).add_anchors(status=status)
def _add_anchor_rule(self, type, name):
pr = self.pfioc_rule()
ppa = self.pfioc_pooladdr()
ioctl(pf_get_dev(), self.DIOCBEGINADDRS, ppa)
# pool ticket
memmove(addressof(pr) + self.POOL_TICKET_OFFSET, ppa[4:8], 4)
super(FreeBsd, self)._add_anchor_rule(type, name, pr=pr)
def add_rules(self, includes, port, dnsport, nslist):
tables = [
b'table <forward_subnets> {%s}' % b','.join(includes)
]
translating_rules = [
b'rdr pass on lo0 proto tcp '
b'to <forward_subnets> -> 127.0.0.1 port %r' % port
]
filtering_rules = [
b'pass out route-to lo0 inet proto tcp '
b'to <forward_subnets> keep state'
]
if len(nslist) > 0:
tables.append(
b'table <dns_servers> {%s}' %
b','.join([ns[1].encode("ASCII") for ns in nslist]))
translating_rules.append(
b'rdr pass on lo0 proto udp to '
b'<dns_servers> port 53 -> 127.0.0.1 port %r' % dnsport)
filtering_rules.append(
b'pass out route-to lo0 inet proto udp to '
b'<dns_servers> port 53 keep state')
rules = b'\n'.join(tables + translating_rules + filtering_rules) \
+ b'\n'
super(FreeBsd, self).add_rules(rules)
class OpenBsd(Generic):
POOL_TICKET_OFFSET = 4
RULE_ACTION_OFFSET = 3324
ANCHOR_CALL_OFFSET = 1036
def __init__(self):
class pfioc_natlook(Structure):
pf_addr = Generic.pf_addr
_fields_ = [("saddr", pf_addr),
("daddr", pf_addr),
("rsaddr", pf_addr),
("rdaddr", pf_addr),
("rdomain", c_uint16),
("rrdomain", c_uint16),
("sxport", c_uint16),
("dxport", c_uint16),
("rsxport", c_uint16),
("rdxport", c_uint16),
("af", c_uint8), # sa_family_t
("proto", c_uint8),
("proto_variant", c_uint8),
("direction", c_uint8)]
self.pfioc_rule = c_char * 3400
self.pfioc_natlook = pfioc_natlook
super(OpenBsd, self).__init__()
def add_anchors(self):
# before adding anchors and rules we must override the skip lo
# that comes by default in openbsd pf.conf so the rules we will add,
# which rely on translating/filtering packets on lo, can work
pfctl('-f /dev/stdin', b'match on lo\n')
super(OpenBsd, self).add_anchors()
def add_rules(self, includes, port, dnsport, nslist):
tables = [
b'table <forward_subnets> {%s}' % b','.join(includes)
]
translating_rules = [
b'pass in on lo0 inet proto tcp '
b'divert-to 127.0.0.1 port %r' % port
]
filtering_rules = [
b'pass out inet proto tcp '
b'to <forward_subnets> route-to lo0 keep state'
]
if len(nslist) > 0:
tables.append(
b'table <dns_servers> {%s}' %
b','.join([ns[1].encode("ASCII") for ns in nslist]))
translating_rules.append(
b'pass in on lo0 inet proto udp to <dns_servers> '
b'port 53 rdr-to 127.0.0.1 port %r' % dnsport)
filtering_rules.append(
b'pass out inet proto udp to '
b'<dns_servers> port 53 route-to lo0 keep state')
rules = b'\n'.join(tables + translating_rules + filtering_rules) \
+ b'\n'
super(OpenBsd, self).add_rules(rules)
class Darwin(FreeBsd):
RULE_ACTION_OFFSET = 3068
def __init__(self):
class pf_state_xport(Union):
_fields_ = [("port", c_uint16),
("call_id", c_uint16),
("spi", c_uint32)]
class pfioc_natlook(Structure):
pf_addr = Generic.pf_addr
_fields_ = [("saddr", pf_addr),
("daddr", pf_addr),
("rsaddr", pf_addr),
("rdaddr", pf_addr),
("sxport", pf_state_xport),
("dxport", pf_state_xport),
("rsxport", pf_state_xport),
("rdxport", pf_state_xport),
("af", c_uint8), # sa_family_t
("proto", c_uint8),
("proto_variant", c_uint8),
("direction", c_uint8)]
self.pfioc_rule = c_char * 3104
self.pfioc_natlook = pfioc_natlook
super(Darwin, self).__init__()
def enable(self):
o = pfctl('-E')
_pf_context['Xtoken'] = \
re.search(b'Token : (.+)', o[1]).group(1)
def disable(self):
if _pf_context['Xtoken'] is not None:
pfctl('-X %s' % _pf_context['Xtoken'].decode("ASCII"))
def add_anchors(self):
# before adding anchors and rules we must override the skip lo
# that in some cases ends up in the chain so the rules we will add,
# which rely on translating/filtering packets on lo, can work
pfctl('-f /dev/stdin', b'pass on lo\n')
super(Darwin, self).add_anchors()
def _add_natlook_ports(self, pnl, src_port, dst_port):
pnl.sxport.port = socket.htons(src_port)
pnl.dxport.port = socket.htons(dst_port)
def _get_natlook_port(self, xport):
return xport.port
if sys.platform == 'darwin':
pf = Darwin()
elif sys.platform.startswith('openbsd'):
pf = OpenBsd()
else:
pf = FreeBsd()
def pfctl(args, stdin=None):
argv = ['pfctl'] + list(args.split(" "))
debug1('>> %s\n' % ' '.join(argv))
p = ssubprocess.Popen(argv, stdin=ssubprocess.PIPE,
stdout=ssubprocess.PIPE,
stderr=ssubprocess.PIPE)
o = p.communicate(stdin)
if p.returncode:
raise Fatal('%r returned %d' % (argv, p.returncode))
return o
def pf_get_dev():
global _pf_fd
if _pf_fd is None:
_pf_fd = os.open('/dev/pf', os.O_RDWR)
return _pf_fd
class Method(BaseMethod):
def get_tcp_dstip(self, sock):
pfile = self.firewall.pfile
peer = sock.getpeername()
proxy = sock.getsockname()
argv = (sock.family, socket.IPPROTO_TCP,
peer[0].encode("ASCII"), peer[1],
proxy[0].encode("ASCII"), proxy[1])
out_line = b"QUERY_PF_NAT %d,%d,%s,%d,%s,%d\n" % argv
pfile.write(out_line)
pfile.flush()
in_line = pfile.readline()
debug2(out_line.decode("ASCII") + ' > ' + in_line.decode("ASCII"))
if in_line.startswith(b'QUERY_PF_NAT_SUCCESS '):
(ip, port) = in_line[21:].split(b',')
return (ip.decode("ASCII"), int(port))
return sock.getsockname()
def setup_firewall(self, port, dnsport, nslist, family, subnets, udp):
tables = []
translating_rules = []
filtering_rules = []
if family != socket.AF_INET:
raise Exception(
'Address family "%s" unsupported by pf method_name'
% family_to_string(family))
if udp:
raise Exception("UDP not supported by pf method_name")
if len(subnets) > 0:
includes = []
# If a given subnet is both included and excluded, list the
# exclusion first; the table will ignore the second, opposite
# definition
for f, swidth, sexclude, snet in sorted(
subnets, key=lambda s: (s[1], s[2]), reverse=True):
includes.append(b"%s%s/%d" %
(b"!" if sexclude else b"",
snet.encode("ASCII"),
swidth))
pf.add_anchors()
pf.add_rules(includes, port, dnsport, nslist)
pf.enable()
def restore_firewall(self, port, family, udp):
if family != socket.AF_INET:
raise Exception(
'Address family "%s" unsupported by pf method_name'
% family_to_string(family))
if udp:
raise Exception("UDP not supported by pf method_name")
pfctl('-a sshuttle -F all')
pf.disable()
def firewall_command(self, line):
if line.startswith('QUERY_PF_NAT '):
try:
dst = pf.query_nat(*(line[13:].split(',')))
sys.stdout.write('QUERY_PF_NAT_SUCCESS %s,%r\n' % dst)
except IOError as e:
sys.stdout.write('QUERY_PF_NAT_FAILURE %s\n' % e)
sys.stdout.flush()
return True
else:
return False
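Because pf NAT lookups need root, the unprivileged client asks the firewall helper over the control pipe. A sketch of the request/response shapes used by get_tcp_dstip() and firewall_command() above, with made-up addresses:

    import socket

    peer = ('192.0.2.10', 54321)   # local app as seen by the redirect listener
    proxy = ('127.0.0.1', 12300)   # the redirect socket itself
    request = b'QUERY_PF_NAT %d,%d,%s,%d,%s,%d\n' % (
        socket.AF_INET, socket.IPPROTO_TCP,
        peer[0].encode('ascii'), peer[1],
        proxy[0].encode('ascii'), proxy[1])
    print(request)
    # the helper replies with either
    #   QUERY_PF_NAT_SUCCESS <original_dst_ip>,<original_dst_port>
    # or QUERY_PF_NAT_FAILURE <error>; on failure the client falls back to
    # sock.getsockname().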

268
sshuttle/methods/tproxy.py Normal file

@ -0,0 +1,268 @@
import struct
from sshuttle.helpers import family_to_string
from sshuttle.linux import ipt, ipt_ttl, ipt_chain_exists
from sshuttle.methods import BaseMethod
from sshuttle.helpers import debug1, debug3, Fatal
recvmsg = None
try:
# try getting recvmsg from python
import socket as pythonsocket
getattr(pythonsocket.socket, "recvmsg")
socket = pythonsocket
recvmsg = "python"
except AttributeError:
# try getting recvmsg from socket_ext library
try:
import socket_ext
getattr(socket_ext.socket, "recvmsg")
socket = socket_ext
recvmsg = "socket_ext"
except ImportError:
import socket
IP_TRANSPARENT = 19
IP_ORIGDSTADDR = 20
IP_RECVORIGDSTADDR = IP_ORIGDSTADDR
SOL_IPV6 = 41
IPV6_ORIGDSTADDR = 74
IPV6_RECVORIGDSTADDR = IPV6_ORIGDSTADDR
if recvmsg == "python":
def recv_udp(listener, bufsize):
debug3('Accept UDP python using recvmsg.\n')
data, ancdata, msg_flags, srcip = listener.recvmsg(
4096, socket.CMSG_SPACE(24))
dstip = None
family = None
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level == socket.SOL_IP and cmsg_type == IP_ORIGDSTADDR:
family, port = struct.unpack('=HH', cmsg_data[0:4])
port = socket.htons(port)
if family == socket.AF_INET:
start = 4
length = 4
else:
raise Fatal("Unsupported socket type '%s'" % family)
ip = socket.inet_ntop(family, cmsg_data[start:start + length])
dstip = (ip, port)
break
elif cmsg_level == SOL_IPV6 and cmsg_type == IPV6_ORIGDSTADDR:
family, port = struct.unpack('=HH', cmsg_data[0:4])
port = socket.htons(port)
if family == socket.AF_INET6:
start = 8
length = 16
else:
raise Fatal("Unsupported socket type '%s'" % family)
ip = socket.inet_ntop(family, cmsg_data[start:start + length])
dstip = (ip, port)
break
return (srcip, dstip, data)
elif recvmsg == "socket_ext":
def recv_udp(listener, bufsize):
debug3('Accept UDP using socket_ext recvmsg.\n')
srcip, data, adata, flags = listener.recvmsg(
(bufsize,), socket.CMSG_SPACE(24))
dstip = None
family = None
for a in adata:
if a.cmsg_level == socket.SOL_IP and a.cmsg_type == IP_ORIGDSTADDR:
family, port = struct.unpack('=HH', a.cmsg_data[0:4])
port = socket.htons(port)
if family == socket.AF_INET:
start = 4
length = 4
else:
raise Fatal("Unsupported socket type '%s'" % family)
ip = socket.inet_ntop(
family, a.cmsg_data[start:start + length])
dstip = (ip, port)
break
elif a.cmsg_level == SOL_IPV6 and a.cmsg_type == IPV6_ORIGDSTADDR:
family, port = struct.unpack('=HH', a.cmsg_data[0:4])
port = socket.htons(port)
if family == socket.AF_INET6:
start = 8
length = 16
else:
raise Fatal("Unsupported socket type '%s'" % family)
ip = socket.inet_ntop(
family, a.cmsg_data[start:start + length])
dstip = (ip, port)
break
return (srcip, dstip, data[0])
else:
def recv_udp(listener, bufsize):
debug3('Accept UDP using recvfrom.\n')
data, srcip = listener.recvfrom(bufsize)
return (srcip, None, data)
class Method(BaseMethod):
def get_supported_features(self):
result = super(Method, self).get_supported_features()
result.ipv6 = True
if recvmsg is None:
result.udp = False
result.dns = False
else:
result.udp = True
result.dns = True
return result
def get_tcp_dstip(self, sock):
return sock.getsockname()
def recv_udp(self, udp_listener, bufsize):
srcip, dstip, data = recv_udp(udp_listener, bufsize)
if not dstip:
debug1(
"-- ignored UDP from %r: "
"couldn't determine destination IP address\n" % (srcip,))
return None
return srcip, dstip, data
def send_udp(self, sock, srcip, dstip, data):
if not srcip:
debug1(
"-- ignored UDP to %r: "
"couldn't determine source IP address\n" % (dstip,))
return
sender = socket.socket(sock.family, socket.SOCK_DGRAM)
sender.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sender.setsockopt(socket.SOL_IP, IP_TRANSPARENT, 1)
sender.bind(srcip)
sender.sendto(data, dstip)
sender.close()
def setup_tcp_listener(self, tcp_listener):
tcp_listener.setsockopt(socket.SOL_IP, IP_TRANSPARENT, 1)
def setup_udp_listener(self, udp_listener):
udp_listener.setsockopt(socket.SOL_IP, IP_TRANSPARENT, 1)
if udp_listener.v4 is not None:
udp_listener.v4.setsockopt(
socket.SOL_IP, IP_RECVORIGDSTADDR, 1)
if udp_listener.v6 is not None:
udp_listener.v6.setsockopt(SOL_IPV6, IPV6_RECVORIGDSTADDR, 1)
def setup_firewall(self, port, dnsport, nslist, family, subnets, udp):
if family not in [socket.AF_INET, socket.AF_INET6]:
raise Exception(
'Address family "%s" unsupported by tproxy method'
% family_to_string(family))
table = "mangle"
def _ipt(*args):
return ipt(family, table, *args)
def _ipt_ttl(*args):
return ipt_ttl(family, table, *args)
mark_chain = 'sshuttle-m-%s' % port
tproxy_chain = 'sshuttle-t-%s' % port
divert_chain = 'sshuttle-d-%s' % port
# basic cleanup/setup of chains
self.restore_firewall(port, family, udp)
_ipt('-N', mark_chain)
_ipt('-F', mark_chain)
_ipt('-N', divert_chain)
_ipt('-F', divert_chain)
_ipt('-N', tproxy_chain)
_ipt('-F', tproxy_chain)
_ipt('-I', 'OUTPUT', '1', '-j', mark_chain)
_ipt('-I', 'PREROUTING', '1', '-j', tproxy_chain)
_ipt('-A', divert_chain, '-j', 'MARK', '--set-mark', '1')
_ipt('-A', divert_chain, '-j', 'ACCEPT')
_ipt('-A', tproxy_chain, '-m', 'socket', '-j', divert_chain,
'-m', 'tcp', '-p', 'tcp')
if udp:
_ipt('-A', tproxy_chain, '-m', 'socket', '-j', divert_chain,
'-m', 'udp', '-p', 'udp')
for f, ip in [i for i in nslist if i[0] == family]:
_ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', '1',
'--dest', '%s/32' % ip,
'-m', 'udp', '-p', 'udp', '--dport', '53')
_ipt('-A', tproxy_chain, '-j', 'TPROXY',
'--tproxy-mark', '0x1/0x1',
'--dest', '%s/32' % ip,
'-m', 'udp', '-p', 'udp', '--dport', '53',
'--on-port', str(dnsport))
for f, swidth, sexclude, snet \
in sorted(subnets, key=lambda s: s[1], reverse=True):
if sexclude:
_ipt('-A', mark_chain, '-j', 'RETURN',
'--dest', '%s/%s' % (snet, swidth),
'-m', 'tcp', '-p', 'tcp')
_ipt('-A', tproxy_chain, '-j', 'RETURN',
'--dest', '%s/%s' % (snet, swidth),
'-m', 'tcp', '-p', 'tcp')
else:
_ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', '1',
'--dest', '%s/%s' % (snet, swidth),
'-m', 'tcp', '-p', 'tcp')
_ipt('-A', tproxy_chain, '-j', 'TPROXY',
'--tproxy-mark', '0x1/0x1',
'--dest', '%s/%s' % (snet, swidth),
'-m', 'tcp', '-p', 'tcp',
'--on-port', str(port))
if udp:
if sexclude:
_ipt('-A', mark_chain, '-j', 'RETURN',
'--dest', '%s/%s' % (snet, swidth),
'-m', 'udp', '-p', 'udp')
_ipt('-A', tproxy_chain, '-j', 'RETURN',
'--dest', '%s/%s' % (snet, swidth),
'-m', 'udp', '-p', 'udp')
else:
_ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', '1',
'--dest', '%s/%s' % (snet, swidth),
'-m', 'udp', '-p', 'udp')
_ipt('-A', tproxy_chain, '-j', 'TPROXY',
'--tproxy-mark', '0x1/0x1',
'--dest', '%s/%s' % (snet, swidth),
'-m', 'udp', '-p', 'udp',
'--on-port', str(port))
def restore_firewall(self, port, family, udp):
if family not in [socket.AF_INET, socket.AF_INET6]:
raise Exception(
'Address family "%s" unsupported by tproxy method'
% family_to_string(family))
table = "mangle"
def _ipt(*args):
return ipt(family, table, *args)
def _ipt_ttl(*args):
return ipt_ttl(family, table, *args)
mark_chain = 'sshuttle-m-%s' % port
tproxy_chain = 'sshuttle-t-%s' % port
divert_chain = 'sshuttle-d-%s' % port
# basic cleanup/setup of chains
if ipt_chain_exists(family, table, mark_chain):
_ipt('-D', 'OUTPUT', '-j', mark_chain)
_ipt('-F', mark_chain)
_ipt('-X', mark_chain)
if ipt_chain_exists(family, table, tproxy_chain):
_ipt('-D', 'PREROUTING', '-j', tproxy_chain)
_ipt('-F', tproxy_chain)
_ipt('-X', tproxy_chain)
if ipt_chain_exists(family, table, divert_chain):
_ipt('-F', divert_chain)
_ipt('-X', divert_chain)
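
The recv_udp() variants above all recover a datagram's original destination from the IP_ORIGDSTADDR / IPV6_ORIGDSTADDR ancillary message. A minimal, hedged sketch of that unpacking on its own, assuming the raw cmsg payload is a Linux sockaddr_in / sockaddr_in6 exactly as the code above expects (address at offset 4, length 4 for IPv4; offset 8, length 16 for IPv6):

import socket
import struct

def parse_origdst(cmsg_data):
    # sockaddr starts with a 2-byte family and a 2-byte port (network order)
    family, port = struct.unpack('=HH', cmsg_data[0:4])
    port = socket.htons(port)
    if family == socket.AF_INET:
        start, length = 4, 4      # sin_addr follows sin_family/sin_port
    elif family == socket.AF_INET6:
        start, length = 8, 16     # sin6_addr follows family/port/flowinfo
    else:
        raise ValueError("unsupported address family '%s'" % family)
    ip = socket.inet_ntop(family, cmsg_data[start:start + length])
    return ip, port

# Example (IPv4): parse_origdst(struct.pack('=HH4s', socket.AF_INET,
#     socket.htons(53), socket.inet_aton('1.2.3.4')))  ->  ('1.2.3.4', 53)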

View File

@ -1,9 +1,16 @@
"""Command-line options parser.
With the help of an options spec string, easily parse command-line options.
"""
import sys, os, textwrap, getopt, re, struct
import sys
import os
import textwrap
import getopt
import re
import struct
class OptDict:
def __init__(self):
self._opts = {}
@ -46,24 +53,29 @@ def _atoi(v):
def _remove_negative_kv(k, v):
if k.startswith('no-') or k.startswith('no_'):
return k[3:], not v
return k,v
return k, v
def _remove_negative_k(k):
return _remove_negative_kv(k, None)[0]
def _tty_width():
if not hasattr(sys.stderr, "fileno"):
return _atoi(os.environ.get('WIDTH')) or 70
s = struct.pack("HHHH", 0, 0, 0, 0)
try:
import fcntl, termios
import fcntl
import termios
s = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, s)
except (IOError, ImportError):
return _atoi(os.environ.get('WIDTH')) or 70
(ysize,xsize,ypix,xpix) = struct.unpack('HHHH', s)
(ysize, xsize, ypix, xpix) = struct.unpack('HHHH', s)
return xsize or 70
class Options:
"""Option parser.
When constructed, two strings are mandatory. The first one is the command
name showed before error messages. The second one is a string called an
@ -76,6 +88,7 @@ class Options:
By default, the parser function is getopt.gnu_getopt, and the abort
behaviour is to exit the program.
"""
def __init__(self, optspec, optfunc=getopt.gnu_getopt,
onabort=_default_onabort):
self.optspec = optspec
@ -95,7 +108,8 @@ class Options:
first_syn = True
while lines:
l = lines.pop()
if l == '--': break
if l == '--':
break
out.append('%s: %s\n' % (first_syn and 'usage' or ' or', l))
first_syn = False
out.append('\n')
@ -122,7 +136,7 @@ class Options:
flagl = flags.split(',')
flagl_nice = []
for _f in flagl:
f,dvi = _remove_negative_kv(_f, _intify(defval))
f, dvi = _remove_negative_kv(_f, _intify(defval))
self._aliases[f] = _remove_negative_k(flagl[0])
self._hasparms[f] = has_parm
self._defaults[f] = dvi
@ -140,8 +154,8 @@ class Options:
flags_nice += ' ...'
prefix = ' %-20s ' % flags_nice
argtext = '\n'.join(textwrap.wrap(extra, width=_tty_width(),
initial_indent=prefix,
subsequent_indent=' '*28))
initial_indent=prefix,
subsequent_indent=' ' * 28))
out.append(argtext + '\n')
last_was_option = True
else:
@ -170,17 +184,18 @@ class Options:
and "extra" is a list of positional arguments.
"""
try:
(flags,extra) = self.optfunc(args, self._shortopts, self._longopts)
except getopt.GetoptError, e:
(flags, extra) = self.optfunc(
args, self._shortopts, self._longopts)
except getopt.GetoptError as e:
self.fatal(e)
opt = OptDict()
for k,v in self._defaults.iteritems():
for k, v in self._defaults.items():
k = self._aliases[k]
opt[k] = v
for (k,v) in flags:
for (k, v) in flags:
k = k.lstrip('-')
if k in ('h', '?', 'help'):
self.usage()
@ -195,6 +210,6 @@ class Options:
else:
v = _intify(v)
opt[k] = v
for (f1,f2) in self._aliases.iteritems():
for (f1, f2) in self._aliases.items():
opt[f1] = opt._opts.get(f2)
return (opt,flags,extra)
return (opt, flags, extra)
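
For orientation, a hedged sketch of the options-spec format the parser above consumes (the flag names below are illustrative only; the real spec lives in sshuttle's command-line front end, which is not part of this diff). Usage synopses come first, a literal '--' line separates them from the option descriptions, and a trailing '=' on a flag is assumed to mark an option that takes a parameter:

_example_spec = """example-prog [options...]
example-prog --version
--
v,verbose   increase verbosity (can be used more than once)
r,remote=   ssh hostname (and optional username) of remote server
D,daemon    run in the background as a daemon
"""

# opts = Options(_example_spec)
# opt, flags, extra = opts.parse(['-v', '-r', 'user@host', 'leftover'])
# flags -> [('-v', ''), ('-r', 'user@host')], extra -> ['leftover']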

349 sshuttle/server.py Normal file
View File

@ -0,0 +1,349 @@
import re
import struct
import socket
import traceback
import time
import sys
import os
import platform
import sshuttle.ssnet as ssnet
import sshuttle.helpers as helpers
import sshuttle.hostwatch as hostwatch
import subprocess as ssubprocess
from sshuttle.ssnet import Handler, Proxy, Mux, MuxWrapper
from sshuttle.helpers import log, debug1, debug2, debug3, Fatal, \
resolvconf_random_nameserver
def _ipmatch(ipstr):
if ipstr == b'default':
ipstr = b'0.0.0.0/0'
m = re.match(b'^(\d+(\.\d+(\.\d+(\.\d+)?)?)?)(?:/(\d+))?$', ipstr)
if m:
g = m.groups()
ips = g[0]
width = int(g[4] or 32)
if g[1] is None:
ips += b'.0.0.0'
width = min(width, 8)
elif g[2] is None:
ips += b'.0.0'
width = min(width, 16)
elif g[3] is None:
ips += b'.0'
width = min(width, 24)
ips = ips.decode("ASCII")
return (struct.unpack('!I', socket.inet_aton(ips))[0], width)
def _ipstr(ip, width):
if width >= 32:
return ip
else:
return "%s/%d" % (ip, width)
def _maskbits(netmask):
if not netmask:
return 32
for i in range(32):
if netmask[0] & _shl(1, i):
return 32 - i
return 0
def _shl(n, bits):
return n * int(2 ** bits)
def _list_routes():
# FIXME: IPv4 only
argv = ['netstat', '-rn']
p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE)
routes = []
for line in p.stdout:
cols = re.split(b'\s+', line)
ipw = _ipmatch(cols[0])
if not ipw:
continue # some lines won't be parseable; never mind
maskw = _ipmatch(cols[2]) # linux only
mask = _maskbits(maskw) # returns 32 if maskw is null
width = min(ipw[1], mask)
ip = ipw[0] & _shl(_shl(1, width) - 1, 32 - width)
routes.append(
(socket.AF_INET, socket.inet_ntoa(struct.pack('!I', ip)), width))
rv = p.wait()
if rv != 0:
log('WARNING: %r returned %d\n' % (argv, rv))
log('WARNING: That prevents --auto-nets from working.\n')
return routes
def list_routes():
for (family, ip, width) in _list_routes():
if not ip.startswith('0.') and not ip.startswith('127.'):
yield (family, ip, width)
def _exc_dump():
exc_info = sys.exc_info()
return ''.join(traceback.format_exception(*exc_info))
def start_hostwatch(seed_hosts):
s1, s2 = socket.socketpair()
pid = os.fork()
if not pid:
# child
rv = 99
try:
try:
s2.close()
os.dup2(s1.fileno(), 1)
os.dup2(s1.fileno(), 0)
s1.close()
rv = hostwatch.hw_main(seed_hosts) or 0
except Exception:
log('%s\n' % _exc_dump())
rv = 98
finally:
os._exit(rv)
s1.close()
return pid, s2
class Hostwatch:
def __init__(self):
self.pid = 0
self.sock = None
class DnsProxy(Handler):
def __init__(self, mux, chan, request):
Handler.__init__(self, [])
self.timeout = time.time() + 30
self.mux = mux
self.chan = chan
self.tries = 0
self.request = request
self.peers = {}
self.try_send()
def try_send(self):
if self.tries >= 3:
return
self.tries += 1
family, peer = resolvconf_random_nameserver()
sock = socket.socket(family, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
sock.connect((peer, 53))
self.peers[sock] = peer
debug2('DNS: sending to %r (try %d)\n' % (peer, self.tries))
try:
sock.send(self.request)
self.socks.append(sock)
except socket.error as e:
if e.args[0] in ssnet.NET_ERRS:
# might have been spurious; try again.
# Note: these errors sometimes are reported by recv(),
# and sometimes by send(). We have to catch both.
debug2('DNS send to %r: %s\n' % (peer, e))
self.try_send()
return
else:
log('DNS send to %r: %s\n' % (peer, e))
return
def callback(self, sock):
peer = self.peers[sock]
try:
data = sock.recv(4096)
except socket.error as e:
self.socks.remove(sock)
del self.peers[sock]
if e.args[0] in ssnet.NET_ERRS:
# might have been spurious; try again.
# Note: these errors sometimes are reported by recv(),
# and sometimes by send(). We have to catch both.
debug2('DNS recv from %r: %s\n' % (peer, e))
self.try_send()
return
else:
log('DNS recv from %r: %s\n' % (peer, e))
return
debug2('DNS response: %d bytes\n' % len(data))
self.mux.send(self.chan, ssnet.CMD_DNS_RESPONSE, data)
self.ok = False
class UdpProxy(Handler):
def __init__(self, mux, chan, family):
sock = socket.socket(family, socket.SOCK_DGRAM)
Handler.__init__(self, [sock])
self.timeout = time.time() + 30
self.mux = mux
self.chan = chan
self.sock = sock
if family == socket.AF_INET:
self.sock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
def send(self, dstip, data):
debug2('UDP: sending to %r port %d\n' % dstip)
try:
self.sock.sendto(data, dstip)
except socket.error as e:
log('UDP send to %r port %d: %s\n' % (dstip[0], dstip[1], e))
return
def callback(self, sock):
try:
data, peer = sock.recvfrom(4096)
except socket.error as e:
log('UDP recv from %r port %d: %s\n' % (peer[0], peer[1], e))
return
debug2('UDP response: %d bytes\n' % len(data))
hdr = "%s,%r," % (peer[0], peer[1])
self.mux.send(self.chan, ssnet.CMD_UDP_DATA, hdr + data)
def main(latency_control):
debug1('Starting server with Python version %s\n'
% platform.python_version())
if helpers.verbose >= 1:
helpers.logprefix = ' s: '
else:
helpers.logprefix = 'server: '
debug1('latency control setting = %r\n' % latency_control)
routes = list(list_routes())
debug1('available routes:\n')
for r in routes:
debug1(' %d/%s/%d\n' % r)
# synchronization header
sys.stdout.write('\0\0SSHUTTLE0001')
sys.stdout.flush()
handlers = []
mux = Mux(socket.fromfd(sys.stdin.fileno(),
socket.AF_INET, socket.SOCK_STREAM),
socket.fromfd(sys.stdout.fileno(),
socket.AF_INET, socket.SOCK_STREAM))
handlers.append(mux)
routepkt = b''
for r in routes:
routepkt += b'%d,%s,%d\n' % (r[0], r[1].encode("ASCII"), r[2])
mux.send(0, ssnet.CMD_ROUTES, routepkt)
hw = Hostwatch()
hw.leftover = b''
def hostwatch_ready(sock):
assert(hw.pid)
content = hw.sock.recv(4096)
if content:
lines = (hw.leftover + content).split(b'\n')
if lines[-1]:
# no terminating newline: entry isn't complete yet!
hw.leftover = lines.pop()
lines.append(b'')
else:
hw.leftover = b''
mux.send(0, ssnet.CMD_HOST_LIST, b'\n'.join(lines))
else:
raise Fatal('hostwatch process died')
def got_host_req(data):
if not hw.pid:
(hw.pid, hw.sock) = start_hostwatch(data.strip().split())
handlers.append(Handler(socks=[hw.sock],
callback=hostwatch_ready))
mux.got_host_req = got_host_req
def new_channel(channel, data):
(family, dstip, dstport) = data.split(b',', 2)
family = int(family)
dstport = int(dstport)
outwrap = ssnet.connect_dst(family, dstip, dstport)
handlers.append(Proxy(MuxWrapper(mux, channel), outwrap))
mux.new_channel = new_channel
dnshandlers = {}
def dns_req(channel, data):
debug2('Incoming DNS request channel=%d.\n' % channel)
h = DnsProxy(mux, channel, data)
handlers.append(h)
dnshandlers[channel] = h
mux.got_dns_req = dns_req
udphandlers = {}
def udp_req(channel, cmd, data):
debug2('Incoming UDP request channel=%d, cmd=%d\n' % (channel, cmd))
if cmd == ssnet.CMD_UDP_DATA:
(dstip, dstport, data) = data.split(",", 2)
dstport = int(dstport)
debug2('is incoming UDP data. %r %d.\n' % (dstip, dstport))
h = udphandlers[channel]
h.send((dstip, dstport), data)
elif cmd == ssnet.CMD_UDP_CLOSE:
debug2('is incoming UDP close\n')
h = udphandlers[channel]
h.ok = False
del mux.channels[channel]
def udp_open(channel, data):
debug2('Incoming UDP open.\n')
family = int(data)
mux.channels[channel] = lambda cmd, data: udp_req(channel, cmd, data)
if channel in udphandlers:
raise Fatal('UDP connection channel %d already open' % channel)
else:
h = UdpProxy(mux, channel, family)
handlers.append(h)
udphandlers[channel] = h
mux.got_udp_open = udp_open
while mux.ok:
if hw.pid:
assert(hw.pid > 0)
(rpid, rv) = os.waitpid(hw.pid, os.WNOHANG)
if rpid:
raise Fatal(
'hostwatch exited unexpectedly: code 0x%04x\n' % rv)
ssnet.runonce(handlers, mux)
if latency_control:
mux.check_fullness()
if dnshandlers:
now = time.time()
remove = []
for channel, h in dnshandlers.items():
if h.timeout < now or not h.ok:
debug3('expiring dnsreqs channel=%d\n' % channel)
remove.append(channel)
h.ok = False
for channel in remove:
del dnshandlers[channel]
if udphandlers:
remove = []
for channel, h in udphandlers.items():
if not h.ok:
debug3('expiring UDP channel=%d\n' % channel)
remove.append(channel)
h.ok = False
for channel in remove:
del udphandlers[channel]
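
As a hedged aside on the wire format: main() above serialises the server's routes as one "family,ip,width" record per line and ships the whole blob in a single CMD_ROUTES message. An illustrative decoder for that payload (the client-side parsing itself is not part of this excerpt):

def parse_routes(routepkt):
    routes = []
    for line in routepkt.split(b'\n'):
        if not line:
            continue  # skip the trailing blank produced by the final '\n'
        family, ip, width = line.split(b',', 2)
        routes.append((int(family), ip.decode('ASCII'), int(width)))
    return routes

# parse_routes(b'2,10.0.0.0,8\n2,192.168.1.0,24\n')
# -> [(2, '10.0.0.0', 8), (2, '192.168.1.0', 24)]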

130 sshuttle/ssh.py Normal file
View File

@ -0,0 +1,130 @@
import sys
import os
import re
import socket
import zlib
import imp
import subprocess as ssubprocess
import sshuttle.helpers as helpers
from sshuttle.helpers import debug2
def readfile(name):
tokens = name.split(".")
f = None
token = tokens[0]
token_name = [token]
token_str = ".".join(token_name)
try:
f, pathname, description = imp.find_module(token_str)
for token in tokens[1:]:
module = imp.load_module(token_str, f, pathname, description)
if f is not None:
f.close()
token_name.append(token)
token_str = ".".join(token_name)
f, pathname, description = imp.find_module(
token, module.__path__)
if f is not None:
contents = f.read()
else:
contents = ""
finally:
if f is not None:
f.close()
return contents.encode("UTF8")
def empackage(z, name, data=None):
if not data:
data = readfile(name)
content = z.compress(data)
content += z.flush(zlib.Z_SYNC_FLUSH)
return b'%s\n%d\n%s' % (name.encode("ASCII"), len(content), content)
def connect(ssh_cmd, rhostport, python, stderr, options):
portl = []
if (rhostport or '').count(':') > 1:
if rhostport.count(']') or rhostport.count('['):
result = rhostport.split(']')
rhost = result[0].strip('[')
if len(result) > 1:
result[1] = result[1].strip(':')
if result[1] != '':
portl = ['-p', str(int(result[1]))]
# can't disambiguate IPv6 colons and a port number. pass the hostname
# through.
else:
rhost = rhostport
else: # IPv4
l = (rhostport or '').split(':', 1)
rhost = l[0]
if len(l) > 1:
portl = ['-p', str(int(l[1]))]
if rhost == '-':
rhost = None
z = zlib.compressobj(1)
content = readfile('sshuttle.assembler')
optdata = ''.join("%s=%r\n" % (k, v) for (k, v) in list(options.items()))
optdata = optdata.encode("UTF8")
content2 = (empackage(z, 'sshuttle') +
empackage(z, 'sshuttle.cmdline_options', optdata) +
empackage(z, 'sshuttle.helpers') +
empackage(z, 'sshuttle.ssnet') +
empackage(z, 'sshuttle.hostwatch') +
empackage(z, 'sshuttle.server') +
b"\n")
pyscript = r"""
import sys;
verbosity=%d;
stdin=getattr(sys.stdin,"buffer",sys.stdin);
exec(compile(stdin.read(%d), "assembler.py", "exec"))
""" % (helpers.verbose or 0, len(content))
pyscript = re.sub(r'\s+', ' ', pyscript.strip())
if not rhost:
# ignore the --python argument when running locally; we already know
# which python version works.
argv = [sys.argv[1], '-c', pyscript]
else:
if ssh_cmd:
sshl = ssh_cmd.split(' ')
else:
sshl = ['ssh']
if python:
pycmd = "'%s' -c '%s'" % (python, pyscript)
else:
pycmd = ("P=python3.5; $P -V 2>/dev/null || P=python; "
"exec \"$P\" -c '%s'") % pyscript
argv = (sshl +
portl +
[rhost, '--', pycmd])
(s1, s2) = socket.socketpair()
def setup():
# runs in the child process
s2.close()
s1a, s1b = os.dup(s1.fileno()), os.dup(s1.fileno())
s1.close()
debug2('executing: %r\n' % argv)
p = ssubprocess.Popen(argv, stdin=s1a, stdout=s1b, preexec_fn=setup,
close_fds=True, stderr=stderr)
os.close(s1a)
os.close(s1b)
s2.sendall(content)
s2.sendall(content2)
return p, s2
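
A hedged note on the stream that connect() sends down the SSH pipe: empackage() above frames each embedded module as "<name>\n<byte count>\n<zlib sync-flushed payload>", all compressed with a single compressobj. An illustrative receiver (the real unpacking lives in sshuttle.assembler, which is not included in this listing) would share one decompressobj across records:

import zlib

def iter_packages(stream):
    d = zlib.decompressobj()
    while True:
        name = stream.readline()
        if not name or name == b'\n':
            break  # a bare newline terminates the module stream
        nbytes = int(stream.readline())
        payload = stream.read(nbytes)
        yield name.strip().decode('ASCII'), d.decompress(payload)

# e.g. for name, source in iter_packages(pipe): compile(source, name, 'exec')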

View File

@ -1,9 +1,12 @@
import struct, socket, errno, select
if not globals().get('skip_imports'):
from helpers import *
import struct
import socket
import errno
import select
import os
from sshuttle.helpers import log, debug1, debug2, debug3, Fatal
MAX_CHANNEL = 65535
# these don't exist in the socket module in python 2.3!
SHUT_RD = 0
SHUT_WR = 1
@ -16,35 +19,45 @@ HDR_LEN = 8
CMD_EXIT = 0x4200
CMD_PING = 0x4201
CMD_PONG = 0x4202
CMD_CONNECT = 0x4203
CMD_STOP_SENDING = 0x4204
CMD_EOF = 0x4205
CMD_DATA = 0x4206
CMD_TCP_CONNECT = 0x4203
CMD_TCP_STOP_SENDING = 0x4204
CMD_TCP_EOF = 0x4205
CMD_TCP_DATA = 0x4206
CMD_ROUTES = 0x4207
CMD_HOST_REQ = 0x4208
CMD_HOST_LIST = 0x4209
CMD_DNS_REQ = 0x420a
CMD_DNS_RESPONSE = 0x420b
CMD_UDP_OPEN = 0x420c
CMD_UDP_DATA = 0x420d
CMD_UDP_CLOSE = 0x420e
cmd_to_name = {
CMD_EXIT: 'EXIT',
CMD_PING: 'PING',
CMD_PONG: 'PONG',
CMD_CONNECT: 'CONNECT',
CMD_STOP_SENDING: 'STOP_SENDING',
CMD_EOF: 'EOF',
CMD_DATA: 'DATA',
CMD_TCP_CONNECT: 'TCP_CONNECT',
CMD_TCP_STOP_SENDING: 'TCP_STOP_SENDING',
CMD_TCP_EOF: 'TCP_EOF',
CMD_TCP_DATA: 'TCP_DATA',
CMD_ROUTES: 'ROUTES',
CMD_HOST_REQ: 'HOST_REQ',
CMD_HOST_LIST: 'HOST_LIST',
CMD_DNS_REQ: 'DNS_REQ',
CMD_DNS_RESPONSE: 'DNS_RESPONSE',
CMD_UDP_OPEN: 'UDP_OPEN',
CMD_UDP_DATA: 'UDP_DATA',
CMD_UDP_CLOSE: 'UDP_CLOSE',
}
NET_ERRS = [errno.ECONNREFUSED, errno.ETIMEDOUT,
errno.EHOSTUNREACH, errno.ENETUNREACH,
errno.EHOSTDOWN, errno.ENETDOWN]
def _add(l, elem):
if not elem in l:
if elem not in l:
l.append(elem)
@ -62,7 +75,7 @@ def _fds(l):
def _nb_clean(func, *args):
try:
return func(*args)
except OSError, e:
except OSError as e:
if e.errno not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
else:
@ -75,14 +88,17 @@ def _try_peername(sock):
pn = sock.getpeername()
if pn:
return '%s:%s' % (pn[0], pn[1])
except socket.error, e:
except socket.error as e:
if e.args[0] not in (errno.ENOTCONN, errno.ENOTSOCK):
raise
return 'unknown'
_swcount = 0
class SockWrapper:
def __init__(self, rsock, wsock, connect_to=None, peername=None):
global _swcount
_swcount += 1
@ -128,7 +144,7 @@ class SockWrapper:
self.rsock.connect(self.connect_to)
# connected successfully (Linux)
self.connect_to = None
except socket.error, e:
except socket.error as e:
debug3('%r: connect result: %s\n' % (self, e))
if e.args[0] == errno.EINVAL:
# this is what happens when you call connect() on a socket
@ -142,12 +158,21 @@ class SockWrapper:
debug3('%r: fixed connect result: %s\n' % (self, e))
if e.args[0] in [errno.EINPROGRESS, errno.EALREADY]:
pass # not connected yet
elif e.args[0] == 0:
# connected successfully (weird Linux bug?)
# Sometimes Linux seems to return EINVAL when it isn't
# invalid. This *may* be caused by a race condition
# between connect() and getsockopt(SO_ERROR) (ie. it
# finishes connecting in between the two, so there is no
# longer an error). However, I'm not sure of that.
#
# I did get at least one report that the problem went away
# when we added this, however.
self.connect_to = None
elif e.args[0] == errno.EISCONN:
# connected successfully (BSD)
self.connect_to = None
elif e.args[0] in [errno.ECONNREFUSED, errno.ETIMEDOUT,
errno.EHOSTUNREACH, errno.ENETUNREACH,
errno.EACCES, errno.EPERM]:
elif e.args[0] in NET_ERRS + [errno.EACCES, errno.EPERM]:
# a "normal" kind of error
self.connect_to = None
self.seterr(e)
@ -158,15 +183,15 @@ class SockWrapper:
if not self.shut_read:
debug2('%r: done reading\n' % self)
self.shut_read = True
#self.rsock.shutdown(SHUT_RD) # doesn't do anything anyway
# self.rsock.shutdown(SHUT_RD) # doesn't do anything anyway
def nowrite(self):
if not self.shut_write:
debug2('%r: done writing\n' % self)
self.shut_write = True
try:
self.wsock.shutdown(SHUT_WR)
except socket.error, e:
except socket.error as e:
self.seterr('nowrite: %s' % e)
def too_full(self):
@ -178,7 +203,7 @@ class SockWrapper:
self.wsock.setblocking(False)
try:
return _nb_clean(os.write, self.wsock.fileno(), buf)
except OSError, e:
except OSError as e:
if e.errno == errno.EPIPE:
debug1('%r: uwrite: got EPIPE\n' % self)
self.nowrite()
@ -187,7 +212,7 @@ class SockWrapper:
# unexpected error... stream is dead
self.seterr('uwrite: %s' % e)
return 0
def write(self, buf):
assert(buf)
return self.uwrite(buf)
@ -200,9 +225,9 @@ class SockWrapper:
self.rsock.setblocking(False)
try:
return _nb_clean(os.read, self.rsock.fileno(), 65536)
except OSError, e:
except OSError as e:
self.seterr('uread: %s' % e)
return '' # unexpected error... we'll call it EOF
return b'' # unexpected error... we'll call it EOF
def fill(self):
if self.buf:
@ -210,7 +235,7 @@ class SockWrapper:
rb = self.uread()
if rb:
self.buf.append(rb)
if rb == '': # empty string means EOF; None means temporarily empty
if rb == b'': # empty string means EOF; None means temporarily empty
self.noread()
def copy_to(self, outwrap):
@ -224,7 +249,8 @@ class SockWrapper:
class Handler:
def __init__(self, socks = None, callback = None):
def __init__(self, socks=None, callback=None):
self.ok = True
self.socks = socks or []
if callback:
@ -234,9 +260,9 @@ class Handler:
for i in self.socks:
_add(r, i)
def callback(self):
def callback(self, sock):
log('--no callback defined-- %r\n' % self)
(r,w,x) = select.select(self.socks, [], [], 0)
(r, w, x) = select.select(self.socks, [], [], 0)
for s in r:
v = s.recv(4096)
if not v:
@ -246,6 +272,7 @@ class Handler:
class Proxy(Handler):
def __init__(self, wrap1, wrap2):
Handler.__init__(self, [wrap1.rsock, wrap1.wsock,
wrap2.rsock, wrap2.wsock])
@ -253,9 +280,11 @@ class Proxy(Handler):
self.wrap2 = wrap2
def pre_select(self, r, w, x):
if self.wrap1.shut_write: self.wrap2.noread()
if self.wrap2.shut_write: self.wrap1.noread()
if self.wrap1.shut_write:
self.wrap2.noread()
if self.wrap2.shut_write:
self.wrap1.noread()
if self.wrap1.connect_to:
_add(w, self.wrap1.rsock)
elif self.wrap1.buf:
@ -272,7 +301,7 @@ class Proxy(Handler):
elif not self.wrap2.shut_read:
_add(r, self.wrap2.rsock)
def callback(self):
def callback(self, sock):
self.wrap1.try_connect()
self.wrap2.try_connect()
self.wrap1.fill()
@ -286,31 +315,33 @@ class Proxy(Handler):
self.wrap2.buf = []
self.wrap2.noread()
if (self.wrap1.shut_read and self.wrap2.shut_read and
not self.wrap1.buf and not self.wrap2.buf):
not self.wrap1.buf and not self.wrap2.buf):
self.ok = False
self.wrap1.nowrite()
self.wrap2.nowrite()
class Mux(Handler):
def __init__(self, rsock, wsock):
Handler.__init__(self, [rsock, wsock])
self.rsock = rsock
self.wsock = wsock
self.new_channel = self.got_dns_req = self.got_routes = None
self.got_udp_open = self.got_udp_data = self.got_udp_close = None
self.got_host_req = self.got_host_list = None
self.channels = {}
self.chani = 0
self.want = 0
self.inbuf = ''
self.inbuf = b''
self.outbuf = []
self.fullness = 0
self.too_full = False
self.send(0, CMD_PING, 'chicken')
self.send(0, CMD_PING, b'chicken')
def next_channel(self):
# channel 0 is special, so we never allocate it
for timeout in xrange(1024):
for timeout in range(1024):
self.chani += 1
if self.chani > MAX_CHANNEL:
self.chani = 1
@ -322,31 +353,31 @@ class Mux(Handler):
for b in self.outbuf:
total += len(b)
return total
def check_fullness(self):
if self.fullness > 32768:
if not self.too_full:
self.send(0, CMD_PING, 'rttest')
self.send(0, CMD_PING, b'rttest')
self.too_full = True
#ob = []
#for b in self.outbuf:
# ob = []
# for b in self.outbuf:
# (s1,s2,c) = struct.unpack('!ccH', b[:4])
# ob.append(c)
#log('outbuf: %d %r\n' % (self.amount_queued(), ob))
# log('outbuf: %d %r\n' % (self.amount_queued(), ob))
def send(self, channel, cmd, data):
data = str(data)
assert(len(data) <= 65535)
p = struct.pack('!ccHHH', 'S', 'S', channel, cmd, len(data)) + data
assert isinstance(data, bytes)
assert len(data) <= 65535
p = struct.pack('!ccHHH', b'S', b'S', channel, cmd, len(data)) + data
self.outbuf.append(p)
debug2(' > channel=%d cmd=%s len=%d (fullness=%d)\n'
% (channel, cmd_to_name.get(cmd,hex(cmd)),
% (channel, cmd_to_name.get(cmd, hex(cmd)),
len(data), self.fullness))
self.fullness += len(data)
def got_packet(self, channel, cmd, data):
debug2('< channel=%d cmd=%s len=%d\n'
% (channel, cmd_to_name.get(cmd,hex(cmd)), len(data)))
debug2('< channel=%d cmd=%s len=%d\n'
% (channel, cmd_to_name.get(cmd, hex(cmd)), len(data)))
if cmd == CMD_PING:
self.send(0, CMD_PONG, data)
elif cmd == CMD_PONG:
@ -355,7 +386,7 @@ class Mux(Handler):
self.fullness = 0
elif cmd == CMD_EXIT:
self.ok = False
elif cmd == CMD_CONNECT:
elif cmd == CMD_TCP_CONNECT:
assert(not self.channels.get(channel))
if self.new_channel:
self.new_channel(channel, data)
@ -363,6 +394,10 @@ class Mux(Handler):
assert(not self.channels.get(channel))
if self.got_dns_req:
self.got_dns_req(channel, data)
elif cmd == CMD_UDP_OPEN:
assert(not self.channels.get(channel))
if self.got_udp_open:
self.got_udp_open(channel, data)
elif cmd == CMD_ROUTES:
if self.got_routes:
self.got_routes(data)
@ -381,8 +416,8 @@ class Mux(Handler):
else:
callback = self.channels.get(channel)
if not callback:
log('warning: closed channel %d got cmd=%s len=%d\n'
% (channel, cmd_to_name.get(cmd,hex(cmd)), len(data)))
log('warning: closed channel %d got cmd=%s len=%d\n'
% (channel, cmd_to_name.get(cmd, hex(cmd)), len(data)))
else:
callback(cmd, data)
@ -400,24 +435,24 @@ class Mux(Handler):
self.rsock.setblocking(False)
try:
b = _nb_clean(os.read, self.rsock.fileno(), 32768)
except OSError, e:
except OSError as e:
raise Fatal('other end: %r' % e)
#log('<<< %r\n' % b)
if b == '': # EOF
# log('<<< %r\n' % b)
if b == b'': # EOF
self.ok = False
if b:
self.inbuf += b
def handle(self):
self.fill()
#log('inbuf is: (%d,%d) %r\n'
# log('inbuf is: (%d,%d) %r\n'
# % (self.want, len(self.inbuf), self.inbuf))
while 1:
if len(self.inbuf) >= (self.want or HDR_LEN):
(s1,s2,channel,cmd,datalen) = \
(s1, s2, channel, cmd, datalen) = \
struct.unpack('!ccHHH', self.inbuf[:HDR_LEN])
assert(s1 == 'S')
assert(s2 == 'S')
assert(s1 == b'S')
assert(s2 == b'S')
self.want = datalen + HDR_LEN
if self.want and len(self.inbuf) >= self.want:
data = self.inbuf[HDR_LEN:self.want]
@ -432,8 +467,8 @@ class Mux(Handler):
if self.outbuf:
_add(w, self.wsock)
def callback(self):
(r,w,x) = select.select([self.rsock], [self.wsock], [], 0)
def callback(self, sock):
(r, w, x) = select.select([self.rsock], [self.wsock], [], 0)
if self.rsock in r:
self.handle()
if self.outbuf and self.wsock in w:
@ -441,6 +476,7 @@ class Mux(Handler):
class MuxWrapper(SockWrapper):
def __init__(self, mux, channel):
SockWrapper.__init__(self, mux.rsock, mux.wsock)
self.mux = mux
@ -454,22 +490,25 @@ class MuxWrapper(SockWrapper):
SockWrapper.__del__(self)
def __repr__(self):
return 'SW%r:Mux#%d' % (self.peername,self.channel)
return 'SW%r:Mux#%d' % (self.peername, self.channel)
def noread(self):
if not self.shut_read:
debug2('%r: done reading\n' % self)
self.shut_read = True
self.mux.send(self.channel, CMD_STOP_SENDING, '')
self.mux.send(self.channel, CMD_TCP_STOP_SENDING, b'')
self.maybe_close()
def nowrite(self):
if not self.shut_write:
debug2('%r: done writing\n' % self)
self.shut_write = True
self.mux.send(self.channel, CMD_EOF, '')
self.mux.send(self.channel, CMD_TCP_EOF, b'')
self.maybe_close()
def maybe_close(self):
if self.shut_read and self.shut_write:
debug2('%r: closing connection\n' % self)
# remove the mux's reference to us. The python garbage collector
# will then be able to reap our object.
self.mux.channels[self.channel] = None
@ -482,59 +521,59 @@ class MuxWrapper(SockWrapper):
return 0 # too much already enqueued
if len(buf) > 2048:
buf = buf[:2048]
self.mux.send(self.channel, CMD_DATA, buf)
self.mux.send(self.channel, CMD_TCP_DATA, buf)
return len(buf)
def uread(self):
if self.shut_read:
return '' # EOF
return b'' # EOF
else:
return None # no data available right now
def got_packet(self, cmd, data):
if cmd == CMD_EOF:
if cmd == CMD_TCP_EOF:
self.noread()
elif cmd == CMD_STOP_SENDING:
elif cmd == CMD_TCP_STOP_SENDING:
self.nowrite()
elif cmd == CMD_DATA:
elif cmd == CMD_TCP_DATA:
self.buf.append(data)
else:
raise Exception('unknown command %d (%d bytes)'
raise Exception('unknown command %d (%d bytes)'
% (cmd, len(data)))
def connect_dst(ip, port):
def connect_dst(family, ip, port):
debug2('Connecting to %s:%d\n' % (ip, port))
outsock = socket.socket()
outsock = socket.socket(family)
outsock.setsockopt(socket.SOL_IP, socket.IP_TTL, 42)
return SockWrapper(outsock, outsock,
connect_to = (ip,port),
peername = '%s:%d' % (ip,port))
connect_to=(ip, port),
peername = '%s:%d' % (ip, port))
def runonce(handlers, mux):
r = []
w = []
x = []
to_remove = filter(lambda s: not s.ok, handlers)
to_remove = [s for s in handlers if not s.ok]
for h in to_remove:
handlers.remove(h)
for s in handlers:
s.pre_select(r,w,x)
debug2('Waiting: %d r=%r w=%r x=%r (fullness=%d/%d)\n'
% (len(handlers), _fds(r), _fds(w), _fds(x),
s.pre_select(r, w, x)
debug2('Waiting: %d r=%r w=%r x=%r (fullness=%d/%d)\n'
% (len(handlers), _fds(r), _fds(w), _fds(x),
mux.fullness, mux.too_full))
(r,w,x) = select.select(r,w,x)
debug2(' Ready: %d r=%r w=%r x=%r\n'
% (len(handlers), _fds(r), _fds(w), _fds(x)))
ready = r+w+x
(r, w, x) = select.select(r, w, x)
debug2(' Ready: %d r=%r w=%r x=%r\n'
% (len(handlers), _fds(r), _fds(w), _fds(x)))
ready = r + w + x
did = {}
for h in handlers:
for s in h.socks:
if s in ready:
h.callback()
h.callback(s)
did[s] = 1
for s in ready:
if not s in did:
if s not in did:
raise Fatal('socket %r was not used by any handler' % s)
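
To make the framing that Mux.send() and Mux.handle() above agree on explicit: every packet is an 8-byte '!ccHHH' header (two literal b'S' bytes, the channel number, the command code, and the payload length) followed by the payload. A minimal, self-contained sketch of that round trip:

import struct

HDR_LEN = 8
CMD_PING = 0x4201

def mux_pack(channel, cmd, data):
    assert isinstance(data, bytes) and len(data) <= 65535
    return struct.pack('!ccHHH', b'S', b'S', channel, cmd, len(data)) + data

def mux_unpack(packet):
    s1, s2, channel, cmd, datalen = struct.unpack('!ccHHH', packet[:HDR_LEN])
    assert s1 == b'S' and s2 == b'S'
    return channel, cmd, packet[HDR_LEN:HDR_LEN + datalen]

# mux_unpack(mux_pack(0, CMD_PING, b'chicken'))  ->  (0, 0x4201, b'chicken')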

View File

@ -1,8 +1,11 @@
import sys, os
from compat import ssubprocess
import sys
import os
import subprocess as ssubprocess
_p = None
def start_syslog():
global _p
_p = ssubprocess.Popen(['logger',

View File

@ -1,5 +1,8 @@
#!/usr/bin/env python
import sys, os, socket, select, struct, time
import socket
import select
import struct
import time
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
@ -22,63 +25,63 @@ while 1:
count += 1
if count >= 16384:
count = 1
print 'cli CREATING %d' % count
b = struct.pack('I', count) + 'x'*count
print('cli CREATING %d' % count)
b = struct.pack('I', count) + 'x' * count
remain[c] = count
print 'cli >> %r' % len(b)
print('cli >> %r' % len(b))
c.send(b)
c.shutdown(socket.SHUT_WR)
clients.append(c)
r = [listener]
time.sleep(0.1)
else:
r = [listener]+servers+clients
print 'select(%d)' % len(r)
r,w,x = select.select(r, [], [], 5)
r = [listener] + servers + clients
print('select(%d)' % len(r))
r, w, x = select.select(r, [], [], 5)
assert(r)
for i in r:
if i == listener:
s,addr = listener.accept()
s, addr = listener.accept()
servers.append(s)
elif i in servers:
b = i.recv(4096)
print 'srv << %r' % len(b)
if not i in remain:
print('srv << %r' % len(b))
if i not in remain:
assert(len(b) >= 4)
want = struct.unpack('I', b[:4])[0]
b = b[4:]
#i.send('y'*want)
# i.send('y'*want)
else:
want = remain[i]
if want < len(b):
print 'weird wanted %d bytes, got %d: %r' % (want, len(b), b)
print('weird wanted %d bytes, got %d: %r' % (want, len(b), b))
assert(want >= len(b))
want -= len(b)
remain[i] = want
if not b: # EOF
if want:
print 'weird: eof but wanted %d more' % want
print('weird: eof but wanted %d more' % want)
assert(want == 0)
i.close()
servers.remove(i)
del remain[i]
else:
print 'srv >> %r' % len(b)
i.send('y'*len(b))
print('srv >> %r' % len(b))
i.send('y' * len(b))
if not want:
i.shutdown(socket.SHUT_WR)
elif i in clients:
b = i.recv(4096)
print 'cli << %r' % len(b)
print('cli << %r' % len(b))
want = remain[i]
if want < len(b):
print 'weird wanted %d bytes, got %d: %r' % (want, len(b), b)
print('weird wanted %d bytes, got %d: %r' % (want, len(b), b))
assert(want >= len(b))
want -= len(b)
remain[i] = want
if not b: # EOF
if want:
print 'weird: eof but wanted %d more' % want
print('weird: eof but wanted %d more' % want)
assert(want == 0)
i.close()
clients.remove(i)

View File

@ -0,0 +1,102 @@
from mock import Mock, patch, call
import io
import sshuttle.firewall
def setup_daemon():
stdin = io.StringIO(u"""ROUTES
2,24,0,1.2.3.0
2,32,1,1.2.3.66
10,64,0,2404:6800:4004:80c::
10,128,1,2404:6800:4004:80c::101f
NSLIST
2,1.2.3.33
10,2404:6800:4004:80c::33
PORTS 1024,1025,1026,1027
GO 1
HOST 1.2.3.3,existing
""")
stdout = Mock()
return stdin, stdout
def test_rewrite_etc_hosts(tmpdir):
orig_hosts = tmpdir.join("hosts.orig")
orig_hosts.write("1.2.3.3 existing\n")
new_hosts = tmpdir.join("hosts")
orig_hosts.copy(new_hosts)
hostmap = {
'myhost': '1.2.3.4',
'myotherhost': '1.2.3.5',
}
with patch('sshuttle.firewall.HOSTSFILE', new=str(new_hosts)):
sshuttle.firewall.rewrite_etc_hosts(hostmap, 10)
with new_hosts.open() as f:
line = f.readline()
s = line.split()
assert s == ['1.2.3.3', 'existing']
line = f.readline()
s = line.split()
assert s == ['1.2.3.4', 'myhost',
'#', 'sshuttle-firewall-10', 'AUTOCREATED']
line = f.readline()
s = line.split()
assert s == ['1.2.3.5', 'myotherhost',
'#', 'sshuttle-firewall-10', 'AUTOCREATED']
line = f.readline()
assert line == ""
with patch('sshuttle.firewall.HOSTSFILE', new=str(new_hosts)):
sshuttle.firewall.restore_etc_hosts(10)
assert orig_hosts.computehash() == new_hosts.computehash()
@patch('sshuttle.firewall.rewrite_etc_hosts')
@patch('sshuttle.firewall.setup_daemon')
@patch('sshuttle.firewall.get_method')
def test_main(mock_get_method, mock_setup_daemon, mock_rewrite_etc_hosts):
stdin, stdout = setup_daemon()
mock_setup_daemon.return_value = stdin, stdout
mock_get_method("not_auto").name = "test"
mock_get_method.reset_mock()
sshuttle.firewall.main("not_auto", False)
assert mock_rewrite_etc_hosts.mock_calls == [
call({'1.2.3.3': 'existing'}, 1024),
call({}, 1024),
]
assert stdout.mock_calls == [
call.write('READY test\n'),
call.flush(),
call.write('STARTED\n'),
call.flush()
]
assert mock_setup_daemon.mock_calls == [call()]
assert mock_get_method.mock_calls == [
call('not_auto'),
call().setup_firewall(
1024, 1026,
[(10, u'2404:6800:4004:80c::33')],
10,
[(10, 64, False, u'2404:6800:4004:80c::'),
(10, 128, True, u'2404:6800:4004:80c::101f')],
True),
call().setup_firewall(
1025, 1027,
[(2, u'1.2.3.33')],
2,
[(2, 24, False, u'1.2.3.0'), (2, 32, True, u'1.2.3.66')],
True),
call().restore_firewall(1024, 10, True),
call().restore_firewall(1025, 2, True),
]

View File

@ -0,0 +1,191 @@
from mock import patch, call
import sys
import io
import socket
import sshuttle.helpers
@patch('sshuttle.helpers.logprefix', new='prefix: ')
@patch('sshuttle.helpers.sys.stdout')
@patch('sshuttle.helpers.sys.stderr')
def test_log(mock_stderr, mock_stdout):
sshuttle.helpers.log("message")
sshuttle.helpers.log("abc")
sshuttle.helpers.log("message 1\n")
sshuttle.helpers.log("message 2\nline2\nline3\n")
sshuttle.helpers.log("message 3\nline2\nline3")
assert mock_stdout.mock_calls == [
call.flush(),
call.flush(),
call.flush(),
call.flush(),
call.flush(),
]
assert mock_stderr.mock_calls == [
call.write('prefix: message'),
call.flush(),
call.write('prefix: abc'),
call.flush(),
call.write('prefix: message 1\n'),
call.flush(),
call.write('prefix: message 2\n'),
call.write('---> line2\n'),
call.write('---> line3\n'),
call.flush(),
call.write('prefix: message 3\n'),
call.write('---> line2\n'),
call.write('---> line3\n'),
call.flush(),
]
@patch('sshuttle.helpers.logprefix', new='prefix: ')
@patch('sshuttle.helpers.verbose', new=1)
@patch('sshuttle.helpers.sys.stdout')
@patch('sshuttle.helpers.sys.stderr')
def test_debug1(mock_stderr, mock_stdout):
sshuttle.helpers.debug1("message")
assert mock_stdout.mock_calls == [
call.flush(),
]
assert mock_stderr.mock_calls == [
call.write('prefix: message'),
call.flush(),
]
@patch('sshuttle.helpers.logprefix', new='prefix: ')
@patch('sshuttle.helpers.verbose', new=0)
@patch('sshuttle.helpers.sys.stdout')
@patch('sshuttle.helpers.sys.stderr')
def test_debug1_nop(mock_stderr, mock_stdout):
sshuttle.helpers.debug1("message")
assert mock_stdout.mock_calls == []
assert mock_stderr.mock_calls == []
@patch('sshuttle.helpers.logprefix', new='prefix: ')
@patch('sshuttle.helpers.verbose', new=2)
@patch('sshuttle.helpers.sys.stdout')
@patch('sshuttle.helpers.sys.stderr')
def test_debug2(mock_stderr, mock_stdout):
sshuttle.helpers.debug2("message")
assert mock_stdout.mock_calls == [
call.flush(),
]
assert mock_stderr.mock_calls == [
call.write('prefix: message'),
call.flush(),
]
@patch('sshuttle.helpers.logprefix', new='prefix: ')
@patch('sshuttle.helpers.verbose', new=1)
@patch('sshuttle.helpers.sys.stdout')
@patch('sshuttle.helpers.sys.stderr')
def test_debug2_nop(mock_stderr, mock_stdout):
sshuttle.helpers.debug2("message")
assert mock_stdout.mock_calls == []
assert mock_stderr.mock_calls == []
@patch('sshuttle.helpers.logprefix', new='prefix: ')
@patch('sshuttle.helpers.verbose', new=3)
@patch('sshuttle.helpers.sys.stdout')
@patch('sshuttle.helpers.sys.stderr')
def test_debug3(mock_stderr, mock_stdout):
sshuttle.helpers.debug3("message")
assert mock_stdout.mock_calls == [
call.flush(),
]
assert mock_stderr.mock_calls == [
call.write('prefix: message'),
call.flush(),
]
@patch('sshuttle.helpers.logprefix', new='prefix: ')
@patch('sshuttle.helpers.verbose', new=2)
@patch('sshuttle.helpers.sys.stdout')
@patch('sshuttle.helpers.sys.stderr')
def test_debug3_nop(mock_stderr, mock_stdout):
sshuttle.helpers.debug3("message")
assert mock_stdout.mock_calls == []
assert mock_stderr.mock_calls == []
@patch('sshuttle.helpers.open', create=True)
def test_resolvconf_nameservers(mock_open):
mock_open.return_value = io.StringIO(u"""
# Generated by NetworkManager
search pri
nameserver 192.168.1.1
nameserver 192.168.2.1
nameserver 192.168.3.1
nameserver 192.168.4.1
nameserver 2404:6800:4004:80c::1
nameserver 2404:6800:4004:80c::2
nameserver 2404:6800:4004:80c::3
nameserver 2404:6800:4004:80c::4
""")
ns = sshuttle.helpers.resolvconf_nameservers()
assert ns == [
(2, u'192.168.1.1'), (2, u'192.168.2.1'),
(2, u'192.168.3.1'), (2, u'192.168.4.1'),
(10, u'2404:6800:4004:80c::1'), (10, u'2404:6800:4004:80c::2'),
(10, u'2404:6800:4004:80c::3'), (10, u'2404:6800:4004:80c::4')
]
@patch('sshuttle.helpers.open', create=True)
def test_resolvconf_random_nameserver(mock_open):
mock_open.return_value = io.StringIO(u"""
# Generated by NetworkManager
search pri
nameserver 192.168.1.1
nameserver 192.168.2.1
nameserver 192.168.3.1
nameserver 192.168.4.1
nameserver 2404:6800:4004:80c::1
nameserver 2404:6800:4004:80c::2
nameserver 2404:6800:4004:80c::3
nameserver 2404:6800:4004:80c::4
""")
ns = sshuttle.helpers.resolvconf_random_nameserver()
assert ns in [
(2, u'192.168.1.1'), (2, u'192.168.2.1'),
(2, u'192.168.3.1'), (2, u'192.168.4.1'),
(10, u'2404:6800:4004:80c::1'), (10, u'2404:6800:4004:80c::2'),
(10, u'2404:6800:4004:80c::3'), (10, u'2404:6800:4004:80c::4')
]
def test_islocal():
assert sshuttle.helpers.islocal("127.0.0.1", socket.AF_INET)
assert not sshuttle.helpers.islocal("192.0.2.1", socket.AF_INET)
assert sshuttle.helpers.islocal("::1", socket.AF_INET6)
assert not sshuttle.helpers.islocal("2001:db8::1", socket.AF_INET6)
def test_family_ip_tuple():
assert sshuttle.helpers.family_ip_tuple("127.0.0.1") \
== (socket.AF_INET, "127.0.0.1")
assert sshuttle.helpers.family_ip_tuple("192.168.2.6") \
== (socket.AF_INET, "192.168.2.6")
assert sshuttle.helpers.family_ip_tuple("::1") \
== (socket.AF_INET6, "::1")
assert sshuttle.helpers.family_ip_tuple("2404:6800:4004:80c::1") \
== (socket.AF_INET6, "2404:6800:4004:80c::1")
def test_family_to_string():
assert sshuttle.helpers.family_to_string(socket.AF_INET) == "AF_INET"
assert sshuttle.helpers.family_to_string(socket.AF_INET6) == "AF_INET6"
if sys.version_info < (3, 0):
expected = "1"
assert sshuttle.helpers.family_to_string(socket.AF_UNIX) == "1"
else:
expected = 'AddressFamily.AF_UNIX'
assert sshuttle.helpers.family_to_string(socket.AF_UNIX) == expected

View File

@ -0,0 +1,155 @@
import pytest
from mock import Mock, patch, call
import socket
import struct
from sshuttle.helpers import Fatal
from sshuttle.methods import get_method
def test_get_supported_features():
method = get_method('nat')
features = method.get_supported_features()
assert not features.ipv6
assert not features.udp
assert features.dns
def test_get_tcp_dstip():
sock = Mock()
sock.getsockopt.return_value = struct.pack(
'!HHBBBB', socket.ntohs(socket.AF_INET), 1024, 127, 0, 0, 1)
method = get_method('nat')
assert method.get_tcp_dstip(sock) == ('127.0.0.1', 1024)
assert sock.mock_calls == [call.getsockopt(0, 80, 16)]
def test_recv_udp():
sock = Mock()
sock.recvfrom.return_value = "11111", "127.0.0.1"
method = get_method('nat')
result = method.recv_udp(sock, 1024)
assert sock.mock_calls == [call.recvfrom(1024)]
assert result == ("127.0.0.1", None, "11111")
def test_send_udp():
sock = Mock()
method = get_method('nat')
method.send_udp(sock, None, "127.0.0.1", "22222")
assert sock.mock_calls == [call.sendto("22222", "127.0.0.1")]
def test_setup_tcp_listener():
listener = Mock()
method = get_method('nat')
method.setup_tcp_listener(listener)
assert listener.mock_calls == []
def test_setup_udp_listener():
listener = Mock()
method = get_method('nat')
method.setup_udp_listener(listener)
assert listener.mock_calls == []
def test_assert_features():
method = get_method('nat')
features = method.get_supported_features()
method.assert_features(features)
features.udp = True
with pytest.raises(Fatal):
method.assert_features(features)
features.ipv6 = True
with pytest.raises(Fatal):
method.assert_features(features)
def test_firewall_command():
method = get_method('nat')
assert not method.firewall_command("somthing")
@patch('sshuttle.methods.nat.ipt')
@patch('sshuttle.methods.nat.ipt_ttl')
@patch('sshuttle.methods.nat.ipt_chain_exists')
def test_setup_firewall(mock_ipt_chain_exists, mock_ipt_ttl, mock_ipt):
mock_ipt_chain_exists.return_value = True
method = get_method('nat')
assert method.name == 'nat'
with pytest.raises(Exception) as excinfo:
method.setup_firewall(
1024, 1026,
[(10, u'2404:6800:4004:80c::33')],
10,
[(10, 64, False, u'2404:6800:4004:80c::'),
(10, 128, True, u'2404:6800:4004:80c::101f')],
True)
assert str(excinfo.value) \
== 'Address family "AF_INET6" unsupported by nat method_name'
assert mock_ipt_chain_exists.mock_calls == []
assert mock_ipt_ttl.mock_calls == []
assert mock_ipt.mock_calls == []
with pytest.raises(Exception) as excinfo:
method.setup_firewall(
1025, 1027,
[(2, u'1.2.3.33')],
2,
[(2, 24, False, u'1.2.3.0'), (2, 32, True, u'1.2.3.66')],
True)
assert str(excinfo.value) == 'UDP not supported by nat method_name'
assert mock_ipt_chain_exists.mock_calls == []
assert mock_ipt_ttl.mock_calls == []
assert mock_ipt.mock_calls == []
method.setup_firewall(
1025, 1027,
[(2, u'1.2.3.33')],
2,
[(2, 24, False, u'1.2.3.0'), (2, 32, True, u'1.2.3.66')],
False)
assert mock_ipt_chain_exists.mock_calls == [
call(2, 'nat', 'sshuttle-1025')
]
assert mock_ipt_ttl.mock_calls == [
call(2, 'nat', '-A', 'sshuttle-1025', '-j', 'REDIRECT',
'--dest', u'1.2.3.0/24', '-p', 'tcp', '--to-ports', '1025'),
call(2, 'nat', '-A', 'sshuttle-1025', '-j', 'REDIRECT',
'--dest', u'1.2.3.33/32', '-p', 'udp',
'--dport', '53', '--to-ports', '1027')
]
assert mock_ipt.mock_calls == [
call(2, 'nat', '-D', 'OUTPUT', '-j', 'sshuttle-1025'),
call(2, 'nat', '-D', 'PREROUTING', '-j', 'sshuttle-1025'),
call(2, 'nat', '-F', 'sshuttle-1025'),
call(2, 'nat', '-X', 'sshuttle-1025'),
call(2, 'nat', '-N', 'sshuttle-1025'),
call(2, 'nat', '-F', 'sshuttle-1025'),
call(2, 'nat', '-I', 'OUTPUT', '1', '-j', 'sshuttle-1025'),
call(2, 'nat', '-I', 'PREROUTING', '1', '-j', 'sshuttle-1025'),
call(2, 'nat', '-A', 'sshuttle-1025', '-j', 'RETURN',
'--dest', u'1.2.3.66/32', '-p', 'tcp')
]
mock_ipt_chain_exists.reset_mock()
mock_ipt_ttl.reset_mock()
mock_ipt.reset_mock()
method.restore_firewall(1025, 2, False)
assert mock_ipt_chain_exists.mock_calls == [
call(2, 'nat', 'sshuttle-1025')
]
assert mock_ipt_ttl.mock_calls == []
assert mock_ipt.mock_calls == [
call(2, 'nat', '-D', 'OUTPUT', '-j', 'sshuttle-1025'),
call(2, 'nat', '-D', 'PREROUTING', '-j', 'sshuttle-1025'),
call(2, 'nat', '-F', 'sshuttle-1025'),
call(2, 'nat', '-X', 'sshuttle-1025')
]
mock_ipt_chain_exists.reset_mock()
mock_ipt_ttl.reset_mock()
mock_ipt.reset_mock()

View File

@ -0,0 +1,403 @@
import pytest
from mock import Mock, patch, call, ANY
import socket
from sshuttle.methods import get_method
from sshuttle.helpers import Fatal
from sshuttle.methods.pf import FreeBsd, Darwin, OpenBsd
def test_get_supported_features():
method = get_method('pf')
features = method.get_supported_features()
assert not features.ipv6
assert not features.udp
assert features.dns
@patch('sshuttle.helpers.verbose', new=3)
def test_get_tcp_dstip():
sock = Mock()
sock.getpeername.return_value = ("127.0.0.1", 1024)
sock.getsockname.return_value = ("127.0.0.2", 1025)
sock.family = socket.AF_INET
firewall = Mock()
firewall.pfile.readline.return_value = \
b"QUERY_PF_NAT_SUCCESS 127.0.0.3,1026\n"
method = get_method('pf')
method.set_firewall(firewall)
assert method.get_tcp_dstip(sock) == ('127.0.0.3', 1026)
assert sock.mock_calls == [
call.getpeername(),
call.getsockname(),
]
assert firewall.mock_calls == [
call.pfile.write(b'QUERY_PF_NAT 2,6,127.0.0.1,1024,127.0.0.2,1025\n'),
call.pfile.flush(),
call.pfile.readline()
]
def test_recv_udp():
sock = Mock()
sock.recvfrom.return_value = "11111", "127.0.0.1"
method = get_method('pf')
result = method.recv_udp(sock, 1024)
assert sock.mock_calls == [call.recvfrom(1024)]
assert result == ("127.0.0.1", None, "11111")
def test_send_udp():
sock = Mock()
method = get_method('pf')
method.send_udp(sock, None, "127.0.0.1", "22222")
assert sock.mock_calls == [call.sendto("22222", "127.0.0.1")]
def test_setup_tcp_listener():
listener = Mock()
method = get_method('pf')
method.setup_tcp_listener(listener)
assert listener.mock_calls == []
def test_setup_udp_listener():
listener = Mock()
method = get_method('pf')
method.setup_udp_listener(listener)
assert listener.mock_calls == []
def test_assert_features():
method = get_method('pf')
features = method.get_supported_features()
method.assert_features(features)
features.udp = True
with pytest.raises(Fatal):
method.assert_features(features)
features.ipv6 = True
with pytest.raises(Fatal):
method.assert_features(features)
@patch('sshuttle.methods.pf.pf', Darwin())
@patch('sshuttle.methods.pf.sys.stdout')
@patch('sshuttle.methods.pf.ioctl')
@patch('sshuttle.methods.pf.pf_get_dev')
def test_firewall_command_darwin(mock_pf_get_dev, mock_ioctl, mock_stdout):
method = get_method('pf')
assert not method.firewall_command("somthing")
command = "QUERY_PF_NAT %d,%d,%s,%d,%s,%d\n" % (
socket.AF_INET, socket.IPPROTO_TCP,
"127.0.0.1", 1025, "127.0.0.2", 1024)
assert method.firewall_command(command)
assert mock_pf_get_dev.mock_calls == [call()]
assert mock_ioctl.mock_calls == [
call(mock_pf_get_dev(), 0xc0544417, ANY),
]
assert mock_stdout.mock_calls == [
call.write('QUERY_PF_NAT_SUCCESS 0.0.0.0,0\n'),
call.flush(),
]
@patch('sshuttle.methods.pf.pf', FreeBsd())
@patch('sshuttle.methods.pf.sys.stdout')
@patch('sshuttle.methods.pf.ioctl')
@patch('sshuttle.methods.pf.pf_get_dev')
def test_firewall_command_freebsd(mock_pf_get_dev, mock_ioctl, mock_stdout):
method = get_method('pf')
assert not method.firewall_command("somthing")
command = "QUERY_PF_NAT %d,%d,%s,%d,%s,%d\n" % (
socket.AF_INET, socket.IPPROTO_TCP,
"127.0.0.1", 1025, "127.0.0.2", 1024)
assert method.firewall_command(command)
assert mock_pf_get_dev.mock_calls == [call()]
assert mock_ioctl.mock_calls == [
call(mock_pf_get_dev(), 0xc04c4417, ANY),
]
assert mock_stdout.mock_calls == [
call.write('QUERY_PF_NAT_SUCCESS 0.0.0.0,0\n'),
call.flush(),
]
@patch('sshuttle.methods.pf.pf', OpenBsd())
@patch('sshuttle.methods.pf.sys.stdout')
@patch('sshuttle.methods.pf.ioctl')
@patch('sshuttle.methods.pf.pf_get_dev')
def test_firewall_command_openbsd(mock_pf_get_dev, mock_ioctl, mock_stdout):
method = get_method('pf')
assert not method.firewall_command("somthing")
command = "QUERY_PF_NAT %d,%d,%s,%d,%s,%d\n" % (
socket.AF_INET, socket.IPPROTO_TCP,
"127.0.0.1", 1025, "127.0.0.2", 1024)
assert method.firewall_command(command)
assert mock_pf_get_dev.mock_calls == [call()]
assert mock_ioctl.mock_calls == [
call(mock_pf_get_dev(), 0xc0504417, ANY),
]
assert mock_stdout.mock_calls == [
call.write('QUERY_PF_NAT_SUCCESS 0.0.0.0,0\n'),
call.flush(),
]
def pfctl(args, stdin=None):
if args == '-s all':
return (b'INFO:\nStatus: Disabled\nanother mary had a little lamb\n',
b'little lamb\n')
if args == '-E':
return (b'\n', b'Token : abcdefg\n')
return None
@patch('sshuttle.helpers.verbose', new=3)
@patch('sshuttle.methods.pf.pf', Darwin())
@patch('sshuttle.methods.pf.pfctl')
@patch('sshuttle.methods.pf.ioctl')
@patch('sshuttle.methods.pf.pf_get_dev')
def test_setup_firewall_darwin(mock_pf_get_dev, mock_ioctl, mock_pfctl):
mock_pfctl.side_effect = pfctl
method = get_method('pf')
assert method.name == 'pf'
with pytest.raises(Exception) as excinfo:
method.setup_firewall(
1024, 1026,
[(10, u'2404:6800:4004:80c::33')],
10,
[(10, 64, False, u'2404:6800:4004:80c::'),
(10, 128, True, u'2404:6800:4004:80c::101f')],
True)
assert str(excinfo.value) \
== 'Address family "AF_INET6" unsupported by pf method_name'
assert mock_pf_get_dev.mock_calls == []
assert mock_ioctl.mock_calls == []
assert mock_pfctl.mock_calls == []
with pytest.raises(Exception) as excinfo:
method.setup_firewall(
1025, 1027,
[(2, u'1.2.3.33')],
2,
[(2, 24, False, u'1.2.3.0'), (2, 32, True, u'1.2.3.66')],
True)
assert str(excinfo.value) == 'UDP not supported by pf method_name'
assert mock_pf_get_dev.mock_calls == []
assert mock_ioctl.mock_calls == []
assert mock_pfctl.mock_calls == []
method.setup_firewall(
1025, 1027,
[(2, u'1.2.3.33')],
2,
[(2, 24, False, u'1.2.3.0'), (2, 32, True, u'1.2.3.66')],
False)
assert mock_ioctl.mock_calls == [
call(mock_pf_get_dev(), 0xC4704433, ANY),
call(mock_pf_get_dev(), 0xCC20441A, ANY),
call(mock_pf_get_dev(), 0xCC20441A, ANY),
call(mock_pf_get_dev(), 0xC4704433, ANY),
call(mock_pf_get_dev(), 0xCC20441A, ANY),
call(mock_pf_get_dev(), 0xCC20441A, ANY),
]
assert mock_pfctl.mock_calls == [
call('-f /dev/stdin', b'pass on lo\n'),
call('-s all'),
call('-a sshuttle -f /dev/stdin',
b'table <forward_subnets> {!1.2.3.66/32,1.2.3.0/24}\n'
b'table <dns_servers> {1.2.3.33}\n'
b'rdr pass on lo0 proto tcp '
b'to <forward_subnets> -> 127.0.0.1 port 1025\n'
b'rdr pass on lo0 proto udp '
b'to <dns_servers> port 53 -> 127.0.0.1 port 1027\n'
b'pass out route-to lo0 inet proto tcp '
b'to <forward_subnets> keep state\n'
b'pass out route-to lo0 inet proto udp '
b'to <dns_servers> port 53 keep state\n'),
call('-E'),
]
mock_pf_get_dev.reset_mock()
mock_ioctl.reset_mock()
mock_pfctl.reset_mock()
method.restore_firewall(1025, 2, False)
assert mock_ioctl.mock_calls == []
assert mock_pfctl.mock_calls == [
call('-a sshuttle -F all'),
call("-X abcdefg"),
]
mock_pf_get_dev.reset_mock()
mock_pfctl.reset_mock()
mock_ioctl.reset_mock()
@patch('sshuttle.helpers.verbose', new=3)
@patch('sshuttle.methods.pf.pf', FreeBsd())
@patch('sshuttle.methods.pf.pfctl')
@patch('sshuttle.methods.pf.ioctl')
@patch('sshuttle.methods.pf.pf_get_dev')
def test_setup_firewall_freebsd(mock_pf_get_dev, mock_ioctl, mock_pfctl):
mock_pfctl.side_effect = pfctl
method = get_method('pf')
assert method.name == 'pf'
with pytest.raises(Exception) as excinfo:
method.setup_firewall(
1024, 1026,
[(10, u'2404:6800:4004:80c::33')],
10,
[(10, 64, False, u'2404:6800:4004:80c::'),
(10, 128, True, u'2404:6800:4004:80c::101f')],
True)
assert str(excinfo.value) \
== 'Address family "AF_INET6" unsupported by pf method_name'
assert mock_pf_get_dev.mock_calls == []
assert mock_ioctl.mock_calls == []
assert mock_pfctl.mock_calls == []
with pytest.raises(Exception) as excinfo:
method.setup_firewall(
1025, 1027,
[(2, u'1.2.3.33')],
2,
[(2, 24, False, u'1.2.3.0'), (2, 32, True, u'1.2.3.66')],
True)
assert str(excinfo.value) == 'UDP not supported by pf method_name'
assert mock_pf_get_dev.mock_calls == []
assert mock_ioctl.mock_calls == []
assert mock_pfctl.mock_calls == []
method.setup_firewall(
1025, 1027,
[(2, u'1.2.3.33')],
2,
[(2, 24, False, u'1.2.3.0'), (2, 32, True, u'1.2.3.66')],
False)
assert mock_ioctl.mock_calls == [
call(mock_pf_get_dev(), 0xC4704433, ANY),
call(mock_pf_get_dev(), 0xCBE0441A, ANY),
call(mock_pf_get_dev(), 0xCBE0441A, ANY),
call(mock_pf_get_dev(), 0xC4704433, ANY),
call(mock_pf_get_dev(), 0xCBE0441A, ANY),
call(mock_pf_get_dev(), 0xCBE0441A, ANY),
]
assert mock_pfctl.mock_calls == [
call('-s all'),
call('-a sshuttle -f /dev/stdin',
b'table <forward_subnets> {!1.2.3.66/32,1.2.3.0/24}\n'
b'table <dns_servers> {1.2.3.33}\n'
b'rdr pass on lo0 proto tcp '
b'to <forward_subnets> -> 127.0.0.1 port 1025\n'
b'rdr pass on lo0 proto udp '
b'to <dns_servers> port 53 -> 127.0.0.1 port 1027\n'
b'pass out route-to lo0 inet proto tcp '
b'to <forward_subnets> keep state\n'
b'pass out route-to lo0 inet proto udp '
b'to <dns_servers> port 53 keep state\n'),
call('-e'),
]
mock_pf_get_dev.reset_mock()
mock_ioctl.reset_mock()
mock_pfctl.reset_mock()
method.restore_firewall(1025, 2, False)
assert mock_ioctl.mock_calls == []
assert mock_pfctl.mock_calls == [
call('-a sshuttle -F all'),
call("-d"),
]
mock_pf_get_dev.reset_mock()
mock_pfctl.reset_mock()
mock_ioctl.reset_mock()
@patch('sshuttle.helpers.verbose', new=3)
@patch('sshuttle.methods.pf.pf', OpenBsd())
@patch('sshuttle.methods.pf.pfctl')
@patch('sshuttle.methods.pf.ioctl')
@patch('sshuttle.methods.pf.pf_get_dev')
def test_setup_firewall_openbsd(mock_pf_get_dev, mock_ioctl, mock_pfctl):
mock_pfctl.side_effect = pfctl
method = get_method('pf')
assert method.name == 'pf'
with pytest.raises(Exception) as excinfo:
method.setup_firewall(
1024, 1026,
[(10, u'2404:6800:4004:80c::33')],
10,
[(10, 64, False, u'2404:6800:4004:80c::'),
(10, 128, True, u'2404:6800:4004:80c::101f')],
True)
assert str(excinfo.value) \
== 'Address family "AF_INET6" unsupported by pf method_name'
assert mock_pf_get_dev.mock_calls == []
assert mock_ioctl.mock_calls == []
assert mock_pfctl.mock_calls == []
with pytest.raises(Exception) as excinfo:
method.setup_firewall(
1025, 1027,
[(2, u'1.2.3.33')],
2,
[(2, 24, False, u'1.2.3.0'), (2, 32, True, u'1.2.3.66')],
True)
assert str(excinfo.value) == 'UDP not supported by pf method_name'
assert mock_pf_get_dev.mock_calls == []
assert mock_ioctl.mock_calls == []
assert mock_pfctl.mock_calls == []
method.setup_firewall(
1025, 1027,
[(2, u'1.2.3.33')],
2,
[(2, 24, False, u'1.2.3.0'), (2, 32, True, u'1.2.3.66')],
False)
assert mock_ioctl.mock_calls == [
call(mock_pf_get_dev(), 0xcd48441a, ANY),
call(mock_pf_get_dev(), 0xcd48441a, ANY),
]
assert mock_pfctl.mock_calls == [
call('-f /dev/stdin', b'match on lo\n'),
call('-s all'),
call('-a sshuttle -f /dev/stdin',
b'table <forward_subnets> {!1.2.3.66/32,1.2.3.0/24}\n'
b'table <dns_servers> {1.2.3.33}\n'
b'pass in on lo0 inet proto tcp divert-to 127.0.0.1 port 1025\n'
b'pass in on lo0 inet proto udp to '
b'<dns_servers>port 53 rdr-to 127.0.0.1 port 1027\n'
b'pass out inet proto tcp to '
b'<forward_subnets> route-to lo0 keep state\n'
b'pass out inet proto udp to '
b'<dns_servers> port 53 route-to lo0 keep state\n'),
call('-e'),
]
mock_pf_get_dev.reset_mock()
mock_ioctl.reset_mock()
mock_pfctl.reset_mock()
method.restore_firewall(1025, 2, False)
assert mock_ioctl.mock_calls == []
assert mock_pfctl.mock_calls == [
call('-a sshuttle -F all'),
call("-d"),
]
mock_pf_get_dev.reset_mock()
mock_pfctl.reset_mock()
mock_ioctl.reset_mock()
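# For reference, a minimal, illustrative sketch of a pfctl wrapper with the
# call signature these tests assume for the patched sshuttle.methods.pf.pfctl
# (an argument string plus optional stdin bytes, returning the command's
# output); this is a sketch, not necessarily the project's actual helper.
import subprocess
def _pfctl_sketch(args, stdin=None):
    argv = ['pfctl'] + args.split()
    p = subprocess.Popen(argv, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate(stdin)
    if p.returncode:
        raise Exception('pfctl returned exit code %d' % p.returncode)
    return out, err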

View File

@ -0,0 +1,283 @@
from mock import Mock, patch, call
from sshuttle.methods import get_method
@patch("sshuttle.methods.tproxy.recvmsg")
def test_get_supported_features_recvmsg(mock_recvmsg):
method = get_method('tproxy')
features = method.get_supported_features()
assert features.ipv6
assert features.udp
assert features.dns
@patch("sshuttle.methods.tproxy.recvmsg", None)
def test_get_supported_features_norecvmsg():
method = get_method('tproxy')
features = method.get_supported_features()
assert features.ipv6
assert not features.udp
assert not features.dns
def test_get_tcp_dstip():
sock = Mock()
sock.getsockname.return_value = ('127.0.0.1', 1024)
method = get_method('tproxy')
assert method.get_tcp_dstip(sock) == ('127.0.0.1', 1024)
assert sock.mock_calls == [call.getsockname()]
@patch("sshuttle.methods.tproxy.recv_udp")
def test_recv_udp(mock_recv_udp):
mock_recv_udp.return_value = ("127.0.0.1", "127.0.0.2", "11111")
sock = Mock()
method = get_method('tproxy')
result = method.recv_udp(sock, 1024)
assert sock.mock_calls == []
assert mock_recv_udp.mock_calls == [call(sock, 1024)]
assert result == ("127.0.0.1", "127.0.0.2", "11111")
@patch("sshuttle.methods.socket.socket")
def test_send_udp(mock_socket):
sock = Mock()
method = get_method('tproxy')
method.send_udp(sock, "127.0.0.2", "127.0.0.1", "2222222")
assert sock.mock_calls == []
assert mock_socket.mock_calls == [
call(sock.family, 2),
call().setsockopt(1, 2, 1),
call().setsockopt(0, 19, 1),
call().bind('127.0.0.2'),
call().sendto("2222222", '127.0.0.1'),
call().close()
]
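# The raw numbers asserted above are the Linux socket constants these tests
# assume: setsockopt(1, 2, 1) is (SOL_SOCKET, SO_REUSEADDR, 1) and
# setsockopt(0, 19, 1) is (SOL_IP, IP_TRANSPARENT, 1); IP_TRANSPARENT (19)
# is not exported by the socket module on every Python version, hence the
# literal. Similarly, (0, 20, 1) and (41, 74, 1) in the listener tests below
# correspond on Linux to (SOL_IP, IP_RECVORIGDSTADDR, 1) and
# (SOL_IPV6, IPV6_RECVORIGDSTADDR, 1). A hedged, Linux-only sanity check:
import socket
assert (socket.SOL_SOCKET, socket.SO_REUSEADDR, socket.SOL_IP) == (1, 2, 0)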
def test_setup_tcp_listener():
listener = Mock()
method = get_method('tproxy')
method.setup_tcp_listener(listener)
assert listener.mock_calls == [
call.setsockopt(0, 19, 1)
]
def test_setup_udp_listener():
listener = Mock()
method = get_method('tproxy')
method.setup_udp_listener(listener)
assert listener.mock_calls == [
call.setsockopt(0, 19, 1),
call.v4.setsockopt(0, 20, 1),
call.v6.setsockopt(41, 74, 1)
]
def test_assert_features():
method = get_method('tproxy')
features = method.get_supported_features()
method.assert_features(features)
def test_firewall_command():
method = get_method('tproxy')
assert not method.firewall_command("something")
@patch('sshuttle.methods.tproxy.ipt')
@patch('sshuttle.methods.tproxy.ipt_ttl')
@patch('sshuttle.methods.tproxy.ipt_chain_exists')
def test_setup_firewall(mock_ipt_chain_exists, mock_ipt_ttl, mock_ipt):
mock_ipt_chain_exists.return_value = True
method = get_method('tproxy')
assert method.name == 'tproxy'
# IPV6
method.setup_firewall(
1024, 1026,
[(10, u'2404:6800:4004:80c::33')],
10,
[(10, 64, False, u'2404:6800:4004:80c::'),
(10, 128, True, u'2404:6800:4004:80c::101f')],
True)
assert mock_ipt_chain_exists.mock_calls == [
call(10, 'mangle', 'sshuttle-m-1024'),
call(10, 'mangle', 'sshuttle-t-1024'),
call(10, 'mangle', 'sshuttle-d-1024')
]
assert mock_ipt_ttl.mock_calls == []
assert mock_ipt.mock_calls == [
call(10, 'mangle', '-D', 'OUTPUT', '-j', 'sshuttle-m-1024'),
call(10, 'mangle', '-F', 'sshuttle-m-1024'),
call(10, 'mangle', '-X', 'sshuttle-m-1024'),
call(10, 'mangle', '-D', 'PREROUTING', '-j', 'sshuttle-t-1024'),
call(10, 'mangle', '-F', 'sshuttle-t-1024'),
call(10, 'mangle', '-X', 'sshuttle-t-1024'),
call(10, 'mangle', '-F', 'sshuttle-d-1024'),
call(10, 'mangle', '-X', 'sshuttle-d-1024'),
call(10, 'mangle', '-N', 'sshuttle-m-1024'),
call(10, 'mangle', '-F', 'sshuttle-m-1024'),
call(10, 'mangle', '-N', 'sshuttle-d-1024'),
call(10, 'mangle', '-F', 'sshuttle-d-1024'),
call(10, 'mangle', '-N', 'sshuttle-t-1024'),
call(10, 'mangle', '-F', 'sshuttle-t-1024'),
call(10, 'mangle', '-I', 'OUTPUT', '1', '-j', 'sshuttle-m-1024'),
call(10, 'mangle', '-I', 'PREROUTING', '1', '-j', 'sshuttle-t-1024'),
call(10, 'mangle', '-A', 'sshuttle-d-1024', '-j', 'MARK',
'--set-mark', '1'),
call(10, 'mangle', '-A', 'sshuttle-d-1024', '-j', 'ACCEPT'),
call(10, 'mangle', '-A', 'sshuttle-t-1024', '-m', 'socket',
'-j', 'sshuttle-d-1024', '-m', 'tcp', '-p', 'tcp'),
call(10, 'mangle', '-A', 'sshuttle-t-1024', '-m', 'socket',
'-j', 'sshuttle-d-1024', '-m', 'udp', '-p', 'udp'),
call(10, 'mangle', '-A', 'sshuttle-m-1024', '-j', 'MARK',
'--set-mark', '1', '--dest', u'2404:6800:4004:80c::33/32',
'-m', 'udp', '-p', 'udp', '--dport', '53'),
call(10, 'mangle', '-A', 'sshuttle-t-1024', '-j', 'TPROXY',
'--tproxy-mark', '0x1/0x1',
'--dest', u'2404:6800:4004:80c::33/32',
'-m', 'udp', '-p', 'udp', '--dport', '53', '--on-port', '1026'),
call(10, 'mangle', '-A', 'sshuttle-m-1024', '-j', 'RETURN',
'--dest', u'2404:6800:4004:80c::101f/128',
'-m', 'tcp', '-p', 'tcp'),
call(10, 'mangle', '-A', 'sshuttle-t-1024', '-j', 'RETURN',
'--dest', u'2404:6800:4004:80c::101f/128',
'-m', 'tcp', '-p', 'tcp'),
call(10, 'mangle', '-A', 'sshuttle-m-1024', '-j', 'RETURN',
'--dest', u'2404:6800:4004:80c::101f/128',
'-m', 'udp', '-p', 'udp'),
call(10, 'mangle', '-A', 'sshuttle-t-1024', '-j', 'RETURN',
'--dest', u'2404:6800:4004:80c::101f/128',
'-m', 'udp', '-p', 'udp'),
call(10, 'mangle', '-A', 'sshuttle-m-1024', '-j', 'MARK',
'--set-mark', '1', '--dest', u'2404:6800:4004:80c::/64',
'-m', 'tcp', '-p', 'tcp'),
call(10, 'mangle', '-A', 'sshuttle-t-1024', '-j', 'TPROXY',
'--tproxy-mark', '0x1/0x1', '--dest', u'2404:6800:4004:80c::/64',
'-m', 'tcp', '-p', 'tcp', '--on-port', '1024'),
call(10, 'mangle', '-A', 'sshuttle-m-1024', '-j', 'MARK',
'--set-mark', '1', '--dest', u'2404:6800:4004:80c::/64',
'-m', 'udp', '-p', 'udp'),
call(10, 'mangle', '-A', 'sshuttle-t-1024', '-j', 'TPROXY',
'--tproxy-mark', '0x1/0x1', '--dest', u'2404:6800:4004:80c::/64',
'-m', 'udp', '-p', 'udp', '--on-port', '1024')
]
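# The rule set asserted above builds three mangle chains per listener port:
# sshuttle-m-<port> is inserted into OUTPUT and marks matching packets with 1,
# sshuttle-t-<port> is inserted into PREROUTING and TPROXYs matching packets
# to the listener via --on-port, and sshuttle-d-<port> marks and ACCEPTs
# packets that already belong to a local socket (-m socket).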
mock_ipt_chain_exists.reset_mock()
mock_ipt_ttl.reset_mock()
mock_ipt.reset_mock()
method.restore_firewall(1025, 10, True)
assert mock_ipt_chain_exists.mock_calls == [
call(10, 'mangle', 'sshuttle-m-1025'),
call(10, 'mangle', 'sshuttle-t-1025'),
call(10, 'mangle', 'sshuttle-d-1025')
]
assert mock_ipt_ttl.mock_calls == []
assert mock_ipt.mock_calls == [
call(10, 'mangle', '-D', 'OUTPUT', '-j', 'sshuttle-m-1025'),
call(10, 'mangle', '-F', 'sshuttle-m-1025'),
call(10, 'mangle', '-X', 'sshuttle-m-1025'),
call(10, 'mangle', '-D', 'PREROUTING', '-j', 'sshuttle-t-1025'),
call(10, 'mangle', '-F', 'sshuttle-t-1025'),
call(10, 'mangle', '-X', 'sshuttle-t-1025'),
call(10, 'mangle', '-F', 'sshuttle-d-1025'),
call(10, 'mangle', '-X', 'sshuttle-d-1025')
]
mock_ipt_chain_exists.reset_mock()
mock_ipt_ttl.reset_mock()
mock_ipt.reset_mock()
# IPV4
method.setup_firewall(
1025, 1027,
[(2, u'1.2.3.33')],
2,
[(2, 24, False, u'1.2.3.0'), (2, 32, True, u'1.2.3.66')],
True)
assert mock_ipt_chain_exists.mock_calls == [
call(2, 'mangle', 'sshuttle-m-1025'),
call(2, 'mangle', 'sshuttle-t-1025'),
call(2, 'mangle', 'sshuttle-d-1025')
]
assert mock_ipt_ttl.mock_calls == []
assert mock_ipt.mock_calls == [
call(2, 'mangle', '-D', 'OUTPUT', '-j', 'sshuttle-m-1025'),
call(2, 'mangle', '-F', 'sshuttle-m-1025'),
call(2, 'mangle', '-X', 'sshuttle-m-1025'),
call(2, 'mangle', '-D', 'PREROUTING', '-j', 'sshuttle-t-1025'),
call(2, 'mangle', '-F', 'sshuttle-t-1025'),
call(2, 'mangle', '-X', 'sshuttle-t-1025'),
call(2, 'mangle', '-F', 'sshuttle-d-1025'),
call(2, 'mangle', '-X', 'sshuttle-d-1025'),
call(2, 'mangle', '-N', 'sshuttle-m-1025'),
call(2, 'mangle', '-F', 'sshuttle-m-1025'),
call(2, 'mangle', '-N', 'sshuttle-d-1025'),
call(2, 'mangle', '-F', 'sshuttle-d-1025'),
call(2, 'mangle', '-N', 'sshuttle-t-1025'),
call(2, 'mangle', '-F', 'sshuttle-t-1025'),
call(2, 'mangle', '-I', 'OUTPUT', '1', '-j', 'sshuttle-m-1025'),
call(2, 'mangle', '-I', 'PREROUTING', '1', '-j', 'sshuttle-t-1025'),
call(2, 'mangle', '-A', 'sshuttle-d-1025',
'-j', 'MARK', '--set-mark', '1'),
call(2, 'mangle', '-A', 'sshuttle-d-1025', '-j', 'ACCEPT'),
call(2, 'mangle', '-A', 'sshuttle-t-1025', '-m', 'socket',
'-j', 'sshuttle-d-1025', '-m', 'tcp', '-p', 'tcp'),
call(2, 'mangle', '-A', 'sshuttle-t-1025', '-m', 'socket',
'-j', 'sshuttle-d-1025', '-m', 'udp', '-p', 'udp'),
call(2, 'mangle', '-A', 'sshuttle-m-1025', '-j', 'MARK',
'--set-mark', '1', '--dest', u'1.2.3.33/32',
'-m', 'udp', '-p', 'udp', '--dport', '53'),
call(2, 'mangle', '-A', 'sshuttle-t-1025', '-j', 'TPROXY',
'--tproxy-mark', '0x1/0x1', '--dest', u'1.2.3.33/32',
'-m', 'udp', '-p', 'udp', '--dport', '53', '--on-port', '1027'),
call(2, 'mangle', '-A', 'sshuttle-m-1025', '-j', 'RETURN',
'--dest', u'1.2.3.66/32', '-m', 'tcp', '-p', 'tcp'),
call(2, 'mangle', '-A', 'sshuttle-t-1025', '-j', 'RETURN',
'--dest', u'1.2.3.66/32', '-m', 'tcp', '-p', 'tcp'),
call(2, 'mangle', '-A', 'sshuttle-m-1025', '-j', 'RETURN',
'--dest', u'1.2.3.66/32', '-m', 'udp', '-p', 'udp'),
call(2, 'mangle', '-A', 'sshuttle-t-1025', '-j', 'RETURN',
'--dest', u'1.2.3.66/32', '-m', 'udp', '-p', 'udp'),
call(2, 'mangle', '-A', 'sshuttle-m-1025', '-j', 'MARK',
'--set-mark', '1', '--dest', u'1.2.3.0/24',
'-m', 'tcp', '-p', 'tcp'),
call(2, 'mangle', '-A', 'sshuttle-t-1025', '-j', 'TPROXY',
'--tproxy-mark', '0x1/0x1', '--dest', u'1.2.3.0/24',
'-m', 'tcp', '-p', 'tcp', '--on-port', '1025'),
call(2, 'mangle', '-A', 'sshuttle-m-1025', '-j', 'MARK',
'--set-mark', '1', '--dest', u'1.2.3.0/24',
'-m', 'udp', '-p', 'udp'),
call(2, 'mangle', '-A', 'sshuttle-t-1025', '-j', 'TPROXY',
'--tproxy-mark', '0x1/0x1', '--dest', u'1.2.3.0/24',
'-m', 'udp', '-p', 'udp', '--on-port', '1025')
]
mock_ipt_chain_exists.reset_mock()
mock_ipt_ttl.reset_mock()
mock_ipt.reset_mock()
method.restore_firewall(1025, 2, True)
assert mock_ipt_chain_exists.mock_calls == [
call(2, 'mangle', 'sshuttle-m-1025'),
call(2, 'mangle', 'sshuttle-t-1025'),
call(2, 'mangle', 'sshuttle-d-1025')
]
assert mock_ipt_ttl.mock_calls == []
assert mock_ipt.mock_calls == [
call(2, 'mangle', '-D', 'OUTPUT', '-j', 'sshuttle-m-1025'),
call(2, 'mangle', '-F', 'sshuttle-m-1025'),
call(2, 'mangle', '-X', 'sshuttle-m-1025'),
call(2, 'mangle', '-D', 'PREROUTING', '-j', 'sshuttle-t-1025'),
call(2, 'mangle', '-F', 'sshuttle-t-1025'),
call(2, 'mangle', '-X', 'sshuttle-t-1025'),
call(2, 'mangle', '-F', 'sshuttle-d-1025'),
call(2, 'mangle', '-X', 'sshuttle-d-1025')
]
mock_ipt_chain_exists.reset_mock()
mock_ipt_ttl.reset_mock()
mock_ipt.reset_mock()
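# A small sketch (illustrative only) of how the chain names asserted in this
# file are derived from the TCP listener port passed to setup_firewall and
# restore_firewall:
def _chain_names(port):
    return ('sshuttle-m-%d' % port,   # mark chain, hooked into OUTPUT
            'sshuttle-t-%d' % port,   # tproxy chain, hooked into PREROUTING
            'sshuttle-d-%d' % port)   # divert chain for existing sockets
# _chain_names(1025) returns
# ('sshuttle-m-1025', 'sshuttle-t-1025', 'sshuttle-d-1025').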

16
tox.ini Normal file
View File

@ -0,0 +1,16 @@
[tox]
downloadcache = {toxworkdir}/cache/
envlist =
py27,
py35,
[testenv]
basepython =
py27: python2.7
py35: python3.5
commands =
py.test
deps =
pytest
mock
setuptools>=17.1
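# With this configuration, running "tox" (or "tox -e py27" / "tox -e py35" to
# select a single interpreter) builds each listed virtualenv with pytest, mock
# and setuptools>=17.1 installed and then runs py.test inside it.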

8
ui-macos/.gitignore vendored
View File

@ -1,8 +0,0 @@
*.pyc
*~
/*.nib
/debug.app
/sources.list
/Sshuttle VPN.app
/*.tar.gz
/*.zip

View File

@ -1,40 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleDisplayName</key>
<string>Sshuttle VPN</string>
<key>CFBundleExecutable</key>
<string>Sshuttle</string>
<key>CFBundleIconFile</key>
<string>app.icns</string>
<key>CFBundleIdentifier</key>
<string>ca.apenwarr.Sshuttle</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>Sshuttle VPN</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>0.0.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>0.0.0</string>
<key>LSUIElement</key>
<string>1</string>
<key>LSHasLocalizedDisplayName</key>
<false/>
<key>NSAppleScriptEnabled</key>
<false/>
<key>NSHumanReadableCopyright</key>
<string>GNU LGPL Version 2</string>
<key>NSMainNibFile</key>
<string>MainMenu</string>
<key>NSPrincipalClass</key>
<string>NSApplication</string>
</dict>
</plist>

File diff suppressed because it is too large

View File

@ -1,10 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>startAtLogin</key>
<false/>
<key>autoReconnect</key>
<true/>
</dict>
</plist>

View File

@ -1 +0,0 @@
redo-ifchange debug.app dist

Binary file not shown.

View File

@ -1,28 +0,0 @@
import sys, os, re, subprocess
def askpass(prompt):
prompt = prompt.replace('"', "'")
if 'yes/no' in prompt:
return "yes"
script="""
tell application "Finder"
activate
display dialog "%s" \
with title "Sshuttle SSH Connection" \
default answer "" \
with icon caution \
with hidden answer
end tell
""" % prompt
p = subprocess.Popen(['osascript', '-e', script], stdout=subprocess.PIPE)
out = p.stdout.read()
rv = p.wait()
if rv:
return None
g = re.match("text returned:(.*), button returned:.*", out)
if not g:
return None
return g.group(1)

View File

@ -1 +0,0 @@
/runpython

View File

@ -1 +0,0 @@
APPL????

View File

@ -1,23 +0,0 @@
/*
* This rather pointless program acts like the python interpreter, except
* it's intended to sit inside a MacOS .app package, so that its argv[0]
* will point inside the package.
*
* NSApplicationMain() looks for Info.plist using the path in argv[0], which
* goes wrong if your interpreter is /usr/bin/python.
*/
#include <Python.h>
#include <string.h>
#include <unistd.h>
int main(int argc, char **argv)
{
char *path = strdup(argv[0]), *cptr;
char *args[] = {argv[0], "../Resources/main.py", NULL};
cptr = strrchr(path, '/');
if (cptr)
*cptr = 0;
chdir(path);
free(path);
return Py_Main(2, args);
}

View File

@ -1,5 +0,0 @@
exec >&2
redo-ifchange runpython.c
gcc -Wall -o $3 runpython.c \
-I/usr/include/python2.5 \
-lpython2.5

Binary file not shown (deleted image, 821 B).

Binary file not shown (deleted image, 789 B).

Binary file not shown (deleted image, 810 B).

View File

@ -1,4 +0,0 @@
exec >&2
find . -name '*~' | xargs rm -f
rm -rf *.app *.zip *.tar.gz
rm -f bits/runpython *.nib sources.list

View File

@ -1,15 +0,0 @@
redo-ifchange bits/runpython MainMenu.nib
rm -rf debug.app
mkdir debug.app debug.app/Contents
cd debug.app/Contents
ln -s ../.. Resources
ln -s ../.. English.lproj
ln -s ../../Info.plist .
ln -s ../../app.icns .
mkdir MacOS
cd MacOS
ln -s ../../../bits/runpython Sshuttle
cd ../../..
redo-ifchange $(find debug.app -type f)

View File

@ -1,28 +0,0 @@
TOP=$PWD
redo-ifchange sources.list
redo-ifchange Info.plist bits/runpython \
$(while read name newname; do echo "$name"; done <sources.list)
rm -rf "$1.app"
mkdir "$1.app" "$1.app/Contents"
cd "$1.app/Contents"
cp "$TOP/Info.plist" .
mkdir MacOS
cp "$TOP/bits/runpython" MacOS/Sshuttle
mkdir Resources
cd "$TOP"
while read name newname; do
[ -z "$name" ] && continue
: "${newname:=$name}"
outname=$1.app/Contents/Resources/$newname
outdir=$(dirname "$outname")
[ -d "$outdir" ] || mkdir "$outdir"
cp "${name-$newname}" "$outname"
done <sources.list
cd "$1.app"
redo-ifchange $(find . -type f)

View File

@ -1,5 +0,0 @@
exec >&2
IFS="
"
redo-ifchange $1.app
tar -czf $3 $1.app/

View File

@ -1,5 +0,0 @@
exec >&2
IFS="
"
redo-ifchange $1.app
zip -q -r $3 $1.app/

View File

@ -1,2 +0,0 @@
redo-ifchange $1.xib
ibtool --compile $3 $1.xib

View File

@ -1 +0,0 @@
redo-ifchange "Sshuttle VPN.app.zip" "Sshuttle VPN.app.tar.gz"

View File

@ -1,19 +0,0 @@
# update a local branch with pregenerated output files, so people can download
# the completed tarballs from github. Since we don't have any real binaries,
# our final distribution package contains mostly blobs from the source code,
# so this doesn't cost us much extra space in the repo.
BRANCH=dist/macos
redo-ifchange 'Sshuttle VPN.app'
git update-ref refs/heads/$BRANCH origin/$BRANCH '' 2>/dev/null || true
export GIT_INDEX_FILE=$PWD/gitindex.tmp
rm -f "$GIT_INDEX_FILE"
git add -f 'Sshuttle VPN.app'
MSG="MacOS precompiled app package for $(git describe)"
TREE=$(git write-tree --prefix=ui-macos)
git show-ref refs/heads/$BRANCH >/dev/null && PARENT="-p refs/heads/$BRANCH"
COMMITID=$(echo "$MSG" | git commit-tree $TREE $PARENT)
git update-ref refs/heads/$BRANCH $COMMITID
rm -f "$GIT_INDEX_FILE"

View File

@ -1,367 +0,0 @@
import sys, os, pty
from AppKit import *
import my, models, askpass
def sshuttle_args(host, auto_nets, auto_hosts, dns, nets, debug,
no_latency_control):
argv = [my.bundle_path('sshuttle/sshuttle', ''), '-r', host]
assert(argv[0])
if debug:
argv.append('-v')
if auto_nets:
argv.append('--auto-nets')
if auto_hosts:
argv.append('--auto-hosts')
if dns:
argv.append('--dns')
if no_latency_control:
argv.append('--no-latency-control')
argv += nets
return argv
class _Callback(NSObject):
def initWithFunc_(self, func):
self = super(_Callback, self).init()
self.func = func
return self
def func_(self, obj):
return self.func(obj)
class Callback:
def __init__(self, func):
self.obj = _Callback.alloc().initWithFunc_(func)
self.sel = self.obj.func_
class Runner:
def __init__(self, argv, logfunc, promptfunc, serverobj):
print 'in __init__'
self.id = argv
self.rv = None
self.pid = None
self.fd = None
self.logfunc = logfunc
self.promptfunc = promptfunc
self.serverobj = serverobj
self.buf = ''
self.logfunc('\nConnecting to %s.\n' % self.serverobj.host())
print 'will run: %r' % argv
self.serverobj.setConnected_(False)
pid,fd = pty.fork()
if pid == 0:
# child
try:
os.execvp(argv[0], argv)
except Exception, e:
sys.stderr.write('failed to start: %r\n' % e)
raise
finally:
os._exit(42)
# parent
self.pid = pid
self.file = NSFileHandle.alloc()\
.initWithFileDescriptor_closeOnDealloc_(fd, True)
self.cb = Callback(self.gotdata)
NSNotificationCenter.defaultCenter()\
.addObserver_selector_name_object_(self.cb.obj, self.cb.sel,
NSFileHandleDataAvailableNotification, self.file)
self.file.waitForDataInBackgroundAndNotify()
def __del__(self):
self.wait()
def _try_wait(self, options):
if self.rv == None and self.pid > 0:
pid,code = os.waitpid(self.pid, options)
if pid == self.pid:
if os.WIFEXITED(code):
self.rv = os.WEXITSTATUS(code)
else:
self.rv = -os.WSTOPSIG(code)
self.serverobj.setConnected_(False)
self.serverobj.setError_('VPN process died')
self.logfunc('Disconnected.\n')
print 'wait_result: %r' % self.rv
return self.rv
def wait(self):
return self._try_wait(0)
def poll(self):
return self._try_wait(os.WNOHANG)
def kill(self):
assert(self.pid > 0)
print 'killing: pid=%r rv=%r' % (self.pid, self.rv)
if self.rv == None:
self.logfunc('Disconnecting from %s.\n' % self.serverobj.host())
os.kill(self.pid, 15)
self.wait()
def gotdata(self, notification):
print 'gotdata!'
d = str(self.file.availableData())
if d:
self.logfunc(d)
self.buf = self.buf + d
if 'Connected.\r\n' in self.buf:
self.serverobj.setConnected_(True)
self.buf = self.buf[-4096:]
if self.buf.strip().endswith(':'):
lastline = self.buf.rstrip().split('\n')[-1]
resp = self.promptfunc(lastline)
add = ' (response)\n'
self.buf += add
self.logfunc(add)
self.file.writeData_(my.Data(resp + '\n'))
self.file.waitForDataInBackgroundAndNotify()
self.poll()
#print 'gotdata done!'
class SshuttleApp(NSObject):
def initialize(self):
d = my.PList('UserDefaults')
my.Defaults().registerDefaults_(d)
class SshuttleController(NSObject):
# Interface builder outlets
startAtLoginField = objc.IBOutlet()
autoReconnectField = objc.IBOutlet()
debugField = objc.IBOutlet()
routingField = objc.IBOutlet()
prefsWindow = objc.IBOutlet()
serversController = objc.IBOutlet()
logField = objc.IBOutlet()
latencyControlField = objc.IBOutlet()
servers = []
conns = {}
def _connect(self, server):
host = server.host()
print 'connecting %r' % host
self.fill_menu()
def logfunc(msg):
print 'log! (%d bytes)' % len(msg)
self.logField.textStorage()\
.appendAttributedString_(NSAttributedString.alloc()\
.initWithString_(msg))
self.logField.didChangeText()
def promptfunc(prompt):
print 'prompt! %r' % prompt
return askpass.askpass(prompt)
nets_mode = server.autoNets()
if nets_mode == models.NET_MANUAL:
manual_nets = ["%s/%d" % (i.subnet(), i.width())
for i in server.nets()]
elif nets_mode == models.NET_ALL:
manual_nets = ['0/0']
else:
manual_nets = []
noLatencyControl = (server.latencyControl() != models.LAT_INTERACTIVE)
conn = Runner(sshuttle_args(host,
auto_nets = nets_mode == models.NET_AUTO,
auto_hosts = server.autoHosts(),
dns = server.useDns(),
nets = manual_nets,
debug = self.debugField.state(),
no_latency_control = noLatencyControl),
logfunc=logfunc, promptfunc=promptfunc,
serverobj=server)
self.conns[host] = conn
def _disconnect(self, server):
host = server.host()
print 'disconnecting %r' % host
conn = self.conns.get(host)
if conn:
conn.kill()
self.fill_menu()
self.logField.textStorage().setAttributedString_(
NSAttributedString.alloc().initWithString_(''))
@objc.IBAction
def cmd_connect(self, sender):
server = sender.representedObject()
server.setWantConnect_(True)
@objc.IBAction
def cmd_disconnect(self, sender):
server = sender.representedObject()
server.setWantConnect_(False)
@objc.IBAction
def cmd_show(self, sender):
self.prefsWindow.makeKeyAndOrderFront_(self)
NSApp.activateIgnoringOtherApps_(True)
@objc.IBAction
def cmd_quit(self, sender):
NSApp.performSelector_withObject_afterDelay_(NSApp.terminate_,
None, 0.0)
def fill_menu(self):
menu = self.menu
menu.removeAllItems()
def additem(name, func, obj):
it = menu.addItemWithTitle_action_keyEquivalent_(name, None, "")
it.setRepresentedObject_(obj)
it.setTarget_(self)
it.setAction_(func)
def addnote(name):
additem(name, None, None)
any_inprogress = None
any_conn = None
any_err = None
if len(self.servers):
for i in self.servers:
host = i.host()
title = i.title()
want = i.wantConnect()
connected = i.connected()
numnets = len(list(i.nets()))
if not host:
additem('Connect Untitled', None, i)
elif i.autoNets() == models.NET_MANUAL and not numnets:
additem('Connect %s (no routes)' % host, None, i)
elif want:
any_conn = i
additem('Disconnect %s' % title, self.cmd_disconnect, i)
else:
additem('Connect %s' % title, self.cmd_connect, i)
if not want:
msg = 'Off'
elif i.error():
msg = 'ERROR - try reconnecting'
any_err = i
elif connected:
msg = 'Connected'
else:
msg = 'Connecting...'
any_inprogress = i
addnote(' State: %s' % msg)
else:
addnote('No servers defined yet')
menu.addItem_(NSMenuItem.separatorItem())
additem('Preferences...', self.cmd_show, None)
additem('Quit Sshuttle VPN', self.cmd_quit, None)
if any_err:
self.statusitem.setImage_(self.img_err)
self.statusitem.setTitle_('Error!')
elif any_conn:
self.statusitem.setImage_(self.img_running)
if any_inprogress:
self.statusitem.setTitle_('Connecting...')
else:
self.statusitem.setTitle_('')
else:
self.statusitem.setImage_(self.img_idle)
self.statusitem.setTitle_('')
def load_servers(self):
l = my.Defaults().arrayForKey_('servers') or []
sl = []
for s in l:
host = s.get('host', None)
if not host: continue
nets = s.get('nets', [])
nl = []
for n in nets:
subnet = n[0]
width = n[1]
net = models.SshuttleNet.alloc().init()
net.setSubnet_(subnet)
net.setWidth_(width)
nl.append(net)
autoNets = s.get('autoNets', models.NET_AUTO)
autoHosts = s.get('autoHosts', True)
useDns = s.get('useDns', autoNets == models.NET_ALL)
latencyControl = s.get('latencyControl', models.LAT_INTERACTIVE)
srv = models.SshuttleServer.alloc().init()
srv.setHost_(host)
srv.setAutoNets_(autoNets)
srv.setAutoHosts_(autoHosts)
srv.setNets_(nl)
srv.setUseDns_(useDns)
srv.setLatencyControl_(latencyControl)
sl.append(srv)
self.serversController.addObjects_(sl)
self.serversController.setSelectionIndex_(0)
def save_servers(self):
l = []
for s in self.servers:
host = s.host()
if not host: continue
nets = []
for n in s.nets():
subnet = n.subnet()
if not subnet: continue
nets.append((subnet, n.width()))
d = dict(host=s.host(),
nets=nets,
autoNets=s.autoNets(),
autoHosts=s.autoHosts(),
useDns=s.useDns(),
latencyControl=s.latencyControl())
l.append(d)
my.Defaults().setObject_forKey_(l, 'servers')
self.fill_menu()
def awakeFromNib(self):
self.routingField.removeAllItems()
tf = self.routingField.addItemWithTitle_
tf('Send all traffic through this server')
tf('Determine automatically')
tf('Custom...')
self.latencyControlField.removeAllItems()
tf = self.latencyControlField.addItemWithTitle_
tf('Fast transfer')
tf('Low latency')
# Hmm, even when I mark this as !enabled in the .nib, it still comes
# through as enabled. So let's just disable it here (since we don't
# support this feature yet).
self.startAtLoginField.setEnabled_(False)
self.startAtLoginField.setState_(False)
self.autoReconnectField.setEnabled_(False)
self.autoReconnectField.setState_(False)
self.load_servers()
# Initialize our menu item
self.menu = NSMenu.alloc().initWithTitle_('Sshuttle')
bar = NSStatusBar.systemStatusBar()
statusitem = bar.statusItemWithLength_(NSVariableStatusItemLength)
self.statusitem = statusitem
self.img_idle = my.Image('chicken-tiny-bw', 'png')
self.img_running = my.Image('chicken-tiny', 'png')
self.img_err = my.Image('chicken-tiny-err', 'png')
statusitem.setImage_(self.img_idle)
statusitem.setHighlightMode_(True)
statusitem.setMenu_(self.menu)
self.fill_menu()
models.configchange_callback = my.DelayedCallback(self.save_servers)
def sc(server):
if server.wantConnect():
self._connect(server)
else:
self._disconnect(server)
models.setconnect_callback = sc
# Note: NSApplicationMain calls sys.exit(), so this never returns.
NSApplicationMain(sys.argv)

View File

@ -1,166 +0,0 @@
from AppKit import *
import my
configchange_callback = setconnect_callback = None
def config_changed():
if configchange_callback:
configchange_callback()
def _validate_ip(v):
parts = v.split('.')[:4]
if len(parts) < 4:
parts += ['0'] * (4 - len(parts))
for i in range(4):
n = my.atoi(parts[i])
if n < 0:
n = 0
elif n > 255:
n = 255
parts[i] = str(n)
return '.'.join(parts)
def _validate_width(v):
n = my.atoi(v)
if n < 0:
n = 0
elif n > 32:
n = 32
return n
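# For example, _validate_ip('10.300') returns '10.255.0.0' (missing octets are
# padded with 0 and each octet is clamped to 0-255), and _validate_width('64')
# returns 32.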
class SshuttleNet(NSObject):
def subnet(self):
return getattr(self, '_k_subnet', None)
def setSubnet_(self, v):
self._k_subnet = v
config_changed()
@objc.accessor
def validateSubnet_error_(self, value, error):
#print 'validateSubnet!'
return True, _validate_ip(value), error
def width(self):
return getattr(self, '_k_width', 24)
def setWidth_(self, v):
self._k_width = v
config_changed()
@objc.accessor
def validateWidth_error_(self, value, error):
#print 'validateWidth!'
return True, _validate_width(value), error
NET_ALL = 0
NET_AUTO = 1
NET_MANUAL = 2
LAT_BANDWIDTH = 0
LAT_INTERACTIVE = 1
class SshuttleServer(NSObject):
def init(self):
self = super(SshuttleServer, self).init()
config_changed()
return self
def wantConnect(self):
return getattr(self, '_k_wantconnect', False)
def setWantConnect_(self, v):
self._k_wantconnect = v
self.setError_(None)
config_changed()
if setconnect_callback: setconnect_callback(self)
def connected(self):
return getattr(self, '_k_connected', False)
def setConnected_(self, v):
print 'setConnected of %r to %r' % (self, v)
self._k_connected = v
if v: self.setError_(None) # connected ok, so no error
config_changed()
def error(self):
return getattr(self, '_k_error', None)
def setError_(self, v):
self._k_error = v
config_changed()
def isValid(self):
if not self.host():
return False
if self.autoNets() == NET_MANUAL and not len(list(self.nets())):
return False
return True
def title(self):
host = self.host()
if not host:
return host
an = self.autoNets()
suffix = ""
if an == NET_ALL:
suffix = " (all traffic)"
elif an == NET_MANUAL:
n = self.nets()
suffix = ' (%d subnet%s)' % (len(n), len(n)!=1 and 's' or '')
return self.host() + suffix
def setTitle_(self, v):
# title is always auto-generated
config_changed()
def host(self):
return getattr(self, '_k_host', None)
def setHost_(self, v):
self._k_host = v
self.setTitle_(None)
config_changed()
@objc.accessor
def validateHost_error_(self, value, error):
#print 'validatehost! %r %r %r' % (self, value, error)
while value.startswith('-'):
value = value[1:]
return True, value, error
def nets(self):
return getattr(self, '_k_nets', [])
def setNets_(self, v):
self._k_nets = v
self.setTitle_(None)
config_changed()
def netsHidden(self):
#print 'checking netsHidden'
return self.autoNets() != NET_MANUAL
def setNetsHidden_(self, v):
config_changed()
#print 'setting netsHidden to %r' % v
def autoNets(self):
return getattr(self, '_k_autoNets', NET_AUTO)
def setAutoNets_(self, v):
self._k_autoNets = v
self.setNetsHidden_(-1)
self.setUseDns_(v == NET_ALL)
self.setTitle_(None)
config_changed()
def autoHosts(self):
return getattr(self, '_k_autoHosts', True)
def setAutoHosts_(self, v):
self._k_autoHosts = v
config_changed()
def useDns(self):
return getattr(self, '_k_useDns', False)
def setUseDns_(self, v):
self._k_useDns = v
config_changed()
def latencyControl(self):
return getattr(self, '_k_latencyControl', LAT_INTERACTIVE)
def setLatencyControl_(self, v):
self._k_latencyControl = v
config_changed()

View File

@ -1,62 +0,0 @@
import sys, os
from AppKit import *
import PyObjCTools.AppHelper
def bundle_path(name, typ):
if typ:
return NSBundle.mainBundle().pathForResource_ofType_(name, typ)
else:
return os.path.join(NSBundle.mainBundle().resourcePath(), name)
# Load an NSData using a python string
def Data(s):
return NSData.alloc().initWithBytes_length_(s, len(s))
# Load a property list from a file in the application bundle.
def PList(name):
path = bundle_path(name, 'plist')
return NSDictionary.dictionaryWithContentsOfFile_(path)
# Load an NSImage from a file in the application bundle.
def Image(name, ext):
bytes = open(bundle_path(name, ext)).read()
img = NSImage.alloc().initWithData_(Data(bytes))
return img
# Return the NSUserDefaults shared object.
def Defaults():
return NSUserDefaults.standardUserDefaults()
# Usage:
# f = DelayedCallback(func, args...)
# later:
# f()
#
# When you call f(), it will schedule a call to func() next time the
# ObjC event loop iterates. Multiple calls to f() in a single iteration
# will only result in one call to func().
#
def DelayedCallback(func, *args, **kwargs):
flag = [0]
def _go():
if flag[0]:
print 'running %r (flag=%r)' % (func, flag)
flag[0] = 0
func(*args, **kwargs)
def call():
flag[0] += 1
PyObjCTools.AppHelper.callAfter(_go)
return call
def atoi(s):
try:
return int(s)
except ValueError:
return 0

View File

@ -1,4 +0,0 @@
redo-ifchange debug.app
exec >&2
./debug.app/Contents/MacOS/Sshuttle

View File

@ -1,14 +0,0 @@
redo-always
exec >$3
cat <<-EOF
app.icns
MainMenu.nib English.lproj/MainMenu.nib
UserDefaults.plist
chicken-tiny.png
chicken-tiny-bw.png
chicken-tiny-err.png
EOF
for d in *.py sshuttle/*.py sshuttle/sshuttle sshuttle/compat/*.py; do
echo $d
done
redo-stamp <$3

View File

@ -1 +0,0 @@
..