Compare commits


190 Commits

Author SHA1 Message Date
Wade Simmons
a800a48857 v1.6.1 (#752)
Update CHANGELOG for Nebula v1.6.1
2022-09-26 13:38:18 -04:00
Nate Brown
4c0ae3df5e Refuse to process double encrypted packets (#741) 2022-09-19 12:47:48 -05:00
Nate Brown
feb3e1317f Add a simple benchmark to e2e tests (#739) 2022-09-01 09:44:58 -05:00
Jon Rafkind
c2259f14a7 explicitly reload config from ssh command (#725) 2022-08-08 12:44:09 -05:00
Nate Brown
b1eeb5f3b8 Support unsafe_routes on mobile again (#729) 2022-08-05 09:58:10 -05:00
Nate Brown
2adf0ca1d1 Use issue templates to improve bug reports (#726) 2022-07-29 12:57:05 -05:00
Nate Brown
92dfccf01a v1.6.0 (#701)
Update CHANGELOG for Nebula v1.6.0

Co-authored-by: Wade Simmons <wsimmons@slack-corp.com>
Co-authored-by: brad-defined <77982333+brad-defined@users.noreply.github.com>
2022-06-30 16:15:18 -04:00
brad-defined
38e495e0d2 Remove EXPERIMENTAL text from routines example config. (#702) 2022-06-30 11:20:41 -04:00
brad-defined
78a0255c91 typeos (#700) 2022-06-29 11:19:20 -04:00
brad-defined
169cdbbd35 Immediately forward packets received on the nebula TUN device from self to self (#501)
* Immediately forward packets received on the nebula TUN device with a destination of our Nebula VPN IP right back out that same TUN device on MacOS.
2022-06-27 14:36:10 -04:00
Nate Brown
0d1ee4214a Add relay e2e tests and output some mermaid sequence diagrams (#691) 2022-06-27 12:33:29 -05:00
Wade Simmons
7b9287709c add listen.send_recv_error config option (#670)
By default, Nebula replies to packets it has no tunnel for with a `recv_error` packet. This packet helps speed up re-connection
in the case that Nebula on either side did not shut down cleanly. However, this response can be abused to discover whether Nebula is running
on a host. This option lets you configure whether to send `recv_error` packets always, never, or only to private network remotes.
Valid values: always, never, private

This setting is reloadable with SIGHUP.
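
A minimal config sketch of this option (illustrative only; the key path follows the commit title, and the default of "always" is taken from the description above):

    listen:
      # always (default), never, or private
      send_recv_error: private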
2022-06-27 12:37:54 -04:00
Wade Simmons
85ec807b7e reserve NebulaHandshakeDetails fields for multiport (#674)
We are currently testing changes for multiport (related to #497) that
use fields 6 and 7 in the protobuf. Reserve these fields now so that when
we eventually open the PR, the changes remain backwards compatible.
2022-06-27 12:07:05 -04:00
John Maguire
a0b280621d Remove firewall.conntrack.max_connections from examples (#684) 2022-06-23 10:29:54 -05:00
Caleb Jasik
527f953c2c Remove x509 config loading code (#685) 2022-06-23 10:27:34 -05:00
brad-defined
1a7c575011 Relay (#678)
Co-authored-by: Wade Simmons <wsimmons@slack-corp.com>
2022-06-21 13:35:23 -05:00
Don Stephan
332fa2b825 fix panic in handleInvalidCertificate (#675)
* fix panic in handleInvalidCertificate

When HandleMonitorTick fires, the hostmap can be nil, which causes a panic when trying to clean up the hostmap in handleInvalidCertificate. This fix simply stops the invalidation from continuing if the hostmap doesn't exist.

* removed conditional for disconnectInvalid in HandleDeletionTick
2022-05-16 13:29:57 -04:00
Wade Simmons
45d1d2b6c6 Update dependencies - 2022-04 (#664)
    Updated  github.com/kardianos/service         https://github.com/kardianos/service/compare/v1.2.0...v1.2.1
    Updated  github.com/miekg/dns                 https://github.com/miekg/dns/compare/v1.1.43...v1.1.48
    Updated  github.com/prometheus/client_golang  https://github.com/prometheus/client_golang/compare/v1.11.0...v1.12.1
    Updated  github.com/prometheus/common         https://github.com/prometheus/common/compare/v0.32.1...v0.33.0
    Updated  github.com/stretchr/testify          https://github.com/stretchr/testify/compare/v1.7.0...v1.7.1
    Updated  golang.org/x/crypto                  5770296d90...ae2d96664a
    Updated  golang.org/x/net                     69e39bad7d...749bd193bc
    Updated  golang.org/x/sys                     7861aae155...289d7a0edf
    Updated  golang.zx2c4.com/wireguard/windows   v0.5.1...v0.5.3
    Updated  google.golang.org/protobuf           v1.27.1...v1.28.0
2022-04-18 12:12:25 -04:00
Wade Simmons
3913062c43 build and test with go1.18 (#656)
- https://go.dev/doc/go1.18
2022-04-05 17:08:00 -04:00
Wade Simmons
b38bd36766 fix connection manager check when disconnect_invalid set (#658)
This restores the hostMap.QueryVpnIP block to how it looked before #370
was merged. I'm not sure why the patch from #370 wanted to continue on
if there was no match found in the hostmap, since there isn't anything
to do at that point (the tunnel has already been closed).

This was causing a crash because the handleInvalidCertificate check
expects the hostinfo to be passed in (but it is nil since there was no
hostinfo in the hostmap).

Fixes: #657
2022-04-04 13:38:36 -04:00
Nate Brown
d85e24f49f Allow for self reported ips to the lighthouse (#650) 2022-04-04 12:35:23 -05:00
bitshop
7672c7087a Add to build all windows-arm64 / bin-windows-arm64 build option (#638)
* Add to build all windows-arm64 / bin-winarm64 builds

* update release to build for windows-arm64

* cleanup

Co-authored-by: Wade Simmons <wsimmons@slack-corp.com>
2022-03-18 13:23:10 -04:00
Caleb Jasik
730a5c4a23 Update link to nebula docs (#655) 2022-03-18 11:15:16 -04:00
brad-defined
03498a0cb2 Make nebula advertise its dynamic port to lighthouses (#653) 2022-03-15 18:03:56 -05:00
Nate Brown
312a01dc09 Lighthouse reload support (#649)
Co-authored-by: John Maguire <contact@johnmaguire.me>
2022-03-14 12:35:13 -05:00
Nate Brown
bbe0a032bb Fix windows unsafe_routes regression (#648) 2022-03-09 13:23:29 -06:00
Wade Simmons
b5b9d33ee7 v1.5.2 (#612)
Update CHANGELOG for Nebula v1.5.2
2021-12-14 16:48:56 -05:00
Wade Simmons
e434ba6523 fix unsafe routes darwin (#610)
With Nebula 1.4.0, if you create an unsafe_route that has a collision with an existing route on the system, the unsafe_route will be silently dropped (and the existing system route remains).

With Nebula 1.5.0, this same situation will cause Nebula to fail to start with an error (EEXIST).

This change restores the Nebula 1.4.0 behavior (but with a WARN log as well).
2021-12-14 11:52:49 -05:00
Wade Simmons
068a93d1f4 fix makeRouteTree allowMTU (#611)
With the previous implementation, we check whether route.MTU is greater than
zero, but it always is, because we set it to the default MTU in
parseUnsafeRoutes. This change leaves it as zero in parseUnsafeRoutes so
it can be examined later.
2021-12-14 11:52:28 -05:00
Nate Brown
15fdabc3ab v1.5.1 (#606)
Update CHANGELOG for Nebula v1.5.1
2021-12-13 20:43:25 -05:00
forfuncsake
1110756f0f Allow setup of a CA pool from bytes that contain expired certs (#599)
Co-authored-by: Nate Brown <nbrown.us@gmail.com>
2021-12-09 21:24:56 -06:00
Nate Brown
e31006d546 Be more clear about ipv4 in nebula-cert (#604) 2021-12-07 21:40:30 -06:00
Wade Simmons
949ec78653 don't set ConnectionState to nil (#590)
* don't set ConnectionState to nil

We might have packets being processed in another thread, so we can't safely
just set this to nil. Since we removed it from the hostmaps, the next
packets to process should start the handshake over again.

I believe this comment is outdated or incorrect; since the next
handshake will start over with a new HostInfo, I don't think there is
any way a counter reuse could happen:

> We must null the connectionstate or a counter reuse may happen

Here is a panic we saw that I think is related:

    panic: runtime error: invalid memory address or nil pointer dereference
    [signal SIGSEGV: segmentation violation code=0x1 addr=0x20 pc=0x93a037]
    goroutine 59 [running, locked to thread]:
    github.com/slackhq/nebula.(*Firewall).Drop(...)
            github.com/slackhq/nebula/firewall.go:380
    github.com/slackhq/nebula.(*Interface).consumeInsidePacket(...)
            github.com/slackhq/nebula/inside.go:59
    github.com/slackhq/nebula.(*Interface).listenIn(...)
            github.com/slackhq/nebula/interface.go:233
    created by github.com/slackhq/nebula.(*Interface).run
            github.com/slackhq/nebula/interface.go:191

* use closeTunnel
2021-12-06 14:09:05 -05:00
Wade Simmons
127a116bfd update golang.org/x/crypto (#603)
> Version v0.0.0-20211202192323-5770296d904e of golang.org/x/crypto fixes a vulnerability in the golang.org/x/crypto/ssh package which allowed unauthenticated clients to cause a panic in SSH servers.
>
> This issue was discovered and reported by Rod Hynes, Psiphon Inc., and is tracked as CVE-2021-43565 and Issue golang/go#49932.

    Updated  golang.org/x/crypto  089bfa5675...5770296d90
    Updated  golang.org/x/net     4a448f8816...69e39bad7d
2021-12-06 14:07:05 -05:00
Wade Simmons
befce3f990 fix crash with -test (#602)
When running in `-test` mode, `tun` is set to nil. So we should move the
defer into the `!configTest` if block.

    panic: runtime error: invalid memory address or nil pointer dereference
    [signal SIGSEGV: segmentation violation code=0x1 addr=0x28 pc=0x54855c]

    goroutine 1 [running]:
    github.com/slackhq/nebula.Main.func3(0x4000135e80, {0x0, 0x0})
            github.com/slackhq/nebula/main.go:176 +0x2c
    github.com/slackhq/nebula.Main(0x400022e060, 0x1, {0x76faa0, 0x5}, 0x4000230000, 0x0)
            github.com/slackhq/nebula/main.go:316 +0x2414
    main.main()
            github.com/slackhq/nebula/cmd/nebula/main.go:54 +0x540
2021-12-06 14:06:16 -05:00
Wade Simmons
f60ed2b36d overlay: fix tun.RouteFor getting *net.IP (#595)
tun.RouteFor expects the routeTree to have an iputil.VpnIp inside of it
instead of a *net.IP.
2021-12-06 09:35:31 -05:00
Nate Brown
48c47f5841 Warn if no lighthouses were configured on a non lighthouse node (#587) 2021-11-30 10:31:33 -06:00
Wade Simmons
75306487c5 fix wintun package to have // +build comments (#598)
Without these comments, gofmt in Go 1.16.9 will complain. Since we otherwise
still support building with go1.16, let's add the comments to make it
easier to compile and gofmt.

Related: #588
2021-11-30 11:14:15 -05:00
Nate Brown
78d0d46bae Remove WriteRaw, cidrTree -> routeTree to better describe its purpose, remove redundancy from field names (#582) 2021-11-12 12:47:09 -06:00
Nate Brown
467e605d5e Push route handling into overlay, a few more nits fixed (#581) 2021-11-12 11:19:28 -06:00
Nate Brown
2f1f0d602f Cleanup most of the remaining nits (#578) 2021-11-12 10:47:36 -06:00
Nate Brown
e07524a654 Move all of tun into overlay (#577) 2021-11-11 16:37:29 -06:00
Nate Brown
88ce0edf76 Start the overlay package with the old Inside interface (#576) 2021-11-10 21:52:26 -06:00
Nate Brown
4453964e34 Move util to test, contextual errors to util (#575) 2021-11-10 21:47:38 -06:00
Wade Simmons
19a9a4221e v1.5.0 (#574)
Update CHANGELOG for Nebula v1.5.0
2021-11-10 22:32:26 -05:00
Chad Harp
1915fab619 tun_darwin (#163)
- Remove water and replace with syscalls for tun setup
- Support named interfaces
- Set up routes with syscalls instead of os/exec

Co-authored-by: Wade Simmons <wade@wades.im>
2021-11-09 20:24:24 -05:00
Nate Brown
7801b589b6 Sign and notarize darwin universal binaries (#571) 2021-11-09 10:49:54 -06:00
Nate Brown
b6391292d1 Move wintun distributable into release zip for windows (#572) 2021-11-08 21:55:10 -06:00
Terry Wang
999efdb2e8 docs: improve grammar and readability for README.md (#225) 2021-11-08 17:32:31 -06:00
Wade Simmons
304b12f63f create ConnectionState before adding to HostMap (#535)
We have a few small race conditions with creating the HostInfo.ConnectionState
since we add the host info to the pendingHostMap before we set this
field. We can make everything a lot easier if we just add an "init"
function so that we can set this field in the hostinfo before we add it
to the hostmap.
2021-11-08 14:46:22 -05:00
CzBiX
16be0ce566 Add Wintun support (#289) 2021-11-08 12:36:31 -06:00
John Maguire
0577c097fb Fix flaky test (#567) 2021-11-04 14:49:56 -05:00
Jake Howard
eb66e13dc4 Use CGO_ENABLED=0 (#421)
Set `CGO_ENABLED` to 0 when building
2021-11-04 14:20:44 -04:00
Wade Simmons
a22c134bf5 Update dependencies, November 2021 (#564)
*Direct Dependencies*

    Updated  github.com/anmitsu/go-shlex                648efa6222...38f4b401e2
    Updated  github.com/flynn/noise                     https://github.com/flynn/noise/compare/4bdb43be3117...v1.0.0
    Updated  github.com/golang/protobuf                 https://github.com/golang/protobuf/compare/v1.5.0...v1.5.2
    Updated  github.com/kardianos/service               https://github.com/kardianos/service/compare/v1.1.0...v1.2.0
    Updated  github.com/miekg/dns                       https://github.com/miekg/dns/compare/v1.1.25...v1.1.43
    Updated  github.com/nbrownus/go-metrics-prometheus  https://github.com/nbrownus/go-metrics-prometheus/compare/6e6d5173d99c...974a6260965f
    Updated  github.com/prometheus/client_golang        https://github.com/prometheus/client_golang/compare/v1.2.1...v1.11.0
    Updated  github.com/rcrowley/go-metrics             https://github.com/rcrowley/go-metrics/compare/cac0b30c2563...cf1acfcdf475
    Updated  github.com/sirupsen/logrus                 https://github.com/sirupsen/logrus/compare/v1.4.2...v1.8.1
    Updated  github.com/songgao/water                   https://github.com/songgao/water/compare/fd331bda3f4b...2b4b6d7c09d8
    Updated  github.com/stretchr/testify                https://github.com/stretchr/testify/compare/v1.6.1...v1.7.0
    Updated  github.com/vishvananda/netlink             https://github.com/vishvananda/netlink/compare/00009fb8606a...v1.1.0
    Updated  golang.org/x/crypto                        https://github.com/golang/crypto/compare/0c34fe9e7dc2...089bfa567519
    Updated  golang.org/x/net                           https://github.com/golang/net/compare/e18ecbb05110...4a448f8816b3
    Updated  golang.org/x/sys                           https://github.com/golang/sys/compare/f84b799fce68...4dd72447c267
    Updated  google.golang.org/protobuf                 v1.26.0...v1.27.1
    Updated  gopkg.in/yaml.v2                           v2.2.7...v2.4.0

*Indirect Dependencies*

    Updated  github.com/alecthomas/units                         https://github.com/alecthomas/units/compare/c3de453c63f4...f65c72e2690d
    Updated  github.com/cespare/xxhash                           https://github.com/cespare/xxhash/compare/v2.1.1...v2.1.2
    Updated  github.com/go-logfmt/logfmt                         https://github.com/go-logfmt/logfmt/compare/v0.4.0...v0.5.0
    Updated  github.com/json-iterator/go                         https://github.com/json-iterator/go/compare/v1.1.7...v1.1.11
    Updated  github.com/julienschmidt/httprouter                 https://github.com/julienschmidt/httprouter/compare/v1.2.0...v1.3.0
    Updated  github.com/konsorten/go-windows-terminal-sequences  https://github.com/konsorten/go-windows-terminal-sequences/compare/v1.0.2...v1.0.3
    Updated  github.com/mwitkow/go-conntrack                     https://github.com/mwitkow/go-conntrack/compare/cc309e4a2223...2f068394615f
    Updated  github.com/pkg/errors                               https://github.com/pkg/errors/compare/v0.8.1...v0.9.1
    Updated  github.com/prometheus/client_model                  https://github.com/prometheus/client_model/compare/d1d2010b5bee...v0.2.0
    Updated  github.com/prometheus/common                        https://github.com/prometheus/common/compare/v0.7.0...v0.32.1
    Updated  github.com/prometheus/procfs                        https://github.com/prometheus/procfs/compare/v0.0.8...v0.7.3
    Updated  github.com/vishvananda/netns                        https://github.com/vishvananda/netns/compare/0a2b9b5464df...50045581ed74
    Updated  golang.org/x/sync                                   https://github.com/golang/sync/compare/67f06af15bc9...036812b2e83c
    Updated  golang.org/x/term                                   https://github.com/golang/term/compare/7de9c90e9dd1...03fcf44c2211
    Updated  golang.org/x/text                                   https://github.com/golang/text/compare/v0.3.3...v0.3.6
    Added    cloud.google.com/go                                 v0.65.0
    Added    cloud.google.com/go/bigquery                        v1.8.0
    Added    cloud.google.com/go/datastore                       v1.1.0
    Added    cloud.google.com/go/pubsub                          v1.3.1
    Added    cloud.google.com/go/storage                         v1.10.0
    Added    dmitri.shuralyov.com/gpu/mtl                        666a987793e9
    Added    github.com/BurntSushi/toml                          https://github.com/BurntSushi/toml/tree/v0.3.1
    Added    github.com/BurntSushi/xgb                           https://github.com/BurntSushi/xgb/tree/27f122750802
    Added    github.com/census-instrumentation/opencensus-proto  https://github.com/census-instrumentation/opencensus-proto/tree/v0.2.1
    Added    github.com/chzyer/logex                             https://github.com/chzyer/logex/tree/v1.1.10
    Added    github.com/chzyer/readline                          https://github.com/chzyer/readline/tree/2972be24d48e
    Added    github.com/chzyer/test                              https://github.com/chzyer/test/tree/a1ea475d72b1
    Added    github.com/client9/misspell                         https://github.com/client9/misspell/tree/v0.3.4
    Added    github.com/cncf/udpa/go                             https://github.com/cncf/udpa/go/tree/269d4d468f6f
    Added    github.com/envoyproxy/go-control-plane              https://github.com/envoyproxy/go-control-plane/tree/v0.9.4
    Added    github.com/envoyproxy/protoc-gen-validate           https://github.com/envoyproxy/protoc-gen-validate/tree/v0.1.0
    Added    github.com/go-gl/glfw                               https://github.com/go-gl/glfw/tree/e6da0acd62b1
    Added    github.com/go-gl/glfw/v3.3/glfw                     https://github.com/go-gl/glfw/v3.3/glfw/tree/6f7a984d4dc4
    Added    github.com/go-kit/log                               https://github.com/go-kit/log/tree/v0.1.0
    Added    github.com/golang/glog                              https://github.com/golang/glog/tree/23def4e6c14b
    Added    github.com/golang/groupcache                        https://github.com/golang/groupcache/tree/8c9f03a8e57e
    Added    github.com/golang/mock                              https://github.com/golang/mock/tree/v1.4.4
    Added    github.com/google/btree                             https://github.com/google/btree/tree/v1.0.0
    Added    github.com/google/martian                           https://github.com/google/martian/tree/v2.1.0+incompatible
    Added    github.com/google/martian                           https://github.com/google/martian/tree/v3.0.0
    Added    github.com/google/pprof                             https://github.com/google/pprof/tree/1a94d8640e99
    Added    github.com/google/renameio                          https://github.com/google/renameio/tree/v0.1.0
    Added    github.com/googleapis/gax-go                        https://github.com/googleapis/gax-go/tree/v2.0.5
    Added    github.com/hashicorp/golang-lru                     https://github.com/hashicorp/golang-lru/tree/v0.5.1
    Added    github.com/ianlancetaylor/demangle                  https://github.com/ianlancetaylor/demangle/tree/5e5cf60278f6
    Added    github.com/jpillora/backoff                         https://github.com/jpillora/backoff/tree/v1.0.0
    Added    github.com/jstemmer/go-junit-report                 https://github.com/jstemmer/go-junit-report/tree/v0.9.1
    Added    github.com/rogpeppe/go-internal                     https://github.com/rogpeppe/go-internal/tree/v1.3.0
    Added    go.opencensus.io                                    v0.22.4
    Added    golang.org/x/exp                                    https://github.com/golang/exp/tree/6cc2880d07d6
    Added    golang.org/x/image                                  https://github.com/golang/image/tree/cff245a6509b
    Added    golang.org/x/mobile                                 https://github.com/golang/mobile/tree/d2bd2a29d028
    Added    golang.org/x/oauth2                                 https://github.com/golang/oauth2/tree/f6687ab2804c
    Added    golang.org/x/time                                   https://github.com/golang/time/tree/555d28b269f0
    Added    google.golang.org/api                               v0.30.0
    Added    google.golang.org/appengine                         v1.6.6
    Added    google.golang.org/genproto                          8632dd797987
    Added    google.golang.org/grpc                              v1.31.0
    Added    gopkg.in/errgo.v2                                   v2.1.0
    Added    honnef.co/go/tools                                  v0.0.1-2020.1.4
    Added    rsc.io/binaryregexp                                 v0.2.0
    Added    rsc.io/quote                                        v3.1.0
    Added    rsc.io/sampler                                      v1.3.0
    Removed  github.com/flynn/go-shlex                           https://github.com/flynn/go-shlex/tree/3f9db97f8568
2021-11-04 10:25:13 -04:00
Nate Brown
94aaab042f Fix race between punchback and lighthouse handler reset (#566) 2021-11-03 21:54:27 -05:00
Donatas Abraitis
b358bbab80 Add an ability to specify metric for unsafe routes (#474) 2021-11-03 21:53:28 -05:00
Nate Brown
bcabcfdaca Rework some things into packages (#489) 2021-11-03 20:54:04 -05:00
Nate Brown
1f75fb3c73 Add link to further documentation (#563) 2021-11-02 20:55:34 -05:00
brad-defined
6ae8ba26f7 Add a context object in nebula.Main to clean up on error (#550) 2021-11-02 13:14:26 -05:00
Nate Brown
32cd9a93f1 Bump to go1.17 (#553) 2021-10-21 16:24:11 -05:00
Nate Brown
97afe2ec48 Update changelog for #370 (#551) 2021-10-20 14:36:56 -05:00
Donatas Abraitis
32e2619323 Teardown tunnel automatically if peer's certificate expired (#370) 2021-10-20 13:23:33 -05:00
Wade Simmons
e8b08e49e6 update CHANGELOG for 532, 540 and 541 (#549)
- #532
- #540
- #541

Also fix some whitespace
2021-10-19 11:07:31 -04:00
Wade Simmons
ea2c186a77 remote_allow_ranges: allow inside CIDR specific remote_allow_lists (#540)
This allows you to configure remote allow lists specific to different
subnets of the inside CIDR. Example:

    remote_allow_ranges:
      10.42.42.0/24:
        192.168.0.0/16: true

This would only allow hosts with a VPN IP in the 10.42.42.0/24 range to
use private remote IPs (and thus not connect over public IPs).

The PR also refactors AllowList into RemoteAllowList and LocalAllowList to make it clearer which methods are allowed on which allow list.
2021-10-19 10:54:30 -04:00
Wade Simmons
ae5505bc74 handshake: update to preferred remote (#532)
If we receive a handshake packet for a tunnel that has already been
completed, check to see if the new remote is preferred. If so, update to
the preferred remote and send a test packet to influence the other side
to do the same.
2021-10-19 10:53:55 -04:00
Wade Simmons
afda79feac documented "preferred_ranges" (#541)
Document the preferred_ranges config variable and deprecate "local_range".
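
A hedged sketch of how this might appear in config (the key name comes from the commit title; the top-level placement and list-of-CIDRs form are assumptions, not confirmed by this commit):

    # Prefer underlay addresses in these ranges when choosing a remote (assumed shape)
    preferred_ranges: ["172.16.0.0/24"]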
2021-10-19 10:53:36 -04:00
rvalue
0e7bc290f8 Fix build on riscv64 (#542)
Add riscv64 build tag for udp_linux_64.go to fix build on riscv64

Co-authored-by: Wade Simmons <wade@wades.im>
2021-10-13 10:55:32 -04:00
Manuel Romei
3a8f533b24 refactor: use X25519 instead of ScalarBaseMult (#533)
As suggested in https://pkg.go.dev/golang.org/x/crypto/curve25519#ScalarBaseMult,
use X25519 instead of ScalarBaseMult. When using Basepoint, it may employ
some precomputed values, enhancing performance.

Co-authored-by: Wade Simmons <wade@wades.im>
Co-authored-by: Wade Simmons <wadey@slack-corp.com>
2021-10-12 12:03:43 -04:00
John Maguire
34d002d695 Check CA cert and key match in nebula-cert sign (#503)
`func (nc *NebulaCertificate) VerifyPrivateKey(key []byte) error` would
previously return an error even if passed the correct private key for a
CA certificate `nc`.

That function has been updated to support CA certificates, and
nebula-cert now calls it before signing a new certificate. Previously,
it would perform all constraint checks against the CA certificate
provided, take a SHA256 fingerprint of the provided certificate, insert
it into the new node certificate, and then finally sign it with the
mismatching private key provided.
2021-10-01 12:43:33 -04:00
Ben Yanke
9f34c5e2ba Typo Fix (#523) 2021-09-16 00:12:08 -05:00
Joe Doss
3f5caf67ff Add info about Distribution Packages. (#414) 2021-09-15 17:57:35 -05:00
Stan Grishin
e01213cd21 Update README.md (#378)
Add missing period.
2021-09-15 17:50:01 -05:00
Jack Adamson
af3674ac7b add peer cert issuer to handshake log entries (#510)
Co-authored-by: Jack Adamson <jackadamson@users.noreply.github.com>
2021-08-31 11:57:38 +10:00
Nate Brown
c726d20578 Fix single command ssh exec (#483) 2021-06-07 17:06:59 -05:00
Andrii Chubatiuk
d13f4b5948 fixed recv_errors spoofing condition (#482)
Fixes a small bug that was introduced in
df7c7ee#diff-5d05d02296a1953fd5fbcb3f4ab486bc5f7c34b14c3bdedb068008ec8ff5beb4
and was causing problems.
2021-06-03 13:04:04 -04:00
Nate Brown
2e1d6743be v1.4.0 (#458)
Update CHANGELOG for Nebula v1.4.0

Co-authored-by: Wade Simmons <wade@wades.im>
2021-05-10 21:23:49 -04:00
Nate Brown
d004fae4f9 Unlock the hostmap quickly, lock hostinfo instead (#459) 2021-05-05 13:10:55 -05:00
Nate Brown
95f4c8a01b Don't check for rebind if we are closing the tunnel (#457) 2021-05-04 19:15:24 -05:00
Nate Brown
9ff73cb02f Increase the timestamp resolution for handshakes (#453) 2021-05-03 14:10:00 -05:00
John Maguire
98c391396c Remove log when no handshake message is sent (#452) 2021-04-30 18:19:40 -05:00
Nate Brown
1bc6f5fe6c Minor windows focused improvements (#443)
Co-authored-by: Wade Simmons <wadey@slack-corp.com>
2021-04-30 15:04:47 -05:00
Wade Simmons
44cb697552 Add more metrics (#450)
* Add more metrics

This change adds the following counter metrics:

Metrics to track packets dropped at the firewall:

    firewall.dropped.local_ip
    firewall.dropped.remote_ip
    firewall.dropped.no_rule

Metrics to track handshakes attempts that have been initiated and ones
that have timed out (ones that have completed are tracked by the
existing "handshakes" histogram).

    handshake_manager.initiated
    handshake_manager.timed_out

Metrics to track when cached_packets are dropped because we run out of
buffer space, and how many are sent once the handshake completes.

    hostinfo.cached_packets.dropped
    hostinfo.cached_packets.sent

This change also notes how many cached packets we have when we log the
final "Handshake received" message for either stage1 or stage2.

* separate incoming/outgoing metrics

* remove "allowed" firewall metrics

We don't need these on the hot path; they aren't worth it.

* don't need pointers here
2021-04-27 22:23:18 -04:00
Nathan Brown
db23fdf9bc Dont apply race avoidance to existing handshakes, use the handshake time to determine who wins (#451)
Co-authored-by: Wade Simmons <wadey@slack-corp.com>
2021-04-27 21:15:34 -05:00
Nathan Brown
df7c7eec4a Get out faster on nil udpAddr (#449) 2021-04-26 20:21:47 -05:00
Nathan Brown
6f37280e8e Fully close tunnels when CloseAllTunnels is called (#448) 2021-04-26 10:42:24 -05:00
Nathan Brown
a0735dd7d5 Add locking around ssh conns to avoid concurrent map access on reload (#447) 2021-04-23 14:43:16 -05:00
Nathan Brown
1deb5d98e8 Fix tun funcs for ios and android (#446) 2021-04-22 15:23:40 -05:00
Nathan Brown
a1ee521d79 Fix a failed return in an error case (#445) 2021-04-17 18:47:31 -05:00
brad-defined
7859140711 Only set serveDns if the host is also configured to be a lighthouse. (#433) 2021-04-16 13:33:56 -05:00
brad-defined
17106f83a0 Ensure the Nebula device exists before attempting to bind to the Nebula IP (#375) 2021-04-16 10:34:28 -05:00
Nathan Brown
ab08be1e3e Don't panic on a nil response from the lighthouse (#442) 2021-04-15 09:12:21 -05:00
Nathan Brown
710df6a876 Refactor remotes and handshaking to give every address a fair shot (#437) 2021-04-14 13:50:09 -05:00
John Maguire
20bef975cd Remove obsolete systemd unit settings (take 2) (#438) 2021-04-07 12:02:40 -05:00
Nathan Brown
480036fbc8 Remove unused structs in hostmap.go (#430) 2021-04-01 22:07:11 -05:00
Nathan Brown
1499be3e40 Fix name resolution for host names in config (#431) 2021-04-01 21:48:41 -05:00
Nathan Brown
64d8e5aa96 More LH cleanup (#429) 2021-04-01 10:23:31 -05:00
Nathan Brown
75f7bda0a4 Lighthouse performance pass (#418) 2021-03-31 17:32:02 -05:00
Nathan Brown
e7e55618ff Include bad packets in the good handshake test (#428) 2021-03-31 13:36:10 -05:00
Nathan Brown
0c2e5973e1 Simple lie test (#427) 2021-03-31 10:26:35 -05:00
Nathan Brown
830d6d4639 Start of end to end testing with a good handshake between two nodes (#425) 2021-03-29 14:29:20 -05:00
Nathan Brown
883e09a392 Don't use a global ca pool (#426) 2021-03-29 12:10:19 -05:00
Wade Simmons
4603b5b2dd fix PromoteEvery check (#424)
This check was accidentally typo'd in #396 from `%` to `&`. Restore the
correct functionality here (we want to do the check once every "PromoteEvery"
packets).
2021-03-26 15:01:05 -04:00
Wade Simmons
a71541fb0b export build version as a prometheus label (#405)
This is how Prometheus recommends you do it, and how they do it
themselves in their client. This makes it easy to see which versions you
have deployed in your fleet, and query over it too.
2021-03-26 14:16:35 -04:00
Nathan Brown
3ea7e1b75f Don't use a global logger (#423) 2021-03-26 09:46:30 -05:00
Nathan Brown
7a9f9dbded Don't craft buffers if we don't need them (#416) 2021-03-22 18:25:06 -05:00
Nathan Brown
7073d204a8 IPv6 support for outside (udp) (#369) 2021-03-18 20:37:24 -05:00
Joe Doss
9e94442ce7 Add fedora dist files. (#413) 2021-03-18 12:33:43 -07:00
Joe Doss
13471f5792 Remove obsolete systemd unit settings. (#412) 2021-03-18 12:29:36 -07:00
Thomas Roten
ea07a89cc8 Ensure mutex is unlocked when adding remote IP. (#406)
Currently, if you use the remote allow list config, as soon as you attempt to create a tunnel to a node that has a blocked IP address, a mutex is locked and never unlocked. This happens even if the node has an allowed remote IP address in addition to the blocked remote IP address.

This pull request ensures that the lighthouse mutex is unlocked whenever we attempt to add a remote IP.
2021-03-16 12:41:35 -04:00
Ryan Huber
3aaaea6309 don't allow a useless handshake with yourself (#402)
* don't allow a useless handshake with yourself

* remove helper
2021-03-15 12:58:23 -07:00
Wade Simmons
5506da3de9 Fix selection of UDP remote to use during stage2 (#404)
The change for #401 incorrectly called HostInfo.ForcePromoteBest in
stage2, when really we want to pick the remote that we received the
response from.
2021-03-12 21:43:24 -05:00
Wade Simmons
6c55d67f18 Refactor handshake_ix (#401)
There are some subtle race conditions with the previous handshake_ix implementation, mostly around collisions with localIndexId. This change refactors it so that we have a "commit" phase during the handshake where we grab the lock for the hostmap and ensure that we have a unique local index before storing it. We also now avoid using the pending hostmap at all for receiving stage1 packets, since we have everything we need to just store the completed handshake.

Co-authored-by: Nate Brown <nbrown.us@gmail.com>
Co-authored-by: Ryan Huber <rhuber@gmail.com>
Co-authored-by: forfuncsake <drussell@slack-corp.com>
2021-03-12 14:16:25 -05:00
Wade Simmons
64d8035d09 fix race in getOrHandshake (#400)
We missed this race with #396 (and I think this is also the crash in
issue #226). We need to lock a little higher in the getOrHandshake
method, before we reset hostinfo.ConnectionInfo. Previously, two
routines could enter this section and confuse the handshake process.

This could result in the other side sending a recv_error that also has
a race with setting hostinfo.ConnectionInfo back to nil. So we make sure
to grab the lock in handleRecvError as well.

Neither of these code paths are in the hot path (handling packets
between two hosts over an active tunnel) so there should be no
performance concerns.
2021-03-09 09:27:02 -05:00
Ryan Huber
73a5ed90b2 Do not allow someone to run a nebula lighthouse with an ephemeral port (#399)
* Do not allow someone to run a nebula lighthouse with an ephemeral port

* derp - we discover the port so we have to check the config setting

* No context needed for this error

* gofmt yourself

* Revert "gofmt yourself"

This reverts commit c01423498e.

* Revert "No context needed for this error"

This reverts commit 6792af6846.

* snip snap snip snap
2021-03-08 12:42:06 -08:00
Wade Simmons
d604270966 Fix most known data races (#396)
This change fixes all of the known data races that `make smoke-docker-race` finds, except for one.

Most of these races are around the handshake phase for a hostinfo, so we add a RWLock to the hostinfo and Lock during each of the handshake stages.

Some of the other races are around consistently using `atomic` around the `messageCounter` field. To make this harder to mess up, I have renamed the field to `atomicMessageCounter` (I also removed the unnecessary extra pointer deference as we can just point directly to the struct field).

The last remaining data race is around reading `ConnectionInfo.ready`, which is a boolean that is only written to once, when the handshake has finished. Since it is in the hot path for packets, and it is rare that this could actually be an issue, we are holding off on fixing that one for now.

here is the results of `make smoke-docker-race`:

before:

    lighthouse1: Found 2 data race(s)
    host2:       Found 36 data race(s)
    host3:       Found 17 data race(s)
    host4:       Found 31 data race(s)

after:

    host2: Found 1 data race(s)
    host4: Found 1 data race(s)

Fixes: #147
Fixes: #226
Fixes: #283
Fixes: #316
2021-03-05 21:18:33 -05:00
Nathan Brown
29c5f31f90 Add a check in the makefile to ensure a minimum version of go is installed (#383) 2021-03-02 13:29:05 -06:00
Nathan Brown
b6234abfb3 Add a way to trigger punch backs via lighthouse (#394) 2021-03-01 19:06:01 -06:00
Wade Simmons
2a4beb41b9 Routine-local conntrack cache (#391)
Previously, every packet we see gets a lock on the conntrack table and updates it. When running with multiple routines, this can cause heavy lock contention and limit our ability for the threads to run independently. This change caches reads from the conntrack table for a very short period of time to reduce this lock contention. This cache will currently default to disabled unless you are running with multiple routines, in which case the default cache delay will be 1 second. This means that entries in the conntrack table may be up to 1 second out of date and remain in a routine local cache for up to 1 second longer than the global table.

Instead of calling time.Now() for every packet, this cache system relies on a tick thread that updates the current cache "version" each tick. For every packet, we check whether the cache version is out of date and reset the cache if so.
2021-03-01 19:52:17 -05:00
Wade Simmons
d232ccbfab add metrics for the udp sockets using SO_MEMINFO (#390)
Retrieve the current socket stats using SO_MEMINFO and report them as
metrics gauges. If SO_MEMINFO isn't supported, we don't report these metrics.
2021-03-01 19:51:33 -05:00
Nathan Brown
ecfb40f29c Fix osx for mq changes, this does not implement mq on osx (#395) 2021-03-01 16:57:05 -05:00
Wade Simmons
1bae5b2550 more validation in pending hostmap deletes (#344)
We are currently seeing some cases where we are not deleting entries
correctly from the pending hostmap. I believe this is a case of
an inbound timer tick firing and deleting the Hosts map entry for
a newer handshake attempt than intended, thus leaving the old Indexes
entry orphaned. This change adds some extra checking when deleting from
the Indexes and Hosts maps to ensure we clean everything up correctly.
2021-03-01 12:40:46 -05:00
Wade Simmons
73081d99bc add make smoke-docker (#287)
This makes it easier to use the docker container smoke test that
GitHub actions runs. There is also `make smoke-docker-race` that runs the
smoke test with `-race` enabled.
2021-03-01 11:15:15 -05:00
Tim Rots
e7e6a23cde fix a few typos (#302) 2021-03-01 11:14:34 -05:00
Wade Simmons
a0583ebdca tun_disabled: reply to ICMP Echo Request (#342)
This change allows a server running with `tun.disabled: true` (usually
a lighthouse) to still reply to ICMP EchoRequest packets. This allows
you to "ping" the lighthouse Nebula IP as a quick check to make sure the
tunnel is up, even when running with tun.disabled.

This is still gated by allowing `icmp` packets in the inbound firewall
rules.
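
A minimal sketch of a lighthouse using this, assuming the standard firewall rule fields (port/proto/host) from the example config:

    tun:
      disabled: true
    firewall:
      inbound:
        # allow ICMP so the lighthouse Nebula IP still answers ping
        - port: any
          proto: icmp
          host: any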
2021-03-01 11:09:41 -05:00
Wade Simmons
27d9a67dda Proper multiqueue support for tun devices (#382)
This change is for Linux only.

Previously, when running with multiple tun.routines, we would only have one file descriptor. This change instead sets IFF_MULTI_QUEUE and opens a file descriptor for each routine. This allows us to process with multiple threads while preventing out of order packet reception issues.

To attempt to distribute the flows across the queues, we try to write to the tun/UDP queue that corresponds with the one we read from. So if we read a packet from tun queue "2", we will write the outgoing encrypted packet to UDP queue "2". Because of the nature of how multi queue works with flows, a given host tunnel will be sticky to a given routine (so if you try to performance benchmark by only using one tunnel between two hosts, you are only going to be using a max of one thread for each direction).

Because this system works much better when we can correlate flows between the tun and udp routines, we are deprecating the undocumented "tun.routines" and "listen.routines" parameters and introducing a new "routines" parameter that sets the value for both. If you use the old undocumented parameters, the max of the values will be used and a warning logged.

Co-authored-by: Nate Brown <nbrown.us@gmail.com>
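
A hedged example of the new parameter described above (the value is illustrative):

    # Sets both the tun and UDP read/write routine counts; replaces the
    # deprecated, undocumented tun.routines and listen.routines settings.
    routines: 2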
2021-02-25 15:01:14 -05:00
John Maguire
2bce222550 List possible cipher options in example config (#385) 2021-02-19 21:46:42 -06:00
Wade Simmons
3dd1108099 Go 1.16 and darwin-arm64 (#381)
This commit switches to Go 1.16 and adds a release binary for darwin-arm64.

Fixes: #343
2021-02-17 13:11:57 -05:00
Nathan Brown
d4b81f9b8d Add QR code support to nebula-cert (#297) 2021-02-11 18:53:25 -06:00
brad-defined
454bc8a6bb Check certificate banner during nebula-cert print (#373) 2021-02-05 14:52:32 -06:00
Wade Simmons
ce9ad37431 fix regression with LightHouseHandler and punchBack (#346)
The change introduced by #320 incorrectly re-uses the output buffer for
sending punchBack packets. Since we are currently spawning a new
goroutine for each send here, we need to allocate a new buffer each
time. We can come back and optimize this in the future, but for now we
should fix the regression.
2020-11-25 17:49:26 -05:00
Wade Simmons
ee7c27093c add HostMap.RemoteIndexes (#329)
This change adds an index based on HostInfo.remoteIndexId. This allows
us to use HostMap.QueryReverseIndex without having to loop over all
entries in the map (this can be a bottleneck under high traffic
lighthouses).

Without this patch, on a high traffic lighthouse server receiving recv_error
packets and lots of handshakes, the cpu pprof trace can look like this:

      flat  flat%   sum%        cum   cum%
    2000ms 32.26% 32.26%     3040ms 49.03%  github.com/slackhq/nebula.(*HostMap).QueryReverseIndex
     870ms 14.03% 46.29%     1060ms 17.10%  runtime.mapiternext

Which shows 50% of total cpu time is being spent in QueryReverseIndex.
2020-11-23 14:51:16 -05:00
Wade Simmons
2e7ca027a4 Lighthouse handler optimizations (#320)
We noticed that the number of memory allocations LightHouse.HandleRequest creates for each call can seriously impact performance for high traffic lighthouses. This PR introduces a benchmark in the first commit and then optimizes memory usage by creating a LightHouseHandler struct. This struct allows us to re-use memory between each lighthouse request (one instance per UDP listener go-routine).
2020-11-23 14:50:01 -05:00
mhp
672ce1f0a8 Move slice allocations in connection manager monitor loop (#340)
* Move slice allocations in connection manager monitor loop

* move further out

Co-authored-by: Miran Park <mpark@slack-corp.com>
2020-11-19 15:44:05 -08:00
Wade Simmons
384b1166ea fix panic in UnmarshalNebulaCertificate (#339)
This fixes a panic in UnmarshalNebulaCertificate when unmarshaling
a payload with Details set to nil.

Fixes: #332
2020-11-19 08:44:54 -05:00
Wade Simmons
0389596f66 don't mark handshake packets as "lost" (#331)
Packet 1 is always a stage 1 handshake and packet 2 is always stage 2.
Normal packets don't start flowing until the message counter is 3 or
higher.

Currently we only receive either packet 1 or 2 depending on if
we are the initiator or responder for the handshake, so we end up
marking one of these as "lost". We should mark these packets as "seen"
when we are the one sending them, since we don't expect to see them from
the other side.
2020-11-16 14:03:08 -05:00
Ryan Huber
43a3988afc i don't think this is used at all anymore (#323) 2020-10-29 21:43:50 -04:00
Brian Kelly
5c23676a0f Added line to systemd config template to start Nebula before sshd (#317)
During shutdown, this will keep Nebula alive until after sshd is finished. This cleanly terminates ssh clients accessing a server over a Nebula tunnel.
2020-10-29 21:43:02 -04:00
Nathan Brown
f6d0b4b893 Update README for supported platforms (#312) 2020-10-12 13:11:32 -05:00
Ryan Huber
0d6b55e495 Bring in the new version of kardianos/service and output logfiles on osx (#303)
* this brings in the new version of kardianos/service which properly
outputs logs from launchd services

* add go sum

* is it really this easy?

* Update CHANGELOG.md
2020-09-24 15:34:08 -07:00
Wade Simmons
c71c84882e v1.3.0 (#268)
Update the CHANGELOG for Nebula v1.3.0

Co-authored-by: forfuncsake <drussell@slack-corp.com>
2020-09-22 12:21:12 -04:00
Darren Hoo
0010db46e4 Fix a data race on message counter (#284)
==================
WARNING: DATA RACE
Write at 0x00c00030e020 by goroutine 17:
  sync/atomic.AddInt64()
      runtime/race_amd64.s:276 +0xb
  github.com/slackhq/nebula.(*Interface).sendNoMetrics()
      github.com/slackhq/nebula/inside.go:226 +0x9c
  github.com/slackhq/nebula.(*Interface).send()
      github.com/slackhq/nebula/inside.go:214 +0x149
  github.com/slackhq/nebula.(*Interface).readOutsidePackets()
      github.com/slackhq/nebula/outside.go:94 +0x1213
  github.com/slackhq/nebula.(*udpConn).ListenOut()
      github.com/slackhq/nebula/udp_generic.go:109 +0x3b5
  github.com/slackhq/nebula.(*Interface).listenOut()
      github.com/slackhq/nebula/interface.go:147 +0x15e

Previous read at 0x00c00030e020 by goroutine 18:
  github.com/slackhq/nebula.(*Interface).consumeInsidePacket()
      github.com/slackhq/nebula/inside.go:58 +0x892
  github.com/slackhq/nebula.(*Interface).listenIn()
      github.com/slackhq/nebula/interface.go:164 +0x178
2020-09-21 21:41:46 -04:00
Nathan Brown
68e3e84fdc More like a library (#279) 2020-09-18 09:20:09 -05:00
Brian Luong
6238f1550b Handle panic when invalid IP entered in sshd (#296) 2020-09-18 10:10:25 -04:00
forfuncsake
50b04413c7 Block nebula ssh server from listening on port 22 (#266)
Port 22 is blocked as a safety mechanism. In a case where nebula is
started before sshd, a system may be rendered unreachable if nebula
is holding the system ssh port and there is no other connectivity.

This commit enforces the restriction, which could previously be worked
around by listening on an IPv6 address, e.g.  "[::]:22".
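
An illustrative sshd config sketch (other required keys such as the host key and authorized users are omitted; key names assumed from the example config):

    sshd:
      enabled: true
      # must not be port 22; "[::]:22" and similar are now rejected at startup
      listen: 127.0.0.1:2222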
2020-09-15 09:57:32 -04:00
CzBiX
ef498a31da Add disable_timestamp option (#288) 2020-09-09 07:42:11 -04:00
forfuncsake
2e5a477a50 Align linux UDP performance optimizations with configuration (#275)
* Remove unused (*udpConn).Read method

* Align linux UDP performance optimizations with configuration

While attempting to run nebula on an older Synology NAS, it became
apparent that some of the performance optimizations effectively
block support for older kernels. The recvmmsg syscall was added in
linux kernel 2.6.33, and the Synology DS212j (among other models)
is pinned to 2.6.32.12.

Similarly, SO_REUSEPORT was added to the kernel in the 3.9 cycle.
While this option has been backported into some older trees, it
is also missing from the Synology kernel.

This commit allows nebula to be run on linux devices with older
kernels if the config options are set up with a single listener
and a UDP batch size of 1.
2020-08-13 08:24:05 +10:00
Wade Simmons
32fe9bfe75 Use Go 1.15 (#277)
Update all CI checks and release process to use the latest patch version
of go1.15.
2020-08-12 16:16:21 -04:00
forfuncsake
9b8b3c478b Support startup without a tun device (#269)
This commit adds support for Nebula to be started without creating
a tun device. A node started in this mode still has a full "control
plane", but no effective "data plane". Its use is suited to a
lighthouse that has no need to partake in the mesh VPN.

Consequently, creation of the tun device is the only reason nebula
needs to be started with elevated privileges, so this example
lighthouse can also be run as a non-root user.
2020-08-10 09:15:55 -04:00
Michael Hardy
7b3f23d9a1 Start nebula after the network is up (#270) 2020-08-07 11:33:48 -05:00
forfuncsake
25964b54f6 Use inclusive terminology for cert blocking (#272) 2020-08-06 11:17:47 +10:00
Wade Simmons
ac557f381b drop unroutable packets (#267)
Currently, if a packet arrives on the tun device with a destination that
is not a routable Nebula IP, `queryUnsafeRoute` converts that IP to
0.0.0.0 and we store that packet and try to look up that IP with the
lighthouse. This doesn't make any sense to do; if we get a packet that
is unroutable, we should just drop it.

Note, we have a few configurable options like `drop_local_broadcast`
and `drop_multicast` which do this for a few specific types, but since
no packets like this will send correctly I think we should just drop
anything that is unroutable.
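
For reference, a hedged sketch of the existing drop options mentioned above (their placement under the tun section is assumed from the example config):

    tun:
      drop_local_broadcast: false
      drop_multicast: false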
2020-08-04 22:59:04 -04:00
Wade Simmons
a54f3fc681 fix fast handshake trigger for static hosts (#265)
We are currently triggering a fast handshake for static hosts right
inside HandshakeManager.AddVpnIP, but this can actually trigger before
we have generated the handshake packet to use. Instead, we should be
triggering right after we call ixHandshakeStage0 in getOrHandshake
(which generates the handshake packet).
2020-08-02 20:59:50 -04:00
Alan Lam
5545cff6ef log remote certificate fingerprint on handshakes (#262) 2020-07-31 18:54:51 -04:00
Wade Simmons
f3a6d8d990 Preserve conntrack table during firewall rules reload (SIGHUP) (#233)
Currently, we drop the conntrack table when firewall rules change during a SIGHUP reload. This means responses to inflight HTTP requests can be dropped, among other issues. This change copies the conntrack table over to the new firewall (it holds the conntrack mutex lock during this process, to be safe).

This change also records which firewall rules hash each conntrack entry used, so that we can re-verify the rules after the new firewall has been loaded.
2020-07-31 18:53:36 -04:00
forfuncsake
9b06748506 Make Interface.Inside an interface type (#252)
This commit updates the Interface.Inside type to be a new interface
type instead of a *Tun. This will allow for an inside interface
that does not use a tun device, such as a single-binary client that
can run without elevated privileges.
2020-07-28 08:53:16 -04:00
Wade Simmons
4756c9613d trigger handshakes when lighthouse reply arrives (#246)
Currently, we wait until the next timer tick to act on the lighthouse's
reply to our HostQuery. This means we can easily add hundreds of
milliseconds of unnecessary delay to the handshake. To fix this, we
can introduce a channel to trigger an outbound handshake without waiting
for the next timer tick.

A few samples of cold ping time between two hosts that require a
lighthouse lookup:

    before (v1.2.0):

    time=156 ms
    time=252 ms
    time=12.6 ms
    time=301 ms
    time=352 ms
    time=49.4 ms
    time=150 ms
    time=13.5 ms
    time=8.24 ms
    time=161 ms
    time=355 ms

    after:

    time=3.53 ms
    time=3.14 ms
    time=3.08 ms
    time=3.92 ms
    time=7.78 ms
    time=3.59 ms
    time=3.07 ms
    time=3.22 ms
    time=3.12 ms
    time=3.08 ms
    time=8.04 ms

I recommend reviewing this PR by looking at each commit individually, as
some refactoring was required that makes the diff a bit confusing when
combined together.
2020-07-22 10:35:10 -04:00
Nathan Brown
4645e6034b Fix up the tun for android (#249) 2020-07-01 10:20:52 -05:00
Wade Simmons
aba42f9fa6 enforce the use of goimports (#248)
* enforce the use of goimports

Instead of enforcing `gofmt`, enforce `goimports`, which also asserts
a separate section for non-builtin packages.

* run `goimports` everywhere

* exclude generated .pb.go files
2020-06-30 18:53:30 -04:00
Nathan Brown
41578ca971 Be more like a library to support mobile (#247) 2020-06-30 13:48:58 -05:00
Wade Simmons
1ea8847085 linux: set advmss correctly when route MTU is used (#245)
If different mtus are specified for different routes, we should set
advmss on each route because Linux does a poor job of selecting the
default (from ip-route(8)):

    advmss NUMBER (Linux 2.3.15+ only)
           the MSS ('Maximal Segment Size') to advertise to these destinations when estab‐
           lishing TCP connections. If it is not given, Linux uses a default value calcu‐
           lated from the first hop device MTU.  (If the path to these destination is asym‐
           metric, this guess may be wrong.)

Note that the default value is calculated from the first hop *device
MTU*, not the *route MTU*. In practice this is usually ok as long as the
other side of the tunnel has the mtu configured exactly the same, but we
should probably just set advmss correctly on these routes.
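
A hedged example of a per-route MTU, which is the case this advmss fix targets (the unsafe_routes shape is assumed from the example config; addresses are illustrative):

    tun:
      unsafe_routes:
        # this route gets its own MTU, so advmss is now set on it explicitly
        - route: 192.168.96.0/24
          via: 10.42.42.1
          mtu: 1300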
2020-06-26 13:47:21 -04:00
Wade Simmons
55858c64cc smoke test: test firewall inbound / outbound (#240)
Test that basic inbound / outbound firewall rules work during the smoke
test. This change sets an inbound firewall rule on host3, and a new
host4 with outbound firewall rules. It also tests that conntrack allows
packets once the connection has been established.
2020-06-26 13:46:51 -04:00
Wade Simmons
e94c6b0125 mips-softfloat (#231)
This makes GOARM more generic and does GOMIPS in a similar way to
support mips-softfloat. We also set `-ldflags "-s -w"` for
mips-softfloat to give the best chance of the binary working on these
small devices.
2020-06-26 13:46:23 -04:00
Wade Simmons
b37a91cfbc add meta packet statistics (#230)
This change adds more metrics around "meta" packets (non-"message" type packets).
For lighthouse packets, we also record statistics around the specific
lighthouse meta type.

We don't keep statistics for the "message" type so that we don't slow
down the fast path (and you can just look at metrics on the tun
interface to find that information).
2020-06-26 13:45:48 -04:00
David Sonder
3212b769d4 fix typo in conntrack section in examples/config.yml (#236)
the rest of the conntrack values match the default
2020-06-26 11:08:22 -05:00
Patrick Bogen
ecf0e5a9f6 drop packets even if we aren't going to emit Debug logs about it (#239)
* drop packets even if we aren't going to emit Debug logs about it

* smallify change
2020-06-10 16:55:49 -05:00
Wade Simmons
ff13aba8fc allow go test -bench=. to run (#234)
This benchmark had an Errorf at the end; let's remove it so the
benchmarks all run.
2020-05-27 16:52:34 -04:00
Mateusz Kwiatkowski
cc03ff9e9a Unbreak building for FreeBSD (#103)
Add support for freebsd. You have to set `tun.dev` in your config. The second pass of this would be to remove the exec calls and use ioctl(2) and route(4) instead, but we can do that in a second PR.

Co-authored-by: Wade Simmons <wade@wades.im>
2020-05-26 22:23:23 -04:00
Patrick Bogen
363c836422 log the reason for fw drops (#220)
* log the reason for fw drops

* only prepare log if we will end up sending it
2020-04-10 10:57:21 -07:00
Wade Simmons
fb252db4a1 v1.2.0 (#215)
Add descriptions for all commits since v1.1.0
2020-04-08 19:52:24 -04:00
Wade Simmons
4f6313ebd3 fix config name for {remote,local}_allow_list (#219)
This config option should be snake_case, not camelCase.
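
The corrected key names, shown against the #217 example further down this log (structure assumed unchanged; only the casing differs):

    lighthouse:
      remote_allow_list:
        "172.16.0.0/12": false
      local_allow_list:
        interfaces:
          'docker.*': false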
2020-04-08 16:20:12 -04:00
Wade Simmons
0a474e757b Add lighthouse.{remoteAllowList,localAllowList} (#217)
These settings make it possible to blacklist / whitelist IP addresses
that are used for remote connections.

`lighthouse.remoteAllowList` filters which remote IPs are allow when
fetching from the lighthouse (or, if you are the lighthouse, which IPs
you store and forward to querying hosts). By default, any remote IPs are
allowed. You can provide CIDRs here with `true` to allow and `false` to
deny. The most specific CIDR rule applies to each remote.  If all rules
are "allow", the default will be "deny", and vice-versa. If both "allow"
and "deny" rules are present, then you MUST set a rule for "0.0.0.0/0"
as the default.

    lighthouse:
      remoteAllowList:
        # Example to block IPs from this subnet from being used for remote IPs.
        "172.16.0.0/12": false

        # A more complicated example, allow public IPs but only private IPs from a specific subnet
        "0.0.0.0/0": true
        "10.0.0.0/8": false
        "10.42.42.0/24": true

`lighthouse.localAllowList` has the same logic as above, but it applies
to the local addresses we advertise to the lighthouse. Additionally, you
can specify an `interfaces` map of regular expressions to match against
interface names. The regexp must match the entire name. All interface
rules must be either true or false (and the default rule will be the
inverse). CIDR rules are matched after interface name rules.

Default is all local IP addresses.

    lighthouse:
      localAllowList:
        # Example to blacklist docker interfaces.
        interfaces:
          'docker.*': false

        # Example to only advertise IPs in this subnet to the lighthouse.
        "10.0.0.0/8": true
2020-04-08 15:36:43 -04:00
Nathan Brown
7cd342c7ab Add a systemd unit for arch and a wireshark dissector (#216) 2020-04-06 18:47:32 -07:00
Wade Simmons
7cdbb14a18 Better config test (#177)
* Better config test

Previously, when using the config test option `-test`, we quit fairly
early in the process and would not catch a variety of additional
parsing errors (such as lighthouse IP addresses, local_range, the new
check to make sure static hosts are in the certificate's subnet, etc).

* run config test as part of smoke test

* don't need privileges for configtest

Co-authored-by: Nathan Brown <nate@slack-corp.com>
2020-04-06 11:35:32 -07:00
Wade Simmons
b4f2f7ce4e log certName alongside vpnIp (#200)
This change adds a new helper, `(*HostInfo).logger()`, that starts a new
logrus.Entry with `vpnIp` and `certName`. We don't use the helper inside
of handshake_ix though since the certificate has not been attached to
the HostInfo yet.

Fixes: #84
2020-04-06 11:34:00 -07:00
Alex
ff64d1f952 unsafe_routes mtu (#209) 2020-04-06 11:33:30 -07:00
Felix Yan
9e2ff7df57 Correct typos in noise.go (#205) 2020-03-30 11:23:55 -07:00
Ryan Huber
1297090af3 add configurable punching delay because of race-condition-y conntracks (#210)
* add configurable punching delay because of race-condition-y conntracks

* add changelog

* fix tests

* only do one punch per query

* Coalesce punchy config

* It is not is not set

* Add tests

Co-authored-by: Nate Brown <nbrown.us@gmail.com>
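
A hedged sketch of the coalesced punchy config this PR describes (key names assumed, not confirmed by the commit text; the delay value is illustrative):

    punchy:
      punch: true
      # assumed key for the new configurable punching delay
      delay: 1s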
2020-03-27 11:26:39 -07:00
Wade Simmons
add1b21777 only create a CIDRTree for each host if necessary (#198)
A CIDRTree can be expensive to create, so only do it if we need
it. If the remote host only has one IP address and no subnets, just do
an exact IP match instead.

Fixes: #171
2020-03-02 16:21:33 -05:00
Wade Simmons
1cb3201b5e Github Actions: cache modules and only run when necessary (#197)
This PR does two things:

- Only run the tests when relevant files change.
- Cache the Go Modules directory between runs, so they don't have to redownload everything every time (go.sum is the cache key). Pretty much straight from the examples: https://github.com/actions/cache/blob/master/examples.md#go---modules
2020-03-02 16:21:19 -05:00
Ryan Huber
41968551f9 clarify that lighthouse IP should be nebula range (#196) 2020-02-28 11:35:55 -08:00
Wade Simmons
8548ac3c31 build and test with go1.14 (#195)
- https://golang.org/doc/go1.14

I did a performance sanity check in Docker, and performance seems about
the same (perhaps slightly higher).
2020-02-27 15:48:39 -05:00
Wade Simmons
fb9b36f677 allow any config file name if specified directly (#189)
Currently, we require that config file names end with `.yml` or `.yaml`.
This is because if the user points `-config` at a directory of files, we
only want to use the YAML files in that directory.

But this makes it more difficult to use the `-test -config` option
because config management tools might not have an extension on the file
when preparing a new config file. This change makes it so that if you
point `-config` directly at a file, it is used no matter what the
extension is.
2020-02-26 15:38:56 -05:00
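In other words, after this change an invocation like the following works even though the staged file has no YAML extension (the path is illustrative):

    # a config management tool staged a candidate file with no extension
    ./nebula -test -config /tmp/nebula-staged-config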
Sebastien Bariteau
4d1928f1e3 Support unsafe_routes on Windows (#184)
* Support unsafe_routes on Windows

* Full path to route executable

* Escape string properly
2020-02-26 15:23:16 -05:00
Ryan Huber
a91a40212d check that packet isn't bound for my vpn ip (#192) 2020-02-21 16:49:54 -08:00
Wade Simmons
179a369130 add configuration options for HandshakeManager (#179)
This change exposes the current constants we have defined for the handshake
manager as configuration options. This will allow us to test and tweak
different intervals and wait rotations.

    # Handshake Manager Settings
    handshakes:
      # Total time to try a handshake = sequence of `try_interval * retries`
      # With 100ms interval and 20 retries it is 23.5 seconds
      try_interval: 100ms
      retries: 20

      # wait_rotation is the number of handshake attempts to do before starting to try non-local IP addresses
      wait_rotation: 5
2020-02-21 16:25:11 -05:00
Wade Simmons
df69371620 use absolute paths on darwin and windows (#191)
We want to make sure to use the system binaries, and not whatever is in
the PATH.
2020-02-21 15:25:33 -05:00
Wade Simmons
eda344d88f add logging.timestamp_format config option (#187)
This change introduces logging.timestamp_format, which allows
configuration of the Logrus TimestampFormat setting. The primary purpose
of this change was to allow logging with millisecond precision. The
default for `text` and `json` formats remains the same for backwards
compatibility.

timestamp format is specified in Go time format, see:

 - https://golang.org/pkg/time/#pkg-constants

Default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
Default when `format: text`:
  when TTY attached: seconds since beginning of execution
  otherwise: "2006-01-02T15:04:05Z07:00" (RFC3339)

As an example, to log as RFC3339 with millisecond precision, set to:

    logging:
        timestamp_format: "2006-01-02T15:04:05.000Z07:00"
2020-02-21 15:25:00 -05:00
Wade Simmons
065e2ff88a update golang.org/x/crypto (#188)
This version contains a fix for CVE-2020-9283, a remote crash bug:

- https://groups.google.com/forum/#!msg/golang-announce/3L45YRc91SY/ywEPcKLnGQAJ
2020-02-20 14:49:55 -05:00
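As a general illustration (not the exact commands used for this commit), picking up such a fix for a single module typically looks like:

    go get golang.org/x/crypto@latest
    go mod tidy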
Nathan Brown
45a5de2719 Print the udp listen address on startup (#181) 2020-02-06 21:17:43 -08:00
Wade Simmons
2d24ef7166 validate lighthouses and static hosts are in our subnet (#170)
Validate all lighthouse.hosts and static_host_map VPN IPs are in the
subnet defined in our cert. Exit with a fatal error if they are not in
our subnet, as this is an invalid configuration (we will not have the
proper routes set up to communicate with these hosts).

This error case could occur for the following invalid example:

    nebula-cert sign -name "lighthouse" -ip "10.0.1.1/24"
    nebula-cert sign -name "host" -ip "10.0.2.1/24"

    config.yaml:

        static_host_map:
            "10.0.1.1": ["lighthouse.local:4242"]
        lighthouse:
          hosts:
            - "10.0.1.1"

We will now return a fatal error for this config, since `10.0.1.1` is
not in the host cert's subnet of `10.0.2.1/24`
2020-01-20 15:52:55 -05:00
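By contrast, a configuration that passes the new validation keeps the lighthouse inside the same certificate subnet, for example (hedged, mirroring the invalid example above):

    nebula-cert sign -name "lighthouse" -ip "10.0.1.1/24"
    nebula-cert sign -name "host" -ip "10.0.1.2/24"

    config.yaml:

        static_host_map:
            "10.0.1.1": ["lighthouse.local:4242"]
        lighthouse:
          hosts:
            - "10.0.1.1"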
163 changed files with 18402 additions and 4531 deletions

57
.github/ISSUE_TEMPLATE/bug-report.yml vendored Normal file
View File

@@ -0,0 +1,57 @@
name: "\U0001F41B Bug Report"
description: Report an issue or possible bug
title: "\U0001F41B BUG:"
labels: []
assignees: []
body:
- type: markdown
attributes:
value: |
### Thank you for taking the time to file a bug report!
Please fill out this form as completely as possible.
- type: input
id: version
attributes:
label: What version of `nebula` are you using?
placeholder: 0.0.0
validations:
required: true
- type: input
id: os
attributes:
label: What operating system are you using?
description: iOS and Android specific issues belong in the [mobile_nebula](https://github.com/DefinedNet/mobile_nebula) repo.
placeholder: Linux, Mac, Windows
validations:
required: true
- type: textarea
id: description
attributes:
label: Describe the Bug
description: A clear and concise description of what the bug is.
validations:
required: true
- type: textarea
id: logs
attributes:
label: Logs from affected hosts
description: |
Provide logs from all affected hosts during the time of the issue.
Improve formatting by using <code>```</code> at the beginning and end of each log block.
validations:
required: false
- type: textarea
id: configs
attributes:
label: Config files from affected hosts
description: |
Provide config files for all affected hosts.
Improve formatting by using <code>```</code> at the beginning and end of each config file.
validations:
required: false

9
.github/ISSUE_TEMPLATE/config.yml vendored Normal file
View File

@@ -0,0 +1,9 @@
blank_issues_enabled: true
contact_links:
- name: 📘 Documentation
url: https://www.defined.net/nebula/
about: Review documentation.
- name: 💁 Support/Chat
url: https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU
about: 'This issue tracker is not for support questions. Join us on Slack for assistance!'

View File

@@ -4,6 +4,9 @@ on:
branches: branches:
- master - master
pull_request: pull_request:
paths:
- '.github/workflows/gofmt.yml'
- '**.go'
jobs: jobs:
gofmt: gofmt:
@@ -11,19 +14,31 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Set up Go 1.13 - name: Set up Go 1.18
uses: actions/setup-go@v1 uses: actions/setup-go@v2
with: with:
go-version: 1.13 go-version: 1.18
id: go id: go
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@v1 uses: actions/checkout@v2
- uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-gofmt1.18-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-gofmt1.18-
- name: Install goimports
run: |
go get golang.org/x/tools/cmd/goimports
go build golang.org/x/tools/cmd/goimports
- name: gofmt - name: gofmt
run: | run: |
if [ "$(find . -iname '*.go' | xargs gofmt -l)" ] if [ "$(find . -iname '*.go' | grep -v '\.pb\.go$' | xargs ./goimports -l)" ]
then then
find . -iname '*.go' | xargs gofmt -d find . -iname '*.go' | grep -v '\.pb\.go$' | xargs ./goimports -d
exit 1 exit 1
fi fi
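A rough local equivalent of the updated check above, mirroring the workflow steps (build goimports into the working directory, then lint everything except generated .pb.go files):

    go get golang.org/x/tools/cmd/goimports
    go build golang.org/x/tools/cmd/goimports
    find . -iname '*.go' | grep -v '\.pb\.go$' | xargs ./goimports -l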

View File

@@ -10,34 +10,34 @@ jobs:
name: Build Linux All name: Build Linux All
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Set up Go 1.13 - name: Set up Go 1.18
uses: actions/setup-go@v1 uses: actions/setup-go@v2
with: with:
go-version: 1.13 go-version: 1.18
- name: Checkout code - name: Checkout code
uses: actions/checkout@v2 uses: actions/checkout@v2
- name: Build - name: Build
run: | run: |
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd
mkdir release mkdir release
mv build/*.tar.gz release mv build/*.tar.gz release
- name: Upload artifacts - name: Upload artifacts
uses: actions/upload-artifact@v1 uses: actions/upload-artifact@v2
with: with:
name: linux-latest name: linux-latest
path: release path: release
build-windows: build-windows:
name: Build Windows amd64 name: Build Windows
runs-on: windows-latest runs-on: windows-latest
steps: steps:
- name: Set up Go 1.13 - name: Set up Go 1.18
uses: actions/setup-go@v1 uses: actions/setup-go@v2
with: with:
go-version: 1.13 go-version: 1.18
- name: Checkout code - name: Checkout code
uses: actions/checkout@v2 uses: actions/checkout@v2
@@ -45,63 +45,88 @@ jobs:
- name: Build - name: Build
run: | run: |
echo $Env:GITHUB_REF.Substring(11) echo $Env:GITHUB_REF.Substring(11)
go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\nebula.exe ./cmd/nebula-service mkdir build\windows-amd64
go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\nebula-cert.exe ./cmd/nebula-cert $Env:GOARCH = "amd64"
go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\windows-amd64\nebula.exe ./cmd/nebula-service
go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\windows-amd64\nebula-cert.exe ./cmd/nebula-cert
mkdir build\windows-arm64
$Env:GOARCH = "arm64"
go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\windows-arm64\nebula.exe ./cmd/nebula-service
go build -trimpath -ldflags "-X main.Build=$($Env:GITHUB_REF.Substring(11))" -o build\windows-arm64\nebula-cert.exe ./cmd/nebula-cert
mkdir build\dist\windows
mv dist\windows\wintun build\dist\windows\
- name: Upload artifacts - name: Upload artifacts
uses: actions/upload-artifact@v1 uses: actions/upload-artifact@v2
with: with:
name: windows-latest name: windows-latest
path: build path: build
build-darwin: build-darwin:
name: Build Darwin amd64 name: Build Universal Darwin
runs-on: macOS-latest env:
HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
runs-on: macos-11
steps: steps:
- name: Set up Go 1.13 - name: Set up Go 1.18
uses: actions/setup-go@v1 uses: actions/setup-go@v2
with: with:
go-version: 1.13 go-version: 1.18
- name: Checkout code - name: Checkout code
uses: actions/checkout@v2 uses: actions/checkout@v2
- name: Build - name: Import certificates
if: env.HAS_SIGNING_CREDS == 'true'
uses: Apple-Actions/import-codesign-certs@v1
with:
p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }}
p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }}
- name: Build, sign, and notarize
env:
AC_USERNAME: ${{ secrets.AC_USERNAME }}
AC_PASSWORD: ${{ secrets.AC_PASSWORD }}
run: | run: |
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" service build/nebula-darwin-amd64.tar.gz rm -rf release
mkdir release mkdir release
mv build/*.tar.gz release make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" service build/darwin-amd64/nebula build/darwin-amd64/nebula-cert
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" service build/darwin-arm64/nebula build/darwin-arm64/nebula-cert
lipo -create -output ./release/nebula ./build/darwin-amd64/nebula ./build/darwin-arm64/nebula
lipo -create -output ./release/nebula-cert ./build/darwin-amd64/nebula-cert ./build/darwin-arm64/nebula-cert
if [ -n "$AC_USERNAME" ]; then
codesign -s "10BC1FDDEB6CE753550156C0669109FAC49E4D1E" -f -v --timestamp --options=runtime -i "net.defined.nebula" ./release/nebula
codesign -s "10BC1FDDEB6CE753550156C0669109FAC49E4D1E" -f -v --timestamp --options=runtime -i "net.defined.nebula-cert" ./release/nebula-cert
fi
zip -j release/nebula-darwin.zip release/nebula-cert release/nebula
if [ -n "$AC_USERNAME" ]; then
xcrun notarytool submit ./release/nebula-darwin.zip --team-id "576H3XS7FP" --apple-id "$AC_USERNAME" --password "$AC_PASSWORD" --wait
fi
- name: Upload artifacts - name: Upload artifacts
uses: actions/upload-artifact@v1 uses: actions/upload-artifact@v2
with: with:
name: darwin-latest name: darwin-latest
path: release path: ./release/*
release: release:
name: Create and Upload Release name: Create and Upload Release
needs: [build-linux, build-darwin, build-windows] needs: [build-linux, build-darwin, build-windows]
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Download Linux artifacts - name: Download artifacts
uses: actions/download-artifact@v1 uses: actions/download-artifact@v2
with:
name: linux-latest
- name: Download Darwin artifacts
uses: actions/download-artifact@v1
with:
name: darwin-latest
- name: Download Windows artifacts
uses: actions/download-artifact@v1
with:
name: windows-latest
- name: Zip Windows - name: Zip Windows
run: | run: |
cd windows-latest cd windows-latest
zip nebula-windows-amd64.zip nebula.exe nebula-cert.exe cp windows-amd64/* .
zip -r nebula-windows-amd64.zip nebula.exe nebula-cert.exe dist
cp windows-arm64/* .
zip -r nebula-windows-arm64.zip nebula.exe nebula-cert.exe dist
- name: Create sha256sum - name: Create sha256sum
run: | run: |
@@ -111,15 +136,23 @@ jobs:
cd $dir cd $dir
if [ "$dir" = windows-latest ] if [ "$dir" = windows-latest ]
then then
sha256sum <nebula.exe | sed 's=-$=nebula-windows-amd64.zip/nebula.exe=' sha256sum <windows-amd64/nebula.exe | sed 's=-$=nebula-windows-amd64.zip/nebula.exe='
sha256sum <nebula-cert.exe | sed 's=-$=nebula-windows-amd64.zip/nebula-cert.exe=' sha256sum <windows-amd64/nebula-cert.exe | sed 's=-$=nebula-windows-amd64.zip/nebula-cert.exe='
sha256sum <windows-arm64/nebula.exe | sed 's=-$=nebula-windows-arm64.zip/nebula.exe='
sha256sum <windows-arm64/nebula-cert.exe | sed 's=-$=nebula-windows-arm64.zip/nebula-cert.exe='
sha256sum nebula-windows-amd64.zip sha256sum nebula-windows-amd64.zip
sha256sum nebula-windows-arm64.zip
elif [ "$dir" = darwin-latest ]
then
sha256sum <nebula-darwin.zip | sed 's=-$=nebula-darwin.zip='
sha256sum <nebula | sed 's=-$=nebula-darwin.zip/nebula='
sha256sum <nebula-cert | sed 's=-$=nebula-darwin.zip/nebula-cert='
else else
for v in *.tar.gz for v in *.tar.gz
do do
sha256sum $v sha256sum $v
tar zxf $v --to-command='sh -c "sha256sum | sed s=-$='$v'/$TAR_FILENAME="' tar zxf $v --to-command='sh -c "sha256sum | sed s=-$='$v'/$TAR_FILENAME="'
done done
fi fi
) )
done | sort -k 2 >SHASUM256.txt done | sort -k 2 >SHASUM256.txt
@@ -149,15 +182,15 @@ jobs:
asset_name: SHASUM256.txt asset_name: SHASUM256.txt
asset_content_type: text/plain asset_content_type: text/plain
- name: Upload darwin-amd64 - name: Upload darwin zip
uses: actions/upload-release-asset@v1.0.1 uses: actions/upload-release-asset@v1.0.1
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with: with:
upload_url: ${{ steps.create_release.outputs.upload_url }} upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./darwin-latest/nebula-darwin-amd64.tar.gz asset_path: ./darwin-latest/nebula-darwin.zip
asset_name: nebula-darwin-amd64.tar.gz asset_name: nebula-darwin.zip
asset_content_type: application/gzip asset_content_type: application/zip
- name: Upload windows-amd64 - name: Upload windows-amd64
uses: actions/upload-release-asset@v1.0.1 uses: actions/upload-release-asset@v1.0.1
@@ -169,6 +202,16 @@ jobs:
asset_name: nebula-windows-amd64.zip asset_name: nebula-windows-amd64.zip
asset_content_type: application/zip asset_content_type: application/zip
- name: Upload windows-arm64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./windows-latest/nebula-windows-arm64.zip
asset_name: nebula-windows-arm64.zip
asset_content_type: application/zip
- name: Upload linux-amd64 - name: Upload linux-amd64
uses: actions/upload-release-asset@v1.0.1 uses: actions/upload-release-asset@v1.0.1
env: env:
@@ -278,3 +321,33 @@ jobs:
asset_path: ./linux-latest/nebula-linux-mips64le.tar.gz asset_path: ./linux-latest/nebula-linux-mips64le.tar.gz
asset_name: nebula-linux-mips64le.tar.gz asset_name: nebula-linux-mips64le.tar.gz
asset_content_type: application/gzip asset_content_type: application/gzip
- name: Upload linux-mips-softfloat
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-mips-softfloat.tar.gz
asset_name: nebula-linux-mips-softfloat.tar.gz
asset_content_type: application/gzip
- name: Upload linux-riscv64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-riscv64.tar.gz
asset_name: nebula-linux-riscv64.tar.gz
asset_content_type: application/gzip
- name: Upload freebsd-amd64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-freebsd-amd64.tar.gz
asset_name: nebula-freebsd-amd64.tar.gz
asset_content_type: application/gzip

View File

@@ -4,24 +4,38 @@ on:
branches: branches:
- master - master
pull_request: pull_request:
paths:
- '.github/workflows/smoke**'
- '**Makefile'
- '**.go'
- '**.proto'
- 'go.mod'
- 'go.sum'
jobs: jobs:
smoke: smoke:
name: Run 3 node smoke test name: Run multi node smoke test
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Set up Go 1.13 - name: Set up Go 1.18
uses: actions/setup-go@v1 uses: actions/setup-go@v2
with: with:
go-version: 1.13 go-version: 1.18
id: go id: go
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@v1 uses: actions/checkout@v2
- uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go1.18-
- name: build - name: build
run: make run: make bin-docker
- name: setup docker image - name: setup docker image
working-directory: ./.github/workflows/smoke working-directory: ./.github/workflows/smoke
@@ -31,4 +45,12 @@ jobs:
working-directory: ./.github/workflows/smoke working-directory: ./.github/workflows/smoke
run: ./smoke.sh run: ./smoke.sh
- name: setup relay docker image
working-directory: ./.github/workflows/smoke
run: ./build-relay.sh
- name: run smoke relay
working-directory: ./.github/workflows/smoke
run: ./smoke-relay.sh
timeout-minutes: 10 timeout-minutes: 10

View File

@@ -1,5 +1,7 @@
FROM debian:buster FROM debian:buster
ADD ./build / ADD ./build /nebula
ENTRYPOINT ["/nebula"] WORKDIR /nebula
ENTRYPOINT ["/nebula/nebula"]

44
.github/workflows/smoke/build-relay.sh vendored Executable file
View File

@@ -0,0 +1,44 @@
#!/bin/sh
set -e -x
rm -rf ./build
mkdir ./build
(
cd build
cp ../../../../build/linux-amd64/nebula .
cp ../../../../build/linux-amd64/nebula-cert .
HOST="lighthouse1" AM_LIGHTHOUSE=true ../genconfig.sh >lighthouse1.yml <<EOF
relay:
am_relay: true
EOF
export LIGHTHOUSES="192.168.100.1 172.17.0.2:4242"
export REMOTE_ALLOW_LIST='{"172.17.0.4/32": false, "172.17.0.5/32": false}'
HOST="host2" ../genconfig.sh >host2.yml <<EOF
relay:
relays:
- 192.168.100.1
EOF
export REMOTE_ALLOW_LIST='{"172.17.0.3/32": false}'
HOST="host3" ../genconfig.sh >host3.yml
HOST="host4" ../genconfig.sh >host4.yml <<EOF
relay:
use_relays: false
EOF
../../../../nebula-cert ca -name "Smoke Test"
../../../../nebula-cert sign -name "lighthouse1" -groups "lighthouse,lighthouse1" -ip "192.168.100.1/24"
../../../../nebula-cert sign -name "host2" -groups "host,host2" -ip "192.168.100.2/24"
../../../../nebula-cert sign -name "host3" -groups "host,host3" -ip "192.168.100.3/24"
../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
)
sudo docker build -t nebula:smoke-relay .
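The relay-specific configuration the script injects above amounts to three roles; summarized here as hedged snippets using the same keys that appear in the heredocs:

    # lighthouse1: willing to relay traffic for other hosts
    relay:
      am_relay: true

    # host2: use lighthouse1's nebula IP as a relay when a direct tunnel fails
    relay:
      relays:
        - 192.168.100.1

    # host4: never use relays
    relay:
      use_relays: false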

View File

@@ -8,17 +8,32 @@ mkdir ./build
( (
cd build cd build
cp ../../../../nebula . cp ../../../../build/linux-amd64/nebula .
cp ../../../../nebula-cert . cp ../../../../build/linux-amd64/nebula-cert .
HOST="lighthouse1" AM_LIGHTHOUSE=true ../genconfig.sh >lighthouse1.yml HOST="lighthouse1" \
HOST="host2" LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" ../genconfig.sh >host2.yml AM_LIGHTHOUSE=true \
HOST="host3" LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" ../genconfig.sh >host3.yml ../genconfig.sh >lighthouse1.yml
./nebula-cert ca -name "Smoke Test" HOST="host2" \
./nebula-cert sign -name "lighthouse1" -ip "192.168.100.1/24" LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
./nebula-cert sign -name "host2" -ip "192.168.100.2/24" ../genconfig.sh >host2.yml
./nebula-cert sign -name "host3" -ip "192.168.100.3/24"
HOST="host3" \
LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
INBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
../genconfig.sh >host3.yml
HOST="host4" \
LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
OUTBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
../genconfig.sh >host4.yml
../../../../nebula-cert ca -name "Smoke Test"
../../../../nebula-cert sign -name "lighthouse1" -groups "lighthouse,lighthouse1" -ip "192.168.100.1/24"
../../../../nebula-cert sign -name "host2" -groups "host,host2" -ip "192.168.100.2/24"
../../../../nebula-cert sign -name "host3" -groups "host,host3" -ip "192.168.100.3/24"
../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
) )
docker build -t nebula:smoke . sudo docker build -t nebula:smoke .

View File

@@ -2,6 +2,7 @@
set -e set -e
FIREWALL_ALL='[{"port": "any", "proto": "any", "host": "any"}]'
if [ "$STATIC_HOSTS" ] || [ "$LIGHTHOUSES" ] if [ "$STATIC_HOSTS" ] || [ "$LIGHTHOUSES" ]
then then
@@ -32,13 +33,14 @@ lighthouse_hosts() {
cat <<EOF cat <<EOF
pki: pki:
ca: /ca.crt ca: ca.crt
cert: /${HOST}.crt cert: ${HOST}.crt
key: /${HOST}.key key: ${HOST}.key
lighthouse: lighthouse:
am_lighthouse: ${AM_LIGHTHOUSE:-false} am_lighthouse: ${AM_LIGHTHOUSE:-false}
hosts: $(lighthouse_hosts) hosts: $(lighthouse_hosts)
remote_allow_list: ${REMOTE_ALLOW_LIST}
listen: listen:
host: 0.0.0.0 host: 0.0.0.0
@@ -48,13 +50,8 @@ tun:
dev: ${TUN_DEV:-nebula1} dev: ${TUN_DEV:-nebula1}
firewall: firewall:
outbound: outbound: ${OUTBOUND:-$FIREWALL_ALL}
- port: any inbound: ${INBOUND:-$FIREWALL_ALL}
proto: any
host: any
inbound: $(test -t 0 || cat)
- port: any
proto: any
host: any
EOF EOF
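With the new FIREWALL_ALL default and the INBOUND/OUTBOUND overrides above, generating a config that only accepts inbound ICMP from the `lighthouse` group looks roughly like this (mirroring the build.sh usage earlier in this diff):

    HOST="host3" \
    LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
    INBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
    ../genconfig.sh >host3.yml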

85
.github/workflows/smoke/smoke-relay.sh vendored Executable file
View File

@@ -0,0 +1,85 @@
#!/bin/bash
set -e -x
set -o pipefail
mkdir -p logs
cleanup() {
echo
echo " *** cleanup"
echo
set +e
if [ "$(jobs -r)" ]
then
sudo docker kill lighthouse1 host2 host3 host4
fi
}
trap cleanup EXIT
sudo docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
sudo docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
sudo docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
sudo docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
sleep 1
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
sleep 1
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
sleep 1
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
sleep 1
set +x
echo
echo " *** Testing ping from lighthouse1"
echo
set -x
sudo docker exec lighthouse1 ping -c1 192.168.100.2
sudo docker exec lighthouse1 ping -c1 192.168.100.3
sudo docker exec lighthouse1 ping -c1 192.168.100.4
set +x
echo
echo " *** Testing ping from host2"
echo
set -x
sudo docker exec host2 ping -c1 192.168.100.1
# Should fail because no relay configured in this direction
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
! sudo docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1
set +x
echo
echo " *** Testing ping from host3"
echo
set -x
sudo docker exec host3 ping -c1 192.168.100.1
sudo docker exec host3 ping -c1 192.168.100.2
sudo docker exec host3 ping -c1 192.168.100.4
set +x
echo
echo " *** Testing ping from host4"
echo
set -x
sudo docker exec host4 ping -c1 192.168.100.1
# Should fail because relays not allowed
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
sudo docker exec host4 ping -c1 192.168.100.3
sudo docker exec host4 sh -c 'kill 1'
sudo docker exec host3 sh -c 'kill 1'
sudo docker exec host2 sh -c 'kill 1'
sudo docker exec lighthouse1 sh -c 'kill 1'
sleep 1
if [ "$(jobs -r)" ]
then
echo "nebula still running after SIGTERM sent" >&2
exit 1
fi

View File

@@ -1,12 +1,37 @@
#!/bin/sh #!/bin/bash
set -e -x set -e -x
docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config lighthouse1.yml & set -o pipefail
mkdir -p logs
cleanup() {
echo
echo " *** cleanup"
echo
set +e
if [ "$(jobs -r)" ]
then
sudo docker kill lighthouse1 host2 host3 host4
fi
}
trap cleanup EXIT
sudo docker run --name lighthouse1 --rm nebula:smoke -config lighthouse1.yml -test
sudo docker run --name host2 --rm nebula:smoke -config host2.yml -test
sudo docker run --name host3 --rm nebula:smoke -config host3.yml -test
sudo docker run --name host4 --rm nebula:smoke -config host4.yml -test
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
sleep 1 sleep 1
docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host2.yml & sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
sleep 1 sleep 1
docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host3.yml & sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
sleep 1
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
sleep 1 sleep 1
set +x set +x
@@ -14,21 +39,55 @@ echo
echo " *** Testing ping from lighthouse1" echo " *** Testing ping from lighthouse1"
echo echo
set -x set -x
docker exec lighthouse1 ping -c1 192.168.100.2 sudo docker exec lighthouse1 ping -c1 192.168.100.2
docker exec lighthouse1 ping -c1 192.168.100.3 sudo docker exec lighthouse1 ping -c1 192.168.100.3
set +x set +x
echo echo
echo " *** Testing ping from host2" echo " *** Testing ping from host2"
echo echo
set -x set -x
docker exec host2 ping -c1 192.168.100.1 sudo docker exec host2 ping -c1 192.168.100.1
docker exec host2 ping -c1 192.168.100.3 # Should fail because not allowed by host3 inbound firewall
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
set +x set +x
echo echo
echo " *** Testing ping from host3" echo " *** Testing ping from host3"
echo echo
set -x set -x
docker exec host3 ping -c1 192.168.100.1 sudo docker exec host3 ping -c1 192.168.100.1
docker exec host3 ping -c1 192.168.100.2 sudo docker exec host3 ping -c1 192.168.100.2
set +x
echo
echo " *** Testing ping from host4"
echo
set -x
sudo docker exec host4 ping -c1 192.168.100.1
# Should fail because not allowed by host4 outbound firewall
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
set +x
echo
echo " *** Testing conntrack"
echo
set -x
# host2 can ping host3 now that host3 pinged it first
sudo docker exec host2 ping -c1 192.168.100.3
# host4 can ping host2 once conntrack established
sudo docker exec host2 ping -c1 192.168.100.4
sudo docker exec host4 ping -c1 192.168.100.2
sudo docker exec host4 sh -c 'kill 1'
sudo docker exec host3 sh -c 'kill 1'
sudo docker exec host2 sh -c 'kill 1'
sudo docker exec lighthouse1 sh -c 'kill 1'
sleep 1
if [ "$(jobs -r)" ]
then
echo "nebula still running after SIGTERM sent" >&2
exit 1
fi
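For reference, the inbound restriction that makes the host2 -> host3 ping fail above corresponds to generated config along these lines (hedged; this is what genconfig.sh emits given the INBOUND value from build.sh):

    firewall:
      outbound: [{"port": "any", "proto": "any", "host": "any"}]
      inbound: [{"port": "any", "proto": "icmp", "group": "lighthouse"}]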

View File

@@ -4,6 +4,13 @@ on:
branches: branches:
- master - master
pull_request: pull_request:
paths:
- '.github/workflows/test.yml'
- '**Makefile'
- '**.go'
- '**.proto'
- 'go.mod'
- 'go.sum'
jobs: jobs:
test-linux: test-linux:
@@ -11,14 +18,21 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Set up Go 1.13 - name: Set up Go 1.18
uses: actions/setup-go@v1 uses: actions/setup-go@v2
with: with:
go-version: 1.13 go-version: 1.18
id: go id: go
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@v1 uses: actions/checkout@v2
- uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go1.18-
- name: Build - name: Build
run: make all run: make all
@@ -26,22 +40,38 @@ jobs:
- name: Test - name: Test
run: make test run: make test
- name: End 2 end
run: make e2evv
- uses: actions/upload-artifact@v3
with:
name: e2e packet flow
path: e2e/mermaid/
if-no-files-found: warn
test: test:
name: Build and test on ${{ matrix.os }} name: Build and test on ${{ matrix.os }}
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
strategy: strategy:
matrix: matrix:
os: [windows-latest, macOS-latest] os: [windows-latest, macos-11]
steps: steps:
- name: Set up Go 1.13 - name: Set up Go 1.18
uses: actions/setup-go@v1 uses: actions/setup-go@v2
with: with:
go-version: 1.13 go-version: 1.18
id: go id: go
- name: Check out code into the Go module directory - name: Check out code into the Go module directory
uses: actions/checkout@v1 uses: actions/checkout@v2
- uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go1.18-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go1.18-
- name: Build nebula - name: Build nebula
run: go build ./cmd/nebula run: go build ./cmd/nebula
@@ -51,3 +81,12 @@ jobs:
- name: Test - name: Test
run: go test -v ./... run: go test -v ./...
- name: End 2 end
run: make e2evv
- uses: actions/upload-artifact@v3
with:
name: e2e packet flow
path: e2e/mermaid/
if-no-files-found: warn

1
.gitignore vendored
View File

@@ -10,3 +10,4 @@
/cpu.pprof /cpu.pprof
/build /build
/*.tar.gz /*.tar.gz
/e2e/mermaid/

View File

@@ -7,6 +7,357 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased] ## [Unreleased]
## [1.6.1] - 2022-09-26
### Fixed
- Refuse to process underlay packets received from overlay IPs. This prevents
confusion on hosts that have unsafe routes configured. (#741)
- The ssh `reload` command did not work on Windows, since it relied on sending
a SIGHUP signal internally. This has been fixed. (#725)
- A regression in v1.5.2 that broke unsafe routes on Mobile clients has been
fixed. (#729)
## [1.6.0] - 2022-06-30
### Added
- Experimental: nebula clients can be configured to act as relays for other nebula clients.
Primarily useful when stubborn NATs make a direct tunnel impossible. (#678)
- Configuration option to report manually specified `ip:port`s to lighthouses. (#650)
- Windows arm64 build. (#638)
- `punchy` and most `lighthouse` config options now support hot reloading. (#649)
### Changed
- Build against go 1.18. (#656)
- Promoted `routines` config from experimental to supported feature. (#702)
- Dependencies updated. (#664)
### Fixed
- Packets destined for the same host that sent it will be returned on MacOS.
This matches the default behavior of other operating systems. (#501)
- `unsafe_route` configuration will no longer crash on Windows. (#648)
- A few panics that were introduced in 1.5.x. (#657, #658, #675)
### Security
- You can set `listen.send_recv_error` to control the conditions in which
`recv_error` messages are sent. Sending these messages can expose the fact
that Nebula is running on a host, but it speeds up re-handshaking. (#670)
### Removed
- `x509` config stanza support has been removed. (#685)
## [1.5.2] - 2021-12-14
### Added
- Warn when a non lighthouse node does not have lighthouse hosts configured. (#587)
### Changed
- No longer fatals if expired CA certificates are present in `pki.ca`, as long as 1 valid CA is present. (#599)
- `nebula-cert` will now enforce ipv4 addresses. (#604)
- Warn on macOS if an unsafe route cannot be created due to a collision with an
existing route. (#610)
- Warn if you set a route MTU on platforms where we don't support it. (#611)
### Fixed
- Rare race condition when tearing down a tunnel due to `recv_error` and sending packets on another thread. (#590)
- Bug in `routes` and `unsafe_routes` handling that was introduced in 1.5.0. (#595)
- `-test` mode no longer results in a crash. (#602)
### Removed
- `x509.ca` config alias for `pki.ca`. (#604)
### Security
- Upgraded `golang.org/x/crypto` to address an issue which allowed unauthenticated clients to cause a panic in SSH
servers. (#603)
## 1.5.1 - 2021-12-13
(This release was skipped due to discovering #610 and #611 after the tag was
created.)
## [1.5.0] - 2021-11-11
### Added
- SSH `print-cert` has a new `-raw` flag to get the PEM representation of a certificate. (#483)
- New build architecture: Linux `riscv64`. (#542)
- New experimental config option `remote_allow_ranges`. (#540)
- New config option `pki.disconnect_invalid` that will tear down tunnels when they become invalid (through expiry or
removal of root trust). Default is `false`. Note, this will not currently recognize if a remote has changed
certificates since the last handshake. (#370)
- New config option `unsafe_routes.<route>.metric` will set a metric for a specific unsafe route. It's useful if you have
more than one identical route and want to prefer one against the other. (#353)
### Changed
- Build against go 1.17. (#553)
- Build with `CGO_ENABLED=0` set, to create more portable binaries. This could
have an effect on DNS resolution if you rely on anything non-standard. (#421)
- Windows now uses the [wintun](https://www.wintun.net/) driver which does not require installation. This driver
is a large improvement over the TAP driver that was used in previous versions. If you had a previous version
of `nebula` running, you will want to disable the tap driver in Control Panel, or uninstall the `tap0901` driver
before running this version. (#289)
- Darwin binaries are now universal (works on both amd64 and arm64), signed, and shipped in a notarized zip file.
`nebula-darwin.zip` will be the only darwin release artifact. (#571)
- Darwin uses syscalls and AF_ROUTE to configure the routing table, instead of
using `/sbin/route`. Setting `tun.dev` is now allowed on Darwin as well, it
must be in the format `utun[0-9]+` or it will be ignored. (#163)
### Deprecated
- The `preferred_ranges` option has been supported as a replacement for
`local_range` since v1.0.0. It has now been documented and `local_range`
has been officially deprecated. (#541)
### Fixed
- Valid recv_error packets were incorrectly marked as "spoofing" and ignored. (#482)
- SSH server handles single `exec` requests correctly. (#483)
- Signing a certificate with `nebula-cert sign` now verifies that the supplied
ca-key matches the ca-crt. (#503)
- If `preferred_ranges` (or the deprecated `local_range`) is configured, we
will immediately switch to a preferred remote address after the reception of
a handshake packet (instead of waiting until 1,000 packets have been sent).
(#532)
- A race condition when `punchy.respond` is enabled and ensures the correct
vpn ip is sent a punch back response in highly queried node. (#566)
- Fix a rare crash during handshake due to a race condition. (#535)
## [1.4.0] - 2021-05-11
### Added
- Ability to output qr code images in `print`, `ca`, and `sign` modes for `nebula-cert`.
This is useful when configuring mobile clients. (#297)
- Experimental: Nebula can now do work on more than 2 cpu cores in send and receive paths via
the new `routines` config option. (#382, #391, #395)
- ICMP ping requests can be responded to when the `tun.disabled` is `true`.
This is useful so that you can "ping" a lighthouse running in this mode. (#342)
- Run smoke tests via `make smoke-docker`. (#287)
- More reported stats, udp memory use on linux, build version (when using Prometheus), firewall,
handshake, and cached packet stats. (#390, #405, #450, #453)
- IPv6 support for the underlay network. (#369)
- End to end testing, run with `make e2e`. (#425, #427, #428)
### Changed
- Darwin will now log stdout/stderr to a file when using `-service` mode. (#303)
- Example systemd unit file now better arranged startup order when using `sshd`
and other fixes. (#317, #412, #438)
- Reduced memory utilization/garbage collection. (#320, #323, #340)
- Reduced CPU utilization. (#329)
- Build against go 1.16. (#381)
- Refactored handshakes to improve performance and correctness. (#401, #402, #404, #416, #451)
- Improved roaming support for mobile clients. (#394, #457)
- Lighthouse performance and correctness improvements. (#406, #418, #429, #433, #437, #442, #449)
- Better ordered startup to enable `sshd`, `stats`, and `dns` subsystems to listen on
the nebula interface. (#375)
### Fixed
- No longer report handshake packets as `lost` in stats. (#331)
- Error handling in the `cert` package. (#339, #373)
- Orphaned pending hostmap entries are cleaned up. (#344)
- Most known data races are now resolved. (#396, #400, #424)
- Refuse to run a lighthouse on an ephemeral port. (#399)
- Removed the global references. (#423, #426, #446)
- Reloading via ssh command avoids a panic. (#447)
- Shutdown is now performed in a cleaner way. (#448)
- Logs will now find their way to Windows event viewer when running under `-service` mode
in Windows. (#443)
## [1.3.0] - 2020-09-22
### Added
- You can emit statistics about non-message packets by setting the option
`stats.message_metrics`. You can similarly emit detailed statistics about
lighthouse packets by setting the option `stats.lighthouse_metrics`. See
the example config for more details. (#230)
- We now support freebsd/amd64. This is experimental, please give us feedback.
(#103)
- We now release a binary for `linux/mips-softfloat` which has also been
stripped to reduce filesize and hopefully have a better chance on running on
small mips devices. (#231)
- You can set `tun.disabled` to true to run a standalone lighthouse without a
tun device (and thus, without root). (#269)
- You can set `logging.disable_timestamp` to remove timestamps from log lines,
which is useful when output is redirected to a logging system that already
adds timestamps. (#288)
### Changed
- Handshakes should now trigger faster, as we try to be proactive with sending
them instead of waiting for the next timer tick in most cases. (#246, #265)
- Previously, we would drop the conntrack table whenever firewall rules were
changed during a SIGHUP. Now, we will maintain the table and just validate
that an entry still matches with the new rule set. (#233)
- Debug logs for firewall drops now include the reason. (#220, #239)
- Logs for handshakes now include the fingerprint of the remote host. (#262)
- Config item `pki.blacklist` is now `pki.blocklist`. (#272)
- Better support for older Linux kernels. We now only set `SO_REUSEPORT` if
`tun.routines` is greater than 1 (default is 1). We also only use the
`recvmmsg` syscall if `listen.batch` is greater than 1 (default is 64).
(#275)
- It is possible to run Nebula as a library inside of another process now.
Note that this is still experimental and the internal APIs around this might
change in minor version releases. (#279)
### Deprecated
- `pki.blacklist` is deprecated in favor of `pki.blocklist` with the same
functionality. Existing configs will continue to load for this release to
allow for migrations. (#272)
### Fixed
- `advmss` is now set correctly for each route table entry when `tun.routes`
is configured to have some routes with higher MTU. (#245)
- Packets that arrive on the tun device with an unroutable destination IP are
now dropped correctly, instead of wasting time making queries to the
lighthouses for IP `0.0.0.0` (#267)
## [1.2.0] - 2020-04-08
### Added
- Add `logging.timestamp_format` config option. The primary purpose of this
change is to allow logging timestamps with millisecond precision. (#187)
- Support `unsafe_routes` on Windows. (#184)
- Add `lighthouse.remote_allow_list` to filter which subnets we will use to
handshake with other hosts. See the example config for more details. (#217)
- Add `lighthouse.local_allow_list` to filter which local IP addresses and/or
interfaces we advertise to the lighthouses. See the example config for more
details. (#217)
- Wireshark dissector plugin. Add this file in `dist/wireshark` to your
Wireshark plugins folder to see Nebula packet headers decoded. (#216)
- systemd unit for Arch, so it can be built entirely from this repo. (#216)
### Changed
- Added a delay to punching via lighthouse signal to deal with race conditions
in some linux conntrack implementations. (#210)
See deprecated, this also adds a new `punchy.delay` option that defaults to `1s`.
- Validate all `lighthouse.hosts` and `static_host_map` VPN IPs are in the
subnet defined in our cert. Exit with a fatal error if they are not in our
subnet, as this is an invalid configuration (we will not have the proper
routes set up to communicate with these hosts). (#170)
- Use absolute paths to system binaries on macOS and Windows. (#191)
- Add configuration options for `handshakes`. This includes options to tweak
`try_interval`, `retries` and `wait_rotation`. See example config for
descriptions. (#179)
- Allow `-config` file to not end in `.yaml` or `yml`. Useful when using
`-test` and automated tools like Ansible that create temporary files without
suffixes. (#189)
- The config test mode, `-test`, is now more thorough and catches more parsing
issues. (#177)
- Various documentation and example fixes. (#196)
- Improved log messages. (#181, #200)
- Dependencies updated. (#188)
### Deprecated
- `punchy`, `punch_back` configuration options have been collapsed under the
now top level `punchy` config directive. (#210)
`punchy.punch` - This is the old `punchy` option. Should we perform NAT hole
punching (default false)?
`punchy.respond` - This is the old `punch_back` option. Should we respond to
hole punching by hole punching back (default false)?
### Fixed
- Reduce memory allocations when not using `unsafe_routes`. (#198)
- Ignore packets from self to self. (#192)
- MTU fixed for `unsafe_routes`. (#209)
## [1.1.0] - 2020-01-17 ## [1.1.0] - 2020-01-17
### Added ### Added
@@ -47,6 +398,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Initial public release. - Initial public release.
[Unreleased]: https://github.com/slackhq/nebula/compare/v1.1.0...HEAD [Unreleased]: https://github.com/slackhq/nebula/compare/v1.6.1...HEAD
[1.6.1]: https://github.com/slackhq/nebula/releases/tag/v1.6.1
[1.6.0]: https://github.com/slackhq/nebula/releases/tag/v1.6.0
[1.5.2]: https://github.com/slackhq/nebula/releases/tag/v1.5.2
[1.5.0]: https://github.com/slackhq/nebula/releases/tag/v1.5.0
[1.4.0]: https://github.com/slackhq/nebula/releases/tag/v1.4.0
[1.3.0]: https://github.com/slackhq/nebula/releases/tag/v1.3.0
[1.2.0]: https://github.com/slackhq/nebula/releases/tag/v1.2.0
[1.1.0]: https://github.com/slackhq/nebula/releases/tag/v1.1.0 [1.1.0]: https://github.com/slackhq/nebula/releases/tag/v1.1.0
[1.0.0]: https://github.com/slackhq/nebula/releases/tag/v1.0.0 [1.0.0]: https://github.com/slackhq/nebula/releases/tag/v1.0.0

115
Makefile
View File

@@ -1,7 +1,34 @@
GOMINVERSION = 1.18
NEBULA_CMD_PATH = "./cmd/nebula" NEBULA_CMD_PATH = "./cmd/nebula"
BUILD_NUMBER ?= dev+$(shell date -u '+%Y%m%d%H%M%S')
GO111MODULE = on GO111MODULE = on
export GO111MODULE export GO111MODULE
CGO_ENABLED = 0
export CGO_ENABLED
# Set up OS specific bits
ifeq ($(OS),Windows_NT)
#TODO: we should be able to ditch awk as well
GOVERSION := $(shell go version | awk "{print substr($$3, 3)}")
GOISMIN := $(shell IF "$(GOVERSION)" GEQ "$(GOMINVERSION)" ECHO 1)
NEBULA_CMD_SUFFIX = .exe
NULL_FILE = nul
else
GOVERSION := $(shell go version | awk '{print substr($$3, 3)}')
GOISMIN := $(shell expr "$(GOVERSION)" ">=" "$(GOMINVERSION)")
NEBULA_CMD_SUFFIX =
NULL_FILE = /dev/null
endif
# Only defined the build number if we haven't already
ifndef BUILD_NUMBER
ifeq ($(shell git describe --exact-match 2>$(NULL_FILE)),)
BUILD_NUMBER = $(shell git describe --abbrev=0 --match "v*" | cut -dv -f2)-$(shell git branch --show-current)-$(shell git describe --long --dirty | cut -d- -f2-)
else
BUILD_NUMBER = $(shell git describe --exact-match --dirty | cut -dv -f2)
endif
endif
LDFLAGS = -X main.Build=$(BUILD_NUMBER)
ALL_LINUX = linux-amd64 \ ALL_LINUX = linux-amd64 \
linux-386 \ linux-386 \
@@ -13,11 +40,34 @@ ALL_LINUX = linux-amd64 \
linux-mips \ linux-mips \
linux-mipsle \ linux-mipsle \
linux-mips64 \ linux-mips64 \
linux-mips64le linux-mips64le \
linux-mips-softfloat \
linux-riscv64
ALL = $(ALL_LINUX) \ ALL = $(ALL_LINUX) \
darwin-amd64 \ darwin-amd64 \
windows-amd64 darwin-arm64 \
freebsd-amd64 \
windows-amd64 \
windows-arm64
e2e:
$(TEST_ENV) go test -tags=e2e_testing -count=1 $(TEST_FLAGS) ./e2e
e2ev: TEST_FLAGS = -v
e2ev: e2e
e2evv: TEST_ENV += TEST_LOGS=1
e2evv: e2ev
e2evvv: TEST_ENV += TEST_LOGS=2
e2evvv: e2ev
e2evvvv: TEST_ENV += TEST_LOGS=3
e2evvvv: e2ev
e2e-bench: TEST_FLAGS = -bench=. -benchmem -run=^$
e2e-bench: e2e
all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert) all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)
@@ -25,31 +75,45 @@ release: $(ALL:%=build/nebula-%.tar.gz)
release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz) release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)
release-freebsd: build/nebula-freebsd-amd64.tar.gz
BUILD_ARGS = -trimpath
bin-windows: build/windows-amd64/nebula.exe build/windows-amd64/nebula-cert.exe bin-windows: build/windows-amd64/nebula.exe build/windows-amd64/nebula-cert.exe
mv $? . mv $? .
bin-windows-arm64: build/windows-arm64/nebula.exe build/windows-arm64/nebula-cert.exe
mv $? .
bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert
mv $? . mv $? .
bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert
mv $? .
bin: bin:
go build -trimpath -ldflags "-X main.Build=$(BUILD_NUMBER)" -o ./nebula ${NEBULA_CMD_PATH} go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH}
go build -trimpath -ldflags "-X main.Build=$(BUILD_NUMBER)" -o ./nebula-cert ./cmd/nebula-cert go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert
install: install:
go install -trimpath -ldflags "-X main.Build=$(BUILD_NUMBER)" ${NEBULA_CMD_PATH} go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH}
go install -trimpath -ldflags "-X main.Build=$(BUILD_NUMBER)" ./cmd/nebula-cert go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ./cmd/nebula-cert
build/linux-arm-%: GOENV += GOARM=$(word 3, $(subst -, ,$*))
build/linux-mips-%: GOENV += GOMIPS=$(word 3, $(subst -, ,$*))
# Build an extra small binary for mips-softfloat
build/linux-mips-softfloat/%: LDFLAGS += -s -w
build/%/nebula: .FORCE build/%/nebula: .FORCE
GOOS=$(firstword $(subst -, , $*)) \ GOOS=$(firstword $(subst -, , $*)) \
GOARCH=$(word 2, $(subst -, ,$*)) \ GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \
GOARM=$(word 3, $(subst -, ,$*)) \ go build $(BUILD_ARGS) -o $@ -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH}
go build -trimpath -o $@ -ldflags "-X main.Build=$(BUILD_NUMBER)" ${NEBULA_CMD_PATH}
build/%/nebula-cert: .FORCE build/%/nebula-cert: .FORCE
GOOS=$(firstword $(subst -, , $*)) \ GOOS=$(firstword $(subst -, , $*)) \
GOARCH=$(word 2, $(subst -, ,$*)) \ GOARCH=$(word 2, $(subst -, ,$*)) $(GOENV) \
GOARM=$(word 3, $(subst -, ,$*)) \ go build $(BUILD_ARGS) -o $@ -ldflags "$(LDFLAGS)" ./cmd/nebula-cert
go build -trimpath -o $@ -ldflags "-X main.Build=$(BUILD_NUMBER)" ./cmd/nebula-cert
build/%/nebula.exe: build/%/nebula build/%/nebula.exe: build/%/nebula
mv $< $@ mv $< $@
@@ -87,20 +151,33 @@ bench-cpu-long:
proto: nebula.pb.go cert/cert.pb.go proto: nebula.pb.go cert/cert.pb.go
nebula.pb.go: nebula.proto .FORCE nebula.pb.go: nebula.proto .FORCE
go build github.com/golang/protobuf/protoc-gen-go go build github.com/gogo/protobuf/protoc-gen-gogofaster
PATH="$(PWD):$(PATH)" protoc --go_out=. $< PATH="$(CURDIR):$(PATH)" protoc --gogofaster_out=paths=source_relative:. $<
rm protoc-gen-go rm protoc-gen-gogofaster
cert/cert.pb.go: cert/cert.proto .FORCE cert/cert.pb.go: cert/cert.proto .FORCE
$(MAKE) -C cert cert.pb.go $(MAKE) -C cert cert.pb.go
service: service:
@echo > /dev/null @echo > $(NULL_FILE)
$(eval NEBULA_CMD_PATH := "./cmd/nebula-service") $(eval NEBULA_CMD_PATH := "./cmd/nebula-service")
ifeq ($(words $(MAKECMDGOALS)),1) ifeq ($(words $(MAKECMDGOALS)),1)
$(MAKE) service ${.DEFAULT_GOAL} --no-print-directory @$(MAKE) service ${.DEFAULT_GOAL} --no-print-directory
endif endif
bin-docker: bin build/linux-amd64/nebula build/linux-amd64/nebula-cert
smoke-docker: bin-docker
cd .github/workflows/smoke/ && ./build.sh
cd .github/workflows/smoke/ && ./smoke.sh
smoke-relay-docker: bin-docker
cd .github/workflows/smoke/ && ./build-relay.sh
cd .github/workflows/smoke/ && ./smoke-relay.sh
smoke-docker-race: BUILD_ARGS = -race
smoke-docker-race: smoke-docker
.FORCE: .FORCE:
.PHONY: test test-cov-html bench bench-cpu bench-cpu-long bin proto release service .PHONY: e2e e2ev e2evv e2evvv e2evvvv test test-cov-html bench bench-cpu bench-cpu-long bin proto release service smoke-docker smoke-docker-race
.DEFAULT_GOAL := bin .DEFAULT_GOAL := bin
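Putting the new targets together, a typical local workflow against this Makefile might look like the following (hedged; the targets are the ones defined above):

    make bin-docker          # build the linux-amd64 binaries used by the smoke images
    make smoke-docker        # build the smoke image and run the firewall smoke test
    make smoke-relay-docker  # same, but for the relay smoke test
    make e2evv               # run the end-to-end tests with test logs enabled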

View File

@@ -1,7 +1,6 @@
## What is Nebula? ## What is Nebula?
Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security. Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security.
It lets you seamlessly connect computers anywhere in the world. Nebula is portable, and runs on Linux, OSX, and Windows. It lets you seamlessly connect computers anywhere in the world. Nebula is portable, and runs on Linux, OSX, Windows, iOS, and Android.
(Also: keep this quiet, but we have an early prototype running on iOS).
It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers. It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers.
Nebula incorporates a number of existing concepts like encryption, security groups, certificates, Nebula incorporates a number of existing concepts like encryption, security groups, certificates,
@@ -9,9 +8,39 @@ and tunneling, and each of those individual pieces existed before Nebula in vari
What makes Nebula different to existing offerings is that it brings all of these ideas together, What makes Nebula different to existing offerings is that it brings all of these ideas together,
resulting in a sum that is greater than its individual parts. resulting in a sum that is greater than its individual parts.
Further documentation can be found [here](https://www.defined.net/nebula/).
You can read more about Nebula [here](https://medium.com/p/884110a5579). You can read more about Nebula [here](https://medium.com/p/884110a5579).
You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU) You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU).
## Supported Platforms
#### Desktop and Server
Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for downloads or see the [Distribution Packages](https://github.com/slackhq/nebula#distribution-packages) section.
- Linux - 64 and 32 bit, arm, and others
- Windows
- MacOS
- Freebsd
#### Distribution Packages
- [Arch Linux](https://archlinux.org/packages/community/x86_64/nebula/)
```
$ sudo pacman -S nebula
```
- [Fedora Linux](https://copr.fedorainfracloud.org/coprs/jdoss/nebula/)
```
$ sudo dnf copr enable jdoss/nebula
$ sudo dnf install nebula
```
#### Mobile
- [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&amp;itscg=30200)
- [Android](https://play.google.com/store/apps/details?id=net.defined.mobile_nebula&pcampaignid=pcampaignidMKT-Other-global-all-co-prtnr-py-PartBadge-Mar2515-1)
## Technical Overview ## Technical Overview
@@ -21,15 +50,15 @@ Nebula's user-defined groups allow for provider agnostic traffic filtering betwe
Discovery nodes allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs. Discovery nodes allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs.
Users can move data between nodes in any number of cloud service providers, datacenters, and endpoints, without needing to maintain a particular addressing scheme. Users can move data between nodes in any number of cloud service providers, datacenters, and endpoints, without needing to maintain a particular addressing scheme.
Nebula uses elliptic curve Diffie-Hellman key exchange, and AES-256-GCM in its default configuration. Nebula uses Elliptic-curve Diffie-Hellman (`ECDH`) key exchange and `AES-256-GCM` in its default configuration.
Nebula was created to provide a mechanism for groups hosts to communicate securely, even across the internet, while enabling expressive firewall definitions similar in style to cloud security groups. Nebula was created to provide a mechanism for groups of hosts to communicate securely, even across the internet, while enabling expressive firewall definitions similar in style to cloud security groups.
## Getting started (quickly) ## Getting started (quickly)
To set up a Nebula network, you'll need: To set up a Nebula network, you'll need:
#### 1. The [Nebula binaries](https://github.com/slackhq/nebula/releases) for your specific platform. Specifically you'll need `nebula-cert` and the specific nebula binary for each platform you use. #### 1. The [Nebula binaries](https://github.com/slackhq/nebula/releases) or [Distribution Packages](https://github.com/slackhq/nebula#distribution-packages) for your specific platform. Specifically you'll need `nebula-cert` and the specific nebula binary for each platform you use.
#### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse. #### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse.

361
allow_list.go Normal file
View File

@@ -0,0 +1,361 @@
package nebula
import (
"fmt"
"net"
"regexp"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
)
type AllowList struct {
// The values of this cidrTree are `bool`, signifying allow/deny
cidrTree *cidr.Tree6
}
type RemoteAllowList struct {
AllowList *AllowList
// Inside Range Specific, keys of this tree are inside CIDRs and values
// are *AllowList
insideAllowLists *cidr.Tree6
}
type LocalAllowList struct {
AllowList *AllowList
// To avoid ambiguity, all rules must be true, or all rules must be false.
nameRules []AllowListNameRule
}
type AllowListNameRule struct {
Name *regexp.Regexp
Allow bool
}
func NewLocalAllowListFromConfig(c *config.C, k string) (*LocalAllowList, error) {
var nameRules []AllowListNameRule
handleKey := func(key string, value interface{}) (bool, error) {
if key == "interfaces" {
var err error
nameRules, err = getAllowListInterfaces(k, value)
if err != nil {
return false, err
}
return true, nil
}
return false, nil
}
al, err := newAllowListFromConfig(c, k, handleKey)
if err != nil {
return nil, err
}
return &LocalAllowList{AllowList: al, nameRules: nameRules}, nil
}
func NewRemoteAllowListFromConfig(c *config.C, k, rangesKey string) (*RemoteAllowList, error) {
al, err := newAllowListFromConfig(c, k, nil)
if err != nil {
return nil, err
}
remoteAllowRanges, err := getRemoteAllowRanges(c, rangesKey)
if err != nil {
return nil, err
}
return &RemoteAllowList{AllowList: al, insideAllowLists: remoteAllowRanges}, nil
}
// If the handleKey func returns true, the rest of the parsing is skipped
// for this key. This allows parsing of special values like `interfaces`.
func newAllowListFromConfig(c *config.C, k string, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) {
r := c.Get(k)
if r == nil {
return nil, nil
}
return newAllowList(k, r, handleKey)
}
// If the handleKey func returns true, the rest of the parsing is skipped
// for this key. This allows parsing of special values like `interfaces`.
func newAllowList(k string, raw interface{}, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) {
rawMap, ok := raw.(map[interface{}]interface{})
if !ok {
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
}
tree := cidr.NewTree6()
// Keep track of the rules we have added for both ipv4 and ipv6
type allowListRules struct {
firstValue bool
allValuesMatch bool
defaultSet bool
allValues bool
}
rules4 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false}
rules6 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false}
for rawKey, rawValue := range rawMap {
rawCIDR, ok := rawKey.(string)
if !ok {
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
}
if handleKey != nil {
handled, err := handleKey(rawCIDR, rawValue)
if err != nil {
return nil, err
}
if handled {
continue
}
}
value, ok := rawValue.(bool)
if !ok {
return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue)
}
_, ipNet, err := net.ParseCIDR(rawCIDR)
if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
}
// TODO: should we error on duplicate CIDRs in the config?
tree.AddCIDR(ipNet, value)
maskBits, maskSize := ipNet.Mask.Size()
var rules *allowListRules
if maskSize == 32 {
rules = &rules4
} else {
rules = &rules6
}
if rules.firstValue {
rules.allValues = value
rules.firstValue = false
} else {
if value != rules.allValues {
rules.allValuesMatch = false
}
}
// Check if this is 0.0.0.0/0 or ::/0
if maskBits == 0 {
rules.defaultSet = true
}
}
if !rules4.defaultSet {
if rules4.allValuesMatch {
_, zeroCIDR, _ := net.ParseCIDR("0.0.0.0/0")
tree.AddCIDR(zeroCIDR, !rules4.allValues)
} else {
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for 0.0.0.0/0", k)
}
}
if !rules6.defaultSet {
if rules6.allValuesMatch {
_, zeroCIDR, _ := net.ParseCIDR("::/0")
tree.AddCIDR(zeroCIDR, !rules6.allValues)
} else {
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for ::/0", k)
}
}
return &AllowList{cidrTree: tree}, nil
}
func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error) {
var nameRules []AllowListNameRule
rawRules, ok := v.(map[interface{}]interface{})
if !ok {
return nil, fmt.Errorf("config `%s.interfaces` is invalid (type %T): %v", k, v, v)
}
firstEntry := true
var allValues bool
for rawName, rawAllow := range rawRules {
name, ok := rawName.(string)
if !ok {
return nil, fmt.Errorf("config `%s.interfaces` has invalid key (type %T): %v", k, rawName, rawName)
}
allow, ok := rawAllow.(bool)
if !ok {
return nil, fmt.Errorf("config `%s.interfaces` has invalid value (type %T): %v", k, rawAllow, rawAllow)
}
nameRE, err := regexp.Compile("^" + name + "$")
if err != nil {
return nil, fmt.Errorf("config `%s.interfaces` has invalid key: %s: %v", k, name, err)
}
nameRules = append(nameRules, AllowListNameRule{
Name: nameRE,
Allow: allow,
})
if firstEntry {
allValues = allow
firstEntry = false
} else {
if allow != allValues {
return nil, fmt.Errorf("config `%s.interfaces` values must all be the same true/false value", k)
}
}
}
return nameRules, nil
}
func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) {
value := c.Get(k)
if value == nil {
return nil, nil
}
remoteAllowRanges := cidr.NewTree6()
rawMap, ok := value.(map[interface{}]interface{})
if !ok {
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value)
}
for rawKey, rawValue := range rawMap {
rawCIDR, ok := rawKey.(string)
if !ok {
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
}
allowList, err := newAllowList(fmt.Sprintf("%s.%s", k, rawCIDR), rawValue, nil)
if err != nil {
return nil, err
}
_, ipNet, err := net.ParseCIDR(rawCIDR)
if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
}
remoteAllowRanges.AddCIDR(ipNet, allowList)
}
return remoteAllowRanges, nil
}
func (al *AllowList) Allow(ip net.IP) bool {
if al == nil {
return true
}
result := al.cidrTree.MostSpecificContains(ip)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
}
func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {
if al == nil {
return true
}
result := al.cidrTree.MostSpecificContainsIpV4(ip)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
}
func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
if al == nil {
return true
}
result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
}
func (al *LocalAllowList) Allow(ip net.IP) bool {
if al == nil {
return true
}
return al.AllowList.Allow(ip)
}
func (al *LocalAllowList) AllowName(name string) bool {
if al == nil || len(al.nameRules) == 0 {
return true
}
for _, rule := range al.nameRules {
if rule.Name.MatchString(name) {
return rule.Allow
}
}
// If no rules match, return the default, which is the inverse of the rules
return !al.nameRules[0].Allow
}
func (al *RemoteAllowList) AllowUnknownVpnIp(ip net.IP) bool {
if al == nil {
return true
}
return al.AllowList.Allow(ip)
}
func (al *RemoteAllowList) Allow(vpnIp iputil.VpnIp, ip net.IP) bool {
if !al.getInsideAllowList(vpnIp).Allow(ip) {
return false
}
return al.AllowList.Allow(ip)
}
func (al *RemoteAllowList) AllowIpV4(vpnIp iputil.VpnIp, ip iputil.VpnIp) bool {
if al == nil {
return true
}
if !al.getInsideAllowList(vpnIp).AllowIpV4(ip) {
return false
}
return al.AllowList.AllowIpV4(ip)
}
func (al *RemoteAllowList) AllowIpV6(vpnIp iputil.VpnIp, hi, lo uint64) bool {
if al == nil {
return true
}
if !al.getInsideAllowList(vpnIp).AllowIpV6(hi, lo) {
return false
}
return al.AllowList.AllowIpV6(hi, lo)
}
func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList {
if al.insideAllowLists != nil {
inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
if inside != nil {
return inside.(*AllowList)
}
}
return nil
}
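
For orientation, the allow list semantics above (per-CIDR boolean rules, a mandatory default for 0.0.0.0/0 and ::/0 whenever rules mix true and false, most-specific match wins, and interface name rules that must all share one value) can be exercised with a small test-style sketch. This is illustrative only; it mirrors the helpers used in the tests below, and the `allowlist` config key is arbitrary.

```go
package nebula

import (
	"fmt"
	"net"

	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/test"
)

// ExampleLocalAllowList sketches the config shape newAllowList expects and
// how lookups resolve. The key "allowlist" is arbitrary.
func ExampleLocalAllowList() {
	l := test.NewLogger()
	c := config.NewC(l)
	c.Settings["allowlist"] = map[interface{}]interface{}{
		"0.0.0.0/0":     true,  // explicit default, required because rules below mix true/false
		"10.0.0.0/8":    false, // deny 10/8 ...
		"10.42.42.0/24": true,  // ... except this more specific subnet
		"interfaces": map[interface{}]interface{}{
			`docker.*`: false, // name rules must all share one allow/deny value
		},
	}

	al, err := NewLocalAllowListFromConfig(c, "allowlist")
	if err != nil {
		panic(err)
	}

	fmt.Println(al.Allow(net.ParseIP("10.1.2.3")))   // false: matched 10.0.0.0/8
	fmt.Println(al.Allow(net.ParseIP("10.42.42.7"))) // true: most specific CIDR wins
	fmt.Println(al.Allow(net.ParseIP("192.0.2.1")))  // true: 0.0.0.0/0 default
	fmt.Println(al.AllowName("docker0"))             // false: matched the docker.* rule
	fmt.Println(al.AllowName("eth0"))                // true: no match, inverse of the rules
}
```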

allow_list_test.go (new file, 145 lines)

@@ -0,0 +1,145 @@
package nebula
import (
"net"
"regexp"
"testing"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert"
)
func TestNewAllowListFromConfig(t *testing.T) {
l := test.NewLogger()
c := config.NewC(l)
c.Settings["allowlist"] = map[interface{}]interface{}{
"192.168.0.0": true,
}
r, err := newAllowListFromConfig(c, "allowlist", nil)
assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0")
assert.Nil(t, r)
c.Settings["allowlist"] = map[interface{}]interface{}{
"192.168.0.0/16": "abc",
}
r, err = newAllowListFromConfig(c, "allowlist", nil)
assert.EqualError(t, err, "config `allowlist` has invalid value (type string): abc")
c.Settings["allowlist"] = map[interface{}]interface{}{
"192.168.0.0/16": true,
"10.0.0.0/8": false,
}
r, err = newAllowListFromConfig(c, "allowlist", nil)
assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0")
c.Settings["allowlist"] = map[interface{}]interface{}{
"0.0.0.0/0": true,
"10.0.0.0/8": false,
"10.42.42.0/24": true,
"fd00::/8": true,
"fd00:fd00::/16": false,
}
r, err = newAllowListFromConfig(c, "allowlist", nil)
assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for ::/0")
c.Settings["allowlist"] = map[interface{}]interface{}{
"0.0.0.0/0": true,
"10.0.0.0/8": false,
"10.42.42.0/24": true,
}
r, err = newAllowListFromConfig(c, "allowlist", nil)
if assert.NoError(t, err) {
assert.NotNil(t, r)
}
c.Settings["allowlist"] = map[interface{}]interface{}{
"0.0.0.0/0": true,
"10.0.0.0/8": false,
"10.42.42.0/24": true,
"::/0": false,
"fd00::/8": true,
"fd00:fd00::/16": false,
}
r, err = newAllowListFromConfig(c, "allowlist", nil)
if assert.NoError(t, err) {
assert.NotNil(t, r)
}
// Test interface names
c.Settings["allowlist"] = map[interface{}]interface{}{
"interfaces": map[interface{}]interface{}{
`docker.*`: "foo",
},
}
lr, err := NewLocalAllowListFromConfig(c, "allowlist")
assert.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo")
c.Settings["allowlist"] = map[interface{}]interface{}{
"interfaces": map[interface{}]interface{}{
`docker.*`: false,
`eth.*`: true,
},
}
lr, err = NewLocalAllowListFromConfig(c, "allowlist")
assert.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value")
c.Settings["allowlist"] = map[interface{}]interface{}{
"interfaces": map[interface{}]interface{}{
`docker.*`: false,
},
}
lr, err = NewLocalAllowListFromConfig(c, "allowlist")
if assert.NoError(t, err) {
assert.NotNil(t, lr)
}
}
func TestAllowList_Allow(t *testing.T) {
assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1")))
tree := cidr.NewTree6()
tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true)
tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false)
tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true)
tree.AddCIDR(cidr.Parse("10.42.0.0/16"), true)
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), true)
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), false)
tree.AddCIDR(cidr.Parse("::1/128"), true)
tree.AddCIDR(cidr.Parse("::2/128"), false)
al := &AllowList{cidrTree: tree}
assert.Equal(t, true, al.Allow(net.ParseIP("1.1.1.1")))
assert.Equal(t, false, al.Allow(net.ParseIP("10.0.0.4")))
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.42.42")))
assert.Equal(t, false, al.Allow(net.ParseIP("10.42.42.41")))
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.0.1")))
assert.Equal(t, true, al.Allow(net.ParseIP("::1")))
assert.Equal(t, false, al.Allow(net.ParseIP("::2")))
}
func TestLocalAllowList_AllowName(t *testing.T) {
assert.Equal(t, true, ((*LocalAllowList)(nil)).AllowName("docker0"))
rules := []AllowListNameRule{
{Name: regexp.MustCompile("^docker.*$"), Allow: false},
{Name: regexp.MustCompile("^tun.*$"), Allow: false},
}
al := &LocalAllowList{nameRules: rules}
assert.Equal(t, false, al.AllowName("docker0"))
assert.Equal(t, false, al.AllowName("tun0"))
assert.Equal(t, true, al.AllowName("eth0"))
rules = []AllowListNameRule{
{Name: regexp.MustCompile("^eth.*$"), Allow: true},
{Name: regexp.MustCompile("^ens.*$"), Allow: true},
}
al = &LocalAllowList{nameRules: rules}
assert.Equal(t, false, al.AllowName("docker0"))
assert.Equal(t, true, al.AllowName("eth0"))
assert.Equal(t, true, al.AllowName("ens5"))
}


@@ -26,7 +26,7 @@ func NewBits(bits uint64) *Bits {
} }
} }
func (b *Bits) Check(i uint64) bool { func (b *Bits) Check(l logrus.FieldLogger, i uint64) bool {
// If i is the next number, return true. // If i is the next number, return true.
if i > b.current || (i == 0 && b.firstSeen == false && b.current < b.length) { if i > b.current || (i == 0 && b.firstSeen == false && b.current < b.length) {
return true return true
@@ -47,7 +47,7 @@ func (b *Bits) Check(i uint64) bool {
return false return false
} }
func (b *Bits) Update(i uint64) bool { func (b *Bits) Update(l *logrus.Logger, i uint64) bool {
// If i is the next number, return true and update current. // If i is the next number, return true and update current.
if i == b.current+1 { if i == b.current+1 {
// Report missed packets, we can only understand what was missed after the first window has been gone through // Report missed packets, we can only understand what was missed after the first window has been gone through


@@ -3,10 +3,12 @@ package nebula
import ( import (
"testing" "testing"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestBits(t *testing.T) { func TestBits(t *testing.T) {
l := test.NewLogger()
b := NewBits(10) b := NewBits(10)
// make sure it is the right size // make sure it is the right size
@@ -14,46 +16,46 @@ func TestBits(t *testing.T) {
// This is initialized to zero - receive one. This should work. // This is initialized to zero - receive one. This should work.
assert.True(t, b.Check(1)) assert.True(t, b.Check(l, 1))
u := b.Update(1) u := b.Update(l, 1)
assert.True(t, u) assert.True(t, u)
assert.EqualValues(t, 1, b.current) assert.EqualValues(t, 1, b.current)
g := []bool{false, true, false, false, false, false, false, false, false, false} g := []bool{false, true, false, false, false, false, false, false, false, false}
assert.Equal(t, g, b.bits) assert.Equal(t, g, b.bits)
// Receive two // Receive two
assert.True(t, b.Check(2)) assert.True(t, b.Check(l, 2))
u = b.Update(2) u = b.Update(l, 2)
assert.True(t, u) assert.True(t, u)
assert.EqualValues(t, 2, b.current) assert.EqualValues(t, 2, b.current)
g = []bool{false, true, true, false, false, false, false, false, false, false} g = []bool{false, true, true, false, false, false, false, false, false, false}
assert.Equal(t, g, b.bits) assert.Equal(t, g, b.bits)
// Receive two again - it will fail // Receive two again - it will fail
assert.False(t, b.Check(2)) assert.False(t, b.Check(l, 2))
u = b.Update(2) u = b.Update(l, 2)
assert.False(t, u) assert.False(t, u)
assert.EqualValues(t, 2, b.current) assert.EqualValues(t, 2, b.current)
// Jump ahead to 15, which should clear everything and set the 6th element // Jump ahead to 15, which should clear everything and set the 6th element
assert.True(t, b.Check(15)) assert.True(t, b.Check(l, 15))
u = b.Update(15) u = b.Update(l, 15)
assert.True(t, u) assert.True(t, u)
assert.EqualValues(t, 15, b.current) assert.EqualValues(t, 15, b.current)
g = []bool{false, false, false, false, false, true, false, false, false, false} g = []bool{false, false, false, false, false, true, false, false, false, false}
assert.Equal(t, g, b.bits) assert.Equal(t, g, b.bits)
// Mark 14, which is allowed because it is in the window // Mark 14, which is allowed because it is in the window
assert.True(t, b.Check(14)) assert.True(t, b.Check(l, 14))
u = b.Update(14) u = b.Update(l, 14)
assert.True(t, u) assert.True(t, u)
assert.EqualValues(t, 15, b.current) assert.EqualValues(t, 15, b.current)
g = []bool{false, false, false, false, true, true, false, false, false, false} g = []bool{false, false, false, false, true, true, false, false, false, false}
assert.Equal(t, g, b.bits) assert.Equal(t, g, b.bits)
// Mark 5, which is not allowed because it is not in the window // Mark 5, which is not allowed because it is not in the window
assert.False(t, b.Check(5)) assert.False(t, b.Check(l, 5))
u = b.Update(5) u = b.Update(l, 5)
assert.False(t, u) assert.False(t, u)
assert.EqualValues(t, 15, b.current) assert.EqualValues(t, 15, b.current)
g = []bool{false, false, false, false, true, true, false, false, false, false} g = []bool{false, false, false, false, true, true, false, false, false, false}
@@ -61,63 +63,65 @@ func TestBits(t *testing.T) {
// make sure we handle wrapping around once to the current position // make sure we handle wrapping around once to the current position
b = NewBits(10) b = NewBits(10)
assert.True(t, b.Update(1)) assert.True(t, b.Update(l, 1))
assert.True(t, b.Update(11)) assert.True(t, b.Update(l, 11))
assert.Equal(t, []bool{false, true, false, false, false, false, false, false, false, false}, b.bits) assert.Equal(t, []bool{false, true, false, false, false, false, false, false, false, false}, b.bits)
// Walk through a few windows in order // Walk through a few windows in order
b = NewBits(10) b = NewBits(10)
for i := uint64(0); i <= 100; i++ { for i := uint64(0); i <= 100; i++ {
assert.True(t, b.Check(i), "Error while checking %v", i) assert.True(t, b.Check(l, i), "Error while checking %v", i)
assert.True(t, b.Update(i), "Error while updating %v", i) assert.True(t, b.Update(l, i), "Error while updating %v", i)
} }
} }
func TestBitsDupeCounter(t *testing.T) { func TestBitsDupeCounter(t *testing.T) {
l := test.NewLogger()
b := NewBits(10) b := NewBits(10)
b.lostCounter.Clear() b.lostCounter.Clear()
b.dupeCounter.Clear() b.dupeCounter.Clear()
b.outOfWindowCounter.Clear() b.outOfWindowCounter.Clear()
assert.True(t, b.Update(1)) assert.True(t, b.Update(l, 1))
assert.Equal(t, int64(0), b.dupeCounter.Count()) assert.Equal(t, int64(0), b.dupeCounter.Count())
assert.False(t, b.Update(1)) assert.False(t, b.Update(l, 1))
assert.Equal(t, int64(1), b.dupeCounter.Count()) assert.Equal(t, int64(1), b.dupeCounter.Count())
assert.True(t, b.Update(2)) assert.True(t, b.Update(l, 2))
assert.Equal(t, int64(1), b.dupeCounter.Count()) assert.Equal(t, int64(1), b.dupeCounter.Count())
assert.True(t, b.Update(3)) assert.True(t, b.Update(l, 3))
assert.Equal(t, int64(1), b.dupeCounter.Count()) assert.Equal(t, int64(1), b.dupeCounter.Count())
assert.False(t, b.Update(1)) assert.False(t, b.Update(l, 1))
assert.Equal(t, int64(0), b.lostCounter.Count()) assert.Equal(t, int64(0), b.lostCounter.Count())
assert.Equal(t, int64(2), b.dupeCounter.Count()) assert.Equal(t, int64(2), b.dupeCounter.Count())
assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
} }
func TestBitsOutOfWindowCounter(t *testing.T) { func TestBitsOutOfWindowCounter(t *testing.T) {
l := test.NewLogger()
b := NewBits(10) b := NewBits(10)
b.lostCounter.Clear() b.lostCounter.Clear()
b.dupeCounter.Clear() b.dupeCounter.Clear()
b.outOfWindowCounter.Clear() b.outOfWindowCounter.Clear()
assert.True(t, b.Update(20)) assert.True(t, b.Update(l, 20))
assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
assert.True(t, b.Update(21)) assert.True(t, b.Update(l, 21))
assert.True(t, b.Update(22)) assert.True(t, b.Update(l, 22))
assert.True(t, b.Update(23)) assert.True(t, b.Update(l, 23))
assert.True(t, b.Update(24)) assert.True(t, b.Update(l, 24))
assert.True(t, b.Update(25)) assert.True(t, b.Update(l, 25))
assert.True(t, b.Update(26)) assert.True(t, b.Update(l, 26))
assert.True(t, b.Update(27)) assert.True(t, b.Update(l, 27))
assert.True(t, b.Update(28)) assert.True(t, b.Update(l, 28))
assert.True(t, b.Update(29)) assert.True(t, b.Update(l, 29))
assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
assert.False(t, b.Update(0)) assert.False(t, b.Update(l, 0))
assert.Equal(t, int64(1), b.outOfWindowCounter.Count()) assert.Equal(t, int64(1), b.outOfWindowCounter.Count())
//tODO: make sure lostcounter doesn't increase in orderly increment //tODO: make sure lostcounter doesn't increase in orderly increment
@@ -127,23 +131,24 @@ func TestBitsOutOfWindowCounter(t *testing.T) {
} }
func TestBitsLostCounter(t *testing.T) { func TestBitsLostCounter(t *testing.T) {
l := test.NewLogger()
b := NewBits(10) b := NewBits(10)
b.lostCounter.Clear() b.lostCounter.Clear()
b.dupeCounter.Clear() b.dupeCounter.Clear()
b.outOfWindowCounter.Clear() b.outOfWindowCounter.Clear()
//assert.True(t, b.Update(0)) //assert.True(t, b.Update(0))
assert.True(t, b.Update(0)) assert.True(t, b.Update(l, 0))
assert.True(t, b.Update(20)) assert.True(t, b.Update(l, 20))
assert.True(t, b.Update(21)) assert.True(t, b.Update(l, 21))
assert.True(t, b.Update(22)) assert.True(t, b.Update(l, 22))
assert.True(t, b.Update(23)) assert.True(t, b.Update(l, 23))
assert.True(t, b.Update(24)) assert.True(t, b.Update(l, 24))
assert.True(t, b.Update(25)) assert.True(t, b.Update(l, 25))
assert.True(t, b.Update(26)) assert.True(t, b.Update(l, 26))
assert.True(t, b.Update(27)) assert.True(t, b.Update(l, 27))
assert.True(t, b.Update(28)) assert.True(t, b.Update(l, 28))
assert.True(t, b.Update(29)) assert.True(t, b.Update(l, 29))
assert.Equal(t, int64(20), b.lostCounter.Count()) assert.Equal(t, int64(20), b.lostCounter.Count())
assert.Equal(t, int64(0), b.dupeCounter.Count()) assert.Equal(t, int64(0), b.dupeCounter.Count())
assert.Equal(t, int64(0), b.outOfWindowCounter.Count()) assert.Equal(t, int64(0), b.outOfWindowCounter.Count())
@@ -153,56 +158,56 @@ func TestBitsLostCounter(t *testing.T) {
b.dupeCounter.Clear() b.dupeCounter.Clear()
b.outOfWindowCounter.Clear() b.outOfWindowCounter.Clear()
assert.True(t, b.Update(0)) assert.True(t, b.Update(l, 0))
assert.Equal(t, int64(0), b.lostCounter.Count()) assert.Equal(t, int64(0), b.lostCounter.Count())
assert.True(t, b.Update(9)) assert.True(t, b.Update(l, 9))
assert.Equal(t, int64(0), b.lostCounter.Count()) assert.Equal(t, int64(0), b.lostCounter.Count())
// 10 will set 0 index, 0 was already set, no lost packets // 10 will set 0 index, 0 was already set, no lost packets
assert.True(t, b.Update(10)) assert.True(t, b.Update(l, 10))
assert.Equal(t, int64(0), b.lostCounter.Count()) assert.Equal(t, int64(0), b.lostCounter.Count())
// 11 will set 1 index, 1 was missed, we should see 1 packet lost // 11 will set 1 index, 1 was missed, we should see 1 packet lost
assert.True(t, b.Update(11)) assert.True(t, b.Update(l, 11))
assert.Equal(t, int64(1), b.lostCounter.Count()) assert.Equal(t, int64(1), b.lostCounter.Count())
// Now let's fill in the window, should end up with 8 lost packets // Now let's fill in the window, should end up with 8 lost packets
assert.True(t, b.Update(12)) assert.True(t, b.Update(l, 12))
assert.True(t, b.Update(13)) assert.True(t, b.Update(l, 13))
assert.True(t, b.Update(14)) assert.True(t, b.Update(l, 14))
assert.True(t, b.Update(15)) assert.True(t, b.Update(l, 15))
assert.True(t, b.Update(16)) assert.True(t, b.Update(l, 16))
assert.True(t, b.Update(17)) assert.True(t, b.Update(l, 17))
assert.True(t, b.Update(18)) assert.True(t, b.Update(l, 18))
assert.True(t, b.Update(19)) assert.True(t, b.Update(l, 19))
assert.Equal(t, int64(8), b.lostCounter.Count()) assert.Equal(t, int64(8), b.lostCounter.Count())
// Jump ahead by a window size // Jump ahead by a window size
assert.True(t, b.Update(29)) assert.True(t, b.Update(l, 29))
assert.Equal(t, int64(8), b.lostCounter.Count()) assert.Equal(t, int64(8), b.lostCounter.Count())
// Now lets walk ahead normally through the window, the missed packets should fill in // Now lets walk ahead normally through the window, the missed packets should fill in
assert.True(t, b.Update(30)) assert.True(t, b.Update(l, 30))
assert.True(t, b.Update(31)) assert.True(t, b.Update(l, 31))
assert.True(t, b.Update(32)) assert.True(t, b.Update(l, 32))
assert.True(t, b.Update(33)) assert.True(t, b.Update(l, 33))
assert.True(t, b.Update(34)) assert.True(t, b.Update(l, 34))
assert.True(t, b.Update(35)) assert.True(t, b.Update(l, 35))
assert.True(t, b.Update(36)) assert.True(t, b.Update(l, 36))
assert.True(t, b.Update(37)) assert.True(t, b.Update(l, 37))
assert.True(t, b.Update(38)) assert.True(t, b.Update(l, 38))
// 39 packets tracked, 22 seen, 17 lost // 39 packets tracked, 22 seen, 17 lost
assert.Equal(t, int64(17), b.lostCounter.Count()) assert.Equal(t, int64(17), b.lostCounter.Count())
// Jump ahead by 2 windows, should have recording 1 full window missing // Jump ahead by 2 windows, should have recording 1 full window missing
assert.True(t, b.Update(58)) assert.True(t, b.Update(l, 58))
assert.Equal(t, int64(27), b.lostCounter.Count()) assert.Equal(t, int64(27), b.lostCounter.Count())
// Now lets walk ahead normally through the window, the missed packets should fill in from this window // Now lets walk ahead normally through the window, the missed packets should fill in from this window
assert.True(t, b.Update(59)) assert.True(t, b.Update(l, 59))
assert.True(t, b.Update(60)) assert.True(t, b.Update(l, 60))
assert.True(t, b.Update(61)) assert.True(t, b.Update(l, 61))
assert.True(t, b.Update(62)) assert.True(t, b.Update(l, 62))
assert.True(t, b.Update(63)) assert.True(t, b.Update(l, 63))
assert.True(t, b.Update(64)) assert.True(t, b.Update(l, 64))
assert.True(t, b.Update(65)) assert.True(t, b.Update(l, 65))
assert.True(t, b.Update(66)) assert.True(t, b.Update(l, 66))
assert.True(t, b.Update(67)) assert.True(t, b.Update(l, 67))
// 68 packets tracked, 32 seen, 36 missed // 68 packets tracked, 32 seen, 36 missed
assert.Equal(t, int64(36), b.lostCounter.Count()) assert.Equal(t, int64(36), b.lostCounter.Count())
assert.Equal(t, int64(0), b.dupeCounter.Count()) assert.Equal(t, int64(0), b.dupeCounter.Count())
@@ -212,10 +217,10 @@ func TestBitsLostCounter(t *testing.T) {
func BenchmarkBits(b *testing.B) { func BenchmarkBits(b *testing.B) {
z := NewBits(10) z := NewBits(10)
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
for i, _ := range z.bits { for i := range z.bits {
z.bits[i] = true z.bits[i] = true
} }
for i, _ := range z.bits { for i := range z.bits {
z.bits[i] = false z.bits[i] = false
} }

cert.go

@@ -7,11 +7,11 @@ import (
"strings" "strings"
"time" "time"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
) )
var trustedCAs *cert.NebulaCAPool
type CertState struct { type CertState struct {
certificate *cert.NebulaCertificate certificate *cert.NebulaCertificate
rawCertificate []byte rawCertificate []byte
@@ -46,16 +46,11 @@ func NewCertState(certificate *cert.NebulaCertificate, privateKey []byte) (*Cert
return cs, nil return cs, nil
} }
func NewCertStateFromConfig(c *Config) (*CertState, error) { func NewCertStateFromConfig(c *config.C) (*CertState, error) {
var pemPrivateKey []byte var pemPrivateKey []byte
var err error var err error
privPathOrPEM := c.GetString("pki.key", "") privPathOrPEM := c.GetString("pki.key", "")
if privPathOrPEM == "" {
// Support backwards compat with the old x509
//TODO: remove after this is rolled out everywhere - NB 2018/02/23
privPathOrPEM = c.GetString("x509.key", "")
}
if privPathOrPEM == "" { if privPathOrPEM == "" {
return nil, errors.New("no pki.key path or PEM data provided") return nil, errors.New("no pki.key path or PEM data provided")
@@ -79,11 +74,6 @@ func NewCertStateFromConfig(c *Config) (*CertState, error) {
var rawCert []byte var rawCert []byte
pubPathOrPEM := c.GetString("pki.cert", "") pubPathOrPEM := c.GetString("pki.cert", "")
if pubPathOrPEM == "" {
// Support backwards compat with the old x509
//TODO: remove after this is rolled out everywhere - NB 2018/02/23
pubPathOrPEM = c.GetString("x509.cert", "")
}
if pubPathOrPEM == "" { if pubPathOrPEM == "" {
return nil, errors.New("no pki.cert path or PEM data provided") return nil, errors.New("no pki.cert path or PEM data provided")
@@ -119,24 +109,18 @@ func NewCertStateFromConfig(c *Config) (*CertState, error) {
return NewCertState(nebulaCert, rawKey) return NewCertState(nebulaCert, rawKey)
} }
func loadCAFromConfig(c *Config) (*cert.NebulaCAPool, error) { func loadCAFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error) {
var rawCA []byte var rawCA []byte
var err error var err error
caPathOrPEM := c.GetString("pki.ca", "") caPathOrPEM := c.GetString("pki.ca", "")
if caPathOrPEM == "" {
// Support backwards compat with the old x509
//TODO: remove after this is rolled out everywhere - NB 2018/02/23
caPathOrPEM = c.GetString("x509.ca", "")
}
if caPathOrPEM == "" { if caPathOrPEM == "" {
return nil, errors.New("no pki.ca path or PEM data provided") return nil, errors.New("no pki.ca path or PEM data provided")
} }
if strings.Contains(caPathOrPEM, "-----BEGIN") { if strings.Contains(caPathOrPEM, "-----BEGIN") {
rawCA = []byte(caPathOrPEM) rawCA = []byte(caPathOrPEM)
caPathOrPEM = "<inline>"
} else { } else {
rawCA, err = ioutil.ReadFile(caPathOrPEM) rawCA, err = ioutil.ReadFile(caPathOrPEM)
if err != nil { if err != nil {
@@ -145,14 +129,34 @@ func loadCAFromConfig(c *Config) (*cert.NebulaCAPool, error) {
} }
CAs, err := cert.NewCAPoolFromBytes(rawCA) CAs, err := cert.NewCAPoolFromBytes(rawCA)
if err != nil { if errors.Is(err, cert.ErrExpired) {
var expired int
for _, cert := range CAs.CAs {
if cert.Expired(time.Now()) {
expired++
l.WithField("cert", cert).Warn("expired certificate present in CA pool")
}
}
if expired >= len(CAs.CAs) {
return nil, errors.New("no valid CA certificates present")
}
} else if err != nil {
return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err) return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
} }
// pki.blacklist entered the scene at about the same time we aliased x509 to pki, not supporting backwards compat for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) {
l.WithField("fingerprint", fp).Info("Blocklisting cert")
CAs.BlocklistFingerprint(fp)
}
// Support deprecated config for at least one minor release to allow for migrations
//TODO: remove in 2022 or later
for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) { for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) {
l.WithField("fingerprint", fp).Infof("Blacklisting cert") l.WithField("fingerprint", fp).Info("Blocklisting cert")
CAs.BlacklistFingerprint(fp) l.Warn("pki.blacklist is deprecated and will not be supported in a future release. Please migrate your config to use pki.blocklist")
CAs.BlocklistFingerprint(fp)
} }
return CAs, nil return CAs, nil


@@ -2,8 +2,8 @@ GO111MODULE = on
export GO111MODULE export GO111MODULE
cert.pb.go: cert.proto .FORCE cert.pb.go: cert.proto .FORCE
go build github.com/golang/protobuf/protoc-gen-go go build google.golang.org/protobuf/cmd/protoc-gen-go
PATH="$(PWD):$(PATH)" protoc --go_out=. $< PATH="$(CURDIR):$(PATH)" protoc --go_out=. --go_opt=paths=source_relative $<
rm protoc-gen-go rm protoc-gen-go
.FORCE: .FORCE:


@@ -1,6 +1,7 @@
package cert package cert
import ( import (
"errors"
"fmt" "fmt"
"strings" "strings"
"time" "time"
@@ -8,32 +9,45 @@ import (
type NebulaCAPool struct { type NebulaCAPool struct {
CAs map[string]*NebulaCertificate CAs map[string]*NebulaCertificate
certBlacklist map[string]struct{} certBlocklist map[string]struct{}
} }
// NewCAPool creates a CAPool // NewCAPool creates a CAPool
func NewCAPool() *NebulaCAPool { func NewCAPool() *NebulaCAPool {
ca := NebulaCAPool{ ca := NebulaCAPool{
CAs: make(map[string]*NebulaCertificate), CAs: make(map[string]*NebulaCertificate),
certBlacklist: make(map[string]struct{}), certBlocklist: make(map[string]struct{}),
} }
return &ca return &ca
} }
// NewCAPoolFromBytes will create a new CA pool from the provided
// input bytes, which must be a PEM-encoded set of nebula certificates.
// If the pool contains any expired certificates, an ErrExpired will be
// returned along with the pool. The caller must handle any such errors.
func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, error) { func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, error) {
pool := NewCAPool() pool := NewCAPool()
var err error var err error
var expired bool
for { for {
caPEMs, err = pool.AddCACertificate(caPEMs) caPEMs, err = pool.AddCACertificate(caPEMs)
if errors.Is(err, ErrExpired) {
expired = true
err = nil
}
if err != nil { if err != nil {
return nil, err return nil, err
} }
if caPEMs == nil || len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" { if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" {
break break
} }
} }
if expired {
return pool, ErrExpired
}
return pool, nil return pool, nil
} }
@@ -47,15 +61,11 @@ func (ncp *NebulaCAPool) AddCACertificate(pemBytes []byte) ([]byte, error) {
} }
if !c.Details.IsCA { if !c.Details.IsCA {
return pemBytes, fmt.Errorf("provided certificate was not a CA; %s", c.Details.Name) return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrNotCA)
} }
if !c.CheckSignature(c.Details.PublicKey) { if !c.CheckSignature(c.Details.PublicKey) {
return pemBytes, fmt.Errorf("provided certificate was not self signed; %s", c.Details.Name) return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrNotSelfSigned)
}
if c.Expired(time.Now()) {
return pemBytes, fmt.Errorf("provided CA certificate is expired; %s", c.Details.Name)
} }
sum, err := c.Sha256Sum() sum, err := c.Sha256Sum()
@@ -64,27 +74,31 @@ func (ncp *NebulaCAPool) AddCACertificate(pemBytes []byte) ([]byte, error) {
} }
ncp.CAs[sum] = c ncp.CAs[sum] = c
if c.Expired(time.Now()) {
return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrExpired)
}
return pemBytes, nil return pemBytes, nil
} }
// BlacklistFingerprint adds a cert fingerprint to the blacklist // BlocklistFingerprint adds a cert fingerprint to the blocklist
func (ncp *NebulaCAPool) BlacklistFingerprint(f string) { func (ncp *NebulaCAPool) BlocklistFingerprint(f string) {
ncp.certBlacklist[f] = struct{}{} ncp.certBlocklist[f] = struct{}{}
} }
// ResetCertBlacklist removes all previously blacklisted cert fingerprints // ResetCertBlocklist removes all previously blocklisted cert fingerprints
func (ncp *NebulaCAPool) ResetCertBlacklist() { func (ncp *NebulaCAPool) ResetCertBlocklist() {
ncp.certBlacklist = make(map[string]struct{}) ncp.certBlocklist = make(map[string]struct{})
} }
// IsBlacklisted returns true if the fingerprint fails to generate or has been explicitly blacklisted // IsBlocklisted returns true if the fingerprint fails to generate or has been explicitly blocklisted
func (ncp *NebulaCAPool) IsBlacklisted(c *NebulaCertificate) bool { func (ncp *NebulaCAPool) IsBlocklisted(c *NebulaCertificate) bool {
h, err := c.Sha256Sum() h, err := c.Sha256Sum()
if err != nil { if err != nil {
return true return true
} }
if _, ok := ncp.certBlacklist[h]; ok { if _, ok := ncp.certBlocklist[h]; ok {
return true return true
} }
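
The doc comment above makes the caller responsible for the partial-failure case: NewCAPoolFromBytes now returns a usable pool together with ErrExpired when any CA in the PEM bundle is expired. A minimal caller-side sketch, assuming an illustrative on-disk path (nebula itself reads the bundle from the pki.ca config value):

```go
package main

import (
	"errors"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/slackhq/nebula/cert"
)

func main() {
	// Illustrative path; in nebula this comes from pki.ca.
	pem, err := ioutil.ReadFile("/etc/nebula/ca.crt")
	if err != nil {
		log.Fatal(err)
	}

	pool, err := cert.NewCAPoolFromBytes(pem)
	switch {
	case errors.Is(err, cert.ErrExpired):
		// The pool is still populated; decide whether expired CAs are fatal for you.
		fmt.Println("warning: CA pool contains at least one expired certificate")
	case err != nil:
		log.Fatal(err)
	}

	fmt.Printf("loaded %d CA certificates\n", len(pool.CAs))
}
```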


@@ -1,21 +1,21 @@
package cert package cert
import ( import (
"bytes"
"crypto" "crypto"
"crypto/rand" "crypto/rand"
"crypto/sha256" "crypto/sha256"
"encoding/binary" "encoding/binary"
"encoding/hex" "encoding/hex"
"encoding/json"
"encoding/pem" "encoding/pem"
"fmt" "fmt"
"net" "net"
"time" "time"
"bytes"
"encoding/json"
"github.com/golang/protobuf/proto"
"golang.org/x/crypto/curve25519" "golang.org/x/crypto/curve25519"
"golang.org/x/crypto/ed25519" "golang.org/x/crypto/ed25519"
"google.golang.org/protobuf/proto"
) )
const publicKeyLen = 32 const publicKeyLen = 32
@@ -61,6 +61,10 @@ func UnmarshalNebulaCertificate(b []byte) (*NebulaCertificate, error) {
return nil, err return nil, err
} }
if rc.Details == nil {
return nil, fmt.Errorf("encoded Details was nil")
}
if len(rc.Details.Ips)%2 != 0 { if len(rc.Details.Ips)%2 != 0 {
return nil, fmt.Errorf("encoded IPs should be in pairs, an odd number was found") return nil, fmt.Errorf("encoded IPs should be in pairs, an odd number was found")
} }
@@ -123,6 +127,9 @@ func UnmarshalNebulaCertificateFromPEM(b []byte) (*NebulaCertificate, []byte, er
if p == nil { if p == nil {
return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block") return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
} }
if p.Type != CertBanner {
return nil, r, fmt.Errorf("bytes did not contain a proper nebula certificate banner")
}
nc, err := UnmarshalNebulaCertificate(p.Bytes) nc, err := UnmarshalNebulaCertificate(p.Bytes)
return nc, r, err return nc, r, err
} }
@@ -244,10 +251,10 @@ func (nc *NebulaCertificate) Expired(t time.Time) bool {
return nc.Details.NotBefore.After(t) || nc.Details.NotAfter.Before(t) return nc.Details.NotBefore.After(t) || nc.Details.NotAfter.Before(t)
} }
// Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blacklist, etc) // Verify will ensure a certificate is good in all respects (expiry, group membership, signature, cert blocklist, etc)
func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error) { func (nc *NebulaCertificate) Verify(t time.Time, ncp *NebulaCAPool) (bool, error) {
if ncp.IsBlacklisted(nc) { if ncp.IsBlocklisted(nc) {
return false, fmt.Errorf("certificate has been blacklisted") return false, fmt.Errorf("certificate has been blocked")
} }
signer, err := ncp.GetCAForCert(nc) signer, err := ncp.GetCAForCert(nc)
@@ -318,12 +325,26 @@ func (nc *NebulaCertificate) CheckRootConstrains(signer *NebulaCertificate) erro
// VerifyPrivateKey checks that the public key in the Nebula certificate and a supplied private key match // VerifyPrivateKey checks that the public key in the Nebula certificate and a supplied private key match
func (nc *NebulaCertificate) VerifyPrivateKey(key []byte) error { func (nc *NebulaCertificate) VerifyPrivateKey(key []byte) error {
var dst, key32 [32]byte if nc.Details.IsCA {
copy(key32[:], key) // the call to PublicKey below will panic slice bounds out of range otherwise
curve25519.ScalarBaseMult(&dst, &key32) if len(key) != ed25519.PrivateKeySize {
if !bytes.Equal(dst[:], nc.Details.PublicKey) { return fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
}
if !ed25519.PublicKey(nc.Details.PublicKey).Equal(ed25519.PrivateKey(key).Public()) {
return fmt.Errorf("public key in cert and private key supplied don't match")
}
return nil
}
pub, err := curve25519.X25519(key, curve25519.Basepoint)
if err != nil {
return err
}
if !bytes.Equal(pub, nc.Details.PublicKey) {
return fmt.Errorf("public key in cert and private key supplied don't match") return fmt.Errorf("public key in cert and private key supplied don't match")
} }
return nil return nil
} }
@@ -468,6 +489,63 @@ func (nc *NebulaCertificate) MarshalJSON() ([]byte, error) {
return json.Marshal(jc) return json.Marshal(jc)
} }
//func (nc *NebulaCertificate) Copy() *NebulaCertificate {
// r, err := nc.Marshal()
// if err != nil {
// //TODO
// return nil
// }
//
// c, err := UnmarshalNebulaCertificate(r)
// return c
//}
func (nc *NebulaCertificate) Copy() *NebulaCertificate {
c := &NebulaCertificate{
Details: NebulaCertificateDetails{
Name: nc.Details.Name,
Groups: make([]string, len(nc.Details.Groups)),
Ips: make([]*net.IPNet, len(nc.Details.Ips)),
Subnets: make([]*net.IPNet, len(nc.Details.Subnets)),
NotBefore: nc.Details.NotBefore,
NotAfter: nc.Details.NotAfter,
PublicKey: make([]byte, len(nc.Details.PublicKey)),
IsCA: nc.Details.IsCA,
Issuer: nc.Details.Issuer,
InvertedGroups: make(map[string]struct{}, len(nc.Details.InvertedGroups)),
},
Signature: make([]byte, len(nc.Signature)),
}
copy(c.Signature, nc.Signature)
copy(c.Details.Groups, nc.Details.Groups)
copy(c.Details.PublicKey, nc.Details.PublicKey)
for i, p := range nc.Details.Ips {
c.Details.Ips[i] = &net.IPNet{
IP: make(net.IP, len(p.IP)),
Mask: make(net.IPMask, len(p.Mask)),
}
copy(c.Details.Ips[i].IP, p.IP)
copy(c.Details.Ips[i].Mask, p.Mask)
}
for i, p := range nc.Details.Subnets {
c.Details.Subnets[i] = &net.IPNet{
IP: make(net.IP, len(p.IP)),
Mask: make(net.IPMask, len(p.Mask)),
}
copy(c.Details.Subnets[i].IP, p.IP)
copy(c.Details.Subnets[i].Mask, p.Mask)
}
for g := range nc.Details.InvertedGroups {
c.Details.InvertedGroups[g] = struct{}{}
}
return c
}
func netMatch(certIp *net.IPNet, rootIps []*net.IPNet) bool { func netMatch(certIp *net.IPNet, rootIps []*net.IPNet) bool {
for _, net := range rootIps { for _, net := range rootIps {
if net.Contains(certIp.IP) && maskContains(net.Mask, certIp.Mask) { if net.Contains(certIp.IP) && maskContains(net.Mask, certIp.Mask) {
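
VerifyPrivateKey above now distinguishes the two kinds of key material: CA certificates are checked against 64-byte ed25519 private keys, while node certificates are checked against 32-byte X25519 scalars whose public point is derived with curve25519.X25519. A short sketch of that key material, using only the crypto packages already imported above; the caCert/nodeCert mentioned in comments are placeholders, not values constructed here:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
	"golang.org/x/crypto/ed25519"
)

func main() {
	// CA key material: ed25519, private key is 64 bytes, public key 32 bytes.
	caPub, caPriv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(caPriv), len(caPub)) // 64 32
	// A CA NebulaCertificate whose Details.PublicKey equals caPub would pass
	// caCert.VerifyPrivateKey(caPriv).

	// Node key material: X25519, private scalar is 32 bytes.
	nodePriv := make([]byte, 32)
	if _, err := rand.Read(nodePriv); err != nil {
		panic(err)
	}
	nodePub, err := curve25519.X25519(nodePriv, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(nodePriv), len(nodePub)) // 32 32
	// A node NebulaCertificate whose Details.PublicKey equals nodePub would pass
	// nodeCert.VerifyPrivateKey(nodePriv).
}
```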


@@ -1,202 +1,298 @@
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.20.0
// source: cert.proto // source: cert.proto
package cert package cert
import ( import (
fmt "fmt" protoreflect "google.golang.org/protobuf/reflect/protoreflect"
proto "github.com/golang/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl"
math "math" reflect "reflect"
sync "sync"
) )
// Reference imports to suppress errors if they are not otherwise used. const (
var _ = proto.Marshal // Verify that this generated code is sufficiently up-to-date.
var _ = fmt.Errorf _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
var _ = math.Inf // Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
// This is a compile-time assertion to ensure that this generated file )
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type RawNebulaCertificate struct { type RawNebulaCertificate struct {
Details *RawNebulaCertificateDetails `protobuf:"bytes,1,opt,name=Details,json=details,proto3" json:"Details,omitempty"` state protoimpl.MessageState
Signature []byte `protobuf:"bytes,2,opt,name=Signature,json=signature,proto3" json:"Signature,omitempty"` sizeCache protoimpl.SizeCache
XXX_NoUnkeyedLiteral struct{} `json:"-"` unknownFields protoimpl.UnknownFields
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"` Details *RawNebulaCertificateDetails `protobuf:"bytes,1,opt,name=Details,proto3" json:"Details,omitempty"`
Signature []byte `protobuf:"bytes,2,opt,name=Signature,proto3" json:"Signature,omitempty"`
} }
func (m *RawNebulaCertificate) Reset() { *m = RawNebulaCertificate{} } func (x *RawNebulaCertificate) Reset() {
func (m *RawNebulaCertificate) String() string { return proto.CompactTextString(m) } *x = RawNebulaCertificate{}
func (*RawNebulaCertificate) ProtoMessage() {} if protoimpl.UnsafeEnabled {
mi := &file_cert_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RawNebulaCertificate) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RawNebulaCertificate) ProtoMessage() {}
func (x *RawNebulaCertificate) ProtoReflect() protoreflect.Message {
mi := &file_cert_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RawNebulaCertificate.ProtoReflect.Descriptor instead.
func (*RawNebulaCertificate) Descriptor() ([]byte, []int) { func (*RawNebulaCertificate) Descriptor() ([]byte, []int) {
return fileDescriptor_a142e29cbef9b1cf, []int{0} return file_cert_proto_rawDescGZIP(), []int{0}
} }
func (m *RawNebulaCertificate) XXX_Unmarshal(b []byte) error { func (x *RawNebulaCertificate) GetDetails() *RawNebulaCertificateDetails {
return xxx_messageInfo_RawNebulaCertificate.Unmarshal(m, b) if x != nil {
} return x.Details
func (m *RawNebulaCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RawNebulaCertificate.Marshal(b, m, deterministic)
}
func (m *RawNebulaCertificate) XXX_Merge(src proto.Message) {
xxx_messageInfo_RawNebulaCertificate.Merge(m, src)
}
func (m *RawNebulaCertificate) XXX_Size() int {
return xxx_messageInfo_RawNebulaCertificate.Size(m)
}
func (m *RawNebulaCertificate) XXX_DiscardUnknown() {
xxx_messageInfo_RawNebulaCertificate.DiscardUnknown(m)
}
var xxx_messageInfo_RawNebulaCertificate proto.InternalMessageInfo
func (m *RawNebulaCertificate) GetDetails() *RawNebulaCertificateDetails {
if m != nil {
return m.Details
} }
return nil return nil
} }
func (m *RawNebulaCertificate) GetSignature() []byte { func (x *RawNebulaCertificate) GetSignature() []byte {
if m != nil { if x != nil {
return m.Signature return x.Signature
} }
return nil return nil
} }
type RawNebulaCertificateDetails struct { type RawNebulaCertificateDetails struct {
Name string `protobuf:"bytes,1,opt,name=Name,json=name,proto3" json:"Name,omitempty"` state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
// Ips and Subnets are in big endian 32 bit pairs, 1st the ip, 2nd the mask // Ips and Subnets are in big endian 32 bit pairs, 1st the ip, 2nd the mask
Ips []uint32 `protobuf:"varint,2,rep,packed,name=Ips,json=ips,proto3" json:"Ips,omitempty"` Ips []uint32 `protobuf:"varint,2,rep,packed,name=Ips,proto3" json:"Ips,omitempty"`
Subnets []uint32 `protobuf:"varint,3,rep,packed,name=Subnets,json=subnets,proto3" json:"Subnets,omitempty"` Subnets []uint32 `protobuf:"varint,3,rep,packed,name=Subnets,proto3" json:"Subnets,omitempty"`
Groups []string `protobuf:"bytes,4,rep,name=Groups,json=groups,proto3" json:"Groups,omitempty"` Groups []string `protobuf:"bytes,4,rep,name=Groups,proto3" json:"Groups,omitempty"`
NotBefore int64 `protobuf:"varint,5,opt,name=NotBefore,json=notBefore,proto3" json:"NotBefore,omitempty"` NotBefore int64 `protobuf:"varint,5,opt,name=NotBefore,proto3" json:"NotBefore,omitempty"`
NotAfter int64 `protobuf:"varint,6,opt,name=NotAfter,json=notAfter,proto3" json:"NotAfter,omitempty"` NotAfter int64 `protobuf:"varint,6,opt,name=NotAfter,proto3" json:"NotAfter,omitempty"`
PublicKey []byte `protobuf:"bytes,7,opt,name=PublicKey,json=publicKey,proto3" json:"PublicKey,omitempty"` PublicKey []byte `protobuf:"bytes,7,opt,name=PublicKey,proto3" json:"PublicKey,omitempty"`
IsCA bool `protobuf:"varint,8,opt,name=IsCA,json=isCA,proto3" json:"IsCA,omitempty"` IsCA bool `protobuf:"varint,8,opt,name=IsCA,proto3" json:"IsCA,omitempty"`
// sha-256 of the issuer certificate, if this field is blank the cert is self-signed // sha-256 of the issuer certificate, if this field is blank the cert is self-signed
Issuer []byte `protobuf:"bytes,9,opt,name=Issuer,json=issuer,proto3" json:"Issuer,omitempty"` Issuer []byte `protobuf:"bytes,9,opt,name=Issuer,proto3" json:"Issuer,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
} }
func (m *RawNebulaCertificateDetails) Reset() { *m = RawNebulaCertificateDetails{} } func (x *RawNebulaCertificateDetails) Reset() {
func (m *RawNebulaCertificateDetails) String() string { return proto.CompactTextString(m) } *x = RawNebulaCertificateDetails{}
func (*RawNebulaCertificateDetails) ProtoMessage() {} if protoimpl.UnsafeEnabled {
mi := &file_cert_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *RawNebulaCertificateDetails) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*RawNebulaCertificateDetails) ProtoMessage() {}
func (x *RawNebulaCertificateDetails) ProtoReflect() protoreflect.Message {
mi := &file_cert_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use RawNebulaCertificateDetails.ProtoReflect.Descriptor instead.
func (*RawNebulaCertificateDetails) Descriptor() ([]byte, []int) { func (*RawNebulaCertificateDetails) Descriptor() ([]byte, []int) {
return fileDescriptor_a142e29cbef9b1cf, []int{1} return file_cert_proto_rawDescGZIP(), []int{1}
} }
func (m *RawNebulaCertificateDetails) XXX_Unmarshal(b []byte) error { func (x *RawNebulaCertificateDetails) GetName() string {
return xxx_messageInfo_RawNebulaCertificateDetails.Unmarshal(m, b) if x != nil {
} return x.Name
func (m *RawNebulaCertificateDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RawNebulaCertificateDetails.Marshal(b, m, deterministic)
}
func (m *RawNebulaCertificateDetails) XXX_Merge(src proto.Message) {
xxx_messageInfo_RawNebulaCertificateDetails.Merge(m, src)
}
func (m *RawNebulaCertificateDetails) XXX_Size() int {
return xxx_messageInfo_RawNebulaCertificateDetails.Size(m)
}
func (m *RawNebulaCertificateDetails) XXX_DiscardUnknown() {
xxx_messageInfo_RawNebulaCertificateDetails.DiscardUnknown(m)
}
var xxx_messageInfo_RawNebulaCertificateDetails proto.InternalMessageInfo
func (m *RawNebulaCertificateDetails) GetName() string {
if m != nil {
return m.Name
} }
return "" return ""
} }
func (m *RawNebulaCertificateDetails) GetIps() []uint32 { func (x *RawNebulaCertificateDetails) GetIps() []uint32 {
if m != nil { if x != nil {
return m.Ips return x.Ips
} }
return nil return nil
} }
func (m *RawNebulaCertificateDetails) GetSubnets() []uint32 { func (x *RawNebulaCertificateDetails) GetSubnets() []uint32 {
if m != nil { if x != nil {
return m.Subnets return x.Subnets
} }
return nil return nil
} }
func (m *RawNebulaCertificateDetails) GetGroups() []string { func (x *RawNebulaCertificateDetails) GetGroups() []string {
if m != nil { if x != nil {
return m.Groups return x.Groups
} }
return nil return nil
} }
func (m *RawNebulaCertificateDetails) GetNotBefore() int64 { func (x *RawNebulaCertificateDetails) GetNotBefore() int64 {
if m != nil { if x != nil {
return m.NotBefore return x.NotBefore
} }
return 0 return 0
} }
func (m *RawNebulaCertificateDetails) GetNotAfter() int64 { func (x *RawNebulaCertificateDetails) GetNotAfter() int64 {
if m != nil { if x != nil {
return m.NotAfter return x.NotAfter
} }
return 0 return 0
} }
func (m *RawNebulaCertificateDetails) GetPublicKey() []byte { func (x *RawNebulaCertificateDetails) GetPublicKey() []byte {
if m != nil { if x != nil {
return m.PublicKey return x.PublicKey
} }
return nil return nil
} }
func (m *RawNebulaCertificateDetails) GetIsCA() bool { func (x *RawNebulaCertificateDetails) GetIsCA() bool {
if m != nil { if x != nil {
return m.IsCA return x.IsCA
} }
return false return false
} }
func (m *RawNebulaCertificateDetails) GetIssuer() []byte { func (x *RawNebulaCertificateDetails) GetIssuer() []byte {
if m != nil { if x != nil {
return m.Issuer return x.Issuer
} }
return nil return nil
} }
func init() { var File_cert_proto protoreflect.FileDescriptor
proto.RegisterType((*RawNebulaCertificate)(nil), "cert.RawNebulaCertificate")
proto.RegisterType((*RawNebulaCertificateDetails)(nil), "cert.RawNebulaCertificateDetails") var file_cert_proto_rawDesc = []byte{
0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x65,
0x72, 0x74, 0x22, 0x71, 0x0a, 0x14, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43,
0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x44, 0x65,
0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65,
0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07,
0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61,
0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x53, 0x69, 0x67, 0x6e,
0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xf9, 0x01, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62,
0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65,
0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x49, 0x70, 0x73,
0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x03, 0x49, 0x70, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x53,
0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x53, 0x75,
0x62, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18,
0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x1c, 0x0a,
0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
0x52, 0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x4e,
0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x4e,
0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69,
0x63, 0x4b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x50, 0x75, 0x62, 0x6c,
0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x49, 0x73, 0x43, 0x41, 0x18, 0x08, 0x20,
0x01, 0x28, 0x08, 0x52, 0x04, 0x49, 0x73, 0x43, 0x41, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x73,
0x75, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x49, 0x73, 0x73, 0x75, 0x65,
0x72, 0x42, 0x20, 0x5a, 0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x73, 0x6c, 0x61, 0x63, 0x6b, 0x68, 0x71, 0x2f, 0x6e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x2f, 0x63,
0x65, 0x72, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
} }
func init() { proto.RegisterFile("cert.proto", fileDescriptor_a142e29cbef9b1cf) } var (
file_cert_proto_rawDescOnce sync.Once
file_cert_proto_rawDescData = file_cert_proto_rawDesc
)
var fileDescriptor_a142e29cbef9b1cf = []byte{ func file_cert_proto_rawDescGZIP() []byte {
// 279 bytes of a gzipped FileDescriptorProto file_cert_proto_rawDescOnce.Do(func() {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0xcf, 0x4a, 0xf4, 0x30, file_cert_proto_rawDescData = protoimpl.X.CompressGZIP(file_cert_proto_rawDescData)
0x14, 0xc5, 0xc9, 0xa4, 0x5f, 0xdb, 0xe4, 0x53, 0x90, 0x20, 0x12, 0xd4, 0x45, 0x9c, 0x55, 0x56, })
0xb3, 0xd0, 0xa5, 0xab, 0x71, 0x04, 0x29, 0x42, 0x91, 0xcc, 0x13, 0xa4, 0xf5, 0x76, 0x08, 0x74, return file_cert_proto_rawDescData
0x9a, 0x9a, 0x3f, 0x88, 0x8f, 0xee, 0x4e, 0x9a, 0x4e, 0x77, 0xe2, 0xee, 0x9e, 0x5f, 0xce, 0x49, }
0x4e, 0x2e, 0xa5, 0x2d, 0xb8, 0xb0, 0x19, 0x9d, 0x0d, 0x96, 0x65, 0xd3, 0xbc, 0xfe, 0xa0, 0x97,
0x4a, 0x7f, 0xd6, 0xd0, 0xc4, 0x5e, 0xef, 0xc0, 0x05, 0xd3, 0x99, 0x56, 0x07, 0x60, 0x8f, 0xb4, var file_cert_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
0x78, 0x86, 0xa0, 0x4d, 0xef, 0x39, 0x12, 0x48, 0xfe, 0xbf, 0xbf, 0xdb, 0xa4, 0xec, 0x6f, 0xe6, var file_cert_proto_goTypes = []interface{}{
0x93, 0x51, 0x15, 0xef, 0xf3, 0xc0, 0x6e, 0x29, 0xd9, 0x9b, 0xc3, 0xa0, 0x43, 0x74, 0xc0, 0x57, (*RawNebulaCertificate)(nil), // 0: cert.RawNebulaCertificate
0x02, 0xc9, 0x33, 0x45, 0xfc, 0x02, 0xd6, 0xdf, 0x88, 0xde, 0xfc, 0x71, 0x0d, 0x63, 0x34, 0xab, (*RawNebulaCertificateDetails)(nil), // 1: cert.RawNebulaCertificateDetails
0xf5, 0x11, 0xd2, 0xbb, 0x44, 0x65, 0x83, 0x3e, 0x02, 0xbb, 0xa0, 0xb8, 0x1a, 0x3d, 0x5f, 0x09, }
0x2c, 0xcf, 0x15, 0x36, 0xa3, 0x67, 0x9c, 0x16, 0xfb, 0xd8, 0x0c, 0x10, 0x3c, 0xc7, 0x89, 0x16, var file_cert_proto_depIdxs = []int32{
0x7e, 0x96, 0xec, 0x8a, 0xe6, 0x2f, 0xce, 0xc6, 0xd1, 0xf3, 0x4c, 0x60, 0x49, 0x54, 0x7e, 0x48, 1, // 0: cert.RawNebulaCertificate.Details:type_name -> cert.RawNebulaCertificateDetails
0x6a, 0x6a, 0x55, 0xdb, 0xf0, 0x04, 0x9d, 0x75, 0xc0, 0xff, 0x09, 0x24, 0xb1, 0x22, 0xc3, 0x02, 1, // [1:1] is the sub-list for method output_type
0xd8, 0x35, 0x2d, 0x6b, 0x1b, 0xb6, 0x5d, 0x00, 0xc7, 0xf3, 0x74, 0x58, 0x0e, 0x27, 0x3d, 0x25, 1, // [1:1] is the sub-list for method input_type
0xdf, 0x62, 0xd3, 0x9b, 0xf6, 0x15, 0xbe, 0x78, 0x31, 0xff, 0x67, 0x5c, 0xc0, 0xd4, 0xb7, 0xf2, 1, // [1:1] is the sub-list for extension type_name
0xbb, 0x2d, 0x2f, 0x05, 0x92, 0xa5, 0xca, 0x8c, 0xdf, 0x6d, 0xa7, 0x0e, 0x95, 0xf7, 0x11, 0x1c, 1, // [1:1] is the sub-list for extension extendee
0x27, 0xc9, 0x9e, 0x9b, 0xa4, 0x9a, 0x3c, 0xed, 0xfe, 0xe1, 0x27, 0x00, 0x00, 0xff, 0xff, 0x2c, 0, // [0:1] is the sub-list for field type_name
0xe3, 0x08, 0x37, 0x89, 0x01, 0x00, 0x00, }
func init() { file_cert_proto_init() }
func file_cert_proto_init() {
if File_cert_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_cert_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RawNebulaCertificate); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_cert_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RawNebulaCertificateDetails); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_cert_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_cert_proto_goTypes,
DependencyIndexes: file_cert_proto_depIdxs,
MessageInfos: file_cert_proto_msgTypes,
}.Build()
File_cert_proto = out.File
file_cert_proto_rawDesc = nil
file_cert_proto_goTypes = nil
file_cert_proto_depIdxs = nil
} }


@@ -1,6 +1,8 @@
syntax = "proto3"; syntax = "proto3";
package cert; package cert;
option go_package = "github.com/slackhq/nebula/cert";
//import "google/protobuf/timestamp.proto"; //import "google/protobuf/timestamp.proto";
message RawNebulaCertificate { message RawNebulaCertificate {


@@ -8,10 +8,11 @@ import (
"testing" "testing"
"time" "time"
"github.com/golang/protobuf/proto" "github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"golang.org/x/crypto/curve25519" "golang.org/x/crypto/curve25519"
"golang.org/x/crypto/ed25519" "golang.org/x/crypto/ed25519"
"google.golang.org/protobuf/proto"
) )
func TestMarshalingNebulaCertificate(t *testing.T) { func TestMarshalingNebulaCertificate(t *testing.T) {
@@ -172,13 +173,13 @@ func TestNebulaCertificate_Verify(t *testing.T) {
f, err := c.Sha256Sum() f, err := c.Sha256Sum()
assert.Nil(t, err) assert.Nil(t, err)
caPool.BlacklistFingerprint(f) caPool.BlocklistFingerprint(f)
v, err := c.Verify(time.Now(), caPool) v, err := c.Verify(time.Now(), caPool)
assert.False(t, v) assert.False(t, v)
assert.EqualError(t, err, "certificate has been blacklisted") assert.EqualError(t, err, "certificate has been blocked")
caPool.ResetCertBlacklist() caPool.ResetCertBlocklist()
v, err = c.Verify(time.Now(), caPool) v, err = c.Verify(time.Now(), caPool)
assert.True(t, v) assert.True(t, v)
assert.Nil(t, err) assert.Nil(t, err)
@@ -374,9 +375,16 @@ func TestNebulaCertificate_Verify_Subnets(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
} }
func TestNebulaVerifyPrivateKey(t *testing.T) { func TestNebulaCertificate_VerifyPrivateKey(t *testing.T) {
ca, _, caKey, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
assert.Nil(t, err) assert.Nil(t, err)
err = ca.VerifyPrivateKey(caKey)
assert.Nil(t, err)
_, _, caKey2, err := newTestCaCert(time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
assert.Nil(t, err)
err = ca.VerifyPrivateKey(caKey2)
assert.NotNil(t, err)
c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{}) c, _, priv, err := newTestCert(ca, caKey, time.Time{}, time.Time{}, []*net.IPNet{}, []*net.IPNet{}, []string{})
err = c.VerifyPrivateKey(priv) err = c.VerifyPrivateKey(priv)
@@ -421,6 +429,15 @@ BVG+oJpAoqokUBbI4U0N8CSfpUABEkB/Pm5A2xyH/nc8mg/wvGUWG3pZ7nHzaDMf
8/phAUt+FLzqTECzQKisYswKvE3pl9mbEYKbOdIHrxdIp95mo4sF 8/phAUt+FLzqTECzQKisYswKvE3pl9mbEYKbOdIHrxdIp95mo4sF
-----END NEBULA CERTIFICATE----- -----END NEBULA CERTIFICATE-----
`
expired := `
# expired certificate
-----BEGIN NEBULA CERTIFICATE-----
CjkKB2V4cGlyZWQouPmWjQYwufmWjQY6ILCRaoCkJlqHgv5jfDN4lzLHBvDzaQm4
vZxfu144hmgjQAESQG4qlnZi8DncvD/LDZnLgJHOaX1DWCHHEh59epVsC+BNgTie
WH1M9n4O7cFtGlM6sJJOS+rCVVEJ3ABS7+MPdQs=
-----END NEBULA CERTIFICATE-----
` `
rootCA := NebulaCertificate{ rootCA := NebulaCertificate{
@@ -444,6 +461,268 @@ BVG+oJpAoqokUBbI4U0N8CSfpUABEkB/Pm5A2xyH/nc8mg/wvGUWG3pZ7nHzaDMf
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, pp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name) assert.Equal(t, pp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name) assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
// expired cert, no valid certs
ppp, err := NewCAPoolFromBytes([]byte(expired))
assert.Equal(t, ErrExpired, err)
assert.Equal(t, ppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
// expired cert, with valid certs
pppp, err := NewCAPoolFromBytes(append([]byte(expired), noNewLines...))
assert.Equal(t, ErrExpired, err)
assert.Equal(t, pppp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
assert.Equal(t, pppp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
assert.Equal(t, pppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
assert.Equal(t, len(pppp.CAs), 3)
}
func appendByteSlices(b ...[]byte) []byte {
retSlice := []byte{}
for _, v := range b {
retSlice = append(retSlice, v...)
}
return retSlice
}
func TestUnmrshalCertPEM(t *testing.T) {
goodCert := []byte(`
# A good cert
-----BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
-----END NEBULA CERTIFICATE-----
`)
badBanner := []byte(`# A bad banner
-----BEGIN NOT A NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
-----END NOT A NEBULA CERTIFICATE-----
`)
invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
-END NEBULA CERTIFICATE----`)
certBundle := appendByteSlices(goodCert, badBanner, invalidPem)
// Success test case
cert, rest, err := UnmarshalNebulaCertificateFromPEM(certBundle)
assert.NotNil(t, cert)
assert.Equal(t, rest, append(badBanner, invalidPem...))
assert.Nil(t, err)
// Fail due to invalid banner.
cert, rest, err = UnmarshalNebulaCertificateFromPEM(rest)
assert.Nil(t, cert)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "bytes did not contain a proper nebula certificate banner")
// Fail due to invalid PEM format, because
// it's missing the requisite pre-encapsulation boundary.
cert, rest, err = UnmarshalNebulaCertificateFromPEM(rest)
assert.Nil(t, cert)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
}
func TestUnmarshalEd25519PrivateKey(t *testing.T) {
privKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA ED25519 PRIVATE KEY-----
`)
shortKey := []byte(`# A short key
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-----END NEBULA ED25519 PRIVATE KEY-----
`)
invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NOT A NEBULA PRIVATE KEY-----
`)
invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA ED25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-END NEBULA ED25519 PRIVATE KEY-----`)
keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
// Success test case
k, rest, err := UnmarshalEd25519PrivateKey(keyBundle)
assert.Len(t, k, 64)
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
assert.Nil(t, err)
// Fail due to short key
k, rest, err = UnmarshalEd25519PrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
assert.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key")
// Fail due to invalid banner
k, rest, err = UnmarshalEd25519PrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519 private key banner")
// Fail due to invalid PEM format, because
// it's missing the requisite pre-encapsulation boundary.
k, rest, err = UnmarshalEd25519PrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
}
func TestUnmarshalX25519PrivateKey(t *testing.T) {
privKey := []byte(`# A good key
-----BEGIN NEBULA X25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA X25519 PRIVATE KEY-----
`)
shortKey := []byte(`# A short key
-----BEGIN NEBULA X25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA X25519 PRIVATE KEY-----
`)
invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NOT A NEBULA PRIVATE KEY-----
`)
invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA X25519 PRIVATE KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-END NEBULA X25519 PRIVATE KEY-----`)
keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)
// Success test case
k, rest, err := UnmarshalX25519PrivateKey(keyBundle)
assert.Len(t, k, 32)
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
assert.Nil(t, err)
// Fail due to short key
k, rest, err = UnmarshalX25519PrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
assert.EqualError(t, err, "key was not 32 bytes, is invalid X25519 private key")
// Fail due to invalid banner
k, rest, err = UnmarshalX25519PrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "bytes did not contain a proper nebula X25519 private key banner")
// Fail due to invalid PEM format, because
// it's missing the requisite pre-encapsulation boundary.
k, rest, err = UnmarshalX25519PrivateKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
}
func TestUnmarshalEd25519PublicKey(t *testing.T) {
pubKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA ED25519 PUBLIC KEY-----
`)
shortKey := []byte(`# A short key
-----BEGIN NEBULA ED25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA ED25519 PUBLIC KEY-----
`)
invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NOT A NEBULA PUBLIC KEY-----
`)
invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA ED25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-END NEBULA ED25519 PUBLIC KEY-----`)
keyBundle := appendByteSlices(pubKey, shortKey, invalidBanner, invalidPem)
// Success test case
k, rest, err := UnmarshalEd25519PublicKey(keyBundle)
assert.Equal(t, len(k), 32)
assert.Nil(t, err)
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
// Fail due to short key
k, rest, err = UnmarshalEd25519PublicKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
assert.EqualError(t, err, "key was not 32 bytes, is invalid ed25519 public key")
// Fail due to invalid banner
k, rest, err = UnmarshalEd25519PublicKey(rest)
assert.Nil(t, k)
assert.EqualError(t, err, "bytes did not contain a proper nebula Ed25519 public key banner")
assert.Equal(t, rest, invalidPem)
// Fail due to invalid PEM format, because
// it's missing the requisite pre-encapsulation boundary.
k, rest, err = UnmarshalEd25519PublicKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
}
func TestUnmarshalX25519PublicKey(t *testing.T) {
pubKey := []byte(`# A good key
-----BEGIN NEBULA X25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA X25519 PUBLIC KEY-----
`)
shortKey := []byte(`# A short key
-----BEGIN NEBULA X25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
-----END NEBULA X25519 PUBLIC KEY-----
`)
invalidBanner := []byte(`# Invalid banner
-----BEGIN NOT A NEBULA PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-----END NOT A NEBULA PUBLIC KEY-----
`)
invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA X25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-END NEBULA X25519 PUBLIC KEY-----`)
keyBundle := appendByteSlices(pubKey, shortKey, invalidBanner, invalidPem)
// Success test case
k, rest, err := UnmarshalX25519PublicKey(keyBundle)
assert.Equal(t, len(k), 32)
assert.Nil(t, err)
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
// Fail due to short key
k, rest, err = UnmarshalX25519PublicKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
assert.EqualError(t, err, "key was not 32 bytes, is invalid X25519 public key")
// Fail due to invalid banner
k, rest, err = UnmarshalX25519PublicKey(rest)
assert.Nil(t, k)
assert.EqualError(t, err, "bytes did not contain a proper nebula X25519 public key banner")
assert.Equal(t, rest, invalidPem)
// Fail due to invalid PEM format, because
// it's missing the requisite pre-encapsulation boundary.
k, rest, err = UnmarshalX25519PublicKey(rest)
assert.Nil(t, k)
assert.Equal(t, rest, invalidPem)
assert.EqualError(t, err, "input did not contain a valid PEM encoded block")
} }
// Ensure that upgrading the protobuf library does not change how certificates // Ensure that upgrading the protobuf library does not change how certificates
@@ -487,6 +766,24 @@ func TestMarshalingNebulaCertificateConsistency(t *testing.T) {
assert.Equal(t, "0a0774657374696e67121b8182845080feffff0f828284508080fcff0f8382845080fe83f80f1a1b8182844880fe83f80f8282844880feffff0f838284488080fcff0f220b746573742d67726f757031220b746573742d67726f757032220b746573742d67726f75703328f0e0e7d70430a08681c4053a20313233343536373839306162636564666768696a3132333435363738393061624a081234567890abcedf", fmt.Sprintf("%x", b)) assert.Equal(t, "0a0774657374696e67121b8182845080feffff0f828284508080fcff0f8382845080fe83f80f1a1b8182844880fe83f80f8282844880feffff0f838284488080fcff0f220b746573742d67726f757031220b746573742d67726f757032220b746573742d67726f75703328f0e0e7d70430a08681c4053a20313233343536373839306162636564666768696a3132333435363738393061624a081234567890abcedf", fmt.Sprintf("%x", b))
} }
func TestNebulaCertificate_Copy(t *testing.T) {
ca, _, caKey, err := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
assert.Nil(t, err)
c, _, _, err := newTestCert(ca, caKey, time.Now(), time.Now().Add(5*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
assert.Nil(t, err)
cc := c.Copy()
test.AssertDeepCopyEqual(t, c, cc)
}
func TestUnmarshalNebulaCertificate(t *testing.T) {
// Test that we don't panic with an invalid certificate (#332)
data := []byte("\x98\x00\x00")
_, err := UnmarshalNebulaCertificate(data)
assert.EqualError(t, err, "encoded Details was nil")
}
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) { func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*NebulaCertificate, []byte, []byte, error) {
pub, priv, err := ed25519.GenerateKey(rand.Reader) pub, priv, err := ed25519.GenerateKey(rand.Reader)
if before.IsZero() { if before.IsZero() {
@@ -498,11 +795,12 @@ func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []
nc := &NebulaCertificate{ nc := &NebulaCertificate{
Details: NebulaCertificateDetails{ Details: NebulaCertificateDetails{
Name: "test ca", Name: "test ca",
NotBefore: before, NotBefore: time.Unix(before.Unix(), 0),
NotAfter: after, NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub, PublicKey: pub,
IsCA: true, IsCA: true,
InvertedGroups: make(map[string]struct{}),
}, },
} }
@@ -544,17 +842,17 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
if len(ips) == 0 { if len(ips) == 0 {
ips = []*net.IPNet{ ips = []*net.IPNet{
{IP: net.ParseIP("10.1.1.1"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))}, {IP: net.ParseIP("10.1.1.1").To4(), Mask: net.IPMask(net.ParseIP("255.255.255.0").To4())},
{IP: net.ParseIP("10.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))}, {IP: net.ParseIP("10.1.1.2").To4(), Mask: net.IPMask(net.ParseIP("255.255.0.0").To4())},
{IP: net.ParseIP("10.1.1.3"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))}, {IP: net.ParseIP("10.1.1.3").To4(), Mask: net.IPMask(net.ParseIP("255.0.255.0").To4())},
} }
} }
if len(subnets) == 0 { if len(subnets) == 0 {
subnets = []*net.IPNet{ subnets = []*net.IPNet{
{IP: net.ParseIP("9.1.1.1"), Mask: net.IPMask(net.ParseIP("255.0.255.0"))}, {IP: net.ParseIP("9.1.1.1").To4(), Mask: net.IPMask(net.ParseIP("255.0.255.0").To4())},
{IP: net.ParseIP("9.1.1.2"), Mask: net.IPMask(net.ParseIP("255.255.255.0"))}, {IP: net.ParseIP("9.1.1.2").To4(), Mask: net.IPMask(net.ParseIP("255.255.255.0").To4())},
{IP: net.ParseIP("9.1.1.3"), Mask: net.IPMask(net.ParseIP("255.255.0.0"))}, {IP: net.ParseIP("9.1.1.3").To4(), Mask: net.IPMask(net.ParseIP("255.255.0.0").To4())},
} }
} }
@@ -562,15 +860,16 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
nc := &NebulaCertificate{ nc := &NebulaCertificate{
Details: NebulaCertificateDetails{ Details: NebulaCertificateDetails{
Name: "testing", Name: "testing",
Ips: ips, Ips: ips,
Subnets: subnets, Subnets: subnets,
Groups: groups, Groups: groups,
NotBefore: before, NotBefore: time.Unix(before.Unix(), 0),
NotAfter: after, NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub, PublicKey: pub,
IsCA: false, IsCA: false,
Issuer: issuer, Issuer: issuer,
InvertedGroups: make(map[string]struct{}),
}, },
} }
@@ -583,10 +882,15 @@ func newTestCert(ca *NebulaCertificate, key []byte, before, after time.Time, ips
} }
func x25519Keypair() ([]byte, []byte) { func x25519Keypair() ([]byte, []byte) {
var pubkey, privkey [32]byte privkey := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, privkey[:]); err != nil { if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
panic(err) panic(err)
} }
curve25519.ScalarBaseMult(&pubkey, &privkey)
return pubkey[:], privkey[:] pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
if err != nil {
panic(err)
}
return pubkey, privkey
} }

cert/errors.go (new file, 9 lines)
View File

@@ -0,0 +1,9 @@
package cert
import "errors"
var (
ErrExpired = errors.New("certificate is expired")
ErrNotCA = errors.New("certificate is not a CA")
ErrNotSelfSigned = errors.New("certificate is not self-signed")
)
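The sentinel errors above let callers distinguish an expired CA from a malformed one. A minimal sketch of how a caller might treat ErrExpired as a warning rather than a fatal error, based on the NewCAPoolFromBytes behavior exercised in the test earlier in this diff (the ca.crt path is illustrative, not taken from the diff):
package main
import (
"fmt"
"io/ioutil"
"log"
"github.com/slackhq/nebula/cert"
)
func main() {
// Hypothetical CA bundle path.
raw, err := ioutil.ReadFile("ca.crt")
if err != nil {
log.Fatal(err)
}
// Per the test above, NewCAPoolFromBytes still returns a usable pool
// alongside ErrExpired, so expiry can be reported without aborting.
pool, err := cert.NewCAPoolFromBytes(raw)
if err == cert.ErrExpired {
fmt.Println("warning: at least one CA certificate is expired")
} else if err != nil {
log.Fatal(err)
}
fmt.Printf("loaded %d CA certificates\n", len(pool.CAs))
}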

cidr/parse.go (new file, 10 lines)
View File

@@ -0,0 +1,10 @@
package cidr
import "net"
// Parse is a convenience function that returns only the IPNet
// This function ignores errors since it is primarily a test helper; the result may be nil
func Parse(s string) *net.IPNet {
_, c, _ := net.ParseCIDR(s)
return c
}
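Parse deliberately swallows the error from net.ParseCIDR, so a malformed string yields nil rather than an error. A quick sketch of that behavior (values are illustrative):
package main
import (
"fmt"
"github.com/slackhq/nebula/cidr"
)
func main() {
ok := cidr.Parse("10.0.0.0/8") // *net.IPNet for a valid CIDR
bad := cidr.Parse("not-a-cidr") // nil, because Parse discards the error
fmt.Println(ok, bad)           // 10.0.0.0/8 <nil>
}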

View File

@@ -1,39 +1,39 @@
package nebula package cidr
import ( import (
"encoding/binary"
"fmt"
"net" "net"
"github.com/slackhq/nebula/iputil"
) )
type CIDRNode struct { type Node struct {
left *CIDRNode left *Node
right *CIDRNode right *Node
parent *CIDRNode parent *Node
value interface{} value interface{}
} }
type CIDRTree struct { type Tree4 struct {
root *CIDRNode root *Node
} }
const ( const (
startbit = uint32(0x80000000) startbit = iputil.VpnIp(0x80000000)
) )
func NewCIDRTree() *CIDRTree { func NewTree4() *Tree4 {
tree := new(CIDRTree) tree := new(Tree4)
tree.root = &CIDRNode{} tree.root = &Node{}
return tree return tree
} }
func (tree *CIDRTree) AddCIDR(cidr *net.IPNet, val interface{}) { func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
bit := startbit bit := startbit
node := tree.root node := tree.root
next := tree.root next := tree.root
ip := ip2int(cidr.IP) ip := iputil.Ip2VpnIp(cidr.IP)
mask := ip2int(cidr.Mask) mask := iputil.Ip2VpnIp(cidr.Mask)
// Find our last ancestor in the tree // Find our last ancestor in the tree
for bit&mask != 0 { for bit&mask != 0 {
@@ -59,7 +59,7 @@ func (tree *CIDRTree) AddCIDR(cidr *net.IPNet, val interface{}) {
// Build up the rest of the tree we don't already have // Build up the rest of the tree we don't already have
for bit&mask != 0 { for bit&mask != 0 {
next = &CIDRNode{} next = &Node{}
next.parent = node next.parent = node
if ip&bit != 0 { if ip&bit != 0 {
@@ -76,8 +76,8 @@ func (tree *CIDRTree) AddCIDR(cidr *net.IPNet, val interface{}) {
node.value = val node.value = val
} }
// Finds the first match, which way be the least specific // Finds the first match, which may be the least specific
func (tree *CIDRTree) Contains(ip uint32) (value interface{}) { func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
bit := startbit bit := startbit
node := tree.root node := tree.root
@@ -100,7 +100,7 @@ func (tree *CIDRTree) Contains(ip uint32) (value interface{}) {
} }
// Finds the most specific match // Finds the most specific match
func (tree *CIDRTree) MostSpecificContains(ip uint32) (value interface{}) { func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
bit := startbit bit := startbit
node := tree.root node := tree.root
@@ -116,14 +116,13 @@ func (tree *CIDRTree) MostSpecificContains(ip uint32) (value interface{}) {
} }
bit >>= 1 bit >>= 1
} }
return value return value
} }
// Finds the most specific match // Finds the most specific match
func (tree *CIDRTree) Match(ip uint32) (value interface{}) { func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
bit := startbit bit := startbit
node := tree.root node := tree.root
lastNode := node lastNode := node
@@ -144,27 +143,3 @@ func (tree *CIDRTree) Match(ip uint32) (value interface{}) {
} }
return value return value
} }
// A helper type to avoid converting to IP when logging
type IntIp uint32
func (ip IntIp) String() string {
return fmt.Sprintf("%v", int2ip(uint32(ip)))
}
func (ip IntIp) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", int2ip(uint32(ip)).String())), nil
}
func ip2int(ip []byte) uint32 {
if len(ip) == 16 {
return binary.BigEndian.Uint32(ip[12:16])
}
return binary.BigEndian.Uint32(ip)
}
func int2ip(nn uint32) net.IP {
ip := make(net.IP, 4)
binary.BigEndian.PutUint32(ip, nn)
return ip
}
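To summarize the renamed API: the tree is now built with cidr.NewTree4 and looked up with iputil.VpnIp keys instead of raw uint32s. A hedged usage sketch, assuming the cidr and iputil packages as they appear in this diff (CIDRs and values are illustrative):
package main
import (
"fmt"
"net"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/iputil"
)
func main() {
tree := cidr.NewTree4()
tree.AddCIDR(cidr.Parse("10.0.0.0/8"), "lan")
tree.AddCIDR(cidr.Parse("10.1.0.0/16"), "office")
ip := iputil.Ip2VpnIp(net.ParseIP("10.1.2.3"))
// Contains stops at the first (least specific) match,
// MostSpecificContains walks to the deepest one.
fmt.Println(tree.Contains(ip))             // lan
fmt.Println(tree.MostSpecificContains(ip)) // office
}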

cidr/tree4_test.go (new file, 153 lines)
View File

@@ -0,0 +1,153 @@
package cidr
import (
"net"
"testing"
"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
)
func TestCIDRTree_Contains(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
tree.AddCIDR(Parse("4.1.1.1/32"), "4b")
tree.AddCIDR(Parse("4.1.2.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4a", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}
tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
func TestCIDRTree_MostSpecificContains(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
tree.AddCIDR(Parse("4.1.1.0/30"), "4b")
tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}
tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
func TestCIDRTree_Match(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("4.1.1.0/32"), "1a")
tree.AddCIDR(Parse("4.1.1.1/32"), "1b")
tests := []struct {
Result interface{}
IP string
}{
{"1a", "4.1.1.0"},
{"1b", "4.1.1.1"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}
tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
func BenchmarkCIDRTree_Contains(b *testing.B) {
tree := NewTree4()
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
tree.AddCIDR(Parse("192.2.1.1/32"), "1")
tree.AddCIDR(Parse("172.2.1.1/32"), "1")
ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})
ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})
}
func BenchmarkCIDRTree_Match(b *testing.B) {
tree := NewTree4()
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
tree.AddCIDR(Parse("192.2.1.1/32"), "1")
tree.AddCIDR(Parse("172.2.1.1/32"), "1")
ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})
ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})
}

cidr/tree6.go (new file, 185 lines)
View File

@@ -0,0 +1,185 @@
package cidr
import (
"net"
"github.com/slackhq/nebula/iputil"
)
const startbit6 = uint64(1 << 63)
type Tree6 struct {
root4 *Node
root6 *Node
}
func NewTree6() *Tree6 {
tree := new(Tree6)
tree.root4 = &Node{}
tree.root6 = &Node{}
return tree
}
func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {
var node, next *Node
cidrIP, ipv4 := isIPV4(cidr.IP)
if ipv4 {
node = tree.root4
next = tree.root4
} else {
node = tree.root6
next = tree.root6
}
for i := 0; i < len(cidrIP); i += 4 {
ip := iputil.Ip2VpnIp(cidrIP[i : i+4])
mask := iputil.Ip2VpnIp(cidr.Mask[i : i+4])
bit := startbit
// Find our last ancestor in the tree
for bit&mask != 0 {
if ip&bit != 0 {
next = node.right
} else {
next = node.left
}
if next == nil {
break
}
bit = bit >> 1
node = next
}
// Build up the rest of the tree we don't already have
for bit&mask != 0 {
next = &Node{}
next.parent = node
if ip&bit != 0 {
node.right = next
} else {
node.left = next
}
bit >>= 1
node = next
}
}
// Final node marks our cidr, set the value
node.value = val
}
// Finds the most specific match
func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
var node *Node
wholeIP, ipv4 := isIPV4(ip)
if ipv4 {
node = tree.root4
} else {
node = tree.root6
}
for i := 0; i < len(wholeIP); i += 4 {
ip := iputil.Ip2VpnIp(wholeIP[i : i+4])
bit := startbit
for node != nil {
if node.value != nil {
value = node.value
}
if bit == 0 {
break
}
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
}
return value
}
func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{}) {
bit := startbit
node := tree.root4
for node != nil {
if node.value != nil {
value = node.value
}
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
return value
}
func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
ip := hi
node := tree.root6
for i := 0; i < 2; i++ {
bit := startbit6
for node != nil {
if node.value != nil {
value = node.value
}
if bit == 0 {
break
}
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
ip = lo
}
return value
}
func isIPV4(ip net.IP) (net.IP, bool) {
if len(ip) == net.IPv4len {
return ip, true
}
if len(ip) == net.IPv6len && isZeros(ip[0:10]) && ip[10] == 0xff && ip[11] == 0xff {
return ip[12:16], true
}
return ip, false
}
func isZeros(p net.IP) bool {
for i := 0; i < len(p); i++ {
if p[i] != 0 {
return false
}
}
return true
}
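Tree6 keeps separate roots for IPv4 and IPv6 and routes each lookup through isIPV4, so a single tree can hold both address families. A small sketch of that behavior (CIDRs and values are illustrative):
package main
import (
"fmt"
"net"
"github.com/slackhq/nebula/cidr"
)
func main() {
tree := cidr.NewTree6()
tree.AddCIDR(cidr.Parse("10.0.0.0/8"), "v4")
tree.AddCIDR(cidr.Parse("fd00::/8"), "v6")
// MostSpecificContains accepts either address family; isIPV4 routes
// the lookup to the matching root internally.
fmt.Println(tree.MostSpecificContains(net.ParseIP("10.1.2.3"))) // v4
fmt.Println(tree.MostSpecificContains(net.ParseIP("fd00::1")))  // v6
}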

cidr/tree6_test.go (new file, 81 lines)
View File

@@ -0,0 +1,81 @@
package cidr
import (
"encoding/binary"
"net"
"testing"
"github.com/stretchr/testify/assert"
)
func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
tree := NewTree6()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.1/24"), "4a")
tree.AddCIDR(Parse("4.1.1.1/30"), "4b")
tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{"6a", "1:2:0:4:1:1:1:1"},
{"6b", "1:2:0:4:5:1:1:1"},
{"6c", "1:2:0:4:5:0:0:0"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(net.ParseIP(tt.IP)))
}
tree = NewTree6()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
tree.AddCIDR(Parse("::/0"), "cool6")
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("0.0.0.0")))
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("255.255.255.255")))
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("::")))
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8")))
}
func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
tree := NewTree6()
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")
tests := []struct {
Result interface{}
IP string
}{
{"6a", "1:2:0:4:1:1:1:1"},
{"6b", "1:2:0:4:5:1:1:1"},
{"6c", "1:2:0:4:5:0:0:0"},
}
for _, tt := range tests {
ip := net.ParseIP(tt.IP)
hi := binary.BigEndian.Uint64(ip[:8])
lo := binary.BigEndian.Uint64(ip[8:])
assert.Equal(t, tt.Result, tree.MostSpecificContainsIpV6(hi, lo))
}
}

View File

@@ -1,157 +0,0 @@
package nebula
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
)
func TestCIDRTree_Contains(t *testing.T) {
tree := NewCIDRTree()
tree.AddCIDR(getCIDR("1.0.0.0/8"), "1")
tree.AddCIDR(getCIDR("2.1.0.0/16"), "2")
tree.AddCIDR(getCIDR("3.1.1.0/24"), "3")
tree.AddCIDR(getCIDR("4.1.1.0/24"), "4a")
tree.AddCIDR(getCIDR("4.1.1.1/32"), "4b")
tree.AddCIDR(getCIDR("4.1.2.1/32"), "4c")
tree.AddCIDR(getCIDR("254.0.0.0/4"), "5")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4a", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Contains(ip2int(net.ParseIP(tt.IP))))
}
tree = NewCIDRTree()
tree.AddCIDR(getCIDR("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("255.255.255.255"))))
}
func TestCIDRTree_MostSpecificContains(t *testing.T) {
tree := NewCIDRTree()
tree.AddCIDR(getCIDR("1.0.0.0/8"), "1")
tree.AddCIDR(getCIDR("2.1.0.0/16"), "2")
tree.AddCIDR(getCIDR("3.1.1.0/24"), "3")
tree.AddCIDR(getCIDR("4.1.1.0/24"), "4a")
tree.AddCIDR(getCIDR("4.1.1.0/30"), "4b")
tree.AddCIDR(getCIDR("4.1.1.1/32"), "4c")
tree.AddCIDR(getCIDR("254.0.0.0/4"), "5")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(ip2int(net.ParseIP(tt.IP))))
}
tree = NewCIDRTree()
tree.AddCIDR(getCIDR("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.MostSpecificContains(ip2int(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.MostSpecificContains(ip2int(net.ParseIP("255.255.255.255"))))
}
func TestCIDRTree_Match(t *testing.T) {
tree := NewCIDRTree()
tree.AddCIDR(getCIDR("4.1.1.0/32"), "1a")
tree.AddCIDR(getCIDR("4.1.1.1/32"), "1b")
tests := []struct {
Result interface{}
IP string
}{
{"1a", "4.1.1.0"},
{"1b", "4.1.1.1"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Match(ip2int(net.ParseIP(tt.IP))))
}
tree = NewCIDRTree()
tree.AddCIDR(getCIDR("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(ip2int(net.ParseIP("255.255.255.255"))))
}
func BenchmarkCIDRTree_Contains(b *testing.B) {
tree := NewCIDRTree()
tree.AddCIDR(getCIDR("1.1.0.0/16"), "1")
tree.AddCIDR(getCIDR("1.2.1.1/32"), "1")
tree.AddCIDR(getCIDR("192.2.1.1/32"), "1")
tree.AddCIDR(getCIDR("172.2.1.1/32"), "1")
ip := ip2int(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})
ip = ip2int(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})
}
func BenchmarkCIDRTree_Match(b *testing.B) {
tree := NewCIDRTree()
tree.AddCIDR(getCIDR("1.1.0.0/16"), "1")
tree.AddCIDR(getCIDR("1.2.1.1/32"), "1")
tree.AddCIDR(getCIDR("192.2.1.1/32"), "1")
tree.AddCIDR(getCIDR("172.2.1.1/32"), "1")
ip := ip2int(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})
ip = ip2int(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})
}
func getCIDR(s string) *net.IPNet {
_, c, _ := net.ParseCIDR(s)
return c
}

View File

@@ -11,6 +11,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/skip2/go-qrcode"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"golang.org/x/crypto/ed25519" "golang.org/x/crypto/ed25519"
) )
@@ -21,6 +22,7 @@ type caFlags struct {
duration *time.Duration duration *time.Duration
outKeyPath *string outKeyPath *string
outCertPath *string outCertPath *string
outQRPath *string
groups *string groups *string
ips *string ips *string
subnets *string subnets *string
@@ -33,9 +35,10 @@ func newCaFlags() *caFlags {
cf.duration = cf.set.Duration("duration", time.Duration(time.Hour*8760), "Optional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"") cf.duration = cf.set.Duration("duration", time.Duration(time.Hour*8760), "Optional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"")
cf.outKeyPath = cf.set.String("out-key", "ca.key", "Optional: path to write the private key to") cf.outKeyPath = cf.set.String("out-key", "ca.key", "Optional: path to write the private key to")
cf.outCertPath = cf.set.String("out-crt", "ca.crt", "Optional: path to write the certificate to") cf.outCertPath = cf.set.String("out-crt", "ca.crt", "Optional: path to write the certificate to")
cf.outQRPath = cf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use") cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use")
cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ip and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use") cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses")
cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ip and network in CIDR notation. This will limit which subnet addresses and networks subordinate certs can use") cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets")
return &cf return &cf
} }
@@ -79,6 +82,9 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
if err != nil { if err != nil {
return newHelpErrorf("invalid ip definition: %s", err) return newHelpErrorf("invalid ip definition: %s", err)
} }
if ip.To4() == nil {
return newHelpErrorf("invalid ip definition: can only be ipv4, have %s", rs)
}
ipNet.IP = ip ipNet.IP = ip
ips = append(ips, ipNet) ips = append(ips, ipNet)
@@ -95,6 +101,9 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
if err != nil { if err != nil {
return newHelpErrorf("invalid subnet definition: %s", err) return newHelpErrorf("invalid subnet definition: %s", err)
} }
if s.IP.To4() == nil {
return newHelpErrorf("invalid subnet definition: can only be ipv4, have %s", rs)
}
subnets = append(subnets, s) subnets = append(subnets, s)
} }
} }
@@ -146,6 +155,18 @@ func ca(args []string, out io.Writer, errOut io.Writer) error {
return fmt.Errorf("error while writing out-crt: %s", err) return fmt.Errorf("error while writing out-crt: %s", err)
} }
if *cf.outQRPath != "" {
b, err = qrcode.Encode(string(b), qrcode.Medium, -5)
if err != nil {
return fmt.Errorf("error while generating qr code: %s", err)
}
err = ioutil.WriteFile(*cf.outQRPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err)
}
}
return nil return nil
} }
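For reference, a standalone sketch of the go-qrcode call pattern used by the new -out-qr flag; the PEM contents and output file name here are placeholders, not values from the diff:
package main
import (
"io/ioutil"
"log"
"github.com/skip2/go-qrcode"
)
func main() {
// Placeholder PEM content; in the diff the marshalled certificate bytes are used.
pem := "-----BEGIN NEBULA CERTIFICATE-----\n...\n-----END NEBULA CERTIFICATE-----\n"
// A negative size asks go-qrcode for a scaled image rather than fixed pixels.
png, err := qrcode.Encode(pem, qrcode.Medium, -5)
if err != nil {
log.Fatal(err)
}
if err := ioutil.WriteFile("cert.png", png, 0600); err != nil {
log.Fatal(err)
}
}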

View File

@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows // +build !windows
package main package main
@@ -30,15 +31,17 @@ func Test_caHelp(t *testing.T) {
" -groups string\n"+ " -groups string\n"+
" \tOptional: comma separated list of groups. This will limit which groups subordinate certs can use\n"+ " \tOptional: comma separated list of groups. This will limit which groups subordinate certs can use\n"+
" -ips string\n"+ " -ips string\n"+
" \tOptional: comma separated list of ip and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use\n"+ " \tOptional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses\n"+
" -name string\n"+ " -name string\n"+
" \tRequired: name of the certificate authority\n"+ " \tRequired: name of the certificate authority\n"+
" -out-crt string\n"+ " -out-crt string\n"+
" \tOptional: path to write the certificate to (default \"ca.crt\")\n"+ " \tOptional: path to write the certificate to (default \"ca.crt\")\n"+
" -out-key string\n"+ " -out-key string\n"+
" \tOptional: path to write the private key to (default \"ca.key\")\n"+ " \tOptional: path to write the private key to (default \"ca.key\")\n"+
" -out-qr string\n"+
" \tOptional: output a qr code image (png) of the certificate\n"+
" -subnets string\n"+ " -subnets string\n"+
" \tOptional: comma separated list of ip and network in CIDR notation. This will limit which subnet addresses and networks subordinate certs can use\n", " \tOptional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets\n",
ob.String(), ob.String(),
) )
} }
@@ -52,6 +55,16 @@ func Test_ca(t *testing.T) {
assert.Equal(t, "", ob.String()) assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
// ipv4 only ips
assertHelpError(t, ca([]string{"-name", "ipv6", "-ips", "100::100/100"}, ob, eb), "invalid ip definition: can only be ipv4, have 100::100/100")
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())
// ipv4 only subnets
assertHelpError(t, ca([]string{"-name", "ipv6", "-subnets", "100::100/100"}, ob, eb), "invalid subnet definition: can only be ipv4, have 100::100/100")
assert.Equal(t, "", ob.String())
assert.Equal(t, "", eb.String())
// failed key write // failed key write
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()

View File

@@ -3,10 +3,11 @@ package main
import ( import (
"bytes" "bytes"
"errors" "errors"
"github.com/stretchr/testify/assert"
"io" "io"
"os" "os"
"testing" "testing"
"github.com/stretchr/testify/assert"
) )
//TODO: all flag parsing continueOnError will print to stderr on its own currently //TODO: all flag parsing continueOnError will print to stderr on its own currently

View File

@@ -4,23 +4,27 @@ import (
"encoding/json" "encoding/json"
"flag" "flag"
"fmt" "fmt"
"github.com/slackhq/nebula/cert"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"strings" "strings"
"github.com/skip2/go-qrcode"
"github.com/slackhq/nebula/cert"
) )
type printFlags struct { type printFlags struct {
set *flag.FlagSet set *flag.FlagSet
json *bool json *bool
path *string outQRPath *string
path *string
} }
func newPrintFlags() *printFlags { func newPrintFlags() *printFlags {
pf := printFlags{set: flag.NewFlagSet("print", flag.ContinueOnError)} pf := printFlags{set: flag.NewFlagSet("print", flag.ContinueOnError)}
pf.set.Usage = func() {} pf.set.Usage = func() {}
pf.json = pf.set.Bool("json", false, "Optional: outputs certificates in json format") pf.json = pf.set.Bool("json", false, "Optional: outputs certificates in json format")
pf.outQRPath = pf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
pf.path = pf.set.String("path", "", "Required: path to the certificate") pf.path = pf.set.String("path", "", "Required: path to the certificate")
return &pf return &pf
@@ -43,6 +47,8 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
} }
var c *cert.NebulaCertificate var c *cert.NebulaCertificate
var qrBytes []byte
part := 0
for { for {
c, rawCert, err = cert.UnmarshalNebulaCertificateFromPEM(rawCert) c, rawCert, err = cert.UnmarshalNebulaCertificateFromPEM(rawCert)
@@ -60,9 +66,31 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
out.Write([]byte("\n")) out.Write([]byte("\n"))
} }
if *pf.outQRPath != "" {
b, err := c.MarshalToPEM()
if err != nil {
return fmt.Errorf("error while marshalling cert to PEM: %s", err)
}
qrBytes = append(qrBytes, b...)
}
if rawCert == nil || len(rawCert) == 0 || strings.TrimSpace(string(rawCert)) == "" { if rawCert == nil || len(rawCert) == 0 || strings.TrimSpace(string(rawCert)) == "" {
break break
} }
part++
}
if *pf.outQRPath != "" {
b, err := qrcode.Encode(string(qrBytes), qrcode.Medium, -5)
if err != nil {
return fmt.Errorf("error while generating qr code: %s", err)
}
err = ioutil.WriteFile(*pf.outQRPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err)
}
} }
return nil return nil

View File

@@ -2,12 +2,13 @@ package main
import ( import (
"bytes" "bytes"
"github.com/slackhq/nebula/cert"
"github.com/stretchr/testify/assert"
"io/ioutil" "io/ioutil"
"os" "os"
"testing" "testing"
"time" "time"
"github.com/slackhq/nebula/cert"
"github.com/stretchr/testify/assert"
) )
func Test_printSummary(t *testing.T) { func Test_printSummary(t *testing.T) {
@@ -22,6 +23,8 @@ func Test_printHelp(t *testing.T) {
"Usage of "+os.Args[0]+" print <flags>: prints details about a certificate\n"+ "Usage of "+os.Args[0]+" print <flags>: prints details about a certificate\n"+
" -json\n"+ " -json\n"+
" \tOptional: outputs certificates in json format\n"+ " \tOptional: outputs certificates in json format\n"+
" -out-qr string\n"+
" \tOptional: output a qr code image (png) of the certificate\n"+
" -path string\n"+ " -path string\n"+
" \tRequired: path to the certificate\n", " \tRequired: path to the certificate\n",
ob.String(), ob.String(),

View File

@@ -11,6 +11,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/skip2/go-qrcode"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"golang.org/x/crypto/curve25519" "golang.org/x/crypto/curve25519"
) )
@@ -25,6 +26,7 @@ type signFlags struct {
inPubPath *string inPubPath *string
outKeyPath *string outKeyPath *string
outCertPath *string outCertPath *string
outQRPath *string
groups *string groups *string
subnets *string subnets *string
} }
@@ -35,13 +37,14 @@ func newSignFlags() *signFlags {
sf.caKeyPath = sf.set.String("ca-key", "ca.key", "Optional: path to the signing CA key") sf.caKeyPath = sf.set.String("ca-key", "ca.key", "Optional: path to the signing CA key")
sf.caCertPath = sf.set.String("ca-crt", "ca.crt", "Optional: path to the signing CA cert") sf.caCertPath = sf.set.String("ca-crt", "ca.crt", "Optional: path to the signing CA cert")
sf.name = sf.set.String("name", "", "Required: name of the cert, usually a hostname") sf.name = sf.set.String("name", "", "Required: name of the cert, usually a hostname")
sf.ip = sf.set.String("ip", "", "Required: ip and network in CIDR notation to assign the cert") sf.ip = sf.set.String("ip", "", "Required: ipv4 address and network in CIDR notation to assign the cert")
sf.duration = sf.set.Duration("duration", 0, "Optional: how long the cert should be valid for. The default is 1 second before the signing cert expires. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"") sf.duration = sf.set.Duration("duration", 0, "Optional: how long the cert should be valid for. The default is 1 second before the signing cert expires. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"")
sf.inPubPath = sf.set.String("in-pub", "", "Optional (if out-key not set): path to read a previously generated public key") sf.inPubPath = sf.set.String("in-pub", "", "Optional (if out-key not set): path to read a previously generated public key")
sf.outKeyPath = sf.set.String("out-key", "", "Optional (if in-pub not set): path to write the private key to") sf.outKeyPath = sf.set.String("out-key", "", "Optional (if in-pub not set): path to write the private key to")
sf.outCertPath = sf.set.String("out-crt", "", "Optional: path to write the certificate to") sf.outCertPath = sf.set.String("out-crt", "", "Optional: path to write the certificate to")
sf.outQRPath = sf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
sf.groups = sf.set.String("groups", "", "Optional: comma separated list of groups") sf.groups = sf.set.String("groups", "", "Optional: comma separated list of groups")
sf.subnets = sf.set.String("subnets", "", "Optional: comma seperated list of subnet this cert can serve for") sf.subnets = sf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. Subnets this cert can serve for")
return &sf return &sf
} }
@@ -89,6 +92,10 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
return fmt.Errorf("error while parsing ca-crt: %s", err) return fmt.Errorf("error while parsing ca-crt: %s", err)
} }
if err := caCert.VerifyPrivateKey(caKey); err != nil {
return fmt.Errorf("refusing to sign, root certificate does not match private key")
}
issuer, err := caCert.Sha256Sum() issuer, err := caCert.Sha256Sum()
if err != nil { if err != nil {
return fmt.Errorf("error while getting -ca-crt fingerprint: %s", err) return fmt.Errorf("error while getting -ca-crt fingerprint: %s", err)
@@ -107,6 +114,9 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
if err != nil { if err != nil {
return newHelpErrorf("invalid ip definition: %s", err) return newHelpErrorf("invalid ip definition: %s", err)
} }
if ip.To4() == nil {
return newHelpErrorf("invalid ip definition: can only be ipv4, have %s", *sf.ip)
}
ipNet.IP = ip ipNet.IP = ip
groups := []string{} groups := []string{}
@@ -128,6 +138,9 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
if err != nil { if err != nil {
return newHelpErrorf("invalid subnet definition: %s", err) return newHelpErrorf("invalid subnet definition: %s", err)
} }
if s.IP.To4() == nil {
return newHelpErrorf("invalid subnet definition: can only be ipv4, have %s", rs)
}
subnets = append(subnets, s) subnets = append(subnets, s)
} }
} }
@@ -203,16 +216,33 @@ func signCert(args []string, out io.Writer, errOut io.Writer) error {
return fmt.Errorf("error while writing out-crt: %s", err) return fmt.Errorf("error while writing out-crt: %s", err)
} }
if *sf.outQRPath != "" {
b, err = qrcode.Encode(string(b), qrcode.Medium, -5)
if err != nil {
return fmt.Errorf("error while generating qr code: %s", err)
}
err = ioutil.WriteFile(*sf.outQRPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err)
}
}
return nil return nil
} }
func x25519Keypair() ([]byte, []byte) { func x25519Keypair() ([]byte, []byte) {
var pubkey, privkey [32]byte privkey := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, privkey[:]); err != nil { if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
panic(err) panic(err)
} }
curve25519.ScalarBaseMult(&pubkey, &privkey)
return pubkey[:], privkey[:] pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
if err != nil {
panic(err)
}
return pubkey, privkey
} }
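The keypair helper now uses the function-based curve25519.X25519 API instead of the deprecated ScalarBaseMult array form. A hedged sketch showing that two such keypairs still derive a matching shared secret (names are illustrative, not part of the diff):
package main
import (
"crypto/rand"
"fmt"
"io"
"golang.org/x/crypto/curve25519"
)
// newKeypair mirrors the x25519Keypair helper shown in the diff.
func newKeypair() ([]byte, []byte) {
priv := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, priv); err != nil {
panic(err)
}
pub, err := curve25519.X25519(priv, curve25519.Basepoint)
if err != nil {
panic(err)
}
return pub, priv
}
func main() {
alicePub, alicePriv := newKeypair()
bobPub, bobPriv := newKeypair()
// Each side combines its own private key with the peer's public key.
s1, _ := curve25519.X25519(alicePriv, bobPub)
s2, _ := curve25519.X25519(bobPriv, alicePub)
fmt.Println(string(s1) == string(s2)) // true
}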
func signSummary() string { func signSummary() string {

View File

@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows // +build !windows
package main package main
@@ -38,15 +39,17 @@ func Test_signHelp(t *testing.T) {
" -in-pub string\n"+ " -in-pub string\n"+
" \tOptional (if out-key not set): path to read a previously generated public key\n"+ " \tOptional (if out-key not set): path to read a previously generated public key\n"+
" -ip string\n"+ " -ip string\n"+
" \tRequired: ip and network in CIDR notation to assign the cert\n"+ " \tRequired: ipv4 address and network in CIDR notation to assign the cert\n"+
" -name string\n"+ " -name string\n"+
" \tRequired: name of the cert, usually a hostname\n"+ " \tRequired: name of the cert, usually a hostname\n"+
" -out-crt string\n"+ " -out-crt string\n"+
" \tOptional: path to write the certificate to\n"+ " \tOptional: path to write the certificate to\n"+
" -out-key string\n"+ " -out-key string\n"+
" \tOptional (if in-pub not set): path to write the private key to\n"+ " \tOptional (if in-pub not set): path to write the private key to\n"+
" -out-qr string\n"+
" \tOptional: output a qr code image (png) of the certificate\n"+
" -subnets string\n"+ " -subnets string\n"+
" \tOptional: comma seperated list of subnet this cert can serve for\n", " \tOptional: comma separated list of ipv4 address and network in CIDR notation. Subnets this cert can serve for\n",
ob.String(), ob.String(),
) )
} }
@@ -56,7 +59,6 @@ func Test_signCert(t *testing.T) {
eb := &bytes.Buffer{} eb := &bytes.Buffer{}
// required args // required args
assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb), "-name is required") assertHelpError(t, signCert([]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb), "-name is required")
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
@@ -157,6 +159,13 @@ func Test_signCert(t *testing.T) {
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
ob.Reset()
eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "100::100/100", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
assertHelpError(t, signCert(args, ob, eb), "invalid ip definition: can only be ipv4, have 100::100/100")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
// bad subnet cidr // bad subnet cidr
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
@@ -165,6 +174,27 @@ func Test_signCert(t *testing.T) {
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
ob.Reset()
eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "100::100/100"}
assertHelpError(t, signCert(args, ob, eb), "invalid subnet definition: can only be ipv4, have 100::100/100")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
// mismatched ca key
_, caPriv2, _ := ed25519.GenerateKey(rand.Reader)
caKeyF2, err := ioutil.TempFile("", "sign-cert-2.key")
assert.Nil(t, err)
defer os.Remove(caKeyF2.Name())
caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2))
ob.Reset()
eb.Reset()
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF2.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
assert.EqualError(t, signCert(args, ob, eb), "refusing to sign, root certificate does not match private key")
assert.Empty(t, ob.String())
assert.Empty(t, eb.String())
// failed key write // failed key write
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
@@ -286,5 +316,4 @@ func Test_signCert(t *testing.T) {
assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing cert: "+crtF.Name()) assert.EqualError(t, signCert(args, ob, eb), "refusing to overwrite existing cert: "+crtF.Name())
assert.Empty(t, ob.String()) assert.Empty(t, ob.String())
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
} }

View File

@@ -3,12 +3,13 @@ package main
import ( import (
"flag" "flag"
"fmt" "fmt"
"github.com/slackhq/nebula/cert"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"strings" "strings"
"time" "time"
"github.com/slackhq/nebula/cert"
) )
type verifyFlags struct { type verifyFlags struct {

View File

@@ -3,13 +3,14 @@ package main
import ( import (
"bytes" "bytes"
"crypto/rand" "crypto/rand"
"github.com/slackhq/nebula/cert"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/ed25519"
"io/ioutil" "io/ioutil"
"os" "os"
"testing" "testing"
"time" "time"
"github.com/slackhq/nebula/cert"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/ed25519"
) )
func Test_verifySummary(t *testing.T) { func Test_verifySummary(t *testing.T) {
@@ -71,7 +72,7 @@ func Test_verify(t *testing.T) {
Details: cert.NebulaCertificateDetails{ Details: cert.NebulaCertificateDetails{
Name: "test-ca", Name: "test-ca",
NotBefore: time.Now().Add(time.Hour * -1), NotBefore: time.Now().Add(time.Hour * -1),
NotAfter: time.Now().Add(time.Hour), NotAfter: time.Now().Add(time.Hour * 2),
PublicKey: caPub, PublicKey: caPub,
IsCA: true, IsCA: true,
}, },

View File

@@ -0,0 +1,10 @@
//go:build !windows
// +build !windows
package main
import "github.com/sirupsen/logrus"
func HookLogger(l *logrus.Logger) {
// Do nothing, let the logs flow to stdout/stderr
}

View File

@@ -0,0 +1,54 @@
package main
import (
"fmt"
"io/ioutil"
"os"
"github.com/kardianos/service"
"github.com/sirupsen/logrus"
)
// HookLogger routes the logrus logs through the service logger so that they end up in the Windows Event Viewer
// logrus output will be discarded
func HookLogger(l *logrus.Logger) {
l.AddHook(newLogHook(logger))
l.SetOutput(ioutil.Discard)
}
type logHook struct {
sl service.Logger
}
func newLogHook(sl service.Logger) *logHook {
return &logHook{sl: sl}
}
func (h *logHook) Fire(entry *logrus.Entry) error {
line, err := entry.String()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
return err
}
switch entry.Level {
case logrus.PanicLevel:
return h.sl.Error(line)
case logrus.FatalLevel:
return h.sl.Error(line)
case logrus.ErrorLevel:
return h.sl.Error(line)
case logrus.WarnLevel:
return h.sl.Warning(line)
case logrus.InfoLevel:
return h.sl.Info(line)
case logrus.DebugLevel:
return h.sl.Info(line)
default:
return nil
}
}
func (h *logHook) Levels() []logrus.Level {
return logrus.AllLevels
}

View File

@@ -5,7 +5,10 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula" "github.com/slackhq/nebula"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/util"
) )
// A version string that can be set with // A version string that can be set with
@@ -45,5 +48,31 @@ func main() {
os.Exit(1) os.Exit(1)
} }
nebula.Main(*configPath, *configTest, Build) l := logrus.New()
l.Out = os.Stdout
c := config.NewC(l)
err := c.Load(*configPath)
if err != nil {
fmt.Printf("failed to load config: %s", err)
os.Exit(1)
}
ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
switch v := err.(type) {
case util.ContextualError:
v.Log(l)
os.Exit(1)
case error:
l.WithError(err).Error("Failed to start")
os.Exit(1)
}
if !*configTest {
ctrl.Start()
ctrl.ShutdownBlock()
}
os.Exit(0)
} }

View File

@@ -1,44 +1,55 @@
package main package main
import ( import (
"fmt"
"log" "log"
"os" "os"
"path/filepath" "path/filepath"
"github.com/kardianos/service" "github.com/kardianos/service"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula" "github.com/slackhq/nebula"
"github.com/slackhq/nebula/config"
) )
var logger service.Logger var logger service.Logger
type program struct { type program struct {
exit chan struct{}
configPath *string configPath *string
configTest *bool configTest *bool
build string build string
control *nebula.Control
} }
func (p *program) Start(s service.Service) error { func (p *program) Start(s service.Service) error {
logger.Info("Nebula service starting.")
p.exit = make(chan struct{})
// Start should not block. // Start should not block.
go p.run() logger.Info("Nebula service starting.")
return nil
}
func (p *program) run() error { l := logrus.New()
nebula.Main(*p.configPath, *p.configTest, Build) HookLogger(l)
c := config.NewC(l)
err := c.Load(*p.configPath)
if err != nil {
return fmt.Errorf("failed to load config: %s", err)
}
p.control, err = nebula.Main(c, *p.configTest, Build, l, nil)
if err != nil {
return err
}
p.control.Start()
return nil return nil
} }
func (p *program) Stop(s service.Service) error { func (p *program) Stop(s service.Service) error {
logger.Info("Nebula service stopping.") logger.Info("Nebula service stopping.")
close(p.exit) p.control.Stop()
return nil return nil
} }
func doService(configPath *string, configTest *bool, build string, serviceFlag *string) { func doService(configPath *string, configTest *bool, build string, serviceFlag *string) {
if *configPath == "" { if *configPath == "" {
ex, err := os.Executable() ex, err := os.Executable()
if err != nil { if err != nil {
@@ -60,6 +71,10 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
build: build, build: build,
} }
// Here are what the different loggers are doing:
// - `log` is the standard go log utility, meant to be used while the process is still attached to stdout/stderr
// - `logger` is the service log utility that may be attached to a special place depending on OS (Windows will have it attached to the event log)
// - above, in `Run` we create a `logrus.Logger` which is what nebula expects to use
s, err := service.New(prg, svcConfig) s, err := service.New(prg, svcConfig)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@@ -75,6 +90,7 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
for { for {
err := <-errs err := <-errs
if err != nil { if err != nil {
// Route any errors from the system logger to stdout as a best effort to notice issues there
log.Print(err) log.Print(err)
} }
} }
@@ -84,6 +100,7 @@ func doService(configPath *string, configTest *bool, build string, serviceFlag *
case "run": case "run":
err = s.Run() err = s.Run()
if err != nil { if err != nil {
// Route any errors to the system logger
logger.Error(err) logger.Error(err)
} }
default: default:
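
For reference, the kardianos/service contract the wrapper above implements is small: a type with non-blocking Start and Stop methods handed to service.New, then driven with Run. A minimal, self-contained sketch (the noopProgram type and config values are illustrative, not nebula's):

package main

import (
	"log"

	"github.com/kardianos/service"
)

type noopProgram struct{}

// Start must not block; real work belongs in a goroutine.
func (p *noopProgram) Start(s service.Service) error { return nil }

// Stop should shut that work down and return promptly.
func (p *noopProgram) Stop(s service.Service) error { return nil }

func main() {
	svcConfig := &service.Config{
		Name:        "example",
		DisplayName: "Example service",
		Description: "Illustrative kardianos/service wiring",
	}

	s, err := service.New(&noopProgram{}, svcConfig)
	if err != nil {
		log.Fatal(err)
	}

	// Run blocks until the service manager asks the service to stop
	// (or until interrupted when run interactively).
	if err := s.Run(); err != nil {
		log.Fatal(err)
	}
}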


@@ -5,7 +5,10 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula" "github.com/slackhq/nebula"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/util"
) )
// A version string that can be set with // A version string that can be set with
@@ -39,5 +42,31 @@ func main() {
os.Exit(1) os.Exit(1)
} }
nebula.Main(*configPath, *configTest, Build) l := logrus.New()
l.Out = os.Stdout
c := config.NewC(l)
err := c.Load(*configPath)
if err != nil {
fmt.Printf("failed to load config: %s", err)
os.Exit(1)
}
ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
switch v := err.(type) {
case util.ContextualError:
v.Log(l)
os.Exit(1)
case error:
l.WithError(err).Error("Failed to start")
os.Exit(1)
}
if !*configTest {
ctrl.Start()
ctrl.ShutdownBlock()
}
os.Exit(0)
} }
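
The switch on err above lets a richer error type carry its own logging context; as a generic illustration of that Go pattern with a stand-in error type (contextualError here is hypothetical and not nebula's util.ContextualError):

package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

// contextualError is a stand-in for an error type that knows how to log itself with extra fields.
type contextualError struct {
	msg    string
	fields logrus.Fields
}

func (e contextualError) Error() string { return e.msg }

func (e contextualError) Log(l *logrus.Logger) {
	l.WithFields(e.fields).Error(e.msg)
}

// report mirrors the type switch used in main: rich errors log themselves, plain errors get a generic message.
func report(l *logrus.Logger, err error) {
	switch v := err.(type) {
	case contextualError:
		v.Log(l)
	case error:
		l.WithError(v).Error("Failed to start")
	}
}

func main() {
	l := logrus.New()
	report(l, contextualError{msg: "bad config", fields: logrus.Fields{"path": "/etc/example.yml"}})
	report(l, errors.New("plain failure"))
}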


@@ -1,10 +1,9 @@
package nebula package config
import ( import (
"context"
"errors"
"fmt" "fmt"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
"io/ioutil" "io/ioutil"
"os" "os"
"os/signal" "os/signal"
@@ -12,30 +11,38 @@ import (
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"sync"
"syscall" "syscall"
"time" "time"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
) )
type Config struct { type C struct {
path string path string
files []string files []string
Settings map[interface{}]interface{} Settings map[interface{}]interface{}
oldSettings map[interface{}]interface{} oldSettings map[interface{}]interface{}
callbacks []func(*Config) callbacks []func(*C)
l *logrus.Logger
reloadLock sync.Mutex
} }
func NewConfig() *Config { func NewC(l *logrus.Logger) *C {
return &Config{ return &C{
Settings: make(map[interface{}]interface{}), Settings: make(map[interface{}]interface{}),
l: l,
} }
} }
// Load will find all yaml files within path and load them in lexical order // Load will find all yaml files within path and load them in lexical order
func (c *Config) Load(path string) error { func (c *C) Load(path string) error {
c.path = path c.path = path
c.files = make([]string, 0) c.files = make([]string, 0)
err := c.resolve(path) err := c.resolve(path, true)
if err != nil { if err != nil {
return err return err
} }
@@ -54,20 +61,32 @@ func (c *Config) Load(path string) error {
return nil return nil
} }
func (c *C) LoadString(raw string) error {
if raw == "" {
return errors.New("Empty configuration")
}
return c.parseRaw([]byte(raw))
}
// RegisterReloadCallback stores a function to be called when a config reload is triggered. The functions registered // RegisterReloadCallback stores a function to be called when a config reload is triggered. The functions registered
// here should decide if they need to make a change to the current process before making the change. HasChanged can be // here should decide if they need to make a change to the current process before making the change. HasChanged can be
// used to help decide if a change is necessary. // used to help decide if a change is necessary.
// These functions should return quickly or spawn their own go routine if they will take a while // These functions should return quickly or spawn their own go routine if they will take a while
func (c *Config) RegisterReloadCallback(f func(*Config)) { func (c *C) RegisterReloadCallback(f func(*C)) {
c.callbacks = append(c.callbacks, f) c.callbacks = append(c.callbacks, f)
} }
// InitialLoad returns true if this is the first load of the config, and ReloadConfig has not been called yet.
func (c *C) InitialLoad() bool {
return c.oldSettings == nil
}
// HasChanged checks if the underlying structure of the provided key has changed after a config reload. The value of // HasChanged checks if the underlying structure of the provided key has changed after a config reload. The value of
// k in both the old and new settings will be serialized, the result of the string comparison is returned. // k in both the old and new settings will be serialized, the result of the string comparison is returned.
// If k is an empty string the entire config is tested. // If k is an empty string the entire config is tested.
// It's important to note that this is very rudimentary and susceptible to configuration ordering issues indicating // It's important to note that this is very rudimentary and susceptible to configuration ordering issues indicating
// there is change when there actually wasn't any. // there is change when there actually wasn't any.
func (c *Config) HasChanged(k string) bool { func (c *C) HasChanged(k string) bool {
if c.oldSettings == nil { if c.oldSettings == nil {
return false return false
} }
@@ -88,12 +107,12 @@ func (c *Config) HasChanged(k string) bool {
newVals, err := yaml.Marshal(nv) newVals, err := yaml.Marshal(nv)
if err != nil { if err != nil {
l.WithField("config_path", k).WithError(err).Error("Error while marshaling new config") c.l.WithField("config_path", k).WithError(err).Error("Error while marshaling new config")
} }
oldVals, err := yaml.Marshal(ov) oldVals, err := yaml.Marshal(ov)
if err != nil { if err != nil {
l.WithField("config_path", k).WithError(err).Error("Error while marshaling old config") c.l.WithField("config_path", k).WithError(err).Error("Error while marshaling old config")
} }
return string(newVals) != string(oldVals) return string(newVals) != string(oldVals)
@@ -101,19 +120,29 @@ func (c *Config) HasChanged(k string) bool {
// CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the // CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the
// original path provided to Load. The old settings are shallow copied for change detection after the reload. // original path provided to Load. The old settings are shallow copied for change detection after the reload.
func (c *Config) CatchHUP() { func (c *C) CatchHUP(ctx context.Context) {
ch := make(chan os.Signal, 1) ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGHUP) signal.Notify(ch, syscall.SIGHUP)
go func() { go func() {
for range ch { for {
l.Info("Caught HUP, reloading config") select {
c.ReloadConfig() case <-ctx.Done():
signal.Stop(ch)
close(ch)
return
case <-ch:
c.l.Info("Caught HUP, reloading config")
c.ReloadConfig()
}
} }
}() }()
} }
func (c *Config) ReloadConfig() { func (c *C) ReloadConfig() {
c.reloadLock.Lock()
defer c.reloadLock.Unlock()
c.oldSettings = make(map[interface{}]interface{}) c.oldSettings = make(map[interface{}]interface{})
for k, v := range c.Settings { for k, v := range c.Settings {
c.oldSettings[k] = v c.oldSettings[k] = v
@@ -121,7 +150,7 @@ func (c *Config) ReloadConfig() {
err := c.Load(c.path) err := c.Load(c.path)
if err != nil { if err != nil {
l.WithField("config_path", c.path).WithError(err).Error("Error occurred while reloading config") c.l.WithField("config_path", c.path).WithError(err).Error("Error occurred while reloading config")
return return
} }
@@ -130,8 +159,29 @@ func (c *Config) ReloadConfig() {
} }
} }
func (c *C) ReloadConfigString(raw string) error {
c.reloadLock.Lock()
defer c.reloadLock.Unlock()
c.oldSettings = make(map[interface{}]interface{})
for k, v := range c.Settings {
c.oldSettings[k] = v
}
err := c.LoadString(raw)
if err != nil {
return err
}
for _, v := range c.callbacks {
v(c)
}
return nil
}
// GetString will get the string for k or return the default d if not found or invalid // GetString will get the string for k or return the default d if not found or invalid
func (c *Config) GetString(k, d string) string { func (c *C) GetString(k, d string) string {
r := c.Get(k) r := c.Get(k)
if r == nil { if r == nil {
return d return d
@@ -141,7 +191,7 @@ func (c *Config) GetString(k, d string) string {
} }
// GetStringSlice will get the slice of strings for k or return the default d if not found or invalid // GetStringSlice will get the slice of strings for k or return the default d if not found or invalid
func (c *Config) GetStringSlice(k string, d []string) []string { func (c *C) GetStringSlice(k string, d []string) []string {
r := c.Get(k) r := c.Get(k)
if r == nil { if r == nil {
return d return d
@@ -161,7 +211,7 @@ func (c *Config) GetStringSlice(k string, d []string) []string {
} }
// GetMap will get the map for k or return the default d if not found or invalid // GetMap will get the map for k or return the default d if not found or invalid
func (c *Config) GetMap(k string, d map[interface{}]interface{}) map[interface{}]interface{} { func (c *C) GetMap(k string, d map[interface{}]interface{}) map[interface{}]interface{} {
r := c.Get(k) r := c.Get(k)
if r == nil { if r == nil {
return d return d
@@ -176,7 +226,7 @@ func (c *Config) GetMap(k string, d map[interface{}]interface{}) map[interface{}
} }
// GetInt will get the int for k or return the default d if not found or invalid // GetInt will get the int for k or return the default d if not found or invalid
func (c *Config) GetInt(k string, d int) int { func (c *C) GetInt(k string, d int) int {
r := c.GetString(k, strconv.Itoa(d)) r := c.GetString(k, strconv.Itoa(d))
v, err := strconv.Atoi(r) v, err := strconv.Atoi(r)
if err != nil { if err != nil {
@@ -187,7 +237,7 @@ func (c *Config) GetInt(k string, d int) int {
} }
// GetBool will get the bool for k or return the default d if not found or invalid // GetBool will get the bool for k or return the default d if not found or invalid
func (c *Config) GetBool(k string, d bool) bool { func (c *C) GetBool(k string, d bool) bool {
r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d))) r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d)))
v, err := strconv.ParseBool(r) v, err := strconv.ParseBool(r)
if err != nil { if err != nil {
@@ -204,7 +254,7 @@ func (c *Config) GetBool(k string, d bool) bool {
} }
// GetDuration will get the duration for k or return the default d if not found or invalid // GetDuration will get the duration for k or return the default d if not found or invalid
func (c *Config) GetDuration(k string, d time.Duration) time.Duration { func (c *C) GetDuration(k string, d time.Duration) time.Duration {
r := c.GetString(k, "") r := c.GetString(k, "")
v, err := time.ParseDuration(r) v, err := time.ParseDuration(r)
if err != nil { if err != nil {
@@ -213,11 +263,15 @@ func (c *Config) GetDuration(k string, d time.Duration) time.Duration {
return v return v
} }
func (c *Config) Get(k string) interface{} { func (c *C) Get(k string) interface{} {
return c.get(k, c.Settings) return c.get(k, c.Settings)
} }
func (c *Config) get(k string, v interface{}) interface{} { func (c *C) IsSet(k string) bool {
return c.get(k, c.Settings) != nil
}
func (c *C) get(k string, v interface{}) interface{} {
parts := strings.Split(k, ".") parts := strings.Split(k, ".")
for _, p := range parts { for _, p := range parts {
m, ok := v.(map[interface{}]interface{}) m, ok := v.(map[interface{}]interface{})
@@ -234,14 +288,16 @@ func (c *Config) get(k string, v interface{}) interface{} {
return v return v
} }
func (c *Config) resolve(path string) error { // direct signifies if this is the config path directly specified by the user,
// versus a file/dir found by recursing into that path
func (c *C) resolve(path string, direct bool) error {
i, err := os.Stat(path) i, err := os.Stat(path)
if err != nil { if err != nil {
return nil return nil
} }
if !i.IsDir() { if !i.IsDir() {
c.addFile(path) c.addFile(path, direct)
return nil return nil
} }
@@ -251,7 +307,7 @@ func (c *Config) resolve(path string) error {
} }
for _, p := range paths { for _, p := range paths {
err := c.resolve(filepath.Join(path, p)) err := c.resolve(filepath.Join(path, p), false)
if err != nil { if err != nil {
return err return err
} }
@@ -260,10 +316,10 @@ func (c *Config) resolve(path string) error {
return nil return nil
} }
func (c *Config) addFile(path string) error { func (c *C) addFile(path string, direct bool) error {
ext := filepath.Ext(path) ext := filepath.Ext(path)
if ext != ".yaml" && ext != ".yml" { if !direct && ext != ".yaml" && ext != ".yml" {
return nil return nil
} }
@@ -276,7 +332,19 @@ func (c *Config) addFile(path string) error {
return nil return nil
} }
func (c *Config) parse() error { func (c *C) parseRaw(b []byte) error {
var m map[interface{}]interface{}
err := yaml.Unmarshal(b, &m)
if err != nil {
return err
}
c.Settings = m
return nil
}
func (c *C) parse() error {
var m map[interface{}]interface{} var m map[interface{}]interface{}
for _, path := range c.files { for _, path := range c.files {
@@ -319,24 +387,3 @@ func readDirNames(path string) ([]string, error) {
sort.Strings(paths) sort.Strings(paths)
return paths, nil return paths, nil
} }
func configLogger(c *Config) error {
// set up our logging level
logLevel, err := logrus.ParseLevel(strings.ToLower(c.GetString("logging.level", "info")))
if err != nil {
return fmt.Errorf("%s; possible levels: %s", err, logrus.AllLevels)
}
l.SetLevel(logLevel)
logFormat := strings.ToLower(c.GetString("logging.format", "text"))
switch logFormat {
case "text":
l.Formatter = &logrus.TextFormatter{}
case "json":
l.Formatter = &logrus.JSONFormatter{}
default:
return fmt.Errorf("unknown log format `%s`. possible formats: %s", logFormat, []string{"text", "json"})
}
return nil
}
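
Putting the new config.C API together, a hedged sketch of how a consumer loads a config directory, reacts to SIGHUP reloads, and reads values (the path and keys shown are illustrative):

package main

import (
	"context"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/config"
)

func main() {
	l := logrus.New()

	c := config.NewC(l)
	if err := c.Load("/etc/nebula"); err != nil { // a directory or a single yaml/yml file
		l.WithError(err).Fatal("failed to load config")
	}

	// React to reloads; HasChanged compares the serialized old and new values for a key.
	c.RegisterReloadCallback(func(c *config.C) {
		if c.HasChanged("logging.level") {
			l.Info("logging.level changed, reapplying log settings")
		}
	})

	// Watch for SIGHUP until the context is cancelled.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	c.CatchHUP(ctx)

	punchy := c.GetBool("punchy.punch", false)
	interval := c.GetDuration("handshakes.try_interval", 100*time.Millisecond)
	l.WithField("punchy", punchy).WithField("interval", interval).Info("loaded settings")

	// ... run the application ...
}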


@@ -1,23 +1,26 @@
package nebula package config
import ( import (
"github.com/stretchr/testify/assert"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
"time" "time"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert"
) )
func TestConfig_Load(t *testing.T) { func TestConfig_Load(t *testing.T) {
l := test.NewLogger()
dir, err := ioutil.TempDir("", "config-test") dir, err := ioutil.TempDir("", "config-test")
// invalid yaml // invalid yaml
c := NewConfig() c := NewC(l)
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644) ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}") assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")
// simple multi config merge // simple multi config merge
c = NewConfig() c = NewC(l)
os.RemoveAll(dir) os.RemoveAll(dir)
os.Mkdir(dir, 0755) os.Mkdir(dir, 0755)
@@ -39,8 +42,9 @@ func TestConfig_Load(t *testing.T) {
} }
func TestConfig_Get(t *testing.T) { func TestConfig_Get(t *testing.T) {
l := test.NewLogger()
// test simple type // test simple type
c := NewConfig() c := NewC(l)
c.Settings["firewall"] = map[interface{}]interface{}{"outbound": "hi"} c.Settings["firewall"] = map[interface{}]interface{}{"outbound": "hi"}
assert.Equal(t, "hi", c.Get("firewall.outbound")) assert.Equal(t, "hi", c.Get("firewall.outbound"))
@@ -54,13 +58,15 @@ func TestConfig_Get(t *testing.T) {
} }
func TestConfig_GetStringSlice(t *testing.T) { func TestConfig_GetStringSlice(t *testing.T) {
c := NewConfig() l := test.NewLogger()
c := NewC(l)
c.Settings["slice"] = []interface{}{"one", "two"} c.Settings["slice"] = []interface{}{"one", "two"}
assert.Equal(t, []string{"one", "two"}, c.GetStringSlice("slice", []string{})) assert.Equal(t, []string{"one", "two"}, c.GetStringSlice("slice", []string{}))
} }
func TestConfig_GetBool(t *testing.T) { func TestConfig_GetBool(t *testing.T) {
c := NewConfig() l := test.NewLogger()
c := NewC(l)
c.Settings["bool"] = true c.Settings["bool"] = true
assert.Equal(t, true, c.GetBool("bool", false)) assert.Equal(t, true, c.GetBool("bool", false))
@@ -87,20 +93,21 @@ func TestConfig_GetBool(t *testing.T) {
} }
func TestConfig_HasChanged(t *testing.T) { func TestConfig_HasChanged(t *testing.T) {
l := test.NewLogger()
// No reload has occurred, return false // No reload has occurred, return false
c := NewConfig() c := NewC(l)
c.Settings["test"] = "hi" c.Settings["test"] = "hi"
assert.False(t, c.HasChanged("")) assert.False(t, c.HasChanged(""))
// Test key change // Test key change
c = NewConfig() c = NewC(l)
c.Settings["test"] = "hi" c.Settings["test"] = "hi"
c.oldSettings = map[interface{}]interface{}{"test": "no"} c.oldSettings = map[interface{}]interface{}{"test": "no"}
assert.True(t, c.HasChanged("test")) assert.True(t, c.HasChanged("test"))
assert.True(t, c.HasChanged("")) assert.True(t, c.HasChanged(""))
// No key change // No key change
c = NewConfig() c = NewC(l)
c.Settings["test"] = "hi" c.Settings["test"] = "hi"
c.oldSettings = map[interface{}]interface{}{"test": "hi"} c.oldSettings = map[interface{}]interface{}{"test": "hi"}
assert.False(t, c.HasChanged("test")) assert.False(t, c.HasChanged("test"))
@@ -108,12 +115,13 @@ func TestConfig_HasChanged(t *testing.T) {
} }
func TestConfig_ReloadConfig(t *testing.T) { func TestConfig_ReloadConfig(t *testing.T) {
l := test.NewLogger()
done := make(chan bool, 1) done := make(chan bool, 1)
dir, err := ioutil.TempDir("", "config-test") dir, err := ioutil.TempDir("", "config-test")
assert.Nil(t, err) assert.Nil(t, err)
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644) ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
c := NewConfig() c := NewC(l)
assert.Nil(t, c.Load(dir)) assert.Nil(t, c.Load(dir))
assert.False(t, c.HasChanged("outer.inner")) assert.False(t, c.HasChanged("outer.inner"))
@@ -122,7 +130,7 @@ func TestConfig_ReloadConfig(t *testing.T) {
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644) ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
c.RegisterReloadCallback(func(c *Config) { c.RegisterReloadCallback(func(c *C) {
done <- true done <- true
}) })
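
The LoadString and ReloadConfigString helpers added above also make it possible to exercise reload callbacks without touching the filesystem; a small hedged sketch of that approach (not one of the tests in this change):

package config

import (
	"testing"

	"github.com/slackhq/nebula/test"
	"github.com/stretchr/testify/assert"
)

func TestConfig_ReloadString_sketch(t *testing.T) {
	l := test.NewLogger()

	c := NewC(l)
	assert.Nil(t, c.LoadString("outer:\n  inner: hi"))

	fired := false
	c.RegisterReloadCallback(func(c *C) {
		fired = true
	})

	// Reload from a string instead of re-reading files from disk.
	assert.Nil(t, c.ReloadConfigString("outer:\n  inner: ho"))
	assert.True(t, fired)
	assert.True(t, c.HasChanged("outer.inner"))
	assert.Equal(t, "ho", c.GetString("outer.inner", ""))
}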


@@ -1,10 +1,13 @@
package nebula package nebula
import ( import (
"context"
"sync" "sync"
"time" "time"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
) )
// TODO: incount and outcount are intended as a shortcut to locking the mutexes for every single packet // TODO: incount and outcount are intended as a shortcut to locking the mutexes for every single packet
@@ -12,47 +15,49 @@ import (
type connectionManager struct { type connectionManager struct {
hostMap *HostMap hostMap *HostMap
in map[uint32]struct{} in map[iputil.VpnIp]struct{}
inLock *sync.RWMutex inLock *sync.RWMutex
inCount int inCount int
out map[uint32]struct{} out map[iputil.VpnIp]struct{}
outLock *sync.RWMutex outLock *sync.RWMutex
outCount int outCount int
TrafficTimer *SystemTimerWheel TrafficTimer *SystemTimerWheel
intf *Interface intf *Interface
pendingDeletion map[uint32]int pendingDeletion map[iputil.VpnIp]int
pendingDeletionLock *sync.RWMutex pendingDeletionLock *sync.RWMutex
pendingDeletionTimer *SystemTimerWheel pendingDeletionTimer *SystemTimerWheel
checkInterval int checkInterval int
pendingDeletionInterval int pendingDeletionInterval int
l *logrus.Logger
// I wanted to call one matLock // I wanted to call one matLock
} }
func newConnectionManager(intf *Interface, checkInterval, pendingDeletionInterval int) *connectionManager { func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval int) *connectionManager {
nc := &connectionManager{ nc := &connectionManager{
hostMap: intf.hostMap, hostMap: intf.hostMap,
in: make(map[uint32]struct{}), in: make(map[iputil.VpnIp]struct{}),
inLock: &sync.RWMutex{}, inLock: &sync.RWMutex{},
inCount: 0, inCount: 0,
out: make(map[uint32]struct{}), out: make(map[iputil.VpnIp]struct{}),
outLock: &sync.RWMutex{}, outLock: &sync.RWMutex{},
outCount: 0, outCount: 0,
TrafficTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60), TrafficTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
intf: intf, intf: intf,
pendingDeletion: make(map[uint32]int), pendingDeletion: make(map[iputil.VpnIp]int),
pendingDeletionLock: &sync.RWMutex{}, pendingDeletionLock: &sync.RWMutex{},
pendingDeletionTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60), pendingDeletionTimer: NewSystemTimerWheel(time.Millisecond*500, time.Second*60),
checkInterval: checkInterval, checkInterval: checkInterval,
pendingDeletionInterval: pendingDeletionInterval, pendingDeletionInterval: pendingDeletionInterval,
l: l,
} }
nc.Start() nc.Start(ctx)
return nc return nc
} }
func (n *connectionManager) In(ip uint32) { func (n *connectionManager) In(ip iputil.VpnIp) {
n.inLock.RLock() n.inLock.RLock()
// If this already exists, return // If this already exists, return
if _, ok := n.in[ip]; ok { if _, ok := n.in[ip]; ok {
@@ -65,7 +70,7 @@ func (n *connectionManager) In(ip uint32) {
n.inLock.Unlock() n.inLock.Unlock()
} }
func (n *connectionManager) Out(ip uint32) { func (n *connectionManager) Out(ip iputil.VpnIp) {
n.outLock.RLock() n.outLock.RLock()
// If this already exists, return // If this already exists, return
if _, ok := n.out[ip]; ok { if _, ok := n.out[ip]; ok {
@@ -84,9 +89,9 @@ func (n *connectionManager) Out(ip uint32) {
n.outLock.Unlock() n.outLock.Unlock()
} }
func (n *connectionManager) CheckIn(vpnIP uint32) bool { func (n *connectionManager) CheckIn(vpnIp iputil.VpnIp) bool {
n.inLock.RLock() n.inLock.RLock()
if _, ok := n.in[vpnIP]; ok { if _, ok := n.in[vpnIp]; ok {
n.inLock.RUnlock() n.inLock.RUnlock()
return true return true
} }
@@ -94,7 +99,7 @@ func (n *connectionManager) CheckIn(vpnIP uint32) bool {
return false return false
} }
func (n *connectionManager) ClearIP(ip uint32) { func (n *connectionManager) ClearIP(ip iputil.VpnIp) {
n.inLock.Lock() n.inLock.Lock()
n.outLock.Lock() n.outLock.Lock()
delete(n.in, ip) delete(n.in, ip)
@@ -103,13 +108,13 @@ func (n *connectionManager) ClearIP(ip uint32) {
n.outLock.Unlock() n.outLock.Unlock()
} }
func (n *connectionManager) ClearPendingDeletion(ip uint32) { func (n *connectionManager) ClearPendingDeletion(ip iputil.VpnIp) {
n.pendingDeletionLock.Lock() n.pendingDeletionLock.Lock()
delete(n.pendingDeletion, ip) delete(n.pendingDeletion, ip)
n.pendingDeletionLock.Unlock() n.pendingDeletionLock.Unlock()
} }
func (n *connectionManager) AddPendingDeletion(ip uint32) { func (n *connectionManager) AddPendingDeletion(ip iputil.VpnIp) {
n.pendingDeletionLock.Lock() n.pendingDeletionLock.Lock()
if _, ok := n.pendingDeletion[ip]; ok { if _, ok := n.pendingDeletion[ip]; ok {
n.pendingDeletion[ip] += 1 n.pendingDeletion[ip] += 1
@@ -120,7 +125,7 @@ func (n *connectionManager) AddPendingDeletion(ip uint32) {
n.pendingDeletionLock.Unlock() n.pendingDeletionLock.Unlock()
} }
func (n *connectionManager) checkPendingDeletion(ip uint32) bool { func (n *connectionManager) checkPendingDeletion(ip iputil.VpnIp) bool {
n.pendingDeletionLock.RLock() n.pendingDeletionLock.RLock()
if _, ok := n.pendingDeletion[ip]; ok { if _, ok := n.pendingDeletion[ip]; ok {
@@ -131,24 +136,34 @@ func (n *connectionManager) checkPendingDeletion(ip uint32) bool {
return false return false
} }
func (n *connectionManager) AddTrafficWatch(vpnIP uint32, seconds int) { func (n *connectionManager) AddTrafficWatch(vpnIp iputil.VpnIp, seconds int) {
n.TrafficTimer.Add(vpnIP, time.Second*time.Duration(seconds)) n.TrafficTimer.Add(vpnIp, time.Second*time.Duration(seconds))
} }
func (n *connectionManager) Start() { func (n *connectionManager) Start(ctx context.Context) {
go n.Run() go n.Run(ctx)
} }
func (n *connectionManager) Run() { func (n *connectionManager) Run(ctx context.Context) {
clockSource := time.Tick(500 * time.Millisecond) clockSource := time.NewTicker(500 * time.Millisecond)
defer clockSource.Stop()
for now := range clockSource { p := []byte("")
n.HandleMonitorTick(now) nb := make([]byte, 12, 12)
n.HandleDeletionTick(now) out := make([]byte, mtu)
for {
select {
case <-ctx.Done():
return
case now := <-clockSource.C:
n.HandleMonitorTick(now, p, nb, out)
n.HandleDeletionTick(now)
}
} }
} }
func (n *connectionManager) HandleMonitorTick(now time.Time) { func (n *connectionManager) HandleMonitorTick(now time.Time, p, nb, out []byte) {
n.TrafficTimer.advance(now) n.TrafficTimer.advance(now)
for { for {
ep := n.TrafficTimer.Purge() ep := n.TrafficTimer.Purge()
@@ -156,44 +171,48 @@ func (n *connectionManager) HandleMonitorTick(now time.Time) {
break break
} }
vpnIP := ep.(uint32) vpnIp := ep.(iputil.VpnIp)
// Check for traffic coming back in from this host. // Check for traffic coming back in from this host.
traf := n.CheckIn(vpnIP) traf := n.CheckIn(vpnIp)
// If we saw incoming packets from this ip, just return hostinfo, err := n.hostMap.QueryVpnIp(vpnIp)
if err != nil {
n.l.Debugf("Not found in hostmap: %s", vpnIp)
n.ClearIP(vpnIp)
n.ClearPendingDeletion(vpnIp)
continue
}
if n.handleInvalidCertificate(now, vpnIp, hostinfo) {
continue
}
// If we saw incoming packets from this ip and the peer's certificate is not
// expired, just ignore.
if traf { if traf {
if l.Level >= logrus.DebugLevel { if n.l.Level >= logrus.DebugLevel {
l.WithField("vpnIp", IntIp(vpnIP)). n.l.WithField("vpnIp", vpnIp).
WithField("tunnelCheck", m{"state": "alive", "method": "passive"}). WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
Debug("Tunnel status") Debug("Tunnel status")
} }
n.ClearIP(vpnIP) n.ClearIP(vpnIp)
n.ClearPendingDeletion(vpnIP) n.ClearPendingDeletion(vpnIp)
continue continue
} }
// If we didn't we may need to probe or destroy the conn hostinfo.logger(n.l).
hostinfo, err := n.hostMap.QueryVpnIP(vpnIP)
if err != nil {
l.Debugf("Not found in hostmap: %s", IntIp(vpnIP))
n.ClearIP(vpnIP)
n.ClearPendingDeletion(vpnIP)
continue
}
l.WithField("vpnIp", IntIp(vpnIP)).
WithField("tunnelCheck", m{"state": "testing", "method": "active"}). WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
Debug("Tunnel status") Debug("Tunnel status")
if hostinfo != nil && hostinfo.ConnectionState != nil { if hostinfo != nil && hostinfo.ConnectionState != nil {
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
n.intf.SendMessageToVpnIp(test, testRequest, vpnIP, []byte(""), make([]byte, 12, 12), make([]byte, mtu)) n.intf.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, p, nb, out)
} else { } else {
l.Debugf("Hostinfo sadness: %s", IntIp(vpnIP)) hostinfo.logger(n.l).Debugf("Hostinfo sadness: %s", vpnIp)
} }
n.AddPendingDeletion(vpnIP) n.AddPendingDeletion(vpnIp)
} }
} }
@@ -206,49 +225,85 @@ func (n *connectionManager) HandleDeletionTick(now time.Time) {
break break
} }
vpnIP := ep.(uint32) vpnIp := ep.(iputil.VpnIp)
// If we saw incoming packets from this ip, just return hostinfo, err := n.hostMap.QueryVpnIp(vpnIp)
traf := n.CheckIn(vpnIP) if err != nil {
if traf { n.l.Debugf("Not found in hostmap: %s", vpnIp)
l.WithField("vpnIp", IntIp(vpnIP)). n.ClearIP(vpnIp)
WithField("tunnelCheck", m{"state": "alive", "method": "active"}). n.ClearPendingDeletion(vpnIp)
Debug("Tunnel status")
n.ClearIP(vpnIP)
n.ClearPendingDeletion(vpnIP)
continue continue
} }
hostinfo, err := n.hostMap.QueryVpnIP(vpnIP) if n.handleInvalidCertificate(now, vpnIp, hostinfo) {
if err != nil { continue
n.ClearIP(vpnIP) }
n.ClearPendingDeletion(vpnIP)
l.Debugf("Not found in hostmap: %s", IntIp(vpnIP)) // If we saw an incoming packets from this ip and peer's certificate is not
// expired, just ignore.
traf := n.CheckIn(vpnIp)
if traf {
n.l.WithField("vpnIp", vpnIp).
WithField("tunnelCheck", m{"state": "alive", "method": "active"}).
Debug("Tunnel status")
n.ClearIP(vpnIp)
n.ClearPendingDeletion(vpnIp)
continue continue
} }
// If it comes around on deletion wheel and hasn't resolved itself, delete // If it comes around on deletion wheel and hasn't resolved itself, delete
if n.checkPendingDeletion(vpnIP) { if n.checkPendingDeletion(vpnIp) {
cn := "" cn := ""
if hostinfo.ConnectionState != nil && hostinfo.ConnectionState.peerCert != nil { if hostinfo.ConnectionState != nil && hostinfo.ConnectionState.peerCert != nil {
cn = hostinfo.ConnectionState.peerCert.Details.Name cn = hostinfo.ConnectionState.peerCert.Details.Name
} }
l.WithField("vpnIp", IntIp(vpnIP)). hostinfo.logger(n.l).
WithField("tunnelCheck", m{"state": "dead", "method": "active"}). WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
WithField("certName", cn). WithField("certName", cn).
Info("Tunnel status") Info("Tunnel status")
n.ClearIP(vpnIP) n.ClearIP(vpnIp)
n.ClearPendingDeletion(vpnIP) n.ClearPendingDeletion(vpnIp)
// TODO: This is only here to let tests work. Should do proper mocking // TODO: This is only here to let tests work. Should do proper mocking
if n.intf.lightHouse != nil { if n.intf.lightHouse != nil {
n.intf.lightHouse.DeleteVpnIP(vpnIP) n.intf.lightHouse.DeleteVpnIp(vpnIp)
} }
n.hostMap.DeleteVpnIP(vpnIP) n.hostMap.DeleteHostInfo(hostinfo)
n.hostMap.DeleteIndex(hostinfo.localIndexId)
} else { } else {
n.ClearIP(vpnIP) n.ClearIP(vpnIp)
n.ClearPendingDeletion(vpnIP) n.ClearPendingDeletion(vpnIp)
} }
} }
} }
// handleInvalidCertificate will destroy a tunnel if pki.disconnect_invalid is true and the certificate is no longer valid
func (n *connectionManager) handleInvalidCertificate(now time.Time, vpnIp iputil.VpnIp, hostinfo *HostInfo) bool {
if !n.intf.disconnectInvalid {
return false
}
remoteCert := hostinfo.GetCert()
if remoteCert == nil {
return false
}
valid, err := remoteCert.Verify(now, n.intf.caPool)
if valid {
return false
}
fingerprint, _ := remoteCert.Sha256Sum()
n.l.WithField("vpnIp", vpnIp).WithError(err).
WithField("certName", remoteCert.Details.Name).
WithField("fingerprint", fingerprint).
Info("Remote certificate is no longer valid, tearing down the tunnel")
// Inform the remote and close the tunnel locally
n.intf.sendCloseTunnel(hostinfo)
n.intf.closeTunnel(hostinfo)
n.ClearIP(vpnIp)
n.ClearPendingDeletion(vpnIp)
return true
}
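
Run above swaps the unstoppable time.Tick for a time.NewTicker that is released when the context is cancelled; the same pattern in isolation (the tick handling here is just a print, purely illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

// runTicker fires work every interval until ctx is cancelled, then releases the ticker via defer.
func runTicker(ctx context.Context, interval time.Duration) {
	clockSource := time.NewTicker(interval)
	defer clockSource.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case now := <-clockSource.C:
			fmt.Println("tick at", now.Format(time.RFC3339))
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go runTicker(ctx, 500*time.Millisecond)

	time.Sleep(2 * time.Second)
	cancel() // stops the loop and, through the defer, the ticker itself
	time.Sleep(100 * time.Millisecond)
}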


@@ -1,26 +1,33 @@
package nebula package nebula
import ( import (
"context"
"crypto/ed25519"
"crypto/rand"
"net" "net"
"testing" "testing"
"time" "time"
"github.com/flynn/noise" "github.com/flynn/noise"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
var vpnIP uint32 var vpnIp iputil.VpnIp
func Test_NewConnectionManagerTest(t *testing.T) { func Test_NewConnectionManagerTest(t *testing.T) {
l := test.NewLogger()
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24") //_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24") _, localrange, _ := net.ParseCIDR("10.1.1.1/24")
vpnIP = ip2int(net.ParseIP("172.1.1.2")) vpnIp = iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
preferredRanges := []*net.IPNet{localrange} preferredRanges := []*net.IPNet{localrange}
// Very incomplete mock objects // Very incomplete mock objects
hostMap := NewHostMap("test", vpncidr, preferredRanges) hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
cs := &CertState{ cs := &CertState{
rawCertificate: []byte{}, rawCertificate: []byte{},
privateKey: []byte{}, privateKey: []byte{},
@@ -28,62 +35,68 @@ func Test_NewConnectionManagerTest(t *testing.T) {
rawCertificateNoKey: []byte{}, rawCertificateNoKey: []byte{},
} }
lh := NewLightHouse(false, 0, []uint32{}, 1000, 0, &udpConn{}, false) lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
ifce := &Interface{ ifce := &Interface{
hostMap: hostMap, hostMap: hostMap,
inside: &Tun{}, inside: &test.NoopTun{},
outside: &udpConn{}, outside: &udp.Conn{},
certState: cs, certState: cs,
firewall: &Firewall{}, firewall: &Firewall{},
lightHouse: lh, lightHouse: lh,
handshakeManager: NewHandshakeManager(vpncidr, preferredRanges, hostMap, lh, &udpConn{}), handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
l: l,
} }
now := time.Now() now := time.Now()
// Create manager // Create manager
nc := newConnectionManager(ifce, 5, 10) ctx, cancel := context.WithCancel(context.Background())
nc.HandleMonitorTick(now) defer cancel()
nc := newConnectionManager(ctx, l, ifce, 5, 10)
p := []byte("")
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
nc.HandleMonitorTick(now, p, nb, out)
// Add an ip we have established a connection w/ to hostmap // Add an ip we have established a connection w/ to hostmap
hostinfo := nc.hostMap.AddVpnIP(vpnIP) hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
hostinfo.ConnectionState = &ConnectionState{ hostinfo.ConnectionState = &ConnectionState{
certState: cs, certState: cs,
H: &noise.HandshakeState{}, H: &noise.HandshakeState{},
messageCounter: new(uint64),
} }
// We saw traffic out to vpnIP // We saw traffic out to vpnIp
nc.Out(vpnIP) nc.Out(vpnIp)
assert.NotContains(t, nc.pendingDeletion, vpnIP) assert.NotContains(t, nc.pendingDeletion, vpnIp)
assert.Contains(t, nc.hostMap.Hosts, vpnIP) assert.Contains(t, nc.hostMap.Hosts, vpnIp)
// Move ahead 5s. Nothing should happen // Move ahead 5s. Nothing should happen
next_tick := now.Add(5 * time.Second) next_tick := now.Add(5 * time.Second)
nc.HandleMonitorTick(next_tick) nc.HandleMonitorTick(next_tick, p, nb, out)
nc.HandleDeletionTick(next_tick) nc.HandleDeletionTick(next_tick)
// Move ahead 6s. We haven't heard back // Move ahead 6s. We haven't heard back
next_tick = now.Add(6 * time.Second) next_tick = now.Add(6 * time.Second)
nc.HandleMonitorTick(next_tick) nc.HandleMonitorTick(next_tick, p, nb, out)
nc.HandleDeletionTick(next_tick) nc.HandleDeletionTick(next_tick)
// This host should now be up for deletion // This host should now be up for deletion
assert.Contains(t, nc.pendingDeletion, vpnIP) assert.Contains(t, nc.pendingDeletion, vpnIp)
assert.Contains(t, nc.hostMap.Hosts, vpnIP) assert.Contains(t, nc.hostMap.Hosts, vpnIp)
// Move ahead some more // Move ahead some more
next_tick = now.Add(45 * time.Second) next_tick = now.Add(45 * time.Second)
nc.HandleMonitorTick(next_tick) nc.HandleMonitorTick(next_tick, p, nb, out)
nc.HandleDeletionTick(next_tick) nc.HandleDeletionTick(next_tick)
// The host should be evicted // The host should be evicted
assert.NotContains(t, nc.pendingDeletion, vpnIP) assert.NotContains(t, nc.pendingDeletion, vpnIp)
assert.NotContains(t, nc.hostMap.Hosts, vpnIP) assert.NotContains(t, nc.hostMap.Hosts, vpnIp)
} }
func Test_NewConnectionManagerTest2(t *testing.T) { func Test_NewConnectionManagerTest2(t *testing.T) {
l := test.NewLogger()
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24") //_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24") _, localrange, _ := net.ParseCIDR("10.1.1.1/24")
preferredRanges := []*net.IPNet{localrange} preferredRanges := []*net.IPNet{localrange}
// Very incomplete mock objects // Very incomplete mock objects
hostMap := NewHostMap("test", vpncidr, preferredRanges) hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
cs := &CertState{ cs := &CertState{
rawCertificate: []byte{}, rawCertificate: []byte{},
privateKey: []byte{}, privateKey: []byte{},
@@ -91,52 +104,152 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
rawCertificateNoKey: []byte{}, rawCertificateNoKey: []byte{},
} }
lh := NewLightHouse(false, 0, []uint32{}, 1000, 0, &udpConn{}, false) lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
ifce := &Interface{ ifce := &Interface{
hostMap: hostMap, hostMap: hostMap,
inside: &Tun{}, inside: &test.NoopTun{},
outside: &udpConn{}, outside: &udp.Conn{},
certState: cs, certState: cs,
firewall: &Firewall{}, firewall: &Firewall{},
lightHouse: lh, lightHouse: lh,
handshakeManager: NewHandshakeManager(vpncidr, preferredRanges, hostMap, lh, &udpConn{}), handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
l: l,
} }
now := time.Now() now := time.Now()
// Create manager // Create manager
nc := newConnectionManager(ifce, 5, 10) ctx, cancel := context.WithCancel(context.Background())
nc.HandleMonitorTick(now) defer cancel()
nc := newConnectionManager(ctx, l, ifce, 5, 10)
p := []byte("")
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
nc.HandleMonitorTick(now, p, nb, out)
// Add an ip we have established a connection w/ to hostmap // Add an ip we have established a connection w/ to hostmap
hostinfo := nc.hostMap.AddVpnIP(vpnIP) hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
hostinfo.ConnectionState = &ConnectionState{ hostinfo.ConnectionState = &ConnectionState{
certState: cs, certState: cs,
H: &noise.HandshakeState{}, H: &noise.HandshakeState{},
messageCounter: new(uint64),
} }
// We saw traffic out to vpnIP // We saw traffic out to vpnIp
nc.Out(vpnIP) nc.Out(vpnIp)
assert.NotContains(t, nc.pendingDeletion, vpnIP) assert.NotContains(t, nc.pendingDeletion, vpnIp)
assert.Contains(t, nc.hostMap.Hosts, vpnIP) assert.Contains(t, nc.hostMap.Hosts, vpnIp)
// Move ahead 5s. Nothing should happen // Move ahead 5s. Nothing should happen
next_tick := now.Add(5 * time.Second) next_tick := now.Add(5 * time.Second)
nc.HandleMonitorTick(next_tick) nc.HandleMonitorTick(next_tick, p, nb, out)
nc.HandleDeletionTick(next_tick) nc.HandleDeletionTick(next_tick)
// Move ahead 6s. We haven't heard back // Move ahead 6s. We haven't heard back
next_tick = now.Add(6 * time.Second) next_tick = now.Add(6 * time.Second)
nc.HandleMonitorTick(next_tick) nc.HandleMonitorTick(next_tick, p, nb, out)
nc.HandleDeletionTick(next_tick) nc.HandleDeletionTick(next_tick)
// This host should now be up for deletion // This host should now be up for deletion
assert.Contains(t, nc.pendingDeletion, vpnIP) assert.Contains(t, nc.pendingDeletion, vpnIp)
assert.Contains(t, nc.hostMap.Hosts, vpnIP) assert.Contains(t, nc.hostMap.Hosts, vpnIp)
// We heard back this time // We heard back this time
nc.In(vpnIP) nc.In(vpnIp)
// Move ahead some more // Move ahead some more
next_tick = now.Add(45 * time.Second) next_tick = now.Add(45 * time.Second)
nc.HandleMonitorTick(next_tick) nc.HandleMonitorTick(next_tick, p, nb, out)
nc.HandleDeletionTick(next_tick) nc.HandleDeletionTick(next_tick)
// The host should be evicted // The host should be evicted
assert.NotContains(t, nc.pendingDeletion, vpnIP) assert.NotContains(t, nc.pendingDeletion, vpnIp)
assert.Contains(t, nc.hostMap.Hosts, vpnIP) assert.Contains(t, nc.hostMap.Hosts, vpnIp)
} }
// Check if we can disconnect the peer.
// Validate if the peer's certificate is invalid (expired, etc.)
// Disconnect only if disconnectInvalid: true is set.
func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
now := time.Now()
l := test.NewLogger()
ipNet := net.IPNet{
IP: net.IPv4(172, 1, 1, 2),
Mask: net.IPMask{255, 255, 255, 0},
}
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
preferredRanges := []*net.IPNet{localrange}
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
// Generate keys for CA and peer's cert.
pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader)
caCert := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "ca",
NotBefore: now,
NotAfter: now.Add(1 * time.Hour),
IsCA: true,
PublicKey: pubCA,
},
}
caCert.Sign(privCA)
ncp := &cert.NebulaCAPool{
CAs: cert.NewCAPool().CAs,
}
ncp.CAs["ca"] = &caCert
pubCrt, _, _ := ed25519.GenerateKey(rand.Reader)
peerCert := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "host",
Ips: []*net.IPNet{&ipNet},
Subnets: []*net.IPNet{},
NotBefore: now,
NotAfter: now.Add(60 * time.Second),
PublicKey: pubCrt,
IsCA: false,
Issuer: "ca",
},
}
peerCert.Sign(privCA)
cs := &CertState{
rawCertificate: []byte{},
privateKey: []byte{},
certificate: &cert.NebulaCertificate{},
rawCertificateNoKey: []byte{},
}
lh := &LightHouse{l: l, atomicStaticList: make(map[iputil.VpnIp]struct{}), atomicLighthouses: make(map[iputil.VpnIp]struct{})}
ifce := &Interface{
hostMap: hostMap,
inside: &test.NoopTun{},
outside: &udp.Conn{},
certState: cs,
firewall: &Firewall{},
lightHouse: lh,
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
l: l,
disconnectInvalid: true,
caPool: ncp,
}
// Create manager
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nc := newConnectionManager(ctx, l, ifce, 5, 10)
ifce.connectionManager = nc
hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
hostinfo.ConnectionState = &ConnectionState{
certState: cs,
peerCert: &peerCert,
H: &noise.HandshakeState{},
}
// Move ahead 45s.
// Check if to disconnect with invalid certificate.
// Should be alive.
nextTick := now.Add(45 * time.Second)
destroyed := nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo)
assert.False(t, destroyed)
// Move ahead 61s.
// Check if to disconnect with invalid certificate.
// Should be disconnected.
nextTick = now.Add(61 * time.Second)
destroyed = nc.handleInvalidCertificate(nextTick, vpnIp, hostinfo)
assert.True(t, destroyed)
}


@@ -4,28 +4,30 @@ import (
"crypto/rand" "crypto/rand"
"encoding/json" "encoding/json"
"sync" "sync"
"sync/atomic"
"github.com/flynn/noise" "github.com/flynn/noise"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
) )
const ReplayWindow = 1024 const ReplayWindow = 1024
type ConnectionState struct { type ConnectionState struct {
eKey *NebulaCipherState eKey *NebulaCipherState
dKey *NebulaCipherState dKey *NebulaCipherState
H *noise.HandshakeState H *noise.HandshakeState
certState *CertState certState *CertState
peerCert *cert.NebulaCertificate peerCert *cert.NebulaCertificate
initiator bool initiator bool
messageCounter *uint64 atomicMessageCounter uint64
window *Bits window *Bits
queueLock sync.Mutex queueLock sync.Mutex
writeLock sync.Mutex writeLock sync.Mutex
ready bool ready bool
} }
func (f *Interface) newConnectionState(initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState { func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
cs := noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256) cs := noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256)
if f.cipher == "chachapoly" { if f.cipher == "chachapoly" {
cs = noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashSHA256) cs = noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashSHA256)
@@ -36,7 +38,7 @@ func (f *Interface) newConnectionState(initiator bool, pattern noise.HandshakePa
b := NewBits(ReplayWindow) b := NewBits(ReplayWindow)
// Clear out bit 0, we never transmit it and we don't want it showing as packet loss // Clear out bit 0, we never transmit it and we don't want it showing as packet loss
b.Update(0) b.Update(l, 0)
hs, err := noise.NewHandshakeState(noise.Config{ hs, err := noise.NewHandshakeState(noise.Config{
CipherSuite: cs, CipherSuite: cs,
@@ -54,12 +56,11 @@ func (f *Interface) newConnectionState(initiator bool, pattern noise.HandshakePa
// The queue and ready params prevent a counter race that would happen when // The queue and ready params prevent a counter race that would happen when
// sending stored packets and simultaneously accepting new traffic. // sending stored packets and simultaneously accepting new traffic.
ci := &ConnectionState{ ci := &ConnectionState{
H: hs, H: hs,
initiator: initiator, initiator: initiator,
window: b, window: b,
ready: false, ready: false,
certState: curCertState, certState: curCertState,
messageCounter: new(uint64),
} }
return ci return ci
@@ -69,7 +70,7 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
return json.Marshal(m{ return json.Marshal(m{
"certificate": cs.peerCert, "certificate": cs.peerCert,
"initiator": cs.initiator, "initiator": cs.initiator,
"message_counter": cs.messageCounter, "message_counter": atomic.LoadUint64(&cs.atomicMessageCounter),
"ready": cs.ready, "ready": cs.ready,
}) })
} }
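
With messageCounter now stored by value as atomicMessageCounter, every reader and writer must go through sync/atomic rather than dereferencing a shared pointer; a minimal sketch of that access pattern (the state struct is illustrative):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type state struct {
	// accessed only via the sync/atomic helpers, mirroring atomicMessageCounter above
	atomicMessageCounter uint64
}

func main() {
	s := &state{}

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				atomic.AddUint64(&s.atomicMessageCounter, 1)
			}
		}()
	}
	wg.Wait()

	// Reads also go through atomic, as MarshalJSON and copyHostInfo do in this change.
	fmt.Println("messages:", atomic.LoadUint64(&s.atomicMessageCounter))
}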

control.go (new file, 247 lines)

@@ -0,0 +1,247 @@
package nebula
import (
"context"
"net"
"os"
"os/signal"
"sync/atomic"
"syscall"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
)
// Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching
// core. This means copying IP objects, slices, de-referencing pointers and taking the actual value, etc
type Control struct {
f *Interface
l *logrus.Logger
cancel context.CancelFunc
sshStart func()
statsStart func()
dnsStart func()
}
type ControlHostInfo struct {
VpnIp net.IP `json:"vpnIp"`
LocalIndex uint32 `json:"localIndex"`
RemoteIndex uint32 `json:"remoteIndex"`
RemoteAddrs []*udp.Addr `json:"remoteAddrs"`
CachedPackets int `json:"cachedPackets"`
Cert *cert.NebulaCertificate `json:"cert"`
MessageCounter uint64 `json:"messageCounter"`
CurrentRemote *udp.Addr `json:"currentRemote"`
CurrentRelaysToMe []iputil.VpnIp `json:"currentRelaysToMe"`
CurrentRelaysThroughMe []iputil.VpnIp `json:"currentRelaysThroughMe"`
}
// Start actually runs nebula, this is a nonblocking call. To block use Control.ShutdownBlock()
func (c *Control) Start() {
// Activate the interface
c.f.activate()
// Call all the delayed funcs that waited patiently for the interface to be created.
if c.sshStart != nil {
go c.sshStart()
}
if c.statsStart != nil {
go c.statsStart()
}
if c.dnsStart != nil {
go c.dnsStart()
}
// Start reading packets.
c.f.run()
}
// Stop signals nebula to shutdown, returns after the shutdown is complete
func (c *Control) Stop() {
// Stop the handshakeManager (and other services), to prevent new tunnels from
// being created while we're shutting them all down.
c.cancel()
c.CloseAllTunnels(false)
if err := c.f.Close(); err != nil {
c.l.WithError(err).Error("Close interface failed")
}
c.l.Info("Goodbye")
}
// ShutdownBlock will listen for and block on term and interrupt signals, calling Control.Stop() once signalled
func (c *Control) ShutdownBlock() {
sigChan := make(chan os.Signal)
signal.Notify(sigChan, syscall.SIGTERM)
signal.Notify(sigChan, syscall.SIGINT)
rawSig := <-sigChan
sig := rawSig.String()
c.l.WithField("signal", sig).Info("Caught signal, shutting down")
c.Stop()
}
// RebindUDPServer asks the UDP listener to rebind its listener. Mainly used on mobile clients when interfaces change
func (c *Control) RebindUDPServer() {
_ = c.f.outside.Rebind()
// Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0
c.f.lightHouse.SendUpdate(c.f)
// Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes
c.f.rebindCount++
}
// ListHostmap returns details about the actual or pending (handshaking) hostmap
func (c *Control) ListHostmap(pendingMap bool) []ControlHostInfo {
if pendingMap {
return listHostMap(c.f.handshakeManager.pendingHostMap)
} else {
return listHostMap(c.f.hostMap)
}
}
// GetHostInfoByVpnIp returns a single tunnels hostInfo, or nil if not found
func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo {
var hm *HostMap
if pending {
hm = c.f.handshakeManager.pendingHostMap
} else {
hm = c.f.hostMap
}
h, err := hm.QueryVpnIp(vpnIp)
if err != nil {
return nil
}
ch := copyHostInfo(h, c.f.hostMap.preferredRanges)
return &ch
}
// SetRemoteForTunnel forces a tunnel to use a specific remote
func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *ControlHostInfo {
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
if err != nil {
return nil
}
hostInfo.SetRemote(addr.Copy())
ch := copyHostInfo(hostInfo, c.f.hostMap.preferredRanges)
return &ch
}
// CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool {
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
if err != nil {
return false
}
if !localOnly {
c.f.send(
header.CloseTunnel,
0,
hostInfo.ConnectionState,
hostInfo,
[]byte{},
make([]byte, 12, 12),
make([]byte, mtu),
)
}
c.f.closeTunnel(hostInfo)
return true
}
// CloseAllTunnels is just like CloseTunnel except it goes through and shuts them all down, optionally you can avoid shutting down lighthouse tunnels
// the int returned is a count of tunnels closed
func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
//TODO: this is probably better as a function in ConnectionManager or HostMap directly
lighthouses := c.f.lightHouse.GetLighthouses()
shutdown := func(h *HostInfo) {
if excludeLighthouses {
if _, ok := lighthouses[h.vpnIp]; ok {
return
}
}
c.f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
c.f.closeTunnel(h)
c.l.WithField("vpnIp", h.vpnIp).WithField("udpAddr", h.remote).
Debug("Sending close tunnel message")
closed++
}
// Learn which hosts are being used as relays, so we can shut them down last.
relayingHosts := map[iputil.VpnIp]*HostInfo{}
// Grab the hostMap lock to access the Relays map
c.f.hostMap.Lock()
for _, relayingHost := range c.f.hostMap.Relays {
relayingHosts[relayingHost.vpnIp] = relayingHost
}
c.f.hostMap.Unlock()
hostInfos := []*HostInfo{}
// Grab the hostMap lock to access the Hosts map
c.f.hostMap.Lock()
for _, relayHost := range c.f.hostMap.Hosts {
if _, ok := relayingHosts[relayHost.vpnIp]; !ok {
hostInfos = append(hostInfos, relayHost)
}
}
c.f.hostMap.Unlock()
for _, h := range hostInfos {
shutdown(h)
}
for _, h := range relayingHosts {
shutdown(h)
}
return
}
func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
chi := ControlHostInfo{
VpnIp: h.vpnIp.ToIP(),
LocalIndex: h.localIndexId,
RemoteIndex: h.remoteIndexId,
RemoteAddrs: h.remotes.CopyAddrs(preferredRanges),
CachedPackets: len(h.packetStore),
CurrentRelaysToMe: h.relayState.CopyRelayIps(),
CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(),
}
if h.ConnectionState != nil {
chi.MessageCounter = atomic.LoadUint64(&h.ConnectionState.atomicMessageCounter)
}
if c := h.GetCert(); c != nil {
chi.Cert = c.Copy()
}
if h.remote != nil {
chi.CurrentRemote = h.remote.Copy()
}
return chi
}
func listHostMap(hm *HostMap) []ControlHostInfo {
hm.RLock()
hosts := make([]ControlHostInfo, len(hm.Hosts))
i := 0
for _, v := range hm.Hosts {
hosts[i] = copyHostInfo(v, hm.preferredRanges)
i++
}
hm.RUnlock()
return hosts
}
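
Taken together with the main.go changes earlier, an embedding program gets a *Control back from nebula.Main and can drive it directly; a hedged sketch of that usage (the config path, VPN IP, and build string are illustrative):

package main

import (
	"net"
	"os"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula"
	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/iputil"
)

func main() {
	l := logrus.New()
	l.Out = os.Stdout

	c := config.NewC(l)
	if err := c.Load("/etc/nebula"); err != nil {
		l.WithError(err).Fatal("failed to load config")
	}

	ctrl, err := nebula.Main(c, false, "embedded-example", l, nil)
	if err != nil {
		l.WithError(err).Fatal("failed to start")
	}
	ctrl.Start()

	// Inspect established tunnels; pass true instead to list pending (handshaking) ones.
	for _, h := range ctrl.ListHostmap(false) {
		l.WithField("vpnIp", h.VpnIp).WithField("remote", h.CurrentRemote).Info("tunnel")
	}

	// Tear down a single tunnel, notifying the remote side as well.
	ctrl.CloseTunnel(iputil.Ip2VpnIp(net.ParseIP("192.168.100.9")), false)

	// Block until SIGTERM/SIGINT, then shut everything down.
	ctrl.ShutdownBlock()
}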

control_test.go (new file, 125 lines)

@@ -0,0 +1,125 @@
package nebula
import (
"net"
"reflect"
"testing"
"time"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert"
)
func TestControl_GetHostInfoByVpnIp(t *testing.T) {
l := test.NewLogger()
// Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
// To properly ensure we are not exposing core memory to the caller
hm := NewHostMap(l, "test", &net.IPNet{}, make([]*net.IPNet, 0))
remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444)
remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444)
ipNet := net.IPNet{
IP: net.IPv4(1, 2, 3, 4),
Mask: net.IPMask{255, 255, 255, 0},
}
ipNet2 := net.IPNet{
IP: net.ParseIP("1:2:3:4:5:6:7:8"),
Mask: net.IPMask{255, 255, 255, 0},
}
crt := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "test",
Ips: []*net.IPNet{&ipNet},
Subnets: []*net.IPNet{},
Groups: []string{"default-group"},
NotBefore: time.Unix(1, 0),
NotAfter: time.Unix(2, 0),
PublicKey: []byte{5, 6, 7, 8},
IsCA: false,
Issuer: "the-issuer",
InvertedGroups: map[string]struct{}{"default-group": {}},
},
Signature: []byte{1, 2, 1, 2, 1, 3},
}
remotes := NewRemoteList()
remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port)))
remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))
hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{
remote: remote1,
remotes: remotes,
ConnectionState: &ConnectionState{
peerCert: crt,
},
remoteIndexId: 200,
localIndexId: 201,
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{},
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
})
hm.Add(iputil.Ip2VpnIp(ipNet2.IP), &HostInfo{
remote: remote1,
remotes: remotes,
ConnectionState: &ConnectionState{
peerCert: nil,
},
remoteIndexId: 200,
localIndexId: 201,
vpnIp: iputil.Ip2VpnIp(ipNet2.IP),
relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{},
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
})
c := Control{
f: &Interface{
hostMap: hm,
},
l: logrus.New(),
}
thi := c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet.IP), false)
expectedInfo := ControlHostInfo{
VpnIp: net.IPv4(1, 2, 3, 4).To4(),
LocalIndex: 201,
RemoteIndex: 200,
RemoteAddrs: []*udp.Addr{remote2, remote1},
CachedPackets: 0,
Cert: crt.Copy(),
MessageCounter: 0,
CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444),
CurrentRelaysToMe: []iputil.VpnIp{},
CurrentRelaysThroughMe: []iputil.VpnIp{},
}
// Make sure we don't have any unexpected fields
assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "CachedPackets", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
test.AssertDeepCopyEqual(t, &expectedInfo, thi)
// Make sure we don't panic if the host info doesn't have a cert yet
assert.NotPanics(t, func() {
thi = c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet2.IP), false)
})
}
func assertFields(t *testing.T, expected []string, actualStruct interface{}) {
val := reflect.ValueOf(actualStruct).Elem()
fields := make([]string, val.NumField())
for i := 0; i < val.NumField(); i++ {
fields[i] = val.Type().Field(i).Name
}
assert.Equal(t, expected, fields)
}

control_tester.go (new file, 155 lines)

@@ -0,0 +1,155 @@
//go:build e2e_testing
// +build e2e_testing
package nebula
import (
"net"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/overlay"
"github.com/slackhq/nebula/udp"
)
// WaitForType will pipe all messages from this control device into the pipeTo control device
// returning after a message matching the criteria has been piped
func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
h := &header.H{}
for {
p := c.f.outside.Get(true)
if err := h.Parse(p.Data); err != nil {
panic(err)
}
pipeTo.InjectUDPPacket(p)
if h.Type == msgType && h.Subtype == subType {
return
}
}
}
// WaitForTypeByIndex is similar to WaitForType except it adds an index check
// Useful if you have many nodes communicating and want to wait for a specific node's packet
func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
h := &header.H{}
for {
p := c.f.outside.Get(true)
if err := h.Parse(p.Data); err != nil {
panic(err)
}
pipeTo.InjectUDPPacket(p)
if h.RemoteIndex == toIndex && h.Type == msgType && h.Subtype == subType {
return
}
}
}
// InjectLightHouseAddr will push toAddr into the local lighthouse cache for the vpnIp
// This is necessary if you did not configure static hosts or are not running a lighthouse
func (c *Control) InjectLightHouseAddr(vpnIp net.IP, toAddr *net.UDPAddr) {
c.f.lightHouse.Lock()
remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp))
remoteList.Lock()
defer remoteList.Unlock()
c.f.lightHouse.Unlock()
iVpnIp := iputil.Ip2VpnIp(vpnIp)
if v4 := toAddr.IP.To4(); v4 != nil {
remoteList.unlockedPrependV4(iVpnIp, NewIp4AndPort(v4, uint32(toAddr.Port)))
} else {
remoteList.unlockedPrependV6(iVpnIp, NewIp6AndPort(toAddr.IP, uint32(toAddr.Port)))
}
}
// InjectRelays will push relayVpnIps into the local lighthouse cache for the vpnIp
// This is necessary to inform an initiator of possible relays for communicating with a responder
func (c *Control) InjectRelays(vpnIp net.IP, relayVpnIps []net.IP) {
c.f.lightHouse.Lock()
remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp))
remoteList.Lock()
defer remoteList.Unlock()
c.f.lightHouse.Unlock()
iVpnIp := iputil.Ip2VpnIp(vpnIp)
uVpnIp := []uint32{}
for _, rVPnIp := range relayVpnIps {
uVpnIp = append(uVpnIp, uint32(iputil.Ip2VpnIp(rVPnIp)))
}
remoteList.unlockedSetRelay(iVpnIp, iVpnIp, uVpnIp)
}
// GetFromTun will pull a packet off the tun side of nebula
func (c *Control) GetFromTun(block bool) []byte {
return c.f.inside.(*overlay.TestTun).Get(block)
}
// GetFromUDP will pull a udp packet off the udp side of nebula
func (c *Control) GetFromUDP(block bool) *udp.Packet {
return c.f.outside.Get(block)
}
func (c *Control) GetUDPTxChan() <-chan *udp.Packet {
return c.f.outside.TxPackets
}
func (c *Control) GetTunTxChan() <-chan []byte {
return c.f.inside.(*overlay.TestTun).TxPackets
}
// InjectUDPPacket will inject a packet into the udp side of nebula
func (c *Control) InjectUDPPacket(p *udp.Packet) {
c.f.outside.Send(p)
}
// InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol
func (c *Control) InjectTunUDPPacket(toIp net.IP, toPort uint16, fromPort uint16, data []byte) {
ip := layers.IPv4{
Version: 4,
TTL: 64,
Protocol: layers.IPProtocolUDP,
SrcIP: c.f.inside.Cidr().IP,
DstIP: toIp,
}
udp := layers.UDP{
SrcPort: layers.UDPPort(fromPort),
DstPort: layers.UDPPort(toPort),
}
err := udp.SetNetworkLayerForChecksum(&ip)
if err != nil {
panic(err)
}
buffer := gopacket.NewSerializeBuffer()
opt := gopacket.SerializeOptions{
ComputeChecksums: true,
FixLengths: true,
}
err = gopacket.SerializeLayers(buffer, opt, &ip, &udp, gopacket.Payload(data))
if err != nil {
panic(err)
}
c.f.inside.(*overlay.TestTun).Send(buffer.Bytes())
}
func (c *Control) GetVpnIp() iputil.VpnIp {
return c.f.myVpnIp
}
func (c *Control) GetUDPAddr() string {
return c.f.outside.Addr.String()
}
func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
hostinfo, ok := c.f.handshakeManager.pendingHostMap.Hosts[iputil.Ip2VpnIp(vpnIp)]
if !ok {
return false
}
c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
return true
}
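
The helpers above are the building blocks for the e2e tests added later in this diff. As a quick orientation, here is a condensed, hypothetical sketch of how they compose into a single two-node exchange; it simply mirrors BenchmarkHotPath and TestGoodHandshake below (newSimpleServer, newTestCaCert, router.NewR, and assertUdpPacket are defined in the e2e files further down), so treat it as illustration rather than additional test code.

```go
//go:build e2e_testing
// +build e2e_testing

package e2e

import (
	"net"
	"testing"
	"time"

	"github.com/slackhq/nebula/e2e/router"
)

// Hypothetical sketch: stand up two in-memory nodes and push one UDP payload
// from "me" to "them" using only the Control helpers defined above.
func TestHelperComposition(t *testing.T) {
	ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
	myControl, myVpnIp, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
	theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

	// Tell "me" where "them" can be reached, as a lighthouse normally would.
	myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)

	myControl.Start()
	theirControl.Start()

	// Put a UDP payload on my tun device and route packets between the nodes
	// until it appears on their tun device, standing up the tunnel on the way.
	r := router.NewR(t, myControl, theirControl)
	myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
	p := r.RouteForAllUntilTxTun(theirControl)
	assertUdpPacket(t, []byte("Hi from me"), p, myVpnIp, theirVpnIp, 80, 80)

	myControl.Stop()
	theirControl.Stop()
}
```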

13
dist/arch/nebula.service vendored Normal file

@@ -0,0 +1,13 @@
[Unit]
Description=nebula
Wants=basic.target network-online.target
After=basic.target network.target network-online.target
[Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
Restart=always
[Install]
WantedBy=multi-user.target

15
dist/fedora/nebula.service vendored Normal file

@@ -0,0 +1,15 @@
[Unit]
Description=Nebula overlay networking tool
After=basic.target network.target network-online.target
Before=sshd.service
Wants=basic.target network-online.target
[Service]
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
Restart=always
SyslogIdentifier=nebula
[Install]
WantedBy=multi-user.target

84
dist/windows/wintun/LICENSE.txt vendored Normal file

@@ -0,0 +1,84 @@
Prebuilt Binaries License
-------------------------
1. DEFINITIONS. "Software" means the precise contents of the "wintun.dll"
files that are included in the .zip file that contains this document as
downloaded from wintun.net/builds.
2. LICENSE GRANT. WireGuard LLC grants to you a non-exclusive and
non-transferable right to use Software for lawful purposes under certain
obligations and limited rights as set forth in this agreement.
3. RESTRICTIONS. Software is owned and copyrighted by WireGuard LLC. It is
licensed, not sold. Title to Software and all associated intellectual
property rights are retained by WireGuard. You must not:
a. reverse engineer, decompile, disassemble, extract from, or otherwise
modify the Software;
b. modify or create derivative work based upon Software in whole or in
parts, except insofar as only the API interfaces of the "wintun.h" file
distributed alongside the Software (the "Permitted API") are used;
c. remove any proprietary notices, labels, or copyrights from the Software;
d. resell, redistribute, lease, rent, transfer, sublicense, or otherwise
transfer rights of the Software without the prior written consent of
WireGuard LLC, except insofar as the Software is distributed alongside
other software that uses the Software only via the Permitted API;
e. use the name of WireGuard LLC, the WireGuard project, the Wintun
project, or the names of its contributors to endorse or promote products
derived from the Software without specific prior written consent.
4. LIMITED WARRANTY. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF
ANY KIND. WIREGUARD LLC HEREBY EXCLUDES AND DISCLAIMS ALL IMPLIED OR
STATUTORY WARRANTIES, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE, QUALITY, NON-INFRINGEMENT, TITLE, RESULTS,
EFFORTS, OR QUIET ENJOYMENT. THERE IS NO WARRANTY THAT THE PRODUCT WILL BE
ERROR-FREE OR WILL FUNCTION WITHOUT INTERRUPTION. YOU ASSUME THE ENTIRE
RISK FOR THE RESULTS OBTAINED USING THE PRODUCT. TO THE EXTENT THAT
WIREGUARD LLC MAY NOT DISCLAIM ANY WARRANTY AS A MATTER OF APPLICABLE LAW,
THE SCOPE AND DURATION OF SUCH WARRANTY WILL BE THE MINIMUM PERMITTED UNDER
SUCH LAW. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND
WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE OR NON-INFRINGEMENT ARE DISCLAIMED, EXCEPT TO THE
EXTENT THAT THESE DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
5. LIMITATION OF LIABILITY. To the extent not prohibited by law, in no event
WireGuard LLC or any third-party-developer will be liable for any lost
revenue, profit or data or for special, indirect, consequential, incidental
or punitive damages, however caused regardless of the theory of liability,
arising out of or related to the use of or inability to use Software, even
if WireGuard LLC has been advised of the possibility of such damages.
Solely you are responsible for determining the appropriateness of using
Software and accept full responsibility for all risks associated with its
exercise of rights under this agreement, including but not limited to the
risks and costs of program errors, compliance with applicable laws, damage
to or loss of data, programs or equipment, and unavailability or
interruption of operations. The foregoing limitations will apply even if
the above stated warranty fails of its essential purpose. You acknowledge,
that it is in the nature of software that software is complex and not
completely free of errors. In no event shall WireGuard LLC or any
third-party-developer be liable to you under any theory for any damages
suffered by you or any user of Software or for any special, incidental,
indirect, consequential or similar damages (including without limitation
damages for loss of business profits, business interruption, loss of
business information or any other pecuniary loss) arising out of the use or
inability to use Software, even if WireGuard LLC has been advised of the
possibility of such damages and regardless of the legal or equitable theory
(contract, tort, or otherwise) upon which the claim is based.
6. TERMINATION. This agreement is effective until terminated. You may
terminate this agreement at any time. This agreement will terminate
immediately without notice from WireGuard LLC if you fail to comply with
the terms and conditions of this agreement. Upon termination, you must
delete Software and all copies of Software and cease all forms of
distribution of Software.
7. SEVERABILITY. If any provision of this agreement is held to be
unenforceable, this agreement will remain in effect with the provision
omitted, unless omission would frustrate the intent of the parties, in
which case this agreement will immediately terminate.
8. RESERVATION OF RIGHTS. All rights not expressly granted in this agreement
are reserved by WireGuard LLC. For example, WireGuard LLC reserves the
right at any time to cease development of Software, to alter distribution
details, features, specifications, capabilities, functions, licensing
terms, release dates, APIs, ABIs, general availability, or other
characteristics of the Software.

339
dist/windows/wintun/README.md vendored Normal file

@@ -0,0 +1,339 @@
# [Wintun Network Adapter](https://www.wintun.net/)
### TUN Device Driver for Windows
This is a layer 3 TUN driver for Windows 7, 8, 8.1, and 10. Originally created for [WireGuard](https://www.wireguard.com/), it is intended to be useful to a wide variety of projects that require layer 3 tunneling devices with implementations primarily in userspace.
## Installation
Wintun is deployed as a platform-specific `wintun.dll` file. Install the `wintun.dll` file side-by-side with your application. Download the dll from [wintun.net](https://www.wintun.net/), alongside the header file for your application described below.
## Usage
Include the [`wintun.h` file](https://git.zx2c4.com/wintun/tree/api/wintun.h) in your project simply by copying it there and dynamically load the `wintun.dll` using [`LoadLibraryEx()`](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-loadlibraryexa) and [`GetProcAddress()`](https://docs.microsoft.com/en-us/windows/win32/api/libloaderapi/nf-libloaderapi-getprocaddress) to resolve each function, using the typedefs provided in the header file. The [`InitializeWintun` function in the example.c code](https://git.zx2c4.com/wintun/tree/example/example.c) provides this in a function that you can simply copy and paste.
With the library setup, Wintun can then be used by first creating an adapter, configuring it, and then setting its status to "up". Adapters have names (e.g. "OfficeNet") and types (e.g. "Wintun").
```C
WINTUN_ADAPTER_HANDLE Adapter1 = WintunCreateAdapter(L"OfficeNet", L"Wintun", &SomeFixedGUID1);
WINTUN_ADAPTER_HANDLE Adapter2 = WintunCreateAdapter(L"HomeNet", L"Wintun", &SomeFixedGUID2);
WINTUN_ADAPTER_HANDLE Adapter3 = WintunCreateAdapter(L"Data Center", L"Wintun", &SomeFixedGUID3);
```
After creating an adapter, we can use it by starting a session:
```C
WINTUN_SESSION_HANDLE Session = WintunStartSession(Adapter2, 0x400000);
```
Then, the `WintunAllocateSendPacket` and `WintunSendPacket` functions can be used for sending packets ([used by `SendPackets` in the example.c code](https://git.zx2c4.com/wintun/tree/example/example.c)):
```C
BYTE *OutgoingPacket = WintunAllocateSendPacket(Session, PacketDataSize);
if (OutgoingPacket)
{
memcpy(OutgoingPacket, PacketData, PacketDataSize);
WintunSendPacket(Session, OutgoingPacket);
}
else if (GetLastError() != ERROR_BUFFER_OVERFLOW) // Silently drop packets if the ring is full
Log(L"Packet write failed");
```
And the `WintunReceivePacket` and `WintunReleaseReceivePacket` functions can be used for receiving packets ([used by `ReceivePackets` in the example.c code](https://git.zx2c4.com/wintun/tree/example/example.c)):
```C
for (;;)
{
DWORD IncomingPacketSize;
BYTE *IncomingPacket = WintunReceivePacket(Session, &IncomingPacketSize);
if (IncomingPacket)
{
DoSomethingWithPacket(IncomingPacket, IncomingPacketSize);
WintunReleaseReceivePacket(Session, IncomingPacket);
}
else if (GetLastError() == ERROR_NO_MORE_ITEMS)
WaitForSingleObject(WintunGetReadWaitEvent(Session), INFINITE);
else
{
Log(L"Packet read failed");
break;
}
}
```
Some high performance use cases may want to spin on `WintunReceivePackets` for a number of cycles before falling back to waiting on the read-wait event.
You are **highly encouraged** to read the [**example.c short example**](https://git.zx2c4.com/wintun/tree/example/example.c) to see how to put together a simple userspace network tunnel.
The various functions and definitions are [documented in the reference below](#Reference).
## Reference
### Macro Definitions
#### WINTUN\_MAX\_POOL
`#define WINTUN_MAX_POOL 256`
Maximum pool name length including zero terminator
#### WINTUN\_MIN\_RING\_CAPACITY
`#define WINTUN_MIN_RING_CAPACITY 0x20000 /* 128kiB */`
Minimum ring capacity.
#### WINTUN\_MAX\_RING\_CAPACITY
`#define WINTUN_MAX_RING_CAPACITY 0x4000000 /* 64MiB */`
Maximum ring capacity.
#### WINTUN\_MAX\_IP\_PACKET\_SIZE
`#define WINTUN_MAX_IP_PACKET_SIZE 0xFFFF`
Maximum IP packet size
### Typedefs
#### WINTUN\_ADAPTER\_HANDLE
`typedef void* WINTUN_ADAPTER_HANDLE`
A handle representing Wintun adapter
#### WINTUN\_ENUM\_CALLBACK
`typedef BOOL(* WINTUN_ENUM_CALLBACK) (WINTUN_ADAPTER_HANDLE Adapter, LPARAM Param)`
Called by WintunEnumAdapters for each adapter in the pool.
**Parameters**
- *Adapter*: Adapter handle, which will be freed when this function returns.
- *Param*: An application-defined value passed to the WintunEnumAdapters.
**Returns**
Non-zero to continue iterating adapters; zero to stop.
#### WINTUN\_LOGGER\_CALLBACK
`typedef void(* WINTUN_LOGGER_CALLBACK) (WINTUN_LOGGER_LEVEL Level, DWORD64 Timestamp, const WCHAR *Message)`
Called by internal logger to report diagnostic messages
**Parameters**
- *Level*: Message level.
- *Timestamp*: Message timestamp in 100ns intervals since 1601-01-01 UTC.
- *Message*: Message text.
#### WINTUN\_SESSION\_HANDLE
`typedef void* WINTUN_SESSION_HANDLE`
A handle representing Wintun session
### Enumeration Types
#### WINTUN\_LOGGER\_LEVEL
`enum WINTUN_LOGGER_LEVEL`
Determines the level of logging, passed to WINTUN\_LOGGER\_CALLBACK.
- *WINTUN\_LOG\_INFO*: Informational
- *WINTUN\_LOG\_WARN*: Warning
- *WINTUN\_LOG\_ERR*: Error
### Functions
#### WintunCreateAdapter()
`WINTUN_ADAPTER_HANDLE WintunCreateAdapter (const WCHAR * Name, const WCHAR * TunnelType, const GUID * RequestedGUID)`
Creates a new Wintun adapter.
**Parameters**
- *Name*: The requested name of the adapter. Zero-terminated string of up to MAX\_ADAPTER\_NAME-1 characters.
- *TunnelType*: Name of the adapter tunnel type. Zero-terminated string of up to MAX\_ADAPTER\_NAME-1 characters.
- *RequestedGUID*: The GUID of the created network adapter, which then influences NLA generation deterministically. If it is set to NULL, the GUID is chosen by the system at random, and hence a new NLA entry is created for each new adapter. It is called "requested" GUID because the API it uses is completely undocumented, and so there could be minor interesting complications with its usage.
**Returns**
If the function succeeds, the return value is the adapter handle. Must be released with WintunCloseAdapter. If the function fails, the return value is NULL. To get extended error information, call GetLastError.
#### WintunOpenAdapter()
`WINTUN_ADAPTER_HANDLE WintunOpenAdapter (const WCHAR * Name)`
Opens an existing Wintun adapter.
**Parameters**
- *Name*: The requested name of the adapter. Zero-terminated string of up to MAX\_ADAPTER\_NAME-1 characters.
**Returns**
If the function succeeds, the return value is adapter handle. Must be released with WintunCloseAdapter. If the function fails, the return value is NULL. To get extended error information, call GetLastError.
#### WintunCloseAdapter()
`void WintunCloseAdapter (WINTUN_ADAPTER_HANDLE Adapter)`
Releases Wintun adapter resources and, if adapter was created with WintunCreateAdapter, removes adapter.
**Parameters**
- *Adapter*: Adapter handle obtained with WintunCreateAdapter or WintunOpenAdapter.
#### WintunDeleteDriver()
`BOOL WintunDeleteDriver ()`
Deletes the Wintun driver if there are no more adapters in use.
**Returns**
If the function succeeds, the return value is nonzero. If the function fails, the return value is zero. To get extended error information, call GetLastError.
#### WintunGetAdapterLuid()
`void WintunGetAdapterLuid (WINTUN_ADAPTER_HANDLE Adapter, NET_LUID * Luid)`
Returns the LUID of the adapter.
**Parameters**
- *Adapter*: Adapter handle obtained with WintunOpenAdapter or WintunCreateAdapter
- *Luid*: Pointer to LUID to receive adapter LUID.
#### WintunGetRunningDriverVersion()
`DWORD WintunGetRunningDriverVersion (void )`
Determines the version of the Wintun driver currently loaded.
**Returns**
If the function succeeds, the return value is the version number. If the function fails, the return value is zero. To get extended error information, call GetLastError. Possible errors include the following: ERROR\_FILE\_NOT\_FOUND Wintun not loaded
#### WintunSetLogger()
`void WintunSetLogger (WINTUN_LOGGER_CALLBACK NewLogger)`
Sets logger callback function.
**Parameters**
- *NewLogger*: Pointer to callback function to use as a new global logger. NewLogger may be called from various threads concurrently. Should the logging require serialization, you must handle serialization in NewLogger. Set to NULL to disable.
#### WintunStartSession()
`WINTUN_SESSION_HANDLE WintunStartSession (WINTUN_ADAPTER_HANDLE Adapter, DWORD Capacity)`
Starts Wintun session.
**Parameters**
- *Adapter*: Adapter handle obtained with WintunOpenAdapter or WintunCreateAdapter
- *Capacity*: Rings capacity. Must be between WINTUN\_MIN\_RING\_CAPACITY and WINTUN\_MAX\_RING\_CAPACITY (incl.) Must be a power of two.
**Returns**
Wintun session handle. Must be released with WintunEndSession. If the function fails, the return value is NULL. To get extended error information, call GetLastError.
#### WintunEndSession()
`void WintunEndSession (WINTUN_SESSION_HANDLE Session)`
Ends Wintun session.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
#### WintunGetReadWaitEvent()
`HANDLE WintunGetReadWaitEvent (WINTUN_SESSION_HANDLE Session)`
Gets Wintun session's read-wait event handle.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
**Returns**
Pointer to receive event handle to wait for available data when reading. Should WintunReceivePackets return ERROR\_NO\_MORE\_ITEMS (after spinning on it for a while under heavy load), wait for this event to become signaled before retrying WintunReceivePackets. Do not call CloseHandle on this event - it is managed by the session.
#### WintunReceivePacket()
`BYTE* WintunReceivePacket (WINTUN_SESSION_HANDLE Session, DWORD * PacketSize)`
Retrieves one packet. After the packet content is consumed, call WintunReleaseReceivePacket with Packet returned from this function to release internal buffer. This function is thread-safe.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
- *PacketSize*: Pointer to receive packet size.
**Returns**
Pointer to layer 3 IPv4 or IPv6 packet. Client may modify its content at will. If the function fails, the return value is NULL. To get extended error information, call GetLastError. Possible errors include the following: ERROR\_HANDLE\_EOF Wintun adapter is terminating; ERROR\_NO\_MORE\_ITEMS Wintun buffer is exhausted; ERROR\_INVALID\_DATA Wintun buffer is corrupt
#### WintunReleaseReceivePacket()
`void WintunReleaseReceivePacket (WINTUN_SESSION_HANDLE Session, const BYTE * Packet)`
Releases internal buffer after the received packet has been processed by the client. This function is thread-safe.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
- *Packet*: Packet obtained with WintunReceivePacket
#### WintunAllocateSendPacket()
`BYTE* WintunAllocateSendPacket (WINTUN_SESSION_HANDLE Session, DWORD PacketSize)`
Allocates memory for a packet to send. After the memory is filled with packet data, call WintunSendPacket to send and release internal buffer. WintunAllocateSendPacket is thread-safe, and the order of WintunAllocateSendPacket calls defines the packet sending order.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
- *PacketSize*: Exact packet size. Must be less or equal to WINTUN\_MAX\_IP\_PACKET\_SIZE.
**Returns**
Returns pointer to memory where to prepare layer 3 IPv4 or IPv6 packet for sending. If the function fails, the return value is NULL. To get extended error information, call GetLastError. Possible errors include the following: ERROR\_HANDLE\_EOF Wintun adapter is terminating; ERROR\_BUFFER\_OVERFLOW Wintun buffer is full;
#### WintunSendPacket()
`void WintunSendPacket (WINTUN_SESSION_HANDLE Session, const BYTE * Packet)`
Sends the packet and releases internal buffer. WintunSendPacket is thread-safe, but the order of WintunAllocateSendPacket calls defines the packet sending order. This means the packet is not guaranteed to have been sent by the time WintunSendPacket returns.
**Parameters**
- *Session*: Wintun session handle obtained with WintunStartSession
- *Packet*: Packet obtained with WintunAllocateSendPacket
## Building
**Do not distribute drivers or files named "Wintun", as they will most certainly clash with official deployments. Instead distribute [`wintun.dll` as downloaded from wintun.net](https://www.wintun.net).**
General requirements:
- [Visual Studio 2019](https://visualstudio.microsoft.com/downloads/) with Windows SDK
- [Windows Driver Kit](https://docs.microsoft.com/en-us/windows-hardware/drivers/download-the-wdk)
`wintun.sln` may be opened in Visual Studio for development and building. Be sure to run `bcdedit /set testsigning on` and then reboot beforehand to enable unsigned driver loading. The default run sequence (F5) in Visual Studio will build the example project and its dependencies.
## License
The entire contents of [the repository](https://git.zx2c4.com/wintun/), including all documentation and example code, is "Copyright © 2018-2021 WireGuard LLC. All Rights Reserved." Source code is licensed under the [GPLv2](COPYING). Prebuilt binaries from [wintun.net](https://www.wintun.net/) are released under a more permissive license suitable for more forms of software contained inside of the .zip files distributed there.

BIN
dist/windows/wintun/bin/amd64/wintun.dll vendored Normal file

Binary file not shown.

BIN
dist/windows/wintun/bin/arm/wintun.dll vendored Normal file

Binary file not shown.

BIN
dist/windows/wintun/bin/arm64/wintun.dll vendored Normal file

Binary file not shown.

BIN
dist/windows/wintun/bin/x86/wintun.dll vendored Normal file

Binary file not shown.

270
dist/windows/wintun/include/wintun.h vendored Normal file

@@ -0,0 +1,270 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Copyright (C) 2018-2021 WireGuard LLC. All Rights Reserved.
*/
#pragma once
#include <winsock2.h>
#include <windows.h>
#include <ipexport.h>
#include <ifdef.h>
#include <ws2ipdef.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef ALIGNED
# if defined(_MSC_VER)
# define ALIGNED(n) __declspec(align(n))
# elif defined(__GNUC__)
# define ALIGNED(n) __attribute__((aligned(n)))
# else
# error "Unable to define ALIGNED"
# endif
#endif
/* MinGW is missing this one, unfortunately. */
#ifndef _Post_maybenull_
# define _Post_maybenull_
#endif
#pragma warning(push)
#pragma warning(disable : 4324) /* structure was padded due to alignment specifier */
/**
* A handle representing Wintun adapter
*/
typedef struct _WINTUN_ADAPTER *WINTUN_ADAPTER_HANDLE;
/**
* Creates a new Wintun adapter.
*
* @param Name The requested name of the adapter. Zero-terminated string of up to MAX_ADAPTER_NAME-1
* characters.
*
* @param TunnelType Name of the adapter tunnel type. Zero-terminated string of up to MAX_ADAPTER_NAME-1
* characters.
*
* @param RequestedGUID The GUID of the created network adapter, which then influences NLA generation deterministically.
* If it is set to NULL, the GUID is chosen by the system at random, and hence a new NLA entry is
* created for each new adapter. It is called "requested" GUID because the API it uses is
* completely undocumented, and so there could be minor interesting complications with its usage.
*
* @return If the function succeeds, the return value is the adapter handle. Must be released with
* WintunCloseAdapter. If the function fails, the return value is NULL. To get extended error information, call
* GetLastError.
*/
typedef _Must_inspect_result_
_Return_type_success_(return != NULL)
_Post_maybenull_
WINTUN_ADAPTER_HANDLE(WINAPI WINTUN_CREATE_ADAPTER_FUNC)
(_In_z_ LPCWSTR Name, _In_z_ LPCWSTR TunnelType, _In_opt_ const GUID *RequestedGUID);
/**
* Opens an existing Wintun adapter.
*
* @param Name The requested name of the adapter. Zero-terminated string of up to MAX_ADAPTER_NAME-1
* characters.
*
* @return If the function succeeds, the return value is the adapter handle. Must be released with
* WintunCloseAdapter. If the function fails, the return value is NULL. To get extended error information, call
* GetLastError.
*/
typedef _Must_inspect_result_
_Return_type_success_(return != NULL)
_Post_maybenull_
WINTUN_ADAPTER_HANDLE(WINAPI WINTUN_OPEN_ADAPTER_FUNC)(_In_z_ LPCWSTR Name);
/**
* Releases Wintun adapter resources and, if adapter was created with WintunCreateAdapter, removes adapter.
*
* @param Adapter Adapter handle obtained with WintunCreateAdapter or WintunOpenAdapter.
*/
typedef VOID(WINAPI WINTUN_CLOSE_ADAPTER_FUNC)(_In_opt_ WINTUN_ADAPTER_HANDLE Adapter);
/**
* Deletes the Wintun driver if there are no more adapters in use.
*
* @return If the function succeeds, the return value is nonzero. If the function fails, the return value is zero. To
* get extended error information, call GetLastError.
*/
typedef _Return_type_success_(return != FALSE)
BOOL(WINAPI WINTUN_DELETE_DRIVER_FUNC)(VOID);
/**
* Returns the LUID of the adapter.
*
* @param Adapter Adapter handle obtained with WintunCreateAdapter or WintunOpenAdapter
*
* @param Luid Pointer to LUID to receive adapter LUID.
*/
typedef VOID(WINAPI WINTUN_GET_ADAPTER_LUID_FUNC)(_In_ WINTUN_ADAPTER_HANDLE Adapter, _Out_ NET_LUID *Luid);
/**
* Determines the version of the Wintun driver currently loaded.
*
* @return If the function succeeds, the return value is the version number. If the function fails, the return value is
* zero. To get extended error information, call GetLastError. Possible errors include the following:
* ERROR_FILE_NOT_FOUND Wintun not loaded
*/
typedef _Return_type_success_(return != 0)
DWORD(WINAPI WINTUN_GET_RUNNING_DRIVER_VERSION_FUNC)(VOID);
/**
* Determines the level of logging, passed to WINTUN_LOGGER_CALLBACK.
*/
typedef enum
{
WINTUN_LOG_INFO, /**< Informational */
WINTUN_LOG_WARN, /**< Warning */
WINTUN_LOG_ERR /**< Error */
} WINTUN_LOGGER_LEVEL;
/**
* Called by internal logger to report diagnostic messages
*
* @param Level Message level.
*
* @param Timestamp Message timestamp in 100ns intervals since 1601-01-01 UTC.
*
* @param Message Message text.
*/
typedef VOID(CALLBACK *WINTUN_LOGGER_CALLBACK)(
_In_ WINTUN_LOGGER_LEVEL Level,
_In_ DWORD64 Timestamp,
_In_z_ LPCWSTR Message);
/**
* Sets logger callback function.
*
* @param NewLogger Pointer to callback function to use as a new global logger. NewLogger may be called from various
* threads concurrently. Should the logging require serialization, you must handle serialization in
* NewLogger. Set to NULL to disable.
*/
typedef VOID(WINAPI WINTUN_SET_LOGGER_FUNC)(_In_ WINTUN_LOGGER_CALLBACK NewLogger);
/**
* Minimum ring capacity.
*/
#define WINTUN_MIN_RING_CAPACITY 0x20000 /* 128kiB */
/**
* Maximum ring capacity.
*/
#define WINTUN_MAX_RING_CAPACITY 0x4000000 /* 64MiB */
/**
* A handle representing Wintun session
*/
typedef struct _TUN_SESSION *WINTUN_SESSION_HANDLE;
/**
* Starts Wintun session.
*
* @param Adapter Adapter handle obtained with WintunOpenAdapter or WintunCreateAdapter
*
* @param Capacity Rings capacity. Must be between WINTUN_MIN_RING_CAPACITY and WINTUN_MAX_RING_CAPACITY (incl.)
* Must be a power of two.
*
* @return Wintun session handle. Must be released with WintunEndSession. If the function fails, the return value is
* NULL. To get extended error information, call GetLastError.
*/
typedef _Must_inspect_result_
_Return_type_success_(return != NULL)
_Post_maybenull_
WINTUN_SESSION_HANDLE(WINAPI WINTUN_START_SESSION_FUNC)(_In_ WINTUN_ADAPTER_HANDLE Adapter, _In_ DWORD Capacity);
/**
* Ends Wintun session.
*
* @param Session Wintun session handle obtained with WintunStartSession
*/
typedef VOID(WINAPI WINTUN_END_SESSION_FUNC)(_In_ WINTUN_SESSION_HANDLE Session);
/**
* Gets Wintun session's read-wait event handle.
*
* @param Session Wintun session handle obtained with WintunStartSession
*
* @return Pointer to receive event handle to wait for available data when reading. Should
* WintunReceivePackets return ERROR_NO_MORE_ITEMS (after spinning on it for a while under heavy
* load), wait for this event to become signaled before retrying WintunReceivePackets. Do not call
* CloseHandle on this event - it is managed by the session.
*/
typedef HANDLE(WINAPI WINTUN_GET_READ_WAIT_EVENT_FUNC)(_In_ WINTUN_SESSION_HANDLE Session);
/**
* Maximum IP packet size
*/
#define WINTUN_MAX_IP_PACKET_SIZE 0xFFFF
/**
* Retrieves one packet. After the packet content is consumed, call WintunReleaseReceivePacket with Packet returned
* from this function to release internal buffer. This function is thread-safe.
*
* @param Session Wintun session handle obtained with WintunStartSession
*
* @param PacketSize Pointer to receive packet size.
*
* @return Pointer to layer 3 IPv4 or IPv6 packet. Client may modify its content at will. If the function fails, the
* return value is NULL. To get extended error information, call GetLastError. Possible errors include the
* following:
* ERROR_HANDLE_EOF Wintun adapter is terminating;
* ERROR_NO_MORE_ITEMS Wintun buffer is exhausted;
* ERROR_INVALID_DATA Wintun buffer is corrupt
*/
typedef _Must_inspect_result_
_Return_type_success_(return != NULL)
_Post_maybenull_
_Post_writable_byte_size_(*PacketSize)
BYTE *(WINAPI WINTUN_RECEIVE_PACKET_FUNC)(_In_ WINTUN_SESSION_HANDLE Session, _Out_ DWORD *PacketSize);
/**
* Releases internal buffer after the received packet has been processed by the client. This function is thread-safe.
*
* @param Session Wintun session handle obtained with WintunStartSession
*
* @param Packet Packet obtained with WintunReceivePacket
*/
typedef VOID(
WINAPI WINTUN_RELEASE_RECEIVE_PACKET_FUNC)(_In_ WINTUN_SESSION_HANDLE Session, _In_ const BYTE *Packet);
/**
* Allocates memory for a packet to send. After the memory is filled with packet data, call WintunSendPacket to send
* and release internal buffer. WintunAllocateSendPacket is thread-safe, and the order of WintunAllocateSendPacket
* calls defines the packet sending order.
*
* @param Session Wintun session handle obtained with WintunStartSession
*
* @param PacketSize Exact packet size. Must be less or equal to WINTUN_MAX_IP_PACKET_SIZE.
*
* @return Returns pointer to memory where to prepare layer 3 IPv4 or IPv6 packet for sending. If the function fails,
* the return value is NULL. To get extended error information, call GetLastError. Possible errors include the
* following:
* ERROR_HANDLE_EOF Wintun adapter is terminating;
* ERROR_BUFFER_OVERFLOW Wintun buffer is full;
*/
typedef _Must_inspect_result_
_Return_type_success_(return != NULL)
_Post_maybenull_
_Post_writable_byte_size_(PacketSize)
BYTE *(WINAPI WINTUN_ALLOCATE_SEND_PACKET_FUNC)(_In_ WINTUN_SESSION_HANDLE Session, _In_ DWORD PacketSize);
/**
* Sends the packet and releases internal buffer. WintunSendPacket is thread-safe, but the order of
* WintunAllocateSendPacket calls defines the packet sending order. This means the packet is not guaranteed to have
* been sent by the time WintunSendPacket returns.
*
* @param Session Wintun session handle obtained with WintunStartSession
*
* @param Packet Packet obtained with WintunAllocateSendPacket
*/
typedef VOID(WINAPI WINTUN_SEND_PACKET_FUNC)(_In_ WINTUN_SESSION_HANDLE Session, _In_ const BYTE *Packet);
#pragma warning(pop)
#ifdef __cplusplus
}
#endif

113
dist/wireshark/nebula.lua vendored Normal file

@@ -0,0 +1,113 @@
local nebula = Proto("nebula", "nebula")
local default_settings = {
port = 4242,
all_ports = false,
}
nebula.prefs.port = Pref.uint("Port number", default_settings.port, "The UDP port number for Nebula")
nebula.prefs.all_ports = Pref.bool("All ports", default_settings.all_ports, "Assume nebula packets on any port, useful when dealing with hole punching")
local pf_version = ProtoField.new("version", "nebula.version", ftypes.UINT8, nil, base.DEC, 0xF0)
local pf_type = ProtoField.new("type", "nebula.type", ftypes.UINT8, {
[0] = "handshake",
[1] = "message",
[2] = "recvError",
[3] = "lightHouse",
[4] = "test",
[5] = "closeTunnel",
}, base.DEC, 0x0F)
local pf_subtype = ProtoField.new("subtype", "nebula.subtype", ftypes.UINT8, nil, base.DEC)
local pf_subtype_test = ProtoField.new("subtype", "nebula.subtype", ftypes.UINT8, {
[0] = "request",
[1] = "reply",
}, base.DEC)
local pf_subtype_handshake = ProtoField.new("subtype", "nebula.subtype", ftypes.UINT8, {
[0] = "ix_psk0",
}, base.DEC)
local pf_reserved = ProtoField.new("reserved", "nebula.reserved", ftypes.UINT16, nil, base.HEX)
local pf_remote_index = ProtoField.new("remote index", "nebula.remote_index", ftypes.UINT32, nil, base.DEC)
local pf_message_counter = ProtoField.new("counter", "nebula.counter", ftypes.UINT64, nil, base.DEC)
local pf_payload = ProtoField.new("payload", "nebula.payload", ftypes.BYTES, nil, base.NONE)
nebula.fields = { pf_version, pf_type, pf_subtype, pf_subtype_handshake, pf_subtype_test, pf_reserved, pf_remote_index, pf_message_counter, pf_payload }
local ef_holepunch = ProtoExpert.new("nebula.holepunch.expert", "Nebula hole punch packet", expert.group.PROTOCOL, expert.severity.NOTE)
local ef_punchy = ProtoExpert.new("nebula.punchy.expert", "Nebula punchy keepalive packet", expert.group.PROTOCOL, expert.severity.NOTE)
nebula.experts = { ef_holepunch, ef_punchy }
local type_field = Field.new("nebula.type")
local subtype_field = Field.new("nebula.subtype")
function nebula.dissector(tvbuf, pktinfo, root)
-- set the protocol column to show our protocol name
pktinfo.cols.protocol:set("NEBULA")
local pktlen = tvbuf:reported_length_remaining()
local tree = root:add(nebula, tvbuf:range(0,pktlen))
if pktlen == 0 then
tree:add_proto_expert_info(ef_holepunch)
pktinfo.cols.info:append(" (holepunch)")
return
elseif pktlen == 1 then
tree:add_proto_expert_info(ef_punchy)
pktinfo.cols.info:append(" (punchy)")
return
end
tree:add(pf_version, tvbuf:range(0,1))
local type = tree:add(pf_type, tvbuf:range(0,1))
local nebula_type = bit32.band(tvbuf:range(0,1):uint(), 0x0F)
if nebula_type == 0 then
local stage = tvbuf(8,8):uint64()
tree:add(pf_subtype_handshake, tvbuf:range(1,1))
type:append_text(" stage " .. stage)
pktinfo.cols.info:append(" (" .. type_field().display .. ", stage " .. stage .. ", " .. subtype_field().display .. ")")
elseif nebula_type == 4 then
tree:add(pf_subtype_test, tvbuf:range(1,1))
pktinfo.cols.info:append(" (" .. type_field().display .. ", " .. subtype_field().display .. ")")
else
tree:add(pf_subtype, tvbuf:range(1,1))
pktinfo.cols.info:append(" (" .. type_field().display .. ")")
end
tree:add(pf_reserved, tvbuf:range(2,2))
tree:add(pf_remote_index, tvbuf:range(4,4))
tree:add(pf_message_counter, tvbuf:range(8,8))
tree:add(pf_payload, tvbuf:range(16,tvbuf:len() - 16))
end
function nebula.prefs_changed()
if default_settings.all_ports == nebula.prefs.all_ports and default_settings.port == nebula.prefs.port then
-- Nothing changed, bail
return
end
-- Remove our old dissector
DissectorTable.get("udp.port"):remove_all(nebula)
if nebula.prefs.all_ports and default_settings.all_ports ~= nebula.prefs.all_ports then
default_settings.all_ports = nebula.prefs.all_ports
for i=0, 65535 do
DissectorTable.get("udp.port"):add(i, nebula)
end
-- no need to establish again on specific ports
return
end
if default_settings.all_ports ~= nebula.prefs.all_ports then
-- Add our new port dissector
default_settings.port = nebula.prefs.port
DissectorTable.get("udp.port"):add(default_settings.port, nebula)
end
end
DissectorTable.get("udp.port"):add(default_settings.port, nebula)
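
The dissector above documents Nebula's 16-byte wire header: the high nibble of byte 0 is the version and the low nibble the message type, byte 1 is the subtype, bytes 2-3 are reserved, bytes 4-7 carry the remote index, bytes 8-15 the message counter, and the payload begins at byte 16, all in network (big-endian) byte order. The following Go sketch decodes the same layout; the struct and function names are illustrative and do not correspond to the `header` package's actual API.

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// parsedHeader mirrors the fields the Wireshark dissector extracts.
type parsedHeader struct {
	Version, Type, Subtype uint8
	Reserved               uint16
	RemoteIndex            uint32
	MessageCounter         uint64
	Payload                []byte
}

// parseNebulaHeader decodes the 16-byte header described by the dissector above.
func parseNebulaHeader(b []byte) (*parsedHeader, error) {
	if len(b) < 16 {
		return nil, errors.New("packet too short for a nebula header")
	}
	return &parsedHeader{
		Version:        b[0] >> 4,   // high nibble of byte 0
		Type:           b[0] & 0x0F, // low nibble of byte 0
		Subtype:        b[1],
		Reserved:       binary.BigEndian.Uint16(b[2:4]),
		RemoteIndex:    binary.BigEndian.Uint32(b[4:8]),
		MessageCounter: binary.BigEndian.Uint64(b[8:16]),
		Payload:        b[16:],
	}, nil
}

func main() {
	pkt := make([]byte, 32)
	pkt[0] = 0x11 // version 1, type 1 (message)
	h, err := parseNebulaHeader(pkt)
	if err != nil {
		panic(err)
	}
	fmt.Printf("version=%d type=%d payload=%d bytes\n", h.Version, h.Type, len(h.Payload))
}
```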


@@ -7,6 +7,9 @@ import (
 "sync"
 "github.com/miekg/dns"
+"github.com/sirupsen/logrus"
+"github.com/slackhq/nebula/config"
+"github.com/slackhq/nebula/iputil"
 )
 // This whole thing should be rewritten to use context
@@ -43,8 +46,8 @@ func (d *dnsRecords) QueryCert(data string) string {
 if ip == nil {
 return ""
 }
-iip := ip2int(ip)
+iip := iputil.Ip2VpnIp(ip)
-hostinfo, err := d.hostMap.QueryVpnIP(iip)
+hostinfo, err := d.hostMap.QueryVpnIp(iip)
 if err != nil {
 return ""
 }
@@ -63,7 +66,7 @@ func (d *dnsRecords) Add(host, data string) {
 d.Unlock()
 }
-func parseQuery(m *dns.Msg, w dns.ResponseWriter) {
+func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
 for _, q := range m.Question {
 switch q.Qtype {
 case dns.TypeA:
@@ -95,37 +98,44 @@ func parseQuery(m *dns.Msg, w dns.ResponseWriter) {
 }
 }
-func handleDnsRequest(w dns.ResponseWriter, r *dns.Msg) {
+func handleDnsRequest(l *logrus.Logger, w dns.ResponseWriter, r *dns.Msg) {
 m := new(dns.Msg)
 m.SetReply(r)
 m.Compress = false
 switch r.Opcode {
 case dns.OpcodeQuery:
-parseQuery(m, w)
+parseQuery(l, m, w)
 }
 w.WriteMsg(m)
 }
-func dnsMain(hostMap *HostMap, c *Config) {
+func dnsMain(l *logrus.Logger, hostMap *HostMap, c *config.C) func() {
 dnsR = newDnsRecords(hostMap)
 // attach request handler func
-dns.HandleFunc(".", handleDnsRequest)
+dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
+handleDnsRequest(l, w, r)
+})
-c.RegisterReloadCallback(reloadDns)
-startDns(c)
+c.RegisterReloadCallback(func(c *config.C) {
+reloadDns(l, c)
+})
+return func() {
+startDns(l, c)
+}
 }
-func getDnsServerAddr(c *Config) string {
+func getDnsServerAddr(c *config.C) string {
 return c.GetString("lighthouse.dns.host", "") + ":" + strconv.Itoa(c.GetInt("lighthouse.dns.port", 53))
 }
-func startDns(c *Config) {
+func startDns(l *logrus.Logger, c *config.C) {
 dnsAddr = getDnsServerAddr(c)
 dnsServer = &dns.Server{Addr: dnsAddr, Net: "udp"}
-l.Debugf("Starting DNS responder at %s\n", dnsAddr)
+l.WithField("dnsListener", dnsAddr).Info("Starting DNS responder")
 err := dnsServer.ListenAndServe()
 defer dnsServer.Shutdown()
 if err != nil {
@@ -133,7 +143,7 @@ func startDns(c *Config) {
 }
 }
-func reloadDns(c *Config) {
+func reloadDns(l *logrus.Logger, c *config.C) {
 if dnsAddr == getDnsServerAddr(c) {
 l.Debug("No DNS server config change detected")
 return
@@ -141,5 +151,5 @@ func reloadDns(c *Config) {
 l.Debug("Restarting DNS server")
 dnsServer.Shutdown()
-go startDns(c)
+go startDns(l, c)
 }
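
Note the shape change in dnsMain: instead of starting the responder itself, it now registers the handler and reload callback and returns a start function, so the caller decides when and in which goroutine the DNS server runs. Below is a minimal sketch of a caller; the config key and wiring shown are assumptions for illustration, since the actual call site in main.go is not part of this diff.

```go
// Hypothetical caller in package nebula; the "lighthouse.serve_dns" key and
// this wiring are assumptions for illustration, not code from this change.
func maybeStartDNS(l *logrus.Logger, hostMap *HostMap, c *config.C) {
	if !c.GetBool("lighthouse.serve_dns", false) {
		return
	}
	dnsStart := dnsMain(l, hostMap, c) // registers handlers and the reload hook
	go dnsStart()                      // blocks in ListenAndServe inside the goroutine
}
```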

3
e2e/doc.go Normal file

@@ -0,0 +1,3 @@
package e2e
// This file exists to allow `go fmt` to traverse here on its own. The build tags were keeping it out before

241
e2e/handshakes_test.go Normal file

@@ -0,0 +1,241 @@
//go:build e2e_testing
// +build e2e_testing
package e2e
import (
"net"
"testing"
"time"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/e2e/router"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert"
)
func BenchmarkHotPath(b *testing.B) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
// Start the servers
myControl.Start()
theirControl.Start()
r := router.NewR(b, myControl, theirControl)
r.CancelFlowLogs()
for n := 0; n < b.N; n++ {
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
_ = r.RouteForAllUntilTxTun(theirControl)
}
myControl.Stop()
theirControl.Stop()
}
func TestGoodHandshake(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
// Start the servers
myControl.Start()
theirControl.Start()
t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side")
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
t.Log("Have them consume my stage 0 packet. They have a tunnel now")
theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
t.Log("Get their stage 1 packet so that we can play with it")
stage1Packet := theirControl.GetFromUDP(true)
t.Log("I consume a garbage packet with a proper nebula header for our tunnel")
// this should log a statement and get ignored, allowing the real handshake packet to complete the tunnel
badPacket := stage1Packet.Copy()
badPacket.Data = badPacket.Data[:len(badPacket.Data)-header.Len]
myControl.InjectUDPPacket(badPacket)
t.Log("Have me consume their real stage 1 packet. I have a tunnel now")
myControl.InjectUDPPacket(stage1Packet)
t.Log("Wait until we see my cached packet come through")
myControl.WaitForType(1, 0, theirControl)
t.Log("Make sure our host infos are correct")
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl)
t.Log("Get that cached packet and make sure it looks right")
myCachedPacket := theirControl.GetFromTun(true)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
t.Log("Do a bidirectional tunnel test")
r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow()
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
myControl.Stop()
theirControl.Stop()
//TODO: assert hostmaps
}
func TestWrongResponderHandshake(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
// The IPs here are chosen on purpose:
// The current remote handling will sort by preference, public, and then lexically.
// So we need them to have a higher address than evil (we could apply a preference though)
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil)
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil)
evilControl, evilVpnIp, evilUdpAddr := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil)
// Add their real udp addr, which should be tried after evil.
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
// Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse.
myControl.InjectLightHouseAddr(theirVpnIp, evilUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl, evilControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
theirControl.Start()
evilControl.Start()
t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
h := &header.H{}
err := h.Parse(p.Data)
if err != nil {
panic(err)
}
if p.ToIp.Equal(theirUdpAddr.IP) && p.ToPort == uint16(theirUdpAddr.Port) && h.Type == 1 {
return router.RouteAndExit
}
return router.KeepRouting
})
//TODO: Assert pending hostmap - I should have a correct hostinfo for them now
t.Log("My cached packet should be received by them")
myCachedPacket := theirControl.GetFromTun(true)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
t.Log("Test the tunnel with them")
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl)
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
t.Log("Flush all packets from all controllers")
r.FlushAll()
t.Log("Ensure ensure I don't have any hostinfo artifacts from evil")
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), true), "My pending hostmap should not contain evil")
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp), false), "My main hostmap should not contain evil")
//NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete
//TODO: assert hostmaps for everyone
t.Log("Success!")
myControl.Stop()
theirControl.Stop()
}
func Test_Case1_Stage1Race(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIp, myUdpAddr := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
// Put their info in our lighthouse and vice versa
myControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIp, myUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
theirControl.Start()
t.Log("Trigger a handshake to start on both me and them")
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
theirControl.InjectTunUDPPacket(myVpnIp, 80, 80, []byte("Hi from them"))
t.Log("Get both stage 1 handshake packets")
myHsForThem := myControl.GetFromUDP(true)
theirHsForMe := theirControl.GetFromUDP(true)
r.Log("Now inject both stage 1 handshake packets")
r.InjectUDPPacket(theirControl, myControl, theirHsForMe)
r.InjectUDPPacket(myControl, theirControl, myHsForThem)
//TODO: they should win, grab their index for me and make sure I use it in the end.
r.Log("They should not have a stage 2 (won the race) but I should send one")
r.InjectUDPPacket(myControl, theirControl, myControl.GetFromUDP(true))
r.Log("Route for me until I send a message packet to them")
r.RouteForAllUntilAfterMsgTypeTo(theirControl, header.Message, header.MessageNone)
t.Log("My cached packet should be received by them")
myCachedPacket := theirControl.GetFromTun(true)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp, theirVpnIp, 80, 80)
t.Log("Route for them until I send a message packet to me")
theirControl.WaitForType(1, 0, myControl)
t.Log("Their cached packet should be received by me")
theirCachedPacket := myControl.GetFromTun(true)
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIp, myVpnIp, 80, 80)
t.Log("Do a bidirectional tunnel test")
assertTunnel(t, myVpnIp, theirVpnIp, myControl, theirControl, r)
myControl.Stop()
theirControl.Stop()
//TODO: assert hostmaps
}
func TestRelays(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIp, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIp, relayUdpAddr := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIp, theirUdpAddr := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
// Teach me how to get to the relay and that they can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIp, relayUdpAddr)
myControl.InjectRelays(theirVpnIp, []net.IP{relayVpnIp})
relayControl.InjectLightHouseAddr(theirVpnIp, theirUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
relayControl.Start()
theirControl.Start()
t.Log("Trigger a handshake from me to them via the relay")
myControl.InjectTunUDPPacket(theirVpnIp, 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIp, theirVpnIp, 80, 80)
//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
}
//TODO: add a test with many lies

321
e2e/helpers_test.go Normal file

@@ -0,0 +1,321 @@
//go:build e2e_testing
// +build e2e_testing
package e2e
import (
"crypto/rand"
"fmt"
"io"
"net"
"os"
"testing"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/e2e/router"
"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/ed25519"
"gopkg.in/yaml.v2"
)
type m map[string]interface{}
// newSimpleServer creates a nebula instance with many assumptions
func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, net.IP, *net.UDPAddr) {
l := NewTestLogger()
vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
copy(vpnIpNet.IP, udpIp)
vpnIpNet.IP[1] += 128
udpAddr := net.UDPAddr{
IP: udpIp,
Port: 4242,
}
_, _, myPrivKey, myPEM := newTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
caB, err := caCrt.MarshalToPEM()
if err != nil {
panic(err)
}
mc := m{
"pki": m{
"ca": string(caB),
"cert": string(myPEM),
"key": string(myPrivKey),
},
//"tun": m{"disabled": true},
"firewall": m{
"outbound": []m{{
"proto": "any",
"port": "any",
"host": "any",
}},
"inbound": []m{{
"proto": "any",
"port": "any",
"host": "any",
}},
},
//"handshakes": m{
// "try_interval": "1s",
//},
"listen": m{
"host": udpAddr.IP.String(),
"port": udpAddr.Port,
},
"logging": m{
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
"level": l.Level.String(),
},
}
if overrides != nil {
err = mergo.Merge(&overrides, mc, mergo.WithAppendSlice)
if err != nil {
panic(err)
}
mc = overrides
}
cb, err := yaml.Marshal(mc)
if err != nil {
panic(err)
}
c := config.NewC(l)
c.LoadString(string(cb))
control, err := nebula.Main(c, false, "e2e-test", l, nil)
if err != nil {
panic(err)
}
return control, vpnIpNet.IP, &udpAddr
}
// newTestCaCert will generate a CA cert
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "test ca",
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: true,
InvertedGroups: make(map[string]struct{}),
},
}
if len(ips) > 0 {
nc.Details.Ips = ips
}
if len(subnets) > 0 {
nc.Details.Subnets = subnets
}
if len(groups) > 0 {
nc.Details.Groups = groups
}
err = nc.Sign(priv)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, priv, pem
}
// newTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in
func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
issuer, err := ca.Sha256Sum()
if err != nil {
panic(err)
}
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
pub, rawPriv := x25519Keypair()
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: name,
Ips: []*net.IPNet{ip},
Subnets: subnets,
Groups: groups,
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: false,
Issuer: issuer,
InvertedGroups: make(map[string]struct{}),
},
}
err = nc.Sign(key)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
}
func x25519Keypair() ([]byte, []byte) {
privkey := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
panic(err)
}
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
if err != nil {
panic(err)
}
return pubkey, privkey
}
type doneCb func()
func deadline(t *testing.T, seconds time.Duration) doneCb {
timeout := time.After(seconds * time.Second)
done := make(chan bool)
go func() {
select {
case <-timeout:
t.Fatal("Test did not finish in time")
case <-done:
}
}()
return func() {
done <- true
}
}
func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
// Send a packet from them to me
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
bPacket := r.RouteUntilTxTun(controlB, controlA)
assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80)
// And once more from me to them
controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
aPacket := r.RouteUntilTxTun(controlA, controlB)
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
}
func assertHostInfoPair(t *testing.T, addrA, addrB *net.UDPAddr, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control) {
// Get both host infos
hBinA := controlA.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpB), false)
assert.NotNil(t, hBinA, "Host B was not found by vpnIp in controlA")
hAinB := controlB.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpA), false)
assert.NotNil(t, hAinB, "Host A was not found by vpnIp in controlB")
// Check that both vpn and real addr are correct
assert.Equal(t, vpnIpB, hBinA.VpnIp, "Host B VpnIp is wrong in control A")
assert.Equal(t, vpnIpA, hAinB.VpnIp, "Host A VpnIp is wrong in control B")
assert.Equal(t, addrB.IP.To16(), hBinA.CurrentRemote.IP.To16(), "Host B remote ip is wrong in control A")
assert.Equal(t, addrA.IP.To16(), hAinB.CurrentRemote.IP.To16(), "Host A remote ip is wrong in control B")
assert.Equal(t, addrB.Port, int(hBinA.CurrentRemote.Port), "Host B remote port is wrong in control A")
assert.Equal(t, addrA.Port, int(hAinB.CurrentRemote.Port), "Host A remote port is wrong in control B")
// Check that our indexes match
assert.Equal(t, hBinA.LocalIndex, hAinB.RemoteIndex, "Host B local index does not match host A remote index")
assert.Equal(t, hBinA.RemoteIndex, hAinB.LocalIndex, "Host B remote index does not match host A local index")
//TODO: Would be nice to assert this memory
//checkIndexes := func(name string, hm *HostMap, hi *HostInfo) {
// hBbyIndex := hmA.Indexes[hBinA.localIndexId]
// assert.NotNil(t, hBbyIndex, "Could not host info by local index in %s", name)
// assert.Equal(t, &hBbyIndex, &hBinA, "%s Indexes map did not point to the right host info", name)
//
// //TODO: remote indexes are susceptible to collision
// hBbyRemoteIndex := hmA.RemoteIndexes[hBinA.remoteIndexId]
// assert.NotNil(t, hBbyIndex, "Could not host info by remote index in %s", name)
// assert.Equal(t, &hBbyRemoteIndex, &hBinA, "%s RemoteIndexes did not point to the right host info", name)
//}
//
//// Check hostmap indexes too
//checkIndexes("hmA", hmA, hBinA)
//checkIndexes("hmB", hmB, hAinB)
}
func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp net.IP, fromPort, toPort uint16) {
packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy)
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
assert.NotNil(t, v4, "No ipv4 data found")
assert.Equal(t, fromIp, v4.SrcIP, "Source ip was incorrect")
assert.Equal(t, toIp, v4.DstIP, "Dest ip was incorrect")
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
assert.NotNil(t, udp, "No udp data found")
assert.Equal(t, fromPort, uint16(udp.SrcPort), "Source port was incorrect")
assert.Equal(t, toPort, uint16(udp.DstPort), "Dest port was incorrect")
data := packet.ApplicationLayer()
assert.NotNil(t, data)
assert.Equal(t, expected, data.Payload(), "Data was incorrect")
}
func NewTestLogger() *logrus.Logger {
l := logrus.New()
v := os.Getenv("TEST_LOGS")
if v == "" {
l.SetOutput(io.Discard)
l.SetLevel(logrus.PanicLevel)
return l
}
switch v {
case "2":
l.SetLevel(logrus.DebugLevel)
case "3":
l.SetLevel(logrus.TraceLevel)
default:
l.SetLevel(logrus.InfoLevel)
}
return l
}

3
e2e/router/doc.go Normal file
View File

@@ -0,0 +1,3 @@
package router
// This file exists to allow `go fmt` to traverse here on its own. The build tags were keeping it out before

653
e2e/router/router.go Normal file
View File

@@ -0,0 +1,653 @@
//go:build e2e_testing
// +build e2e_testing
package router
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
)
type R struct {
// Simple map of the ip:port registered on a control to the control
// Basically a router, right?
controls map[string]*nebula.Control
// A map for inbound packets for a control that doesn't know about this address
inNat map[string]*nebula.Control
// A last-used map: if an inbound packet hit the inNat map then all return packets
// should use the same last-used inbound address for the outbound sender.
// map[from address + ":" + to address] => ip:port to rewrite in the udp packet to the receiver
outNat map[string]net.UDPAddr
// A map of vpn ip to the nebula control it belongs to
vpnControls map[iputil.VpnIp]*nebula.Control
flow []flowEntry
// All interactions are locked to help serialize behavior
sync.Mutex
fn string
cancelRender context.CancelFunc
t testing.TB
}
type flowEntry struct {
note string
packet *packet
}
type packet struct {
from *nebula.Control
to *nebula.Control
packet *udp.Packet
tun bool // a packet pulled off a tun device
rx bool // the packet was received by a udp device
}
func (p *packet) WasReceived() {
if p != nil {
p.rx = true
}
}
type ExitType int
const (
// KeepRouting the function will get called again on the next packet
KeepRouting ExitType = 0
// ExitNow does not route this packet and exits immediately
ExitNow ExitType = 1
// RouteAndExit routes this packet and exits immediately afterwards
RouteAndExit ExitType = 2
)
type ExitFunc func(packet *udp.Packet, receiver *nebula.Control) ExitType
// NewR creates a new router to pass packets in a controlled fashion between the provided controllers.
// The packet flow will be recorded in a file within the mermaid directory under the same name as the test.
// Renders will occur automatically, roughly every 100ms, until a call to RenderFlow() is made
func NewR(t testing.TB, controls ...*nebula.Control) *R {
ctx, cancel := context.WithCancel(context.Background())
if err := os.MkdirAll("mermaid", 0755); err != nil {
panic(err)
}
r := &R{
controls: make(map[string]*nebula.Control),
vpnControls: make(map[iputil.VpnIp]*nebula.Control),
inNat: make(map[string]*nebula.Control),
outNat: make(map[string]net.UDPAddr),
flow: []flowEntry{},
fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())),
t: t,
cancelRender: cancel,
}
// Try to remove our render file
os.Remove(r.fn)
for _, c := range controls {
addr := c.GetUDPAddr()
if _, ok := r.controls[addr]; ok {
panic("Duplicate listen address: " + addr)
}
r.vpnControls[c.GetVpnIp()] = c
r.controls[addr] = c
}
// Spin the renderer in case we go nuts and the test never completes
go func() {
clockSource := time.NewTicker(time.Millisecond * 100)
defer clockSource.Stop()
for {
select {
case <-ctx.Done():
return
case <-clockSource.C:
r.renderFlow()
}
}
}()
return r
}
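As a rough sketch of how a test wires controls into the router (control construction is elided; myControl and theirControl are assumed to be *nebula.Control values produced by the e2e helpers):
r := NewR(t, myControl, theirControl)
defer r.RenderFlow() // writes mermaid/<TestName>.md and stops the background renderer
myControl.Start()
theirControl.Start()
// drive traffic, e.g. r.RouteUntilTxTun(myControl, theirControl)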
// AddRoute will place the nebula controller at the ip and port specified.
// It does not look at the addr attached to the instance.
// If a route is used, this will behave like a NAT for the return path,
// rewriting the source ip:port to the address the origin last sent to.
func (r *R) AddRoute(ip net.IP, port uint16, c *nebula.Control) {
r.Lock()
defer r.Unlock()
inAddr := net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port))
if _, ok := r.inNat[inAddr]; ok {
panic("Duplicate listen address inNat: " + inAddr)
}
r.inNat[inAddr] = c
}
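For example (addresses are hypothetical), a test can make a control reachable at an address it is not actually listening on, with replies NAT-ed back to that address:
// Deliver packets addressed to 192.168.0.5:4242 to theirControl; return traffic
// to the original sender is rewritten to appear to come from 192.168.0.5:4242
r.AddRoute(net.ParseIP("192.168.0.5"), 4242, theirControl)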
// RenderFlow renders the packet flow seen up until now and stops further automatic renders from happening.
func (r *R) RenderFlow() {
r.cancelRender()
r.renderFlow()
}
// CancelFlowLogs stops flow logs from being tracked and destroys any logs already collected
func (r *R) CancelFlowLogs() {
r.cancelRender()
r.flow = nil
}
func (r *R) renderFlow() {
if r.flow == nil {
return
}
f, err := os.OpenFile(r.fn, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
if err != nil {
panic(err)
}
var participants = map[string]struct{}{}
var participantsVals []string
fmt.Fprintln(f, "```mermaid")
fmt.Fprintln(f, "sequenceDiagram")
// Assemble participants
for _, e := range r.flow {
if e.packet == nil {
continue
}
addr := e.packet.from.GetUDPAddr()
if _, ok := participants[addr]; ok {
continue
}
participants[addr] = struct{}{}
sanAddr := strings.Replace(addr, ":", "#58;", 1)
participantsVals = append(participantsVals, sanAddr)
fmt.Fprintf(
f, " participant %s as Nebula: %s<br/>UDP: %s\n",
sanAddr, e.packet.from.GetVpnIp(), sanAddr,
)
}
// Print packets
h := &header.H{}
for _, e := range r.flow {
if e.packet == nil {
fmt.Fprintf(f, " note over %s: %s\n", strings.Join(participantsVals, ", "), e.note)
continue
}
p := e.packet
if p.tun {
fmt.Fprintln(f, r.formatUdpPacket(p))
} else {
if err := h.Parse(p.packet.Data); err != nil {
panic(err)
}
line := "--x"
if p.rx {
line = "->>"
}
fmt.Fprintf(f,
" %s%s%s: %s(%s), counter: %v\n",
strings.Replace(p.from.GetUDPAddr(), ":", "#58;", 1),
line,
strings.Replace(p.to.GetUDPAddr(), ":", "#58;", 1),
h.TypeName(), h.SubTypeName(), h.MessageCounter,
)
}
}
fmt.Fprintln(f, "```")
}
// InjectFlow can be used to record packet flow if the test is handling the routing on its own.
// The packet is assumed to have been received
func (r *R) InjectFlow(from, to *nebula.Control, p *udp.Packet) {
r.Lock()
defer r.Unlock()
r.unlockedInjectFlow(from, to, p, false)
}
func (r *R) Log(arg ...any) {
if r.flow == nil {
return
}
r.Lock()
r.flow = append(r.flow, flowEntry{note: fmt.Sprint(arg...)})
r.t.Log(arg...)
r.Unlock()
}
func (r *R) Logf(format string, arg ...any) {
if r.flow == nil {
return
}
r.Lock()
r.flow = append(r.flow, flowEntry{note: fmt.Sprintf(format, arg...)})
r.t.Logf(format, arg...)
r.Unlock()
}
// unlockedInjectFlow is used by the router to record that a packet has been transmitted; the packet is returned and
// should be marked as received AFTER it has been placed on the receiver's channel.
// If flow logs have been disabled this function will return nil
func (r *R) unlockedInjectFlow(from, to *nebula.Control, p *udp.Packet, tun bool) *packet {
if r.flow == nil {
return nil
}
fp := &packet{
from: from,
to: to,
packet: p.Copy(),
tun: tun,
}
r.flow = append(r.flow, flowEntry{packet: fp})
return fp
}
// OnceFrom will route a single packet from sender then return
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) OnceFrom(sender *nebula.Control) {
r.RouteExitFunc(sender, func(*udp.Packet, *nebula.Control) ExitType {
return RouteAndExit
})
}
// RouteUntilTxTun will route for sender and return when a packet is seen on the receiver's tun
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) RouteUntilTxTun(sender *nebula.Control, receiver *nebula.Control) []byte {
tunTx := receiver.GetTunTxChan()
udpTx := sender.GetUDPTxChan()
for {
select {
// Maybe we already have something on the tun for us
case b := <-tunTx:
r.Lock()
np := udp.Packet{Data: make([]byte, len(b))}
copy(np.Data, b)
r.unlockedInjectFlow(receiver, receiver, &np, true)
r.Unlock()
return b
// Nope, let's push the sender along
case p := <-udpTx:
outAddr := sender.GetUDPAddr()
r.Lock()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
c := r.getControl(outAddr, inAddr, p)
if c == nil {
r.Unlock()
panic("No control for udp tx")
}
fp := r.unlockedInjectFlow(sender, c, p, false)
c.InjectUDPPacket(p)
fp.WasReceived()
r.Unlock()
}
}
}
// RouteForAllUntilTxTun will route for everyone and return when a packet is seen on the receiver's tun
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) RouteForAllUntilTxTun(receiver *nebula.Control) []byte {
sc := make([]reflect.SelectCase, len(r.controls)+1)
cm := make([]*nebula.Control, len(r.controls)+1)
i := 0
sc[i] = reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(receiver.GetTunTxChan()),
Send: reflect.Value{},
}
cm[i] = receiver
i++
for _, c := range r.controls {
sc[i] = reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(c.GetUDPTxChan()),
Send: reflect.Value{},
}
cm[i] = c
i++
}
for {
x, rx, _ := reflect.Select(sc)
r.Lock()
if x == 0 {
// we are the tun tx, we can exit
p := rx.Interface().([]byte)
np := udp.Packet{Data: make([]byte, len(p))}
copy(np.Data, p)
r.unlockedInjectFlow(cm[x], cm[x], &np, true)
r.Unlock()
return p
} else {
// we are a udp tx, route and continue
p := rx.Interface().(*udp.Packet)
outAddr := cm[x].GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
c := r.getControl(outAddr, inAddr, p)
if c == nil {
r.Unlock()
panic("No control for udp tx")
}
fp := r.unlockedInjectFlow(cm[x], c, p, false)
c.InjectUDPPacket(p)
fp.WasReceived()
}
r.Unlock()
}
}
// RouteExitFunc will call the whatDo func with each udp packet from sender.
// whatDo can return:
// - exitNow: the packet will not be routed and this call will return immediately
// - routeAndExit: this call will return immediately after routing the last packet from sender
// - keepRouting: the packet will be routed and whatDo will be called again on the next packet from sender
func (r *R) RouteExitFunc(sender *nebula.Control, whatDo ExitFunc) {
h := &header.H{}
for {
p := sender.GetFromUDP(true)
r.Lock()
if err := h.Parse(p.Data); err != nil {
panic(err)
}
outAddr := sender.GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil {
r.Unlock()
panic("Can't route for host: " + inAddr)
}
e := whatDo(p, receiver)
switch e {
case ExitNow:
r.Unlock()
return
case RouteAndExit:
fp := r.unlockedInjectFlow(sender, receiver, p, false)
receiver.InjectUDPPacket(p)
fp.WasReceived()
r.Unlock()
return
case KeepRouting:
fp := r.unlockedInjectFlow(sender, receiver, p, false)
receiver.InjectUDPPacket(p)
fp.WasReceived()
default:
panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
}
r.Unlock()
}
}
// RouteUntilAfterMsgType will route for sender until a message type is seen and sent from sender
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) RouteUntilAfterMsgType(sender *nebula.Control, msgType header.MessageType, subType header.MessageSubType) {
h := &header.H{}
r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType {
if err := h.Parse(p.Data); err != nil {
panic(err)
}
if h.Type == msgType && h.Subtype == subType {
return RouteAndExit
}
return KeepRouting
})
}
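For instance (control name hypothetical, and the Handshake constant is assumed from the header package), a test can pump the sender's outbound queue just until its first handshake packet has been routed:
r.RouteUntilAfterMsgType(myControl, header.Handshake, 0)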
func (r *R) RouteForAllUntilAfterMsgTypeTo(receiver *nebula.Control, msgType header.MessageType, subType header.MessageSubType) {
h := &header.H{}
r.RouteForAllExitFunc(func(p *udp.Packet, r *nebula.Control) ExitType {
if r != receiver {
return KeepRouting
}
if err := h.Parse(p.Data); err != nil {
panic(err)
}
if h.Type == msgType && h.Subtype == subType {
return RouteAndExit
}
return KeepRouting
})
}
func (r *R) InjectUDPPacket(sender, receiver *nebula.Control, packet *udp.Packet) {
r.Lock()
defer r.Unlock()
fp := r.unlockedInjectFlow(sender, receiver, packet, false)
receiver.InjectUDPPacket(packet)
fp.WasReceived()
}
// RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr
// finish can be any of the exitType values except `keepRouting`, the default value is `routeAndExit`
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr *net.UDPAddr, finish ExitType) {
if finish == KeepRouting {
finish = RouteAndExit
}
r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType {
if p.ToIp.Equal(toAddr.IP) && p.ToPort == uint16(toAddr.Port) {
return finish
}
return KeepRouting
})
}
// RouteForAllExitFunc will route for every registered controller and call the whatDo func with each udp packet seen.
// whatDo can return:
// - exitNow: the packet will not be routed and this call will return immediately
// - routeAndExit: this call will return immediately after routing the last packet from sender
// - keepRouting: the packet will be routed and whatDo will be called again on the next packet from sender
func (r *R) RouteForAllExitFunc(whatDo ExitFunc) {
sc := make([]reflect.SelectCase, len(r.controls))
cm := make([]*nebula.Control, len(r.controls))
i := 0
for _, c := range r.controls {
sc[i] = reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(c.GetUDPTxChan()),
Send: reflect.Value{},
}
cm[i] = c
i++
}
for {
x, rx, _ := reflect.Select(sc)
r.Lock()
p := rx.Interface().(*udp.Packet)
outAddr := cm[x].GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil {
r.Unlock()
panic("Can't route for host: " + inAddr)
}
e := whatDo(p, receiver)
switch e {
case ExitNow:
r.Unlock()
return
case RouteAndExit:
fp := r.unlockedInjectFlow(cm[x], receiver, p, false)
receiver.InjectUDPPacket(p)
fp.WasReceived()
r.Unlock()
return
case KeepRouting:
fp := r.unlockedInjectFlow(cm[x], receiver, p, false)
receiver.InjectUDPPacket(p)
fp.WasReceived()
default:
panic(fmt.Sprintf("Unknown exitFunc return: %v", e))
}
r.Unlock()
}
}
// FlushAll will route for every registered controller, exiting once there are no packets left to route
func (r *R) FlushAll() {
sc := make([]reflect.SelectCase, len(r.controls))
cm := make([]*nebula.Control, len(r.controls))
i := 0
for _, c := range r.controls {
sc[i] = reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(c.GetUDPTxChan()),
Send: reflect.Value{},
}
cm[i] = c
i++
}
// Add a default case to exit when nothing is left to send
sc = append(sc, reflect.SelectCase{
Dir: reflect.SelectDefault,
Chan: reflect.Value{},
Send: reflect.Value{},
})
for {
x, rx, ok := reflect.Select(sc)
if !ok {
return
}
r.Lock()
p := rx.Interface().(*udp.Packet)
outAddr := cm[x].GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil {
r.Unlock()
panic("Can't route for host: " + inAddr)
}
r.Unlock()
}
}
// getControl performs or seeds NAT translation and returns the control for toAddr; the packet's From fields may be rewritten.
// This is an internal router function; the caller must hold the lock.
func (r *R) getControl(fromAddr, toAddr string, p *udp.Packet) *nebula.Control {
if newAddr, ok := r.outNat[fromAddr+":"+toAddr]; ok {
p.FromIp = newAddr.IP
p.FromPort = uint16(newAddr.Port)
}
c, ok := r.inNat[toAddr]
if ok {
sHost, sPort, err := net.SplitHostPort(toAddr)
if err != nil {
panic(err)
}
port, err := strconv.Atoi(sPort)
if err != nil {
panic(err)
}
r.outNat[c.GetUDPAddr()+":"+fromAddr] = net.UDPAddr{
IP: net.ParseIP(sHost),
Port: port,
}
return c
}
return r.controls[toAddr]
}
func (r *R) formatUdpPacket(p *packet) string {
packet := gopacket.NewPacket(p.packet.Data, layers.LayerTypeIPv4, gopacket.Lazy)
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
if v4 == nil {
panic("not an ipv4 packet")
}
from := "unknown"
if c, ok := r.vpnControls[iputil.Ip2VpnIp(v4.SrcIP)]; ok {
from = c.GetUDPAddr()
}
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
if udp == nil {
panic("not a udp packet")
}
data := packet.ApplicationLayer()
return fmt.Sprintf(
" %s-->>%s: src port: %v<br/>dest port: %v<br/>data: \"%v\"\n",
strings.Replace(from, ":", "#58;", 1),
strings.Replace(p.to.GetUDPAddr(), ":", "#58;", 1),
udp.SrcPort,
udp.DstPort,
string(data.Payload()),
)
}

View File

@@ -7,9 +7,11 @@ pki:
ca: /etc/nebula/ca.crt ca: /etc/nebula/ca.crt
cert: /etc/nebula/host.crt cert: /etc/nebula/host.crt
key: /etc/nebula/host.key key: /etc/nebula/host.key
#blacklist is a list of certificate fingerprints that we will refuse to talk to # blocklist is a list of certificate fingerprints that we will refuse to talk to
#blacklist: #blocklist:
# - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72 # - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
# disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
#disconnect_invalid: false
# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network). # The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel. # A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
@@ -36,12 +38,62 @@ lighthouse:
interval: 60 interval: 60
# hosts is a list of lighthouse hosts this node should report to and query from # hosts is a list of lighthouse hosts this node should report to and query from
# IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES # IMPORTANT: THIS SHOULD BE EMPTY ON LIGHTHOUSE NODES
# IMPORTANT2: THIS SHOULD BE LIGHTHOUSES' NEBULA IPs, NOT LIGHTHOUSES' REAL ROUTABLE IPs
hosts: hosts:
- "192.168.100.1" - "192.168.100.1"
# remote_allow_list allows you to control ip ranges that this node will
# consider when handshaking to another node. By default, any remote IPs are
# allowed. You can provide CIDRs here with `true` to allow and `false` to
# deny. The most specific CIDR rule applies to each remote. If all rules are
# "allow", the default will be "deny", and vice-versa. If both "allow" and
# "deny" rules are present, then you MUST set a rule for "0.0.0.0/0" as the
# default.
#remote_allow_list:
# Example to block IPs from this subnet from being used for remote IPs.
#"172.16.0.0/12": false
# A more complicated example, allow public IPs but only private IPs from a specific subnet
#"0.0.0.0/0": true
#"10.0.0.0/8": false
#"10.42.42.0/24": true
# EXPERIMENTAL: This option may change or disappear in the future.
# Optionally allows the definition of remote_allow_list blocks
# specific to an inside VPN IP CIDR.
#remote_allow_ranges:
# This rule would only allow private IPs for this VPN range
#"10.42.42.0/24":
#"192.168.0.0/16": true
# local_allow_list allows you to filter which local IP addresses we advertise
# to the lighthouses. This uses the same logic as `remote_allow_list`, but
# additionally, you can specify an `interfaces` map of regular expressions
# to match against interface names. The regexp must match the entire name.
# All interface rules must be either true or false (and the default will be
# the inverse). CIDR rules are matched after interface name rules.
# Default is all local IP addresses.
#local_allow_list:
# Example to block tun0 and all docker interfaces.
#interfaces:
#tun0: false
#'docker.*': false
# Example to only advertise this subnet to the lighthouse.
#"10.0.0.0/8": true
# advertise_addrs are routable addresses that will be included along with discovered addresses to report to the
# lighthouse, the format is "ip:port". `port` can be `0`, in which case the actual listening port will be used in its
# place, useful if `listen.port` is set to 0.
# This option is mainly useful when there are static ip addresses the host can be reached at that nebula can not
# typically discover on its own. Examples being port forwarding or multiple paths to the internet.
#advertise_addrs:
#- "1.1.1.1:4242"
#- "1.2.3.4:0" # port will be replaced with the real listening port
# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined, # Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
# however using port 0 will dynamically assign a port and is recommended for roaming nodes. # however using port 0 will dynamically assign a port and is recommended for roaming nodes.
listen: listen:
# To listen on both any ipv4 and ipv6 use "[::]"
host: 0.0.0.0 host: 0.0.0.0
port: 4242 port: 4242
# Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg) # Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
@@ -53,20 +105,41 @@ listen:
# max, net.core.rmem_max and net.core.wmem_max # max, net.core.rmem_max and net.core.wmem_max
#read_buffer: 10485760 #read_buffer: 10485760
#write_buffer: 10485760 #write_buffer: 10485760
# By default, Nebula replies to packets it has no tunnel for with a "recv_error" packet. This packet helps speed up reconnection
# in the case that Nebula on either side did not shut down cleanly. This response can be abused as a way to discover if Nebula is running
# on a host though. This option lets you configure if you want to send "recv_error" packets always, never, or only to private network remotes.
# valid values: always, never, private
# This setting is reloadable.
#send_recv_error: always
# Punchy continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings # Routines is the number of thread pairs to run that consume from the tun and UDP queues.
punchy: true # Currently, this defaults to 1 which means we have 1 tun queue reader and 1
# punch_back means that a node you are trying to reach will connect back out to you if your hole punching fails # UDP queue reader. Setting this above one will set IFF_MULTI_QUEUE on the tun
# this is extremely useful if one node is behind a difficult nat, such as symmetric # device and SO_REUSEPORT on the UDP socket to allow multiple queues.
#punch_back: true # This option is only supported on Linux.
#routines: 1
# Cipher allows you to choose between the available ciphers for your network. punchy:
# Continues to punch inbound/outbound at a regular interval to avoid expiration of firewall nat mappings
punch: true
# respond means that a node you are trying to reach will connect back out to you if your hole punching fails
# this is extremely useful if one node is behind a difficult nat, such as a symmetric NAT
# Default is false
#respond: true
# delays a punch response for misbehaving NATs, default is 1 second, respond must be true to take effect
#delay: 1s
# Cipher allows you to choose between the available ciphers for your network. Options are chachapoly or aes
# IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously! # IMPORTANT: this value must be identical on ALL NODES/LIGHTHOUSES. We do not/will not support use of different ciphers simultaneously!
#cipher: chachapoly #cipher: chachapoly
# Local range is used to define a hint about the local network range, which speeds up discovering the fastest # Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest
# path to a network adjacent nebula node. # path to a network adjacent nebula node.
#local_range: "172.16.0.0/24" # NOTE: the previous option "local_range" only allowed definition of a single range
# and has been deprecated for "preferred_ranges"
#preferred_ranges: ["172.16.0.0/24"]
# sshd can expose informational and administrative functions via ssh this is a # sshd can expose informational and administrative functions via ssh this is a
#sshd: #sshd:
@@ -84,9 +157,27 @@ punchy: true
#keys: #keys:
#- "ssh public key string" #- "ssh public key string"
# EXPERIMENTAL: relay support for networks that can't establish direct connections.
relay:
# Relays are a list of Nebula IPs that peers can use to relay packets to me.
# IPs in this list must have am_relay set to true in their configs, otherwise
# they will reject relay requests.
#relays:
#- 192.168.100.1
#- <other Nebula VPN IPs of hosts used as relays to access me>
# Set am_relay to true to permit other hosts to list my IP in their relays config. Default false.
am_relay: false
# Set use_relays to false to prevent this instance from attempting to establish connections through relays.
# default true
use_relays: true
# Configure the private interface. Note: addr is baked into the nebula certificate # Configure the private interface. Note: addr is baked into the nebula certificate
tun: tun:
# Name of the device # When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root)
disabled: false
# Name of the device. If not set, a default will be chosen by the OS.
# For macOS: if set, must be in the form `utun[0-9]+`.
# For FreeBSD: Required to be set, must be in the form `tun[0-9]+`.
dev: nebula1 dev: nebula1
# Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
drop_local_broadcast: false drop_local_broadcast: false
@@ -103,10 +194,13 @@ tun:
# Unsafe routes allows you to route traffic over nebula to non-nebula nodes # Unsafe routes allows you to route traffic over nebula to non-nebula nodes
# Unsafe routes should be avoided unless you have hosts/services that cannot run nebula # Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
# NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate # NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
# `mtu` will default to tun mtu if this option is not specified
# `metric` will default to 0 if this option is not specified
unsafe_routes: unsafe_routes:
#- route: 172.16.1.0/24 #- route: 172.16.1.0/24
# via: 192.168.100.99 # via: 192.168.100.99
# mtu: 1300 #mtu will default to tun mtu if this option is not sepcified # mtu: 1300
# metric: 100
# TODO # TODO
@@ -116,6 +210,16 @@ logging:
level: info level: info
# json or text formats currently available. Default is text # json or text formats currently available. Default is text
format: text format: text
# Disable timestamp logging. Useful when output is redirected to a logging system that already adds timestamps. Default is false
#disable_timestamp: true
# timestamp format is specified in Go time format, see:
# https://golang.org/pkg/time/#pkg-constants
# default when `format: json`: "2006-01-02T15:04:05Z07:00" (RFC3339)
# default when `format: text`:
# when TTY attached: seconds since beginning of execution
# otherwise: "2006-01-02T15:04:05Z07:00" (RFC3339)
# As an example, to log as RFC3339 with millisecond precision, set to:
#timestamp_format: "2006-01-02T15:04:05.000Z07:00"
#stats: #stats:
#type: graphite #type: graphite
@@ -131,13 +235,33 @@ logging:
#subsystem: nebula #subsystem: nebula
#interval: 10s #interval: 10s
# enables counter metrics for meta packets
# e.g.: `messages.tx.handshake`
# NOTE: `message.{tx,rx}.recv_error` is always emitted
#message_metrics: false
# enables detailed counter metrics for lighthouse packets
# e.g.: `lighthouse.rx.HostQuery`
#lighthouse_metrics: false
# Handshake Manager Settings
#handshakes:
# Handshakes are sent to all known addresses at each interval with a linear backoff,
# Wait try_interval after the 1st attempt, 2 * try_interval after the 2nd, etc, until the handshake is older than timeout
# A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
#try_interval: 100ms
#retries: 20
# trigger_buffer is the size of the buffer channel for quickly sending handshakes
# after receiving the response for lighthouse queries
#trigger_buffer: 64
# Nebula security group configuration # Nebula security group configuration
firewall: firewall:
conntrack: conntrack:
tcp_timeout: 120h tcp_timeout: 12m
udp_timeout: 3m udp_timeout: 3m
default_timeout: 10m default_timeout: 10m
max_connections: 100000
# The firewall is default deny. There is no way to write a deny rule. # The firewall is default deny. There is no way to write a deny rule.
# Rules are comprised of a protocol, port, and one or more of host, group, or CIDR # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR

View File

@@ -119,7 +119,6 @@ firewall:
tcp_timeout: 12m tcp_timeout: 12m
udp_timeout: 3m udp_timeout: 3m
default_timeout: 10m default_timeout: 10m
max_connections: 100,000
inbound: inbound:
- proto: icmp - proto: icmp

View File

@@ -5,8 +5,6 @@ After=basic.target network.target
[Service] [Service]
SyslogIdentifier=nebula SyslogIdentifier=nebula
StandardOutput=syslog
StandardError=syslog
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
Restart=always Restart=always

View File

@@ -65,7 +65,6 @@ firewall:
tcp_timeout: 12m tcp_timeout: 12m
udp_timeout: 3m udp_timeout: 3m
default_timeout: 10m default_timeout: 10m
max_connections: 100,000
inbound: inbound:
- proto: icmp - proto: icmp

View File

@@ -2,11 +2,10 @@
Description=nebula Description=nebula
Wants=basic.target Wants=basic.target
After=basic.target network.target After=basic.target network.target
Before=sshd.service
[Service] [Service]
SyslogIdentifier=nebula SyslogIdentifier=nebula
StandardOutput=syslog
StandardError=syslog
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
Restart=always Restart=always

View File

@@ -1,32 +1,24 @@
package nebula package nebula
import ( import (
"encoding/binary"
"encoding/json"
"fmt"
"net"
"sync"
"time"
"crypto/sha256" "crypto/sha256"
"encoding/binary"
"encoding/hex" "encoding/hex"
"errors" "errors"
"fmt"
"net"
"reflect" "reflect"
"strconv" "strconv"
"strings" "strings"
"sync"
"time"
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
) "github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/config"
const ( "github.com/slackhq/nebula/firewall"
fwProtoAny = 0 // When we want to handle HOPOPT (0) we can change this, if ever
fwProtoTCP = 6
fwProtoUDP = 17
fwProtoICMP = 1
fwPortAny = 0 // Special value for matching `port: any`
fwPortFragment = -1 // Special value for matching `port: fragment`
) )
const tcpACK = 0x10 const tcpACK = 0x10
@@ -38,13 +30,19 @@ type FirewallInterface interface {
type conn struct { type conn struct {
Expires time.Time // Time when this conntrack entry will expire Expires time.Time // Time when this conntrack entry will expire
Seq uint32 // If tcp rtt tracking is enabled this will be the seq we are looking for an ack
Sent time.Time // If tcp rtt tracking is enabled this will be when Seq was last set Sent time.Time // If tcp rtt tracking is enabled this will be when Seq was last set
Seq uint32 // If tcp rtt tracking is enabled this will be the seq we are looking for an ack
// record why the original connection passed the firewall, so we can re-validate
// after ruleset changes. Note, rulesVersion is a uint16 so that these two
// fields pack for free after the uint32 above
incoming bool
rulesVersion uint16
} }
// TODO: need conntrack max tracked connections handling // TODO: need conntrack max tracked connections handling
type Firewall struct { type Firewall struct {
Conns map[FirewallPacket]*conn Conntrack *FirewallConntrack
InRules *FirewallTable InRules *FirewallTable
OutRules *FirewallTable OutRules *FirewallTable
@@ -55,16 +53,31 @@ type Firewall struct {
UDPTimeout time.Duration //linux: 180s max UDPTimeout time.Duration //linux: 180s max
DefaultTimeout time.Duration //linux: 600s DefaultTimeout time.Duration //linux: 600s
TimerWheel *TimerWheel
// Used to ensure we don't emit local packets for ips we don't own // Used to ensure we don't emit local packets for ips we don't own
localIps *CIDRTree localIps *cidr.Tree4
connMutex sync.Mutex rules string
rules string rulesVersion uint16
trackTCPRTT bool trackTCPRTT bool
metricTCPRTT metrics.Histogram metricTCPRTT metrics.Histogram
incomingMetrics firewallMetrics
outgoingMetrics firewallMetrics
l *logrus.Logger
}
type firewallMetrics struct {
droppedLocalIP metrics.Counter
droppedRemoteIP metrics.Counter
droppedNoRule metrics.Counter
}
type FirewallConntrack struct {
sync.Mutex
Conns map[firewall.Packet]*conn
TimerWheel *TimerWheel
} }
type FirewallTable struct { type FirewallTable struct {
@@ -94,57 +107,15 @@ type FirewallRule struct {
Any bool Any bool
Hosts map[string]struct{} Hosts map[string]struct{}
Groups [][]string Groups [][]string
CIDR *CIDRTree CIDR *cidr.Tree4
} }
// Even though ports are uint16, int32 maps are faster for lookup // Even though ports are uint16, int32 maps are faster for lookup
// Plus we can use `-1` for fragment rules // Plus we can use `-1` for fragment rules
type firewallPort map[int32]*FirewallCA type firewallPort map[int32]*FirewallCA
type FirewallPacket struct {
LocalIP uint32
RemoteIP uint32
LocalPort uint16
RemotePort uint16
Protocol uint8
Fragment bool
}
func (fp *FirewallPacket) Copy() *FirewallPacket {
return &FirewallPacket{
LocalIP: fp.LocalIP,
RemoteIP: fp.RemoteIP,
LocalPort: fp.LocalPort,
RemotePort: fp.RemotePort,
Protocol: fp.Protocol,
Fragment: fp.Fragment,
}
}
func (fp FirewallPacket) MarshalJSON() ([]byte, error) {
var proto string
switch fp.Protocol {
case fwProtoTCP:
proto = "tcp"
case fwProtoICMP:
proto = "icmp"
case fwProtoUDP:
proto = "udp"
default:
proto = fmt.Sprintf("unknown %v", fp.Protocol)
}
return json.Marshal(m{
"LocalIP": int2ip(fp.LocalIP).String(),
"RemoteIP": int2ip(fp.RemoteIP).String(),
"LocalPort": fp.LocalPort,
"RemotePort": fp.RemotePort,
"Protocol": proto,
"Fragment": fp.Fragment,
})
}
// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts. // NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
func NewFirewall(tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall { func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall {
//TODO: error on 0 duration //TODO: error on 0 duration
var min, max time.Duration var min, max time.Duration
@@ -162,7 +133,7 @@ func NewFirewall(tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.N
max = defaultTimeout max = defaultTimeout
} }
localIps := NewCIDRTree() localIps := cidr.NewTree4()
for _, ip := range c.Details.Ips { for _, ip := range c.Details.Ips {
localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{}) localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
} }
@@ -172,20 +143,35 @@ func NewFirewall(tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.N
} }
return &Firewall{ return &Firewall{
Conns: make(map[FirewallPacket]*conn), Conntrack: &FirewallConntrack{
Conns: make(map[firewall.Packet]*conn),
TimerWheel: NewTimerWheel(min, max),
},
InRules: newFirewallTable(), InRules: newFirewallTable(),
OutRules: newFirewallTable(), OutRules: newFirewallTable(),
TimerWheel: NewTimerWheel(min, max),
TCPTimeout: tcpTimeout, TCPTimeout: tcpTimeout,
UDPTimeout: UDPTimeout, UDPTimeout: UDPTimeout,
DefaultTimeout: defaultTimeout, DefaultTimeout: defaultTimeout,
localIps: localIps, localIps: localIps,
metricTCPRTT: metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015)), l: l,
metricTCPRTT: metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015)),
incomingMetrics: firewallMetrics{
droppedLocalIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.local_ip", nil),
droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.remote_ip", nil),
droppedNoRule: metrics.GetOrRegisterCounter("firewall.incoming.dropped.no_rule", nil),
},
outgoingMetrics: firewallMetrics{
droppedLocalIP: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.local_ip", nil),
droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.remote_ip", nil),
droppedNoRule: metrics.GetOrRegisterCounter("firewall.outgoing.dropped.no_rule", nil),
},
} }
} }
func NewFirewallFromConfig(nc *cert.NebulaCertificate, c *Config) (*Firewall, error) { func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *config.C) (*Firewall, error) {
fw := NewFirewall( fw := NewFirewall(
l,
c.GetDuration("firewall.conntrack.tcp_timeout", time.Minute*12), c.GetDuration("firewall.conntrack.tcp_timeout", time.Minute*12),
c.GetDuration("firewall.conntrack.udp_timeout", time.Minute*3), c.GetDuration("firewall.conntrack.udp_timeout", time.Minute*3),
c.GetDuration("firewall.conntrack.default_timeout", time.Minute*10), c.GetDuration("firewall.conntrack.default_timeout", time.Minute*10),
@@ -193,12 +179,12 @@ func NewFirewallFromConfig(nc *cert.NebulaCertificate, c *Config) (*Firewall, er
//TODO: max_connections //TODO: max_connections
) )
err := AddFirewallRulesFromConfig(false, c, fw) err := AddFirewallRulesFromConfig(l, false, c, fw)
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = AddFirewallRulesFromConfig(true, c, fw) err = AddFirewallRulesFromConfig(l, true, c, fw)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -208,11 +194,17 @@ func NewFirewallFromConfig(nc *cert.NebulaCertificate, c *Config) (*Firewall, er
// AddRule properly creates the in memory rule structure for a firewall table. // AddRule properly creates the in memory rule structure for a firewall table.
func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error { func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, caName string, caSha string) error {
// Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS
// https://github.com/golang/go/issues/14131
sIp := ""
if ip != nil {
sIp = ip.String()
}
// We need this rule string because we generate a hash. Removing this will break firewall reload. // We need this rule string because we generate a hash. Removing this will break firewall reload.
ruleString := fmt.Sprintf( ruleString := fmt.Sprintf(
"incoming: %v, proto: %v, startPort: %v, endPort: %v, groups: %v, host: %v, ip: %v, caName: %v, caSha: %s", "incoming: %v, proto: %v, startPort: %v, endPort: %v, groups: %v, host: %v, ip: %v, caName: %v, caSha: %s",
incoming, proto, startPort, endPort, groups, host, ip, caName, caSha, incoming, proto, startPort, endPort, groups, host, sIp, caName, caSha,
) )
f.rules += ruleString + "\n" f.rules += ruleString + "\n"
@@ -220,7 +212,7 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
if !incoming { if !incoming {
direction = "outgoing" direction = "outgoing"
} }
l.WithField("firewallRule", m{"direction": direction, "proto": proto, "startPort": startPort, "endPort": endPort, "groups": groups, "host": host, "ip": ip, "caName": caName, "caSha": caSha}). f.l.WithField("firewallRule", m{"direction": direction, "proto": proto, "startPort": startPort, "endPort": endPort, "groups": groups, "host": host, "ip": sIp, "caName": caName, "caSha": caSha}).
Info("Firewall rule added") Info("Firewall rule added")
var ( var (
@@ -235,13 +227,13 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
} }
switch proto { switch proto {
case fwProtoTCP: case firewall.ProtoTCP:
fp = ft.TCP fp = ft.TCP
case fwProtoUDP: case firewall.ProtoUDP:
fp = ft.UDP fp = ft.UDP
case fwProtoICMP: case firewall.ProtoICMP:
fp = ft.ICMP fp = ft.ICMP
case fwProtoAny: case firewall.ProtoAny:
fp = ft.AnyProto fp = ft.AnyProto
default: default:
return fmt.Errorf("unknown protocol %v", proto) return fmt.Errorf("unknown protocol %v", proto)
@@ -256,7 +248,7 @@ func (f *Firewall) GetRuleHash() string {
return hex.EncodeToString(sum[:]) return hex.EncodeToString(sum[:])
} }
func AddFirewallRulesFromConfig(inbound bool, config *Config, fw FirewallInterface) error { func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw FirewallInterface) error {
var table string var table string
if inbound { if inbound {
table = "firewall.inbound" table = "firewall.inbound"
@@ -264,7 +256,7 @@ func AddFirewallRulesFromConfig(inbound bool, config *Config, fw FirewallInterfa
table = "firewall.outbound" table = "firewall.outbound"
} }
r := config.Get(table) r := c.Get(table)
if r == nil { if r == nil {
return nil return nil
} }
@@ -276,7 +268,7 @@ func AddFirewallRulesFromConfig(inbound bool, config *Config, fw FirewallInterfa
for i, t := range rs { for i, t := range rs {
var groups []string var groups []string
r, err := convertRule(t, table, i) r, err := convertRule(l, t, table, i)
if err != nil { if err != nil {
return fmt.Errorf("%s rule #%v; %s", table, i, err) return fmt.Errorf("%s rule #%v; %s", table, i, err)
} }
@@ -319,13 +311,13 @@ func AddFirewallRulesFromConfig(inbound bool, config *Config, fw FirewallInterfa
var proto uint8 var proto uint8
switch r.Proto { switch r.Proto {
case "any": case "any":
proto = fwProtoAny proto = firewall.ProtoAny
case "tcp": case "tcp":
proto = fwProtoTCP proto = firewall.ProtoTCP
case "udp": case "udp":
proto = fwProtoUDP proto = firewall.ProtoUDP
case "icmp": case "icmp":
proto = fwProtoICMP proto = firewall.ProtoICMP
default: default:
return fmt.Errorf("%s rule #%v; proto was not understood; `%s`", table, i, r.Proto) return fmt.Errorf("%s rule #%v; proto was not understood; `%s`", table, i, r.Proto)
} }
@@ -347,20 +339,36 @@ func AddFirewallRulesFromConfig(inbound bool, config *Config, fw FirewallInterfa
return nil return nil
} }
func (f *Firewall) Drop(packet []byte, fp FirewallPacket, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool) bool { var ErrInvalidRemoteIP = errors.New("remote IP is not in remote certificate subnets")
var ErrInvalidLocalIP = errors.New("local IP is not in list of handled local IPs")
var ErrNoMatchingRule = errors.New("no matching rule in firewall table")
// Drop returns an error if the packet should be dropped, explaining why. It
// returns nil if the packet should not be dropped.
func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error {
// Check if we spoke to this tuple, if we did then allow this packet // Check if we spoke to this tuple, if we did then allow this packet
if f.inConns(packet, fp, incoming) { if f.inConns(packet, fp, incoming, h, caPool, localCache) {
return false return nil
} }
// Make sure remote address matches nebula certificate // Make sure remote address matches nebula certificate
if h.remoteCidr.Contains(fp.RemoteIP) == nil { if remoteCidr := h.remoteCidr; remoteCidr != nil {
return true if remoteCidr.Contains(fp.RemoteIP) == nil {
f.metrics(incoming).droppedRemoteIP.Inc(1)
return ErrInvalidRemoteIP
}
} else {
// Simple case: Certificate has one IP and no subnets
if fp.RemoteIP != h.vpnIp {
f.metrics(incoming).droppedRemoteIP.Inc(1)
return ErrInvalidRemoteIP
}
} }
// Make sure we are supposed to be handling this local ip address // Make sure we are supposed to be handling this local ip address
if f.localIps.Contains(fp.LocalIP) == nil { if f.localIps.Contains(fp.LocalIP) == nil {
return true f.metrics(incoming).droppedLocalIP.Inc(1)
return ErrInvalidLocalIP
} }
table := f.OutRules table := f.OutRules
@@ -370,13 +378,22 @@ func (f *Firewall) Drop(packet []byte, fp FirewallPacket, incoming bool, h *Host
// We now know which firewall table to check against // We now know which firewall table to check against
if !table.match(fp, incoming, h.ConnectionState.peerCert, caPool) { if !table.match(fp, incoming, h.ConnectionState.peerCert, caPool) {
return true f.metrics(incoming).droppedNoRule.Inc(1)
return ErrNoMatchingRule
} }
// We always want to conntrack since it is a faster operation // We always want to conntrack since it is a faster operation
f.addConn(packet, fp, incoming) f.addConn(packet, fp, incoming)
return false return nil
}
func (f *Firewall) metrics(incoming bool) firewallMetrics {
if incoming {
return f.incomingMetrics
} else {
return f.outgoingMetrics
}
} }
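A hedged sketch of a call site for the new error-returning signature (fw, packetData, fwPacket, hostInfo, caPool and localCache are placeholders for values the packet path already holds):
if err := fw.Drop(packetData, fwPacket, true, hostInfo, caPool, localCache); err != nil {
// err is one of ErrInvalidRemoteIP, ErrInvalidLocalIP or ErrNoMatchingRule
l.WithError(err).WithField("fwPacket", fwPacket).Debug("dropping inbound packet")
return
}
// a nil error means the packet passed and was recorded in conntrack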
// Destroy cleans up any known cyclical references so the object can be free'd my GC. This should be called if a new // Destroy cleans up any known cyclical references so the object can be free'd my GC. This should be called if a new
@@ -386,77 +403,132 @@ func (f *Firewall) Destroy() {
} }
func (f *Firewall) EmitStats() { func (f *Firewall) EmitStats() {
conntrackCount := len(f.Conns) conntrack := f.Conntrack
conntrack.Lock()
conntrackCount := len(conntrack.Conns)
conntrack.Unlock()
metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount)) metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount))
metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion))
} }
func (f *Firewall) inConns(packet []byte, fp FirewallPacket, incoming bool) bool { func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
f.connMutex.Lock() if localCache != nil {
if _, ok := localCache[fp]; ok {
return true
}
}
conntrack := f.Conntrack
conntrack.Lock()
// Purge every time we test // Purge every time we test
ep, has := f.TimerWheel.Purge() ep, has := conntrack.TimerWheel.Purge()
if has { if has {
f.evict(ep) f.evict(ep)
} }
c, ok := f.Conns[fp] c, ok := conntrack.Conns[fp]
if !ok { if !ok {
f.connMutex.Unlock() conntrack.Unlock()
return false return false
} }
if c.rulesVersion != f.rulesVersion {
// This conntrack entry was for an older rule set, validate
// it still passes with the current rule set
table := f.OutRules
if c.incoming {
table = f.InRules
}
// We now know which firewall table to check against
if !table.match(fp, c.incoming, h.ConnectionState.peerCert, caPool) {
if f.l.Level >= logrus.DebugLevel {
h.logger(f.l).
WithField("fwPacket", fp).
WithField("incoming", c.incoming).
WithField("rulesVersion", f.rulesVersion).
WithField("oldRulesVersion", c.rulesVersion).
Debugln("dropping old conntrack entry, does not match new ruleset")
}
delete(conntrack.Conns, fp)
conntrack.Unlock()
return false
}
if f.l.Level >= logrus.DebugLevel {
h.logger(f.l).
WithField("fwPacket", fp).
WithField("incoming", c.incoming).
WithField("rulesVersion", f.rulesVersion).
WithField("oldRulesVersion", c.rulesVersion).
Debugln("keeping old conntrack entry, does match new ruleset")
}
c.rulesVersion = f.rulesVersion
}
switch fp.Protocol { switch fp.Protocol {
case fwProtoTCP: case firewall.ProtoTCP:
c.Expires = time.Now().Add(f.TCPTimeout) c.Expires = time.Now().Add(f.TCPTimeout)
if incoming { if incoming {
f.checkTCPRTT(c, packet) f.checkTCPRTT(c, packet)
} else { } else {
setTCPRTTTracking(c, packet) setTCPRTTTracking(c, packet)
} }
case fwProtoUDP: case firewall.ProtoUDP:
c.Expires = time.Now().Add(f.UDPTimeout) c.Expires = time.Now().Add(f.UDPTimeout)
default: default:
c.Expires = time.Now().Add(f.DefaultTimeout) c.Expires = time.Now().Add(f.DefaultTimeout)
} }
f.connMutex.Unlock() conntrack.Unlock()
if localCache != nil {
localCache[fp] = struct{}{}
}
return true return true
} }
func (f *Firewall) addConn(packet []byte, fp FirewallPacket, incoming bool) { func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
var timeout time.Duration var timeout time.Duration
c := &conn{} c := &conn{}
switch fp.Protocol { switch fp.Protocol {
case fwProtoTCP: case firewall.ProtoTCP:
timeout = f.TCPTimeout timeout = f.TCPTimeout
if !incoming { if !incoming {
setTCPRTTTracking(c, packet) setTCPRTTTracking(c, packet)
} }
case fwProtoUDP: case firewall.ProtoUDP:
timeout = f.UDPTimeout timeout = f.UDPTimeout
default: default:
timeout = f.DefaultTimeout timeout = f.DefaultTimeout
} }
f.connMutex.Lock() conntrack := f.Conntrack
if _, ok := f.Conns[fp]; !ok { conntrack.Lock()
f.TimerWheel.Add(fp, timeout) if _, ok := conntrack.Conns[fp]; !ok {
conntrack.TimerWheel.Add(fp, timeout)
} }
// Record which rulesVersion allowed this connection, so we can retest after
// firewall reload
c.incoming = incoming
c.rulesVersion = f.rulesVersion
c.Expires = time.Now().Add(timeout) c.Expires = time.Now().Add(timeout)
f.Conns[fp] = c conntrack.Conns[fp] = c
f.connMutex.Unlock() conntrack.Unlock()
} }
// Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel // Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel
// Caller must own the connMutex lock! // Caller must own the connMutex lock!
func (f *Firewall) evict(p FirewallPacket) { func (f *Firewall) evict(p firewall.Packet) {
//TODO: report a stat if the tcp rtt tracking was never resolved? //TODO: report a stat if the tcp rtt tracking was never resolved?
// Are we still tracking this conn? // Are we still tracking this conn?
t, ok := f.Conns[p] conntrack := f.Conntrack
t, ok := conntrack.Conns[p]
if !ok { if !ok {
return return
} }
@@ -465,29 +537,29 @@ func (f *Firewall) evict(p FirewallPacket) {
// Timeout is in the future, re-add the timer // Timeout is in the future, re-add the timer
if newT > 0 { if newT > 0 {
f.TimerWheel.Add(p, newT) conntrack.TimerWheel.Add(p, newT)
return return
} }
// This conn is done // This conn is done
delete(f.Conns, p) delete(conntrack.Conns, p)
} }
func (ft *FirewallTable) match(p FirewallPacket, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool { func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
if ft.AnyProto.match(p, incoming, c, caPool) { if ft.AnyProto.match(p, incoming, c, caPool) {
return true return true
} }
switch p.Protocol { switch p.Protocol {
case fwProtoTCP: case firewall.ProtoTCP:
if ft.TCP.match(p, incoming, c, caPool) { if ft.TCP.match(p, incoming, c, caPool) {
return true return true
} }
case fwProtoUDP: case firewall.ProtoUDP:
if ft.UDP.match(p, incoming, c, caPool) { if ft.UDP.match(p, incoming, c, caPool) {
return true return true
} }
case fwProtoICMP: case firewall.ProtoICMP:
if ft.ICMP.match(p, incoming, c, caPool) { if ft.ICMP.match(p, incoming, c, caPool) {
return true return true
} }
@@ -517,7 +589,7 @@ func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string,
return nil return nil
} }
func (fp firewallPort) match(p FirewallPacket, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool { func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
// We don't have any allowed ports, bail // We don't have any allowed ports, bail
if fp == nil { if fp == nil {
return false return false
@@ -526,7 +598,7 @@ func (fp firewallPort) match(p FirewallPacket, incoming bool, c *cert.NebulaCert
var port int32 var port int32
if p.Fragment { if p.Fragment {
port = fwPortFragment port = firewall.PortFragment
} else if incoming { } else if incoming {
port = int32(p.LocalPort) port = int32(p.LocalPort)
} else { } else {
@@ -537,7 +609,7 @@ func (fp firewallPort) match(p FirewallPacket, incoming bool, c *cert.NebulaCert
return true return true
} }
return fp[fwPortAny].match(p, c, caPool) return fp[firewall.PortAny].match(p, c, caPool)
} }
func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caName, caSha string) error { func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caName, caSha string) error {
@@ -545,7 +617,7 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caNam
return &FirewallRule{ return &FirewallRule{
Hosts: make(map[string]struct{}), Hosts: make(map[string]struct{}),
Groups: make([][]string, 0), Groups: make([][]string, 0),
CIDR: NewCIDRTree(), CIDR: cidr.NewTree4(),
} }
} }
@@ -580,7 +652,7 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip *net.IPNet, caNam
return nil return nil
} }
func (fc *FirewallCA) match(p FirewallPacket, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool { func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool *cert.NebulaCAPool) bool {
if fc == nil { if fc == nil {
return false return false
} }
@@ -613,7 +685,7 @@ func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet) err
// If it's any we need to wipe out any pre-existing rules to save on memory // If it's any we need to wipe out any pre-existing rules to save on memory
fr.Groups = make([][]string, 0) fr.Groups = make([][]string, 0)
fr.Hosts = make(map[string]struct{}) fr.Hosts = make(map[string]struct{})
fr.CIDR = NewCIDRTree() fr.CIDR = cidr.NewTree4()
} else { } else {
if len(groups) > 0 { if len(groups) > 0 {
fr.Groups = append(fr.Groups, groups) fr.Groups = append(fr.Groups, groups)
@@ -653,7 +725,7 @@ func (fr *FirewallRule) isAny(groups []string, host string, ip *net.IPNet) bool
return false return false
} }
func (fr *FirewallRule) match(p FirewallPacket, c *cert.NebulaCertificate) bool { func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool {
if fr == nil { if fr == nil {
return false return false
} }
@@ -707,7 +779,7 @@ type rule struct {
CASha string CASha string
} }
func convertRule(p interface{}, table string, i int) (rule, error) { func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, error) {
r := rule{} r := rule{}
m, ok := p.(map[interface{}]interface{}) m, ok := p.(map[interface{}]interface{})
@@ -762,12 +834,12 @@ func convertRule(p interface{}, table string, i int) (rule, error) {
func parsePort(s string) (startPort, endPort int32, err error) { func parsePort(s string) (startPort, endPort int32, err error) {
if s == "any" { if s == "any" {
startPort = fwPortAny startPort = firewall.PortAny
endPort = fwPortAny endPort = firewall.PortAny
} else if s == "fragment" { } else if s == "fragment" {
startPort = fwPortFragment startPort = firewall.PortFragment
endPort = fwPortFragment endPort = firewall.PortFragment
} else if strings.Contains(s, `-`) { } else if strings.Contains(s, `-`) {
sPorts := strings.SplitN(s, `-`, 2) sPorts := strings.SplitN(s, `-`, 2)
@@ -791,8 +863,8 @@ func parsePort(s string) (startPort, endPort int32, err error) {
startPort = int32(rStartPort) startPort = int32(rStartPort)
endPort = int32(rEndPort) endPort = int32(rEndPort)
if startPort == fwPortAny { if startPort == firewall.PortAny {
endPort = fwPortAny endPort = firewall.PortAny
} }
} else { } else {

59
firewall/cache.go Normal file
View File

@@ -0,0 +1,59 @@
package firewall
import (
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
)
// ConntrackCache is used as a local routine cache to know if a given flow
// has been seen in the conntrack table.
type ConntrackCache map[Packet]struct{}
type ConntrackCacheTicker struct {
cacheV uint64
cacheTick uint64
cache ConntrackCache
}
func NewConntrackCacheTicker(d time.Duration) *ConntrackCacheTicker {
if d == 0 {
return nil
}
c := &ConntrackCacheTicker{
cache: ConntrackCache{},
}
go c.tick(d)
return c
}
func (c *ConntrackCacheTicker) tick(d time.Duration) {
for {
time.Sleep(d)
atomic.AddUint64(&c.cacheTick, 1)
}
}
// Get checks if the cache ticker has moved to the next version before returning
// the map. If it has moved, we reset the map.
func (c *ConntrackCacheTicker) Get(l *logrus.Logger) ConntrackCache {
if c == nil {
return nil
}
if tick := atomic.LoadUint64(&c.cacheTick); tick != c.cacheV {
c.cacheV = tick
if ll := len(c.cache); ll > 0 {
if l.Level == logrus.DebugLevel {
l.WithField("len", ll).Debug("resetting conntrack cache")
}
c.cache = make(ConntrackCache, ll)
}
}
return c.cache
}
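A minimal sketch of how a packet-processing routine might use this cache, based on the constructor above and the six-argument Drop signature visible in the test changes later in this diff; readNextPacket and forward are hypothetical helpers, and fw, hostInfo, caPool and l are assumed to be set up elsewhere:

    // One ConntrackCacheTicker per routine; Get(l) returns the same map until the
    // ticker advances, at which point the map is reset.
    cache := firewall.NewConntrackCacheTicker(10 * time.Second)
    for {
        payload, fwPacket := readNextPacket() // hypothetical helper
        if err := fw.Drop(payload, fwPacket, true, hostInfo, caPool, cache.Get(l)); err != nil {
            continue // drop the packet
        }
        forward(payload) // hypothetical helper
    }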

firewall/packet.go  (new file, 62 lines)

@@ -0,0 +1,62 @@
package firewall
import (
"encoding/json"
"fmt"
"github.com/slackhq/nebula/iputil"
)
type m map[string]interface{}
const (
ProtoAny = 0 // When we want to handle HOPOPT (0) we can change this, if ever
ProtoTCP = 6
ProtoUDP = 17
ProtoICMP = 1
PortAny = 0 // Special value for matching `port: any`
PortFragment = -1 // Special value for matching `port: fragment`
)
type Packet struct {
LocalIP iputil.VpnIp
RemoteIP iputil.VpnIp
LocalPort uint16
RemotePort uint16
Protocol uint8
Fragment bool
}
func (fp *Packet) Copy() *Packet {
return &Packet{
LocalIP: fp.LocalIP,
RemoteIP: fp.RemoteIP,
LocalPort: fp.LocalPort,
RemotePort: fp.RemotePort,
Protocol: fp.Protocol,
Fragment: fp.Fragment,
}
}
func (fp Packet) MarshalJSON() ([]byte, error) {
var proto string
switch fp.Protocol {
case ProtoTCP:
proto = "tcp"
case ProtoICMP:
proto = "icmp"
case ProtoUDP:
proto = "udp"
default:
proto = fmt.Sprintf("unknown %v", fp.Protocol)
}
return json.Marshal(m{
"LocalIP": fp.LocalIP.String(),
"RemoteIP": fp.RemoteIP.String(),
"LocalPort": fp.LocalPort,
"RemotePort": fp.RemotePort,
"Protocol": proto,
"Fragment": fp.Fragment,
})
}
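A small self-contained example of the new firewall.Packet type and the JSON shape its MarshalJSON produces (the field values are arbitrary):

    package main

    import (
        "encoding/json"
        "fmt"
        "net"

        "github.com/slackhq/nebula/firewall"
        "github.com/slackhq/nebula/iputil"
    )

    func main() {
        p := firewall.Packet{
            LocalIP:    iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 1)),
            RemoteIP:   iputil.Ip2VpnIp(net.IPv4(10, 0, 0, 2)),
            LocalPort:  22,
            RemotePort: 51000,
            Protocol:   firewall.ProtoTCP,
            Fragment:   false,
        }
        out, _ := json.Marshal(p)
        // Prints something like:
        // {"Fragment":false,"LocalIP":"10.0.0.1","LocalPort":22,"Protocol":"tcp","RemoteIP":"10.0.0.2","RemotePort":51000}
        fmt.Println(string(out))
    }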

firewall_test.go

@@ -11,145 +11,138 @@ import (
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestNewFirewall(t *testing.T) { func TestNewFirewall(t *testing.T) {
l := test.NewLogger()
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
fw := NewFirewall(time.Second, time.Minute, time.Hour, c) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.NotNil(t, fw.Conns) conntrack := fw.Conntrack
assert.NotNil(t, conntrack)
assert.NotNil(t, conntrack.Conns)
assert.NotNil(t, conntrack.TimerWheel)
assert.NotNil(t, fw.InRules) assert.NotNil(t, fw.InRules)
assert.NotNil(t, fw.OutRules) assert.NotNil(t, fw.OutRules)
assert.NotNil(t, fw.TimerWheel)
assert.Equal(t, time.Second, fw.TCPTimeout) assert.Equal(t, time.Second, fw.TCPTimeout)
assert.Equal(t, time.Minute, fw.UDPTimeout) assert.Equal(t, time.Minute, fw.UDPTimeout)
assert.Equal(t, time.Hour, fw.DefaultTimeout) assert.Equal(t, time.Hour, fw.DefaultTimeout)
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3601, fw.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(time.Second, time.Hour, time.Minute, c) fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c)
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3601, fw.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(time.Hour, time.Second, time.Minute, c) fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c)
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3601, fw.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(time.Hour, time.Minute, time.Second, c) fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c)
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3601, fw.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(time.Minute, time.Hour, time.Second, c) fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c)
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3601, fw.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(time.Minute, time.Second, time.Hour, c) fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c)
assert.Equal(t, time.Hour, fw.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3601, fw.TimerWheel.wheelLen) assert.Equal(t, 3601, conntrack.TimerWheel.wheelLen)
} }
func TestFirewall_AddRule(t *testing.T) { func TestFirewall_AddRule(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
out := l.Out
l.SetOutput(ob) l.SetOutput(ob)
defer l.SetOutput(out)
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
fw := NewFirewall(time.Second, time.Minute, time.Hour, c) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.NotNil(t, fw.InRules) assert.NotNil(t, fw.InRules)
assert.NotNil(t, fw.OutRules) assert.NotNil(t, fw.OutRules)
_, ti, _ := net.ParseCIDR("1.2.3.4/32") _, ti, _ := net.ParseCIDR("1.2.3.4/32")
assert.Nil(t, fw.AddRule(true, fwProtoTCP, 1, 1, []string{}, "", nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, "", ""))
// An empty rule is any // An empty rule is any
assert.True(t, fw.InRules.TCP[1].Any.Any) assert.True(t, fw.InRules.TCP[1].Any.Any)
assert.Empty(t, fw.InRules.TCP[1].Any.Groups) assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts) assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
assert.Nil(t, fw.InRules.TCP[1].Any.CIDR.root.left)
assert.Nil(t, fw.InRules.TCP[1].Any.CIDR.root.right)
assert.Nil(t, fw.InRules.TCP[1].Any.CIDR.root.value)
fw = NewFirewall(time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, fwProtoUDP, 1, 1, []string{"g1"}, "", nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "", ""))
assert.False(t, fw.InRules.UDP[1].Any.Any) assert.False(t, fw.InRules.UDP[1].Any.Any)
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1") assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1")
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts) assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
assert.Nil(t, fw.InRules.UDP[1].Any.CIDR.root.left)
assert.Nil(t, fw.InRules.UDP[1].Any.CIDR.root.right)
assert.Nil(t, fw.InRules.UDP[1].Any.CIDR.root.value)
fw = NewFirewall(time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, fwProtoICMP, 1, 1, []string{}, "h1", nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, "", ""))
assert.False(t, fw.InRules.ICMP[1].Any.Any) assert.False(t, fw.InRules.ICMP[1].Any.Any)
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups) assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1") assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
assert.Nil(t, fw.InRules.ICMP[1].Any.CIDR.root.left)
assert.Nil(t, fw.InRules.ICMP[1].Any.CIDR.root.right)
assert.Nil(t, fw.InRules.ICMP[1].Any.CIDR.root.value)
fw = NewFirewall(time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, fwProtoAny, 1, 1, []string{}, "", ti, "", "")) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, "", ""))
assert.False(t, fw.OutRules.AnyProto[1].Any.Any) assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups) assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts) assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(ip2int(ti.IP))) assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
fw = NewFirewall(time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, fwProtoUDP, 1, 1, []string{"g1"}, "", nil, "ca-name", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "ca-name", ""))
assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name") assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
fw = NewFirewall(time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, fwProtoUDP, 1, 1, []string{"g1"}, "", nil, "", "ca-sha")) assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, "", "ca-sha"))
assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha") assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
// Set any and clear fields // Set any and clear fields
fw = NewFirewall(time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, "", "")) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, "", ""))
assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0]) assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0])
assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1") assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(ip2int(ti.IP))) assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
// run twice just to make sure // run twice just to make sure
//TODO: these ANY rules should clear the CA firewall portion //TODO: these ANY rules should clear the CA firewall portion
assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{"any"}, "", nil, "", "")) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{}, "any", nil, "", "")) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, "", ""))
assert.True(t, fw.OutRules.AnyProto[0].Any.Any) assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups) assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups)
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts) assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts)
assert.Nil(t, fw.OutRules.AnyProto[0].Any.CIDR.root.left)
assert.Nil(t, fw.OutRules.AnyProto[0].Any.CIDR.root.right)
assert.Nil(t, fw.OutRules.AnyProto[0].Any.CIDR.root.value)
fw = NewFirewall(time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{}, "any", nil, "", "")) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, "", ""))
assert.True(t, fw.OutRules.AnyProto[0].Any.Any) assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
fw = NewFirewall(time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
_, anyIp, _ := net.ParseCIDR("0.0.0.0/0") _, anyIp, _ := net.ParseCIDR("0.0.0.0/0")
assert.Nil(t, fw.AddRule(false, fwProtoAny, 0, 0, []string{}, "", anyIp, "", "")) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, "", ""))
assert.True(t, fw.OutRules.AnyProto[0].Any.Any) assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
// Test error conditions // Test error conditions
fw = NewFirewall(time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, "", "")) assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, "", ""))
assert.Error(t, fw.AddRule(true, fwProtoAny, 10, 0, []string{}, "", nil, "", "")) assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, "", ""))
} }
func TestFirewall_Drop(t *testing.T) { func TestFirewall_Drop(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
out := l.Out
l.SetOutput(ob) l.SetOutput(ob)
defer l.SetOutput(out)
p := FirewallPacket{ p := firewall.Packet{
ip2int(net.IPv4(1, 2, 3, 4)), iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
ip2int(net.IPv4(1, 2, 3, 4)), iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
10, 10,
90, 90,
fwProtoUDP, firewall.ProtoUDP,
false, false,
} }
@@ -171,52 +164,53 @@ func TestFirewall_Drop(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c, peerCert: &c,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
} }
h.CreateRemoteCIDR(&c) h.CreateRemoteCIDR(&c)
fw := NewFirewall(time.Second, time.Minute, time.Hour, &c) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"any"}, "", nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
cp := cert.NewCAPool() cp := cert.NewCAPool()
// Drop outbound // Drop outbound
assert.True(t, fw.Drop([]byte{}, p, false, &h, cp)) assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
// Allow inbound // Allow inbound
resetConntrack(fw) resetConntrack(fw)
assert.False(t, fw.Drop([]byte{}, p, true, &h, cp)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
// Allow outbound because conntrack // Allow outbound because conntrack
assert.False(t, fw.Drop([]byte{}, p, false, &h, cp)) assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
// test remote mismatch // test remote mismatch
oldRemote := p.RemoteIP oldRemote := p.RemoteIP
p.RemoteIP = ip2int(net.IPv4(1, 2, 3, 10)) p.RemoteIP = iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 10))
assert.True(t, fw.Drop([]byte{}, p, false, &h, cp)) assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrInvalidRemoteIP)
p.RemoteIP = oldRemote p.RemoteIP = oldRemote
// ensure signer doesn't get in the way of group checks // ensure signer doesn't get in the way of group checks
fw = NewFirewall(time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum"))
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum-bad")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum-bad"))
assert.True(t, fw.Drop([]byte{}, p, true, &h, cp)) assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
// test caSha doesn't drop on match // test caSha doesn't drop on match
fw = NewFirewall(time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum-bad")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "", "signer-shasum-bad"))
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "", "signer-shasum"))
assert.False(t, fw.Drop([]byte{}, p, true, &h, cp)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
// ensure ca name doesn't get in the way of group checks // ensure ca name doesn't get in the way of group checks
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}} cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
fw = NewFirewall(time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good", ""))
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good-bad", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good-bad", ""))
assert.True(t, fw.Drop([]byte{}, p, true, &h, cp)) assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
// test caName doesn't drop on match // test caName doesn't drop on match
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}} cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
fw = NewFirewall(time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good-bad", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, "ca-good-bad", ""))
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, "ca-good", ""))
assert.False(t, fw.Drop([]byte{}, p, true, &h, cp)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
} }
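The assertions above reflect the API change in this diff: Firewall.Drop now reports why a packet was rejected as an error (for example ErrNoMatchingRule or ErrInvalidRemoteIP) instead of returning a bare bool. A hedged sketch of a caller, with illustrative logging only:

    if err := fw.Drop(payload, fwPacket, incoming, hostInfo, caPool, localCache); err != nil {
        // The error names the reason (no matching rule, remote IP outside the
        // peer certificate, etc.), which callers can log before discarding.
        if l.Level >= logrus.DebugLevel {
            l.WithError(err).WithField("fwPacket", fwPacket).Debugln("dropping packet")
        }
        return
    }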
func BenchmarkFirewallTable_match(b *testing.B) { func BenchmarkFirewallTable_match(b *testing.B) {
@@ -235,14 +229,14 @@ func BenchmarkFirewallTable_match(b *testing.B) {
b.Run("fail on proto", func(b *testing.B) { b.Run("fail on proto", func(b *testing.B) {
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(FirewallPacket{Protocol: fwProtoUDP}, true, c, cp) ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp)
} }
}) })
b.Run("fail on port", func(b *testing.B) { b.Run("fail on port", func(b *testing.B) {
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 1}, true, c, cp) ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp)
} }
}) })
@@ -256,7 +250,7 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10}, true, c, cp) ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
} }
}) })
@@ -268,7 +262,7 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10}, true, c, cp) ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
} }
}) })
@@ -280,12 +274,12 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10}, true, c, cp) ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
} }
}) })
b.Run("pass on ip", func(b *testing.B) { b.Run("pass on ip", func(b *testing.B) {
ip := ip2int(net.IPv4(172, 1, 1, 1)) ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{ c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{ Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}}, InvertedGroups: map[string]struct{}{"nope": {}},
@@ -293,14 +287,14 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp) ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp)
} }
}) })
_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, "", "") _ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, "", "")
b.Run("pass on ip with any port", func(b *testing.B) { b.Run("pass on ip with any port", func(b *testing.B) {
ip := ip2int(net.IPv4(172, 1, 1, 1)) ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{ c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{ Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}}, InvertedGroups: map[string]struct{}{"nope": {}},
@@ -308,23 +302,22 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(FirewallPacket{Protocol: fwProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp) ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
} }
}) })
} }
func TestFirewall_Drop2(t *testing.T) { func TestFirewall_Drop2(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
out := l.Out
l.SetOutput(ob) l.SetOutput(ob)
defer l.SetOutput(out)
p := FirewallPacket{ p := firewall.Packet{
ip2int(net.IPv4(1, 2, 3, 4)), iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
ip2int(net.IPv4(1, 2, 3, 4)), iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
10, 10,
90, 90,
fwProtoUDP, firewall.ProtoUDP,
false, false,
} }
@@ -344,6 +337,7 @@ func TestFirewall_Drop2(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c, peerCert: &c,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
} }
h.CreateRemoteCIDR(&c) h.CreateRemoteCIDR(&c)
@@ -361,29 +355,28 @@ func TestFirewall_Drop2(t *testing.T) {
} }
h1.CreateRemoteCIDR(&c1) h1.CreateRemoteCIDR(&c1)
fw := NewFirewall(time.Second, time.Minute, time.Hour, &c) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, fwProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, "", ""))
cp := cert.NewCAPool() cp := cert.NewCAPool()
// h1/c1 lacks the proper groups // h1/c1 lacks the proper groups
assert.True(t, fw.Drop([]byte{}, p, true, &h1, cp)) assert.Error(t, fw.Drop([]byte{}, p, true, &h1, cp, nil), ErrNoMatchingRule)
// c has the proper groups // c has the proper groups
resetConntrack(fw) resetConntrack(fw)
assert.False(t, fw.Drop([]byte{}, p, true, &h, cp)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
} }
func TestFirewall_Drop3(t *testing.T) { func TestFirewall_Drop3(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
out := l.Out
l.SetOutput(ob) l.SetOutput(ob)
defer l.SetOutput(out)
p := FirewallPacket{ p := firewall.Packet{
ip2int(net.IPv4(1, 2, 3, 4)), iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
ip2int(net.IPv4(1, 2, 3, 4)), iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
1, 1,
1, 1,
fwProtoUDP, firewall.ProtoUDP,
false, false,
} }
@@ -410,6 +403,7 @@ func TestFirewall_Drop3(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c1, peerCert: &c1,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
} }
h1.CreateRemoteCIDR(&c1) h1.CreateRemoteCIDR(&c1)
@@ -424,6 +418,7 @@ func TestFirewall_Drop3(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c2, peerCert: &c2,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
} }
h2.CreateRemoteCIDR(&c2) h2.CreateRemoteCIDR(&c2)
@@ -438,22 +433,90 @@ func TestFirewall_Drop3(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c3, peerCert: &c3,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
} }
h3.CreateRemoteCIDR(&c3) h3.CreateRemoteCIDR(&c3)
fw := NewFirewall(time.Second, time.Minute, time.Hour, &c) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, fwProtoAny, 1, 1, []string{}, "host1", nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, "", ""))
assert.Nil(t, fw.AddRule(true, fwProtoAny, 1, 1, []string{}, "", nil, "", "signer-sha")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, "", "signer-sha"))
cp := cert.NewCAPool() cp := cert.NewCAPool()
// c1 should pass because host match // c1 should pass because host match
assert.False(t, fw.Drop([]byte{}, p, true, &h1, cp)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h1, cp, nil))
// c2 should pass because ca sha match // c2 should pass because ca sha match
resetConntrack(fw) resetConntrack(fw)
assert.False(t, fw.Drop([]byte{}, p, true, &h2, cp)) assert.NoError(t, fw.Drop([]byte{}, p, true, &h2, cp, nil))
// c3 should fail because no match // c3 should fail because no match
resetConntrack(fw) resetConntrack(fw)
assert.True(t, fw.Drop([]byte{}, p, true, &h3, cp)) assert.Equal(t, fw.Drop([]byte{}, p, true, &h3, cp, nil), ErrNoMatchingRule)
}
func TestFirewall_DropConntrackReload(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{}
l.SetOutput(ob)
p := firewall.Packet{
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
10,
90,
firewall.ProtoUDP,
false,
}
ipNet := net.IPNet{
IP: net.IPv4(1, 2, 3, 4),
Mask: net.IPMask{255, 255, 255, 0},
}
c := cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "host1",
Ips: []*net.IPNet{&ipNet},
Groups: []string{"default-group"},
InvertedGroups: map[string]struct{}{"default-group": {}},
Issuer: "signer-shasum",
},
}
h := HostInfo{
ConnectionState: &ConnectionState{
peerCert: &c,
},
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
}
h.CreateRemoteCIDR(&c)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, "", ""))
cp := cert.NewCAPool()
// Drop outbound
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
// Allow inbound
resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
// Allow outbound because conntrack
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
oldFw := fw
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, "", ""))
fw.Conntrack = oldFw.Conntrack
fw.rulesVersion = oldFw.rulesVersion + 1
// Allow outbound because conntrack and new rules allow port 10
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
oldFw = fw
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, "", ""))
fw.Conntrack = oldFw.Conntrack
fw.rulesVersion = oldFw.rulesVersion + 1
// Drop outbound because conntrack doesn't match new ruleset
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
} }
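TestFirewall_DropConntrackReload exercises the reload behavior: a rebuilt Firewall can adopt the previous Conntrack table and bump rulesVersion, and established flows are then re-validated against the new rules instead of being trusted indefinitely. A conceptual sketch of that check on a conntrack hit; the conn fields and control flow here are assumptions, not the actual firewall.go implementation:

    // Conceptual only. Assumes each conn remembers the rulesVersion and the
    // direction (incoming) it was admitted under.
    if c.rulesVersion != fw.rulesVersion {
        table := fw.OutRules
        if c.incoming {
            table = fw.InRules
        }
        if !table.match(fp, c.incoming, h.ConnectionState.peerCert, caPool) {
            // The flow no longer passes the current rule set; forget it and let
            // normal rule evaluation decide the packet's fate.
            delete(fw.Conntrack.Conns, fp)
            return false
        }
        c.rulesVersion = fw.rulesVersion
    }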
func BenchmarkLookup(b *testing.B) { func BenchmarkLookup(b *testing.B) {
@@ -572,124 +635,126 @@ func Test_parsePort(t *testing.T) {
} }
func TestNewFirewallFromConfig(t *testing.T) { func TestNewFirewallFromConfig(t *testing.T) {
l := test.NewLogger()
// Test a bad rule definition // Test a bad rule definition
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
conf := NewConfig() conf := config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": "asdf"} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": "asdf"}
_, err := NewFirewallFromConfig(c, conf) _, err := NewFirewallFromConfig(l, c, conf)
assert.EqualError(t, err, "firewall.outbound failed to parse, should be an array of rules") assert.EqualError(t, err, "firewall.outbound failed to parse, should be an array of rules")
// Test both port and code // Test both port and code
conf = NewConfig() conf = config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "code": "2"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "code": "2"}}}
_, err = NewFirewallFromConfig(c, conf) _, err = NewFirewallFromConfig(l, c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; only one of port or code should be provided") assert.EqualError(t, err, "firewall.outbound rule #0; only one of port or code should be provided")
// Test missing host, group, cidr, ca_name and ca_sha // Test missing host, group, cidr, ca_name and ca_sha
conf = NewConfig() conf = config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}}
_, err = NewFirewallFromConfig(c, conf) _, err = NewFirewallFromConfig(l, c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, ca_name, or ca_sha must be provided") assert.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, ca_name, or ca_sha must be provided")
// Test code/port error // Test code/port error
conf = NewConfig() conf = config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "a", "host": "testh"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "a", "host": "testh"}}}
_, err = NewFirewallFromConfig(c, conf) _, err = NewFirewallFromConfig(l, c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; code was not a number; `a`") assert.EqualError(t, err, "firewall.outbound rule #0; code was not a number; `a`")
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "a", "host": "testh"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "a", "host": "testh"}}}
_, err = NewFirewallFromConfig(c, conf) _, err = NewFirewallFromConfig(l, c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; port was not a number; `a`") assert.EqualError(t, err, "firewall.outbound rule #0; port was not a number; `a`")
// Test proto error // Test proto error
conf = NewConfig() conf = config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "host": "testh"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "host": "testh"}}}
_, err = NewFirewallFromConfig(c, conf) _, err = NewFirewallFromConfig(l, c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; proto was not understood; ``") assert.EqualError(t, err, "firewall.outbound rule #0; proto was not understood; ``")
// Test cidr parse error // Test cidr parse error
conf = NewConfig() conf = config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}}
_, err = NewFirewallFromConfig(c, conf) _, err = NewFirewallFromConfig(l, c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh") assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh")
// Test both group and groups // Test both group and groups
conf = NewConfig() conf = config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
_, err = NewFirewallFromConfig(c, conf) _, err = NewFirewallFromConfig(l, c, conf)
assert.EqualError(t, err, "firewall.inbound rule #0; only one of group or groups should be defined, both provided") assert.EqualError(t, err, "firewall.inbound rule #0; only one of group or groups should be defined, both provided")
} }
func TestAddFirewallRulesFromConfig(t *testing.T) { func TestAddFirewallRulesFromConfig(t *testing.T) {
l := test.NewLogger()
// Test adding tcp rule // Test adding tcp rule
conf := NewConfig() conf := config.NewC(l)
mf := &mockFirewall{} mf := &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(false, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: fwProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
// Test adding udp rule // Test adding udp rule
conf = NewConfig() conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(false, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: fwProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
// Test adding icmp rule // Test adding icmp rule
conf = NewConfig() conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(false, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: fwProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
// Test adding any rule // Test adding any rule
conf = NewConfig() conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil}, mf.lastCall)
// Test adding rule with ca_sha // Test adding rule with ca_sha
conf = NewConfig() conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caSha: "12312313123"}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caSha: "12312313123"}, mf.lastCall)
// Test adding rule with ca_name // Test adding rule with ca_name
conf = NewConfig() conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caName: "root01"}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, caName: "root01"}, mf.lastCall)
// Test single group // Test single group
conf = NewConfig() conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
// Test single groups // Test single groups
conf = NewConfig() conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil}, mf.lastCall)
// Test multiple AND groups // Test multiple AND groups
conf = NewConfig() conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
assert.Nil(t, AddFirewallRulesFromConfig(true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: fwProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil}, mf.lastCall)
// Test Add error // Test Add error
conf = NewConfig() conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
mf.nextCallReturn = errors.New("test error") mf.nextCallReturn = errors.New("test error")
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
assert.EqualError(t, AddFirewallRulesFromConfig(true, conf, mf), "firewall.inbound rule #0; `test error`") assert.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`")
} }
func TestTCPRTTTracking(t *testing.T) { func TestTCPRTTTracking(t *testing.T) {
@@ -784,17 +849,16 @@ func TestTCPRTTTracking(t *testing.T) {
} }
func TestFirewall_convertRule(t *testing.T) { func TestFirewall_convertRule(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
out := l.Out
l.SetOutput(ob) l.SetOutput(ob)
defer l.SetOutput(out)
// Ensure group array of 1 is converted and a warning is printed // Ensure group array of 1 is converted and a warning is printed
c := map[interface{}]interface{}{ c := map[interface{}]interface{}{
"group": []interface{}{"group1"}, "group": []interface{}{"group1"},
} }
r, err := convertRule(c, "test", 1) r, err := convertRule(l, c, "test", 1)
assert.Contains(t, ob.String(), "test rule #1; group was an array with a single value, converting to simple value") assert.Contains(t, ob.String(), "test rule #1; group was an array with a single value, converting to simple value")
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, "group1", r.Group) assert.Equal(t, "group1", r.Group)
@@ -805,7 +869,7 @@ func TestFirewall_convertRule(t *testing.T) {
"group": []interface{}{"group1", "group2"}, "group": []interface{}{"group1", "group2"},
} }
r, err = convertRule(c, "test", 1) r, err = convertRule(l, c, "test", 1)
assert.Equal(t, "", ob.String()) assert.Equal(t, "", ob.String())
assert.Error(t, err, "group should contain a single value, an array with more than one entry was provided") assert.Error(t, err, "group should contain a single value, an array with more than one entry was provided")
@@ -815,7 +879,7 @@ func TestFirewall_convertRule(t *testing.T) {
"group": "group1", "group": "group1",
} }
r, err = convertRule(c, "test", 1) r, err = convertRule(l, c, "test", 1)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, "group1", r.Group) assert.Equal(t, "group1", r.Group)
} }
@@ -856,7 +920,7 @@ func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, end
} }
func resetConntrack(fw *Firewall) { func resetConntrack(fw *Firewall) {
fw.connMutex.Lock() fw.Conntrack.Lock()
fw.Conns = map[FirewallPacket]*conn{} fw.Conntrack.Conns = map[firewall.Packet]*conn{}
fw.connMutex.Unlock() fw.Conntrack.Unlock()
} }
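resetConntrack now reaches through fw.Conntrack, which bundles the lock, connection map, and timer wheel that these tests previously accessed directly on Firewall. The shape implied by the updated tests is sketched below; the struct name and exact definition live in firewall.go and are assumptions here:

    // Implied by fw.Conntrack.Lock(), fw.Conntrack.Conns and conntrack.TimerWheel
    // in the updated tests.
    type FirewallConntrack struct {
        sync.Mutex

        Conns      map[firewall.Packet]*conn
        TimerWheel *TimerWheel
    }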

go.mod  (65 lines changed)

@@ -1,33 +1,48 @@
module github.com/slackhq/nebula module github.com/slackhq/nebula
go 1.12 go 1.18
require ( require (
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
github.com/armon/go-radix v1.0.0 github.com/armon/go-radix v1.0.0
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect github.com/flynn/noise v1.0.0
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.3.2 github.com/google/gopacket v1.1.19
github.com/imdario/mergo v0.3.8 github.com/imdario/mergo v0.3.8
github.com/kardianos/service v1.0.0 github.com/kardianos/service v1.2.1
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/miekg/dns v1.1.48
github.com/kr/pretty v0.1.0 // indirect github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
github.com/miekg/dns v1.1.25 github.com/prometheus/client_golang v1.12.1
github.com/nbrownus/go-metrics-prometheus v0.0.0-20180622211546-6e6d5173d99c github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/prometheus/client_golang v1.2.1 github.com/sirupsen/logrus v1.8.1
github.com/prometheus/client_model v0.0.0-20191202183732-d1d2010b5bee // indirect github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/prometheus/procfs v0.0.8 // indirect github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 github.com/stretchr/testify v1.7.1
github.com/sirupsen/logrus v1.4.2 github.com/vishvananda/netlink v1.1.0
github.com/songgao/water v0.0.0-20190725173103-fd331bda3f4b golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29
github.com/stretchr/testify v1.4.0 golang.org/x/net v0.0.0-20220403103023-749bd193bc2b
github.com/vishvananda/netlink v1.0.1-0.20190522153524-00009fb8606a golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 golang.zx2c4.com/wireguard/windows v0.5.3
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 google.golang.org/protobuf v1.28.0
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 gopkg.in/yaml.v2 v2.4.0
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect )
gopkg.in/yaml.v2 v2.2.7
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.33.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/tools v0.1.10 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
) )

go.sum  (548 lines changed)

@@ -1,154 +1,574 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps= github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps=
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM= github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0=
github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo=
github.com/kardianos/service v1.2.1 h1:AYndMsehS+ywIS6RB9KOlcXzteWUzxgMgBymJD7+BYk=
github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg=
github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.48 h1:Ucfr7IIVyMBz4lRE8qmGUuZ4Wt3/ZGu9hmcMT3Uu4tQ=
github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nbrownus/go-metrics-prometheus v0.0.0-20180622211546-6e6d5173d99c h1:G/mfx/MWYuaaGlHkZQBBXFAJiYnRt/GaOVxnRHjlxg4=
github.com/nbrownus/go-metrics-prometheus v0.0.0-20180622211546-6e6d5173d99c/go.mod h1:1yMri853KAI2pPAUnESjaqZj9JeImOUM+6A4GuuPmTs=
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f h1:8dM0ilqKL0Uzl42GABzzC4Oqlc3kGRILz0vgoff7nwg=
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f/go.mod h1:nwPd6pDNId/Xi16qtKrFHrauSwMNuvk+zcjk89wrnlA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI=
github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20191202183732-d1d2010b5bee h1:iBZPTYkGLvdu6+A5TsMUJQkQX9Ad4aCEnSQtdxPuTCQ=
github.com/prometheus/client_model v0.0.0-20191202183732-d1d2010b5bee/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE=
github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/songgao/water v0.0.0-20190725173103-fd331bda3f4b h1:+y4hCMc/WKsDbAPsOQZgBSaSZ26uh2afyaWeVg/3s/c=
github.com/songgao/water v0.0.0-20190725173103-fd331bda3f4b/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/vishvananda/netlink v1.0.1-0.20190522153524-00009fb8606a h1:Bt1IVPhiCDMqwGrc2nnbIN4QKvJGx6SK2NzWBmW00ao=
github.com/vishvananda/netlink v1.0.1-0.20190522153524-00009fb8606a/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg=
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 h1:tkVvjkPTB7pnW3jnid7kNyAMPVWllTNOf/qKDze4p9o=
golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b h1:vI32FkLJNAWtGD4BwkThwEy6XS7ZLLMHkSkYfF8M0W0=
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71 h1:PRD0hj6tTuUnCFD08vkvjkYFbQg/9lV8KIxe1y4/cvU=
golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 h1:Ug9qvr1myri/zFN6xL17LSCBGFDnphBBhzmILHsM5TY=
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

View File

@@ -1,31 +1,31 @@
 package nebula
 
-const (
-	handshakeIXPSK0 = 0
-	handshakeXXPSK0 = 1
+import (
+	"github.com/slackhq/nebula/header"
+	"github.com/slackhq/nebula/udp"
 )
 
-func HandleIncomingHandshake(f *Interface, addr *udpAddr, packet []byte, h *Header, hostinfo *HostInfo) {
-	newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex)
-	//TODO: For stage 1 we won't have hostinfo yet but stage 2 and above would require it, this check may be helpful in those cases
-	//if err != nil {
-	//	l.WithError(err).WithField("udpAddr", addr).Error("Error while finding host info for handshake message")
-	//	return
-	//}
-
-	tearDown := false
-	switch h.Subtype {
-	case handshakeIXPSK0:
-		switch h.MessageCounter {
-		case 1:
-			tearDown = ixHandshakeStage1(f, addr, newHostinfo, packet, h)
-		case 2:
-			tearDown = ixHandshakeStage2(f, addr, newHostinfo, packet, h)
-		}
-	}
-
-	if tearDown && newHostinfo != nil {
-		f.handshakeManager.DeleteIndex(newHostinfo.localIndexId)
-		f.handshakeManager.DeleteVpnIP(newHostinfo.hostId)
+func HandleIncomingHandshake(f *Interface, addr *udp.Addr, via interface{}, packet []byte, h *header.H, hostinfo *HostInfo) {
+	// First remote allow list check before we know the vpnIp
+	if addr != nil {
+		if !f.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
+			f.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
+			return
+		}
+	}
+
+	switch h.Subtype {
+	case header.HandshakeIXPSK0:
+		switch h.MessageCounter {
+		case 1:
+			ixHandshakeStage1(f, addr, via, packet, h)
+		case 2:
+			newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex)
+			tearDown := ixHandshakeStage2(f, addr, via, newHostinfo, packet, h)
+			if tearDown && newHostinfo != nil {
+				f.handshakeManager.DeleteHostInfo(newHostinfo)
+			}
+		}
 	}
 }

View File

@@ -4,40 +4,36 @@ import (
 	"sync/atomic"
 	"time"
-	"bytes"
 
 	"github.com/flynn/noise"
-	"github.com/golang/protobuf/proto"
+	"github.com/slackhq/nebula/header"
+	"github.com/slackhq/nebula/iputil"
+	"github.com/slackhq/nebula/udp"
 )
 
 // NOISE IX Handshakes
 
 // This function constructs a handshake packet, but does not actually send it
 // Sending is done by the handshake manager
-func ixHandshakeStage0(f *Interface, vpnIp uint32, hostinfo *HostInfo) {
+func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
 	// This queries the lighthouse if we don't know a remote for the host
+	// We do it here to provoke the lighthouse to preempt our timer wheel and trigger the stage 1 packet to send
+	// more quickly, effect is a quicker handshake.
 	if hostinfo.remote == nil {
-		ips, err := f.lightHouse.Query(vpnIp, f)
-		if err != nil {
-			//l.Debugln(err)
-		}
-		for _, ip := range ips {
-			hostinfo.AddRemote(ip)
-		}
+		f.lightHouse.QueryServer(vpnIp, f)
 	}
 
-	myIndex, err := generateIndex()
+	err := f.handshakeManager.AddIndexHostInfo(hostinfo)
 	if err != nil {
-		l.WithError(err).WithField("vpnIp", IntIp(vpnIp)).
+		f.l.WithError(err).WithField("vpnIp", vpnIp).
 			WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index")
 		return
 	}
 
 	ci := hostinfo.ConnectionState
-	f.handshakeManager.AddIndexHostInfo(myIndex, hostinfo)
 
 	hsProto := &NebulaHandshakeDetails{
-		InitiatorIndex: myIndex,
-		Time:           uint64(time.Now().Unix()),
+		InitiatorIndex: hostinfo.localIndexId,
+		Time:           uint64(time.Now().UnixNano()),
 		Cert:           ci.certState.rawCertificateNoKey,
 	}
@@ -46,219 +42,350 @@ func ixHandshakeStage0(f *Interface, vpnIp uint32, hostinfo *HostInfo) {
hs := &NebulaHandshake{ hs := &NebulaHandshake{
Details: hsProto, Details: hsProto,
} }
hsBytes, err = proto.Marshal(hs) hsBytes, err = hs.Marshal()
if err != nil { if err != nil {
l.WithError(err).WithField("vpnIp", IntIp(vpnIp)). f.l.WithError(err).WithField("vpnIp", vpnIp).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message") WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
return return
} }
header := HeaderEncode(make([]byte, HeaderLen), Version, uint8(handshake), handshakeIXPSK0, 0, 1) h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1)
atomic.AddUint64(ci.messageCounter, 1) atomic.AddUint64(&ci.atomicMessageCounter, 1)
msg, _, _, err := ci.H.WriteMessage(header, hsBytes) msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
if err != nil { if err != nil {
l.WithError(err).WithField("vpnIp", IntIp(vpnIp)). f.l.WithError(err).WithField("vpnIp", vpnIp).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage") WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
return return
} }
// We are sending handshake packet 1, so we don't expect to receive
// handshake packet 1 from the responder
ci.window.Update(f.l, 1)
hostinfo.HandshakePacket[0] = msg hostinfo.HandshakePacket[0] = msg
hostinfo.HandshakeReady = true hostinfo.HandshakeReady = true
hostinfo.handshakeStart = time.Now() hostinfo.handshakeStart = time.Now()
} }
func ixHandshakeStage1(f *Interface, addr *udpAddr, hostinfo *HostInfo, packet []byte, h *Header) bool { func ixHandshakeStage1(f *Interface, addr *udp.Addr, via interface{}, packet []byte, h *header.H) {
var ip uint32 ci := f.newConnectionState(f.l, false, noise.HandshakeIX, []byte{}, 0)
if h.RemoteIndex == 0 { // Mark packet 1 as seen so it doesn't show up as missed
ci := f.newConnectionState(false, noise.HandshakeIX, []byte{}, 0) ci.window.Update(f.l, 1)
// Mark packet 1 as seen so it doesn't show up as missed
ci.window.Update(1)
msg, _, _, err := ci.H.ReadMessage(nil, packet[HeaderLen:]) msg, _, _, err := ci.H.ReadMessage(nil, packet[header.Len:])
if err != nil { if err != nil {
l.WithError(err).WithField("udpAddr", addr). f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.ReadMessage") WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.ReadMessage")
return true return
}
hs := &NebulaHandshake{}
err = hs.Unmarshal(msg)
/*
l.Debugln("GOT INDEX: ", hs.Details.InitiatorIndex)
*/
if err != nil || hs.Details == nil {
f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed unmarshal handshake message")
return
}
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
if err != nil {
f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
Info("Invalid certificate from host")
return
}
vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP)
certName := remoteCert.Details.Name
fingerprint, _ := remoteCert.Sha256Sum()
issuer := remoteCert.Details.Issuer
if vpnIp == f.myVpnIp {
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Refusing to handshake with myself")
return
}
if addr != nil {
if !f.lightHouse.GetRemoteAllowList().Allow(vpnIp, addr.IP) {
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return
} }
}
hs := &NebulaHandshake{} myIndex, err := generateIndex(f.l)
err = proto.Unmarshal(msg, hs) if err != nil {
/* f.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
l.Debugln("GOT INDEX: ", hs.Details.InitiatorIndex) WithField("certName", certName).
*/ WithField("fingerprint", fingerprint).
if err != nil || hs.Details == nil { WithField("issuer", issuer).
l.WithError(err).WithField("udpAddr", addr). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to generate index")
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed unmarshal handshake message") return
return true }
}
hostinfo, _ := f.handshakeManager.pendingHostMap.QueryReverseIndex(hs.Details.InitiatorIndex) hostinfo := &HostInfo{
if hostinfo != nil && bytes.Equal(hostinfo.HandshakePacket[0], packet[HeaderLen:]) { ConnectionState: ci,
if msg, ok := hostinfo.HandshakePacket[2]; ok { localIndexId: myIndex,
remoteIndexId: hs.Details.InitiatorIndex,
vpnIp: vpnIp,
HandshakePacket: make(map[uint8][]byte, 0),
lastHandshakeTime: hs.Details.Time,
relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{},
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}
hostinfo.Lock()
defer hostinfo.Unlock()
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Handshake message received")
hs.Details.ResponderIndex = myIndex
hs.Details.Cert = ci.certState.rawCertificateNoKey
// Update the time in case their clock is way off from ours
hs.Details.Time = uint64(time.Now().UnixNano())
hsBytes, err := hs.Marshal()
if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
return
}
nh := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, hs.Details.InitiatorIndex, 2)
msg, dKey, eKey, err := ci.H.WriteMessage(nh, hsBytes)
if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
return
} else if dKey == nil || eKey == nil {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Noise did not arrive at a key")
return
}
hostinfo.HandshakePacket[0] = make([]byte, len(packet[header.Len:]))
copy(hostinfo.HandshakePacket[0], packet[header.Len:])
// Regardless of whether you are the sender or receiver, you should arrive here
// and complete standing up the connection.
hostinfo.HandshakePacket[2] = make([]byte, len(msg))
copy(hostinfo.HandshakePacket[2], msg)
// We are sending handshake packet 2, so we don't expect to receive
// handshake packet 2 from the initiator.
ci.window.Update(f.l, 2)
ci.peerCert = remoteCert
ci.dKey = NewNebulaCipherState(dKey)
ci.eKey = NewNebulaCipherState(eKey)
hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
hostinfo.SetRemote(addr)
hostinfo.CreateRemoteCIDR(remoteCert)
// Only overwrite existing record if we should win the handshake race
overwrite := vpnIp > f.myVpnIp
existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, overwrite, f)
if err != nil {
switch err {
case ErrAlreadySeen:
// Update remote if preferred (Note we have to switch to locking
// the existing hostinfo, and then switch back so the defer Unlock
// higher in this function still works)
hostinfo.Unlock()
existing.Lock()
// Update remote if preferred
if existing.SetRemoteIfPreferred(f.hostMap, addr) {
// Send a test packet to ensure the other side has also switched to
// the preferred remote
f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
}
existing.Unlock()
hostinfo.Lock()
msg = existing.HandshakePacket[2]
f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
if addr != nil {
err := f.outside.WriteTo(msg, addr) err := f.outside.WriteTo(msg, addr)
if err != nil { if err != nil {
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr). f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true). WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
WithError(err).Error("Failed to send handshake message") WithError(err).Error("Failed to send handshake message")
} else { } else {
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr). f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true). WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
Info("Handshake message sent") Info("Handshake message sent")
} }
return false return
} else {
via2 := via.(*ViaSender)
if via2 == nil {
f.l.Error("Handshake send failed: both addr and via are nil.")
return
}
hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp)
f.SendVia(via2.relayHI, via2.relay, msg, make([]byte, 12), make([]byte, mtu), false)
f.l.WithField("vpnIp", existing.vpnIp).WithField("relay", via2.relayHI.vpnIp).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("cached", true).
Info("Handshake message sent")
return
} }
case ErrExistingHostInfo:
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr). // This means there was an existing tunnel and this handshake was older than the one we are currently based on
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cached", true). f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("packets", hostinfo.HandshakePacket). WithField("certName", certName).
Error("Seen this handshake packet already but don't have a cached packet to return") WithField("oldHandshakeTime", existing.lastHandshakeTime).
} WithField("newHandshakeTime", hostinfo.lastHandshakeTime).
WithField("fingerprint", fingerprint).
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert) WithField("issuer", issuer).
if err != nil {
l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
Info("Invalid certificate from host")
return true
}
vpnIP := ip2int(remoteCert.Details.Ips[0].IP)
myIndex, err := generateIndex()
if err != nil {
l.WithError(err).WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to generate index")
return true
}
hostinfo, err = f.handshakeManager.AddIndex(myIndex, ci)
if err != nil {
l.WithError(err).WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Error adding index to connection manager")
return true
}
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Handshake message received")
hostinfo.remoteIndexId = hs.Details.InitiatorIndex
hs.Details.ResponderIndex = myIndex
hs.Details.Cert = ci.certState.rawCertificateNoKey
hsBytes, err := proto.Marshal(hs)
if err != nil {
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
return true
}
header := HeaderEncode(make([]byte, HeaderLen), Version, uint8(handshake), handshakeIXPSK0, hs.Details.InitiatorIndex, 2)
msg, dKey, eKey, err := ci.H.WriteMessage(header, hsBytes)
if err != nil {
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
return true
}
if f.hostMap.CheckHandshakeCompleteIP(vpnIP) && vpnIP < ip2int(f.certState.certificate.Details.Ips[0].IP) {
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Prevented a handshake race") Info("Handshake too old")
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
f.SendMessageToVpnIp(test, testRequest, vpnIP, []byte(""), make([]byte, 12, 12), make([]byte, mtu)) f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
return true return
case ErrLocalIndexCollision:
// This means we failed to insert because of collision on localIndexId. Just let the next handshake packet retry
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithField("localIndex", hostinfo.localIndexId).WithField("collision", existing.vpnIp).
Error("Failed to add HostInfo due to localIndex collision")
return
case ErrExistingHandshake:
// We have a race where both parties think they are an initiator and this tunnel lost, let the other one finish
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Error("Prevented a pending handshake race")
return
default:
// Shouldn't happen, but just in case someone adds a new error type to CheckAndComplete
// And we forget to update it here
f.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Error("Failed to add HostInfo to HostMap")
return
} }
hostinfo.HandshakePacket[0] = make([]byte, len(packet[HeaderLen:]))
copy(hostinfo.HandshakePacket[0], packet[HeaderLen:])
// Regardless of whether you are the sender or receiver, you should arrive here
// and complete standing up the connection.
if dKey != nil && eKey != nil {
hostinfo.HandshakePacket[2] = make([]byte, len(msg))
copy(hostinfo.HandshakePacket[2], msg)
err := f.outside.WriteTo(msg, addr)
if err != nil {
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithError(err).Error("Failed to send handshake")
} else {
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
Info("Handshake message sent")
}
ip = ip2int(remoteCert.Details.Ips[0].IP)
ci.peerCert = remoteCert
ci.dKey = NewNebulaCipherState(dKey)
ci.eKey = NewNebulaCipherState(eKey)
//l.Debugln("got symmetric pairs")
//hostinfo.ClearRemotes()
hostinfo.AddRemote(*addr)
hostinfo.CreateRemoteCIDR(remoteCert)
f.lightHouse.AddRemoteAndReset(ip, addr)
if f.serveDns {
dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
}
ho, err := f.hostMap.QueryVpnIP(vpnIP)
if err == nil && ho.localIndexId != 0 {
l.WithField("vpnIp", vpnIP).
WithField("action", "removing stale index").
WithField("index", ho.localIndexId).
Debug("Handshake processing")
f.hostMap.DeleteIndex(ho.localIndexId)
}
f.hostMap.AddIndexHostInfo(hostinfo.localIndexId, hostinfo)
f.hostMap.AddVpnIPHostInfo(vpnIP, hostinfo)
hostinfo.handshakeComplete()
} else {
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Error("Noise did not arrive at a key")
return true
}
} }
f.hostMap.AddRemote(ip, addr) // Do the send
return false f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
if addr != nil {
err = f.outside.WriteTo(msg, addr)
if err != nil {
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithError(err).Error("Failed to send handshake")
} else {
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message sent")
}
} else {
via2 := via.(*ViaSender)
if via2 == nil {
f.l.Error("Handshake send failed: both addr and via are nil.")
return
}
hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp)
f.SendVia(via2.relayHI, via2.relay, msg, make([]byte, 12), make([]byte, mtu), false)
f.l.WithField("vpnIp", vpnIp).WithField("relay", via2.relayHI.vpnIp).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message sent")
}
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
return
} }
func ixHandshakeStage2(f *Interface, addr *udpAddr, hostinfo *HostInfo, packet []byte, h *Header) bool { func ixHandshakeStage2(f *Interface, addr *udp.Addr, via interface{}, hostinfo *HostInfo, packet []byte, h *header.H) bool {
if hostinfo == nil { if hostinfo == nil {
// Nothing here to tear down, got a bogus stage 2 packet
return true return true
} }
if bytes.Equal(hostinfo.HandshakePacket[2], packet[HeaderLen:]) { hostinfo.Lock()
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr). defer hostinfo.Unlock()
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
Error("Already seen this handshake packet") if addr != nil {
return false if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return false
}
} }
ci := hostinfo.ConnectionState ci := hostinfo.ConnectionState
// Mark packet 2 as seen so it doesn't show up as missed if ci.ready {
ci.window.Update(2) f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
Info("Handshake is already complete")
hostinfo.HandshakePacket[2] = make([]byte, len(packet[HeaderLen:])) // Update remote if preferred
copy(hostinfo.HandshakePacket[2], packet[HeaderLen:]) if hostinfo.SetRemoteIfPreferred(f.hostMap, addr) {
// Send a test packet to ensure the other side has also switched to
// the preferred remote
f.SendMessageToVpnIp(header.Test, header.TestRequest, hostinfo.vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
}
msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[HeaderLen:]) // We already have a complete tunnel, there is nothing that can be done by processing further stage 1 packets
return false
}
msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[header.Len:])
if err != nil { if err != nil {
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr). f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h). WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
Error("Failed to call noise.ReadMessage") Error("Failed to call noise.ReadMessage")
@@ -266,81 +393,119 @@ func ixHandshakeStage2(f *Interface, addr *udpAddr, hostinfo *HostInfo, packet [
// to DOS us. Every other error condition after should to allow a possible good handshake to complete in the // to DOS us. Every other error condition after should to allow a possible good handshake to complete in the
// near future // near future
return false return false
} else if dKey == nil || eKey == nil {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
Error("Noise did not arrive at a key")
// This should be impossible in IX but just in case, if we get here then there is no chance to recover
// the handshake state machine. Tear it down
return true
} }
hs := &NebulaHandshake{} hs := &NebulaHandshake{}
err = proto.Unmarshal(msg, hs) err = hs.Unmarshal(msg)
if err != nil || hs.Details == nil { if err != nil || hs.Details == nil {
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr). f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).Error("Failed unmarshal handshake message") WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).Error("Failed unmarshal handshake message")
// The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
return true return true
} }
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert) remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
if err != nil { if err != nil {
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr). f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
Error("Invalid certificate from host") Error("Invalid certificate from host")
// The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
return true return true
} }
vpnIP := ip2int(remoteCert.Details.Ips[0].IP)
vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP)
certName := remoteCert.Details.Name
fingerprint, _ := remoteCert.Sha256Sum()
issuer := remoteCert.Details.Issuer
// Ensure the right host responded
if vpnIp != hostinfo.vpnIp {
f.l.WithField("intendedVpnIp", hostinfo.vpnIp).WithField("haveVpnIp", vpnIp).
WithField("udpAddr", addr).WithField("certName", certName).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
Info("Incorrect host responded to handshake")
// Release our old handshake from pending, it should not continue
f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
// Create a new hostinfo/handshake for the intended vpn ip
//TODO: this adds it to the timer wheel in a way that aggressively retries
newHostInfo := f.getOrHandshake(hostinfo.vpnIp)
newHostInfo.Lock()
// Block the current used address
newHostInfo.remotes = hostinfo.remotes
newHostInfo.remotes.BlockRemote(addr)
// Get the correct remote list for the host we did handshake with
hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
f.l.WithField("blockedUdpAddrs", newHostInfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
WithField("remotes", newHostInfo.remotes.CopyAddrs(f.hostMap.preferredRanges)).
Info("Blocked addresses for handshakes")
// Swap the packet store to benefit the original intended recipient
hostinfo.ConnectionState.queueLock.Lock()
newHostInfo.packetStore = hostinfo.packetStore
hostinfo.packetStore = []*cachedPacket{}
hostinfo.ConnectionState.queueLock.Unlock()
// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
hostinfo.vpnIp = vpnIp
f.sendCloseTunnel(hostinfo)
newHostInfo.Unlock()
return true
}
// Mark packet 2 as seen so it doesn't show up as missed
ci.window.Update(f.l, 2)
duration := time.Since(hostinfo.handshakeStart).Nanoseconds() duration := time.Since(hostinfo.handshakeStart).Nanoseconds()
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", addr). f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("durationNs", duration). WithField("durationNs", duration).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message received") Info("Handshake message received")
//ci.remoteIndex = hs.ResponderIndex
hostinfo.remoteIndexId = hs.Details.ResponderIndex hostinfo.remoteIndexId = hs.Details.ResponderIndex
hs.Details.Cert = ci.certState.rawCertificateNoKey hostinfo.lastHandshakeTime = hs.Details.Time
/* // Store their cert and our symmetric keys
hsBytes, err := proto.Marshal(hs) ci.peerCert = remoteCert
if err != nil { ci.dKey = NewNebulaCipherState(dKey)
l.Debugln("Failed to marshal handshake: ", err) ci.eKey = NewNebulaCipherState(eKey)
return
}
*/
// Regardless of whether you are the sender or receiver, you should arrive here // Make sure the current udpAddr being used is set for responding
// and complete standing up the connection. if addr != nil {
if dKey != nil && eKey != nil { hostinfo.SetRemote(addr)
ip := ip2int(remoteCert.Details.Ips[0].IP)
ci.peerCert = remoteCert
ci.dKey = NewNebulaCipherState(dKey)
ci.eKey = NewNebulaCipherState(eKey)
//l.Debugln("got symmetric pairs")
//hostinfo.ClearRemotes()
f.hostMap.AddRemote(ip, addr)
hostinfo.CreateRemoteCIDR(remoteCert)
f.lightHouse.AddRemoteAndReset(ip, addr)
if f.serveDns {
dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
}
ho, err := f.hostMap.QueryVpnIP(vpnIP)
if err == nil && ho.localIndexId != 0 {
l.WithField("vpnIp", vpnIP).
WithField("action", "removing stale index").
WithField("index", ho.localIndexId).
Debug("Handshake processing")
f.hostMap.DeleteIndex(ho.localIndexId)
}
f.hostMap.AddVpnIPHostInfo(vpnIP, hostinfo)
f.hostMap.AddIndexHostInfo(hostinfo.localIndexId, hostinfo)
hostinfo.handshakeComplete()
f.metricHandshakes.Update(duration)
} else { } else {
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("udpAddr", addr). via2 := via.(*ViaSender)
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). hostinfo.relayState.InsertRelayTo(via2.relayHI.vpnIp)
Error("Noise did not arrive at a key")
return true
} }
// Build up the radix for the firewall if we have subnets in the cert
hostinfo.CreateRemoteCIDR(remoteCert)
// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
//TODO: Complete here does not do a race avoidance, it will just take the new tunnel. Is this ok?
f.handshakeManager.Complete(hostinfo, f)
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
f.metricHandshakes.Update(duration)
return false return false
} }

View File

@@ -1,175 +1,441 @@
package nebula package nebula
import ( import (
"bytes"
"context"
"crypto/rand" "crypto/rand"
"encoding/binary" "encoding/binary"
"fmt" "errors"
"net" "net"
"time" "time"
"github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
) )
const ( const (
// Total time to try a handshake = sequence of HandshakeTryInterval * HandshakeRetries DefaultHandshakeTryInterval = time.Millisecond * 100
// With 100ms interval and 20 retries is 23.5 seconds DefaultHandshakeRetries = 10
HandshakeTryInterval = time.Millisecond * 100 DefaultHandshakeTriggerBuffer = 64
HandshakeRetries = 20 DefaultUseRelays = true
// HandshakeWaitRotation is the number of handshake attempts to do before starting to use other ips addresses
HandshakeWaitRotation = 5
) )
var (
defaultHandshakeConfig = HandshakeConfig{
tryInterval: DefaultHandshakeTryInterval,
retries: DefaultHandshakeRetries,
triggerBuffer: DefaultHandshakeTriggerBuffer,
useRelays: DefaultUseRelays,
}
)
type HandshakeConfig struct {
tryInterval time.Duration
retries int
triggerBuffer int
useRelays bool
messageMetrics *MessageMetrics
}
type HandshakeManager struct { type HandshakeManager struct {
pendingHostMap *HostMap pendingHostMap *HostMap
mainHostMap *HostMap mainHostMap *HostMap
lightHouse *LightHouse lightHouse *LightHouse
outside *udpConn outside *udp.Conn
config HandshakeConfig
OutboundHandshakeTimer *SystemTimerWheel OutboundHandshakeTimer *SystemTimerWheel
InboundHandshakeTimer *SystemTimerWheel messageMetrics *MessageMetrics
metricInitiated metrics.Counter
metricTimedOut metrics.Counter
l *logrus.Logger
// can be used to trigger outbound handshake for the given vpnIp
trigger chan iputil.VpnIp
} }
func NewHandshakeManager(tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udpConn) *HandshakeManager { func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udp.Conn, config HandshakeConfig) *HandshakeManager {
return &HandshakeManager{ return &HandshakeManager{
pendingHostMap: NewHostMap("pending", tunCidr, preferredRanges), pendingHostMap: NewHostMap(l, "pending", tunCidr, preferredRanges),
mainHostMap: mainHostMap, mainHostMap: mainHostMap,
lightHouse: lightHouse, lightHouse: lightHouse,
outside: outside, outside: outside,
config: config,
OutboundHandshakeTimer: NewSystemTimerWheel(HandshakeTryInterval, HandshakeTryInterval*HandshakeRetries), trigger: make(chan iputil.VpnIp, config.triggerBuffer),
InboundHandshakeTimer: NewSystemTimerWheel(HandshakeTryInterval, HandshakeTryInterval*HandshakeRetries), OutboundHandshakeTimer: NewSystemTimerWheel(config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
messageMetrics: config.messageMetrics,
metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
l: l,
} }
} }
func (c *HandshakeManager) Run(f EncWriter) { func (c *HandshakeManager) Run(ctx context.Context, f udp.EncWriter) {
clockSource := time.Tick(HandshakeTryInterval) clockSource := time.NewTicker(c.config.tryInterval)
for now := range clockSource { defer clockSource.Stop()
c.NextOutboundHandshakeTimerTick(now, f)
c.NextInboundHandshakeTimerTick(now) for {
select {
case <-ctx.Done():
return
case vpnIP := <-c.trigger:
c.handleOutbound(vpnIP, f, true)
case now := <-clockSource.C:
c.NextOutboundHandshakeTimerTick(now, f)
}
} }
} }
func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f EncWriter) { func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f udp.EncWriter) {
c.OutboundHandshakeTimer.advance(now) c.OutboundHandshakeTimer.advance(now)
for { for {
ep := c.OutboundHandshakeTimer.Purge() ep := c.OutboundHandshakeTimer.Purge()
if ep == nil { if ep == nil {
break break
} }
vpnIP := ep.(uint32) vpnIp := ep.(iputil.VpnIp)
c.handleOutbound(vpnIp, f, false)
}
}
index, err := c.pendingHostMap.GetIndexByVpnIP(vpnIP) func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f udp.EncWriter, lighthouseTriggered bool) {
hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp)
if err != nil {
return
}
hostinfo.Lock()
defer hostinfo.Unlock()
// We may have raced to completion but now that we have a lock we should ensure we have not yet completed.
if hostinfo.HandshakeComplete {
// Ensure we don't exist in the pending hostmap anymore since we have completed
c.pendingHostMap.DeleteHostInfo(hostinfo)
return
}
// Check if we have a handshake packet to transmit yet
if !hostinfo.HandshakeReady {
// There is currently a slight race in getOrHandshake due to ConnectionState not being part of the HostInfo directly
// Our hostinfo here was added to the pending map and the wheel may have ticked to us before we created ConnectionState
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
return
}
// If we are out of time, clean up
if hostinfo.HandshakeCounter >= c.config.retries {
hostinfo.logger(c.l).WithField("udpAddrs", hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("remoteIndex", hostinfo.remoteIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithField("durationNs", time.Since(hostinfo.handshakeStart).Nanoseconds()).
Info("Handshake timed out")
c.metricTimedOut.Inc(1)
c.pendingHostMap.DeleteHostInfo(hostinfo)
return
}
// We only care about a lighthouse trigger before the first handshake transmit attempt. This is a very specific
// optimization for a fast lighthouse reply
//TODO: it would feel better to do this once, anytime, as our delay increases over time
if lighthouseTriggered && hostinfo.HandshakeCounter > 0 {
// If we didn't return here a lighthouse could cause us to aggressively send handshakes
return
}
// Get a remotes object if we don't already have one.
// This is mainly to protect us as this should never be the case
// NB ^ This comment doesn't jive. It's how the thing gets intiailized.
// It's the common path. Should it update every time, in case a future LH query/queries give us more info?
if hostinfo.remotes == nil {
hostinfo.remotes = c.lightHouse.QueryCache(vpnIp)
}
//TODO: this will generate a load of queries for hosts with only 1 ip (i'm not using a lighthouse, static mapped)
if hostinfo.remotes.Len(c.pendingHostMap.preferredRanges) <= 1 {
// If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse
// Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about
// the learned public ip for them. Query again to short circuit the promotion counter
c.lightHouse.QueryServer(vpnIp, f)
}
// Send a the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
var sentTo []*udp.Addr
hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
err = c.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
if err != nil { if err != nil {
continue hostinfo.logger(c.l).WithField("udpAddr", addr).
} WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithError(err).Error("Failed to send handshake message")
hostinfo, err := c.pendingHostMap.QueryVpnIP(vpnIP)
if err != nil {
continue
}
// If we haven't finished the handshake and we haven't hit max retries, query
// lighthouse and then send the handshake packet again.
if hostinfo.HandshakeCounter < HandshakeRetries && !hostinfo.HandshakeComplete {
if hostinfo.remote == nil {
// We continue to query the lighthouse because hosts may
// come online during handshake retries. If the query
// succeeds (no error), add the lighthouse info to hostinfo
ips, err := c.lightHouse.Query(vpnIP, f)
if err == nil {
for _, ip := range ips {
hostinfo.AddRemote(ip)
}
hostinfo.ForcePromoteBest(c.mainHostMap.preferredRanges)
}
}
hostinfo.HandshakeCounter++
// We want to use the "best" calculated ip for the first 5 attempts, after that we just blindly rotate through
// all the others until we can stand up a connection.
if hostinfo.HandshakeCounter > HandshakeWaitRotation {
hostinfo.rotateRemote()
}
// Ensure the handshake is ready to avoid a race in timer tick and stage 0 handshake generation
if hostinfo.HandshakeReady && hostinfo.remote != nil {
err := c.outside.WriteTo(hostinfo.HandshakePacket[0], hostinfo.remote)
if err != nil {
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", hostinfo.remote).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("remoteIndex", hostinfo.remoteIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithError(err).Error("Failed to send handshake message")
} else {
//TODO: this log line is assuming a lot of stuff around the cached stage 0 handshake packet, we should
// keep the real packet struct around for logging purposes
l.WithField("vpnIp", IntIp(vpnIP)).WithField("udpAddr", hostinfo.remote).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("remoteIndex", hostinfo.remoteIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Handshake message sent")
}
}
// Readd to the timer wheel so we continue trying wait HandshakeTryInterval * counter longer for next try
//l.Infoln("Interval: ", HandshakeTryInterval*time.Duration(hostinfo.HandshakeCounter))
c.OutboundHandshakeTimer.Add(vpnIP, HandshakeTryInterval*time.Duration(hostinfo.HandshakeCounter))
} else { } else {
c.pendingHostMap.DeleteVpnIP(vpnIP) sentTo = append(sentTo, addr)
c.pendingHostMap.DeleteIndex(index)
} }
})
// Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout
if len(sentTo) > 0 {
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Handshake message sent")
}
if c.config.useRelays && len(hostinfo.remotes.relays) > 0 {
hostinfo.logger(c.l).WithField("relayIps", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
// Send a RelayRequest to all known Relay IP's
for _, relay := range hostinfo.remotes.relays {
// Don't relay to myself, and don't relay through the host I'm trying to connect to
if *relay == vpnIp || *relay == c.lightHouse.myVpnIp {
continue
}
relayHostInfo, err := c.mainHostMap.QueryVpnIp(*relay)
if err != nil || relayHostInfo.remote == nil {
hostinfo.logger(c.l).WithError(err).WithField("relay", relay.String()).Info("Establish tunnel to relay target.")
f.Handshake(*relay)
continue
}
// Check the relay HostInfo to see if we already established a relay through it
if existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp); ok {
switch existingRelay.State {
case Established:
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Send handshake via relay")
f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
case Requested:
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
// Re-send the CreateRelay request, in case the previous one was lost.
m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: existingRelay.LocalIndex,
RelayFromIp: uint32(c.lightHouse.myVpnIp),
RelayToIp: uint32(vpnIp),
}
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(c.l).
WithError(err).
Error("Failed to marshal Control message to create relay")
} else {
f.SendMessageToVpnIp(header.Control, 0, *relay, msg, make([]byte, 12), make([]byte, mtu))
}
default:
hostinfo.logger(c.l).
WithField("vpnIp", vpnIp).
WithField("state", existingRelay.State).
WithField("relayVpnIp", relayHostInfo.vpnIp).
Errorf("Relay unexpected state")
}
} else {
// No relays exist or requested yet.
if relayHostInfo.remote != nil {
idx, err := AddRelay(c.l, relayHostInfo, c.mainHostMap, vpnIp, nil, TerminalType, Requested)
if err != nil {
hostinfo.logger(c.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
}
m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: idx,
RelayFromIp: uint32(c.lightHouse.myVpnIp),
RelayToIp: uint32(vpnIp),
}
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(c.l).
WithError(err).
Error("Failed to marshal Control message to create relay")
} else {
f.SendMessageToVpnIp(header.Control, 0, *relay, msg, make([]byte, 12), make([]byte, mtu))
}
}
}
}
}
// Increment the counter to increase our delay, linear backoff
hostinfo.HandshakeCounter++
// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
if !lighthouseTriggered {
//TODO: feel like we dupe handshake real fast in a tight loop, why?
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
} }
} }
func (c *HandshakeManager) NextInboundHandshakeTimerTick(now time.Time) { func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp, init func(*HostInfo)) *HostInfo {
c.InboundHandshakeTimer.advance(now) hostinfo, created := c.pendingHostMap.AddVpnIp(vpnIp, init)
for {
ep := c.InboundHandshakeTimer.Purge()
if ep == nil {
break
}
index := ep.(uint32)
vpnIP, err := c.pendingHostMap.GetVpnIPByIndex(index) if created {
if err != nil { c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval)
continue c.metricInitiated.Inc(1)
}
c.pendingHostMap.DeleteIndex(index)
c.pendingHostMap.DeleteVpnIP(vpnIP)
} }
}
func (c *HandshakeManager) AddVpnIP(vpnIP uint32) *HostInfo {
hostinfo := c.pendingHostMap.AddVpnIP(vpnIP)
// We lock here and use an array to insert items to prevent locking the
// main receive thread for very long by waiting to add items to the pending map
c.OutboundHandshakeTimer.Add(vpnIP, HandshakeTryInterval)
return hostinfo return hostinfo
} }
func (c *HandshakeManager) DeleteVpnIP(vpnIP uint32) { var (
//l.Debugln("Deleting pending vpn ip :", IntIp(vpnIP)) ErrExistingHostInfo = errors.New("existing hostinfo")
c.pendingHostMap.DeleteVpnIP(vpnIP) ErrAlreadySeen = errors.New("already seen")
} ErrLocalIndexCollision = errors.New("local index collision")
ErrExistingHandshake = errors.New("existing handshake")
)
func (c *HandshakeManager) AddIndex(index uint32, ci *ConnectionState) (*HostInfo, error) { // CheckAndComplete checks for any conflicts in the main and pending hostmap
hostinfo, err := c.pendingHostMap.AddIndex(index, ci) // before adding hostinfo to main. If err is nil, it was added. Otherwise err will be:
if err != nil { //
return nil, fmt.Errorf("Issue adding index: %d", index) // ErrAlreadySeen if we already have an entry in the hostmap that has seen the
// exact same handshake packet
//
// ErrExistingHostInfo if we already have an entry in the hostmap for this
// VpnIp and the new handshake was older than the one we currently have
//
// ErrLocalIndexCollision if we already have an entry in the main or pending
// hostmap for the hostinfo.localIndexId.
func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, overwrite bool, f *Interface) (*HostInfo, error) {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.Lock()
defer c.mainHostMap.Unlock()
// Check if we already have a tunnel with this vpn ip
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
if found && existingHostInfo != nil {
// Is it just a delayed handshake packet?
if bytes.Equal(hostinfo.HandshakePacket[handshakePacket], existingHostInfo.HandshakePacket[handshakePacket]) {
return existingHostInfo, ErrAlreadySeen
}
// Is this a newer handshake?
if existingHostInfo.lastHandshakeTime >= hostinfo.lastHandshakeTime {
return existingHostInfo, ErrExistingHostInfo
}
existingHostInfo.logger(c.l).Info("Taking new handshake")
} }
//c.mainHostMap.AddIndexHostInfo(index, hostinfo)
c.InboundHandshakeTimer.Add(index, time.Second*10) existingIndex, found := c.mainHostMap.Indexes[hostinfo.localIndexId]
return hostinfo, nil if found {
// We have a collision, but for a different hostinfo
return existingIndex, ErrLocalIndexCollision
}
existingIndex, found = c.pendingHostMap.Indexes[hostinfo.localIndexId]
if found && existingIndex != hostinfo {
// We have a collision, but for a different hostinfo
return existingIndex, ErrLocalIndexCollision
}
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
if found && existingRemoteIndex != nil && existingRemoteIndex.vpnIp != hostinfo.vpnIp {
// We have a collision, but this can happen since we can't control
// the remote ID. Just log about the situation as a note.
hostinfo.logger(c.l).
WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
Info("New host shadows existing host remoteIndex")
}
// Check if we are also handshaking with this vpn ip
pendingHostInfo, found := c.pendingHostMap.Hosts[hostinfo.vpnIp]
if found && pendingHostInfo != nil {
if !overwrite {
// We won, let our pending handshake win
return pendingHostInfo, ErrExistingHandshake
}
// We lost, take this handshake and move any cached packets over so they get sent
pendingHostInfo.ConnectionState.queueLock.Lock()
hostinfo.packetStore = append(hostinfo.packetStore, pendingHostInfo.packetStore...)
c.pendingHostMap.unlockedDeleteHostInfo(pendingHostInfo)
pendingHostInfo.ConnectionState.queueLock.Unlock()
pendingHostInfo.logger(c.l).Info("Handshake race lost, replacing pending handshake with completed tunnel")
}
if existingHostInfo != nil {
// We are going to overwrite this entry, so remove the old references
delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() {
delete(c.mainHostMap.Relays, relayIdx)
}
}
c.mainHostMap.addHostInfo(hostinfo, f)
return existingHostInfo, nil
} }
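CheckAndComplete's contract above maps each conflict onto a sentinel error. A minimal caller sketch (illustrative only, not code from this change set) showing how those errors might be branched on:

func handleComplete(hsManager *HandshakeManager, hostinfo *HostInfo, f *Interface) {
	existing, err := hsManager.CheckAndComplete(hostinfo, 0, false, f)
	switch err {
	case nil:
		// hostinfo is now in the main hostmap and the tunnel can carry traffic
	case ErrAlreadySeen:
		// the exact handshake packet was already processed; keep using the existing entry
		_ = existing
	case ErrExistingHostInfo, ErrExistingHandshake:
		// an equal-or-newer tunnel, or a pending handshake we prefer, wins
	case ErrLocalIndexCollision:
		// our locally generated index clashed with an existing entry; the handshake must be retried
	}
}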
func (c *HandshakeManager) AddIndexHostInfo(index uint32, h *HostInfo) {
c.pendingHostMap.AddIndexHostInfo(index, h)
// Complete is a simpler version of CheckAndComplete when we already know we
// won't have a localIndexId collision because we already have an entry in the
// pendingHostMap
func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.Lock()
defer c.mainHostMap.Unlock()
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
if found && existingHostInfo != nil {
// We are going to overwrite this entry, so remove the old references
delete(c.mainHostMap.Hosts, existingHostInfo.vpnIp)
delete(c.mainHostMap.Indexes, existingHostInfo.localIndexId)
delete(c.mainHostMap.RemoteIndexes, existingHostInfo.remoteIndexId)
for _, relayIdx := range existingHostInfo.relayState.CopyRelayForIdxs() {
delete(c.mainHostMap.Relays, relayIdx)
}
}
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
if found && existingRemoteIndex != nil {
// We have a collision, but this can happen since we can't control
// the remote ID. Just log about the situation as a note.
hostinfo.logger(c.l).
WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
Info("New host shadows existing host remoteIndex")
}
c.mainHostMap.addHostInfo(hostinfo, f)
c.pendingHostMap.unlockedDeleteHostInfo(hostinfo)
} }
func (c *HandshakeManager) DeleteIndex(index uint32) {
//l.Debugln("Deleting pending index :", index)
c.pendingHostMap.DeleteIndex(index)
// AddIndexHostInfo generates a unique localIndexId for this HostInfo
// and adds it to the pendingHostMap. Will error if we are unable to generate
// a unique localIndexId
func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.RLock()
defer c.mainHostMap.RUnlock()
for i := 0; i < 32; i++ {
index, err := generateIndex(c.l)
if err != nil {
return err
}
_, inPending := c.pendingHostMap.Indexes[index]
_, inMain := c.mainHostMap.Indexes[index]
if !inMain && !inPending {
h.localIndexId = index
c.pendingHostMap.Indexes[index] = h
return nil
}
}
return errors.New("failed to generate unique localIndexId")
}
func (c *HandshakeManager) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
c.pendingHostMap.addRemoteIndexHostInfo(index, h)
}
func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) {
//l.Debugln("Deleting pending hostinfo :", hostinfo)
c.pendingHostMap.DeleteHostInfo(hostinfo)
} }
func (c *HandshakeManager) QueryIndex(index uint32) (*HostInfo, error) { func (c *HandshakeManager) QueryIndex(index uint32) (*HostInfo, error) {
@@ -183,18 +449,28 @@ func (c *HandshakeManager) EmitStats() {
// Utility functions below

func generateIndex() (uint32, error) {
b := make([]byte, 4)
_, err := rand.Read(b)
if err != nil {
l.Errorln(err)
return 0, err
}
index := binary.BigEndian.Uint32(b)
if l.Level >= logrus.DebugLevel {
l.WithField("index", index).
Debug("Generated index")
}
return index, nil
}

func generateIndex(l *logrus.Logger) (uint32, error) {
b := make([]byte, 4)

// Let zero mean we don't know the ID, so don't generate zero
var index uint32
for index == 0 {
_, err := rand.Read(b)
if err != nil {
l.Errorln(err)
return 0, err
}

index = binary.BigEndian.Uint32(b)
}

if l.Level >= logrus.DebugLevel {
l.WithField("index", index).
Debug("Generated index")
}
return index, nil
}
func hsTimeout(tries int, interval time.Duration) time.Duration {
return time.Duration(tries / 2 * ((2 * int(interval)) + (tries-1)*int(interval)))
}
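hsTimeout is the closed form of an arithmetic series: the i-th retry waits i*interval, so the total pending lifetime is tries*(tries+1)/2 * interval. A standalone check of that arithmetic, assuming (hypothetically) 10 retries at a 100ms interval for the DefaultHandshakeRetries/DefaultHandshakeTryInterval values referenced in the tests below:

package main

import (
	"fmt"
	"time"
)

// Mirrors hsTimeout above: n/2 * (2*interval + (n-1)*interval) == n*(n+1)/2 * interval
func hsTimeout(tries int, interval time.Duration) time.Duration {
	return time.Duration(tries / 2 * ((2 * int(interval)) + (tries-1)*int(interval)))
}

func main() {
	// 10 retries at 100ms: 10*11/2 * 100ms = 5.5s of pending handshake lifetime
	fmt.Println(hsTimeout(10, 100*time.Millisecond)) // prints 5.5s
}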


@@ -5,190 +5,136 @@ import (
"testing" "testing"
"time" "time"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
var indexes []uint32 = []uint32{1000, 2000, 3000, 4000} func Test_NewHandshakeManagerVpnIp(t *testing.T) {
l := test.NewLogger()
//var ips []uint32 = []uint32{9000, 9999999, 3, 292394923}
var ips []uint32
func Test_NewHandshakeManagerIndex(t *testing.T) {
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24") _, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24") _, localrange, _ := net.ParseCIDR("10.1.1.1/24")
ips = []uint32{ip2int(net.ParseIP("172.1.1.2"))} ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
preferredRanges := []*net.IPNet{localrange}
mainHM := NewHostMap("test", vpncidr, preferredRanges)
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{})
now := time.Now()
blah.NextInboundHandshakeTimerTick(now)
// Add four indexes
for _, v := range indexes {
blah.AddIndex(v, &ConnectionState{})
}
// Confirm they are in the pending index list
for _, v := range indexes {
assert.Contains(t, blah.pendingHostMap.Indexes, uint32(v))
}
// Adding something to pending should not affect the main hostmap
assert.Len(t, mainHM.Indexes, 0)
// Jump ahead 8 seconds
for i := 1; i <= HandshakeRetries; i++ {
next_tick := now.Add(HandshakeTryInterval * time.Duration(i))
blah.NextInboundHandshakeTimerTick(next_tick)
}
// Confirm they are still in the pending index list
for _, v := range indexes {
assert.Contains(t, blah.pendingHostMap.Indexes, uint32(v))
}
// Jump ahead 4 more seconds
next_tick := now.Add(12 * time.Second)
blah.NextInboundHandshakeTimerTick(next_tick)
// Confirm they have been removed
for _, v := range indexes {
assert.NotContains(t, blah.pendingHostMap.Indexes, uint32(v))
}
}
func Test_NewHandshakeManagerVpnIP(t *testing.T) {
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
ips = []uint32{ip2int(net.ParseIP("172.1.1.2"))}
preferredRanges := []*net.IPNet{localrange} preferredRanges := []*net.IPNet{localrange}
mw := &mockEncWriter{} mw := &mockEncWriter{}
mainHM := NewHostMap("test", vpncidr, preferredRanges) mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
lh := &LightHouse{
atomicStaticList: make(map[iputil.VpnIp]struct{}),
atomicLighthouses: make(map[iputil.VpnIp]struct{}),
addrMap: make(map[iputil.VpnIp]*RemoteList),
}
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{}) blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
now := time.Now() now := time.Now()
blah.NextOutboundHandshakeTimerTick(now, mw) blah.NextOutboundHandshakeTimerTick(now, mw)
// Add four "IPs" - which are just uint32s var initCalled bool
for _, v := range ips { initFunc := func(*HostInfo) {
blah.AddVpnIP(v) initCalled = true
} }
i := blah.AddVpnIp(ip, initFunc)
assert.True(t, initCalled)
initCalled = false
i2 := blah.AddVpnIp(ip, initFunc)
assert.False(t, initCalled)
assert.Same(t, i, i2)
i.remotes = NewRemoteList()
i.HandshakeReady = true
// Adding something to pending should not affect the main hostmap // Adding something to pending should not affect the main hostmap
assert.Len(t, mainHM.Hosts, 0) assert.Len(t, mainHM.Hosts, 0)
// Confirm they are in the pending index list
for _, v := range ips {
assert.Contains(t, blah.pendingHostMap.Hosts, uint32(v))
}
// Jump ahead `HandshakeRetries` ticks // Confirm they are in the pending index list
cumulative := time.Duration(0) assert.Contains(t, blah.pendingHostMap.Hosts, ip)
for i := 0; i <= HandshakeRetries+1; i++ {
cumulative += time.Duration(i)*HandshakeTryInterval + 1 // Jump ahead `HandshakeRetries` ticks, offset by one to get the sleep logic right
next_tick := now.Add(cumulative) for i := 1; i <= DefaultHandshakeRetries+1; i++ {
//l.Infoln(next_tick) now = now.Add(time.Duration(i) * DefaultHandshakeTryInterval)
blah.NextOutboundHandshakeTimerTick(next_tick, mw) blah.NextOutboundHandshakeTimerTick(now, mw)
} }
// Confirm they are still in the pending index list // Confirm they are still in the pending index list
for _, v := range ips { assert.Contains(t, blah.pendingHostMap.Hosts, ip)
assert.Contains(t, blah.pendingHostMap.Hosts, uint32(v))
} // Tick 1 more time, a minute will certainly flush it out
// Jump ahead 1 more second blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute), mw)
cumulative += time.Duration(HandshakeRetries+1) * HandshakeTryInterval
next_tick := now.Add(cumulative)
//l.Infoln(next_tick)
blah.NextOutboundHandshakeTimerTick(next_tick, mw)
// Confirm they have been removed // Confirm they have been removed
for _, v := range ips { assert.NotContains(t, blah.pendingHostMap.Hosts, ip)
assert.NotContains(t, blah.pendingHostMap.Hosts, uint32(v))
}
} }
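The assertions above depend on AddVpnIp invoking its init callback only when a new entry is created, never on a later lookup. A generic, self-contained sketch of that create-or-get pattern (illustrative types only, not the HandshakeManager implementation):

package main

import "fmt"

type entry struct{ created bool }

type table struct{ m map[string]*entry }

// addOrGet returns the existing entry for key, or creates one and runs init exactly once.
func (t *table) addOrGet(key string, init func(*entry)) *entry {
	if e, ok := t.m[key]; ok {
		return e // existing entry: init is not called again
	}
	e := &entry{}
	if init != nil {
		init(e)
	}
	t.m[key] = e
	return e
}

func main() {
	t := &table{m: map[string]*entry{}}
	calls := 0
	init := func(e *entry) { calls++; e.created = true }

	a := t.addOrGet("172.1.1.2", init)
	b := t.addOrGet("172.1.1.2", init)
	fmt.Println(a == b, calls) // true 1
}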
func Test_NewHandshakeManagerVpnIPcleanup(t *testing.T) { func Test_NewHandshakeManagerTrigger(t *testing.T) {
l := test.NewLogger()
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24") _, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24") _, localrange, _ := net.ParseCIDR("10.1.1.1/24")
vpnIP = ip2int(net.ParseIP("172.1.1.2")) ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
preferredRanges := []*net.IPNet{localrange} preferredRanges := []*net.IPNet{localrange}
mw := &mockEncWriter{} mw := &mockEncWriter{}
mainHM := NewHostMap("test", vpncidr, preferredRanges) mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
lh := &LightHouse{
addrMap: make(map[iputil.VpnIp]*RemoteList),
l: l,
atomicStaticList: make(map[iputil.VpnIp]struct{}),
atomicLighthouses: make(map[iputil.VpnIp]struct{}),
}
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{}) blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
now := time.Now() now := time.Now()
blah.NextOutboundHandshakeTimerTick(now, mw) blah.NextOutboundHandshakeTimerTick(now, mw)
hostinfo := blah.AddVpnIP(vpnIP) assert.Equal(t, 0, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
// Pretned we have an index too
blah.AddIndexHostInfo(12341234, hostinfo)
assert.Contains(t, blah.pendingHostMap.Indexes, uint32(12341234))
// Jump ahead `HandshakeRetries` ticks. Eviction should happen in pending hi := blah.AddVpnIp(ip, nil)
// but not main hostmap hi.HandshakeReady = true
cumulative := time.Duration(0) assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
for i := 1; i <= HandshakeRetries+2; i++ { assert.Equal(t, 0, hi.HandshakeCounter, "Should not have attempted a handshake yet")
cumulative += HandshakeTryInterval * time.Duration(i)
next_tick := now.Add(cumulative)
blah.NextOutboundHandshakeTimerTick(next_tick, mw)
}
/*
for i := 0; i <= HandshakeRetries+1; i++ {
next_tick := now.Add(cumulative)
//l.Infoln(next_tick)
blah.NextOutboundHandshakeTimerTick(next_tick)
}
*/
/*
for i := 0; i <= HandshakeRetries+1; i++ {
next_tick := now.Add(time.Duration(i) * time.Second)
blah.NextOutboundHandshakeTimerTick(next_tick)
}
*/
/* // Trigger the same method the channel will but, this should set our remotes pointer
cumulative += HandshakeTryInterval*time.Duration(HandshakeRetries) + 3 blah.handleOutbound(ip, mw, true)
next_tick := now.Add(cumulative) assert.Equal(t, 1, hi.HandshakeCounter, "Trigger should have done a handshake attempt")
l.Infoln(cumulative, next_tick) assert.NotNil(t, hi.remotes, "Manager should have set my remotes pointer")
blah.NextOutboundHandshakeTimerTick(next_tick)
*/ // Make sure the trigger doesn't double schedule the timer entry
assert.NotContains(t, blah.pendingHostMap.Hosts, uint32(vpnIP)) assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
assert.NotContains(t, blah.pendingHostMap.Indexes, uint32(12341234))
uaddr := udp.NewAddrFromString("10.1.1.1:4242")
hi.remotes.unlockedPrependV4(ip, NewIp4AndPort(uaddr.IP, uint32(uaddr.Port)))
// We now have remotes but only the first trigger should have pushed things forward
blah.handleOutbound(ip, mw, true)
assert.Equal(t, 1, hi.HandshakeCounter, "Trigger should have not done a handshake attempt")
assert.Equal(t, 1, testCountTimerWheelEntries(blah.OutboundHandshakeTimer))
} }
func Test_NewHandshakeManagerIndexcleanup(t *testing.T) { func testCountTimerWheelEntries(tw *SystemTimerWheel) (c int) {
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24") for _, i := range tw.wheel {
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") n := i.Head
_, localrange, _ := net.ParseCIDR("10.1.1.1/24") for n != nil {
preferredRanges := []*net.IPNet{localrange} c++
mainHM := NewHostMap("test", vpncidr, preferredRanges) n = n.Next
}
blah := NewHandshakeManager(tuncidr, preferredRanges, mainHM, &LightHouse{}, &udpConn{})
now := time.Now()
blah.NextInboundHandshakeTimerTick(now)
hostinfo, _ := blah.AddIndex(12341234, &ConnectionState{})
// Pretned we have an index too
blah.pendingHostMap.AddVpnIPHostInfo(101010, hostinfo)
assert.Contains(t, blah.pendingHostMap.Hosts, uint32(101010))
for i := 1; i <= HandshakeRetries+2; i++ {
next_tick := now.Add(HandshakeTryInterval * time.Duration(i))
blah.NextInboundHandshakeTimerTick(next_tick)
} }
return c
next_tick := now.Add(HandshakeTryInterval*HandshakeRetries + 3)
blah.NextInboundHandshakeTimerTick(next_tick)
assert.NotContains(t, blah.pendingHostMap.Hosts, uint32(101010))
assert.NotContains(t, blah.pendingHostMap.Indexes, uint32(12341234))
} }
type mockEncWriter struct { type mockEncWriter struct {
} }
func (mw *mockEncWriter) SendMessageToVpnIp(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte) { func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
return return
} }
func (mw *mockEncWriter) SendMessageToAll(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte) { func (mw *mockEncWriter) SendVia(via interface{}, relay interface{}, ad, nb, out []byte, nocopy bool) {
return return
} }
func (mw *mockEncWriter) Handshake(vpnIP iputil.VpnIp) {}


@@ -1,4 +1,4 @@
package nebula package header
import ( import (
"encoding/binary" "encoding/binary"
@@ -19,82 +19,89 @@ import (
// |-----------------------------------------------------------------------| // |-----------------------------------------------------------------------|
// | payload... | // | payload... |
const ( type m map[string]interface{}
Version uint8 = 1
HeaderLen = 16
)
type NebulaMessageType uint8
type NebulaMessageSubType uint8
const ( const (
handshake NebulaMessageType = 0 Version uint8 = 1
message NebulaMessageType = 1 Len = 16
recvError NebulaMessageType = 2
lightHouse NebulaMessageType = 3
test NebulaMessageType = 4
closeTunnel NebulaMessageType = 5
//TODO These are deprecated as of 06/12/2018 - NB
testRemote NebulaMessageType = 6
testRemoteReply NebulaMessageType = 7
) )
var typeMap = map[NebulaMessageType]string{ type MessageType uint8
handshake: "handshake", type MessageSubType uint8
message: "message",
recvError: "recvError",
lightHouse: "lightHouse",
test: "test",
closeTunnel: "closeTunnel",
//TODO These are deprecated as of 06/12/2018 - NB const (
testRemote: "testRemote", Handshake MessageType = 0
testRemoteReply: "testRemoteReply", Message MessageType = 1
RecvError MessageType = 2
LightHouse MessageType = 3
Test MessageType = 4
CloseTunnel MessageType = 5
Control MessageType = 6
)
var typeMap = map[MessageType]string{
Handshake: "handshake",
Message: "message",
RecvError: "recvError",
LightHouse: "lightHouse",
Test: "test",
CloseTunnel: "closeTunnel",
Control: "control",
} }
const ( const (
testRequest NebulaMessageSubType = 0 MessageNone MessageSubType = 0
testReply NebulaMessageSubType = 1 MessageRelay MessageSubType = 1
) )
var eHeaderTooShort = errors.New("header is too short") const (
TestRequest MessageSubType = 0
TestReply MessageSubType = 1
)
var subTypeTestMap = map[NebulaMessageSubType]string{ const (
testRequest: "testRequest", HandshakeIXPSK0 MessageSubType = 0
testReply: "testReply", HandshakeXXPSK0 MessageSubType = 1
)
var ErrHeaderTooShort = errors.New("header is too short")
var subTypeTestMap = map[MessageSubType]string{
TestRequest: "testRequest",
TestReply: "testReply",
} }
var subTypeNoneMap = map[NebulaMessageSubType]string{0: "none"} var subTypeNoneMap = map[MessageSubType]string{0: "none"}
var subTypeMap = map[NebulaMessageType]*map[NebulaMessageSubType]string{ var subTypeMap = map[MessageType]*map[MessageSubType]string{
message: &subTypeNoneMap, Message: {
recvError: &subTypeNoneMap, MessageNone: "none",
lightHouse: &subTypeNoneMap, MessageRelay: "relay",
test: &subTypeTestMap,
closeTunnel: &subTypeNoneMap,
handshake: {
handshakeIXPSK0: "ix_psk0",
}, },
//TODO: these are deprecated RecvError: &subTypeNoneMap,
testRemote: &subTypeNoneMap, LightHouse: &subTypeNoneMap,
testRemoteReply: &subTypeNoneMap, Test: &subTypeTestMap,
CloseTunnel: &subTypeNoneMap,
Handshake: {
HandshakeIXPSK0: "ix_psk0",
},
Control: &subTypeNoneMap,
} }
type Header struct { type H struct {
Version uint8 Version uint8
Type NebulaMessageType Type MessageType
Subtype NebulaMessageSubType Subtype MessageSubType
Reserved uint16 Reserved uint16
RemoteIndex uint32 RemoteIndex uint32
MessageCounter uint64 MessageCounter uint64
} }
// HeaderEncode uses the provided byte array to encode the provided header values into. // Encode uses the provided byte array to encode the provided header values into.
// Byte array must be capped higher than HeaderLen or this will panic // Byte array must be capped higher than HeaderLen or this will panic
func HeaderEncode(b []byte, v uint8, t uint8, st uint8, ri uint32, c uint64) []byte { func Encode(b []byte, v uint8, t MessageType, st MessageSubType, ri uint32, c uint64) []byte {
b = b[:HeaderLen] b = b[:Len]
b[0] = byte(v<<4 | (t & 0x0f)) b[0] = v<<4 | byte(t&0x0f)
b[1] = byte(st) b[1] = byte(st)
binary.BigEndian.PutUint16(b[2:4], 0) binary.BigEndian.PutUint16(b[2:4], 0)
binary.BigEndian.PutUint32(b[4:8], ri) binary.BigEndian.PutUint32(b[4:8], ri)
@@ -103,7 +110,7 @@ func HeaderEncode(b []byte, v uint8, t uint8, st uint8, ri uint32, c uint64) []b
} }
// String creates a readable string representation of a header // String creates a readable string representation of a header
func (h *Header) String() string { func (h *H) String() string {
if h == nil { if h == nil {
return "<nil>" return "<nil>"
} }
@@ -112,7 +119,7 @@ func (h *Header) String() string {
} }
// MarshalJSON creates a json string representation of a header // MarshalJSON creates a json string representation of a header
func (h *Header) MarshalJSON() ([]byte, error) { func (h *H) MarshalJSON() ([]byte, error) {
return json.Marshal(m{ return json.Marshal(m{
"version": h.Version, "version": h.Version,
"type": h.TypeName(), "type": h.TypeName(),
@@ -124,24 +131,24 @@ func (h *Header) MarshalJSON() ([]byte, error) {
} }
// Encode turns header into bytes // Encode turns header into bytes
func (h *Header) Encode(b []byte) ([]byte, error) { func (h *H) Encode(b []byte) ([]byte, error) {
if h == nil { if h == nil {
return nil, errors.New("nil header") return nil, errors.New("nil header")
} }
return HeaderEncode(b, h.Version, uint8(h.Type), uint8(h.Subtype), h.RemoteIndex, h.MessageCounter), nil return Encode(b, h.Version, h.Type, h.Subtype, h.RemoteIndex, h.MessageCounter), nil
} }
// Parse is a helper function to parses given bytes into new Header struct // Parse is a helper function to parses given bytes into new Header struct
func (h *Header) Parse(b []byte) error { func (h *H) Parse(b []byte) error {
if len(b) < HeaderLen { if len(b) < Len {
return eHeaderTooShort return ErrHeaderTooShort
} }
// get upper 4 bytes // get upper 4 bytes
h.Version = uint8((b[0] >> 4) & 0x0f) h.Version = uint8((b[0] >> 4) & 0x0f)
// get lower 4 bytes // get lower 4 bytes
h.Type = NebulaMessageType(b[0] & 0x0f) h.Type = MessageType(b[0] & 0x0f)
h.Subtype = NebulaMessageSubType(b[1]) h.Subtype = MessageSubType(b[1])
h.Reserved = binary.BigEndian.Uint16(b[2:4]) h.Reserved = binary.BigEndian.Uint16(b[2:4])
h.RemoteIndex = binary.BigEndian.Uint32(b[4:8]) h.RemoteIndex = binary.BigEndian.Uint32(b[4:8])
h.MessageCounter = binary.BigEndian.Uint64(b[8:16]) h.MessageCounter = binary.BigEndian.Uint64(b[8:16])
@@ -149,12 +156,12 @@ func (h *Header) Parse(b []byte) error {
} }
// TypeName will transform the headers message type into a human string // TypeName will transform the headers message type into a human string
func (h *Header) TypeName() string { func (h *H) TypeName() string {
return TypeName(h.Type) return TypeName(h.Type)
} }
// TypeName will transform a nebula message type into a human string // TypeName will transform a nebula message type into a human string
func TypeName(t NebulaMessageType) string { func TypeName(t MessageType) string {
if n, ok := typeMap[t]; ok { if n, ok := typeMap[t]; ok {
return n return n
} }
@@ -163,12 +170,12 @@ func TypeName(t NebulaMessageType) string {
} }
// SubTypeName will transform the headers message sub type into a human string // SubTypeName will transform the headers message sub type into a human string
func (h *Header) SubTypeName() string { func (h *H) SubTypeName() string {
return SubTypeName(h.Type, h.Subtype) return SubTypeName(h.Type, h.Subtype)
} }
// SubTypeName will transform a nebula message sub type into a human string // SubTypeName will transform a nebula message sub type into a human string
func SubTypeName(t NebulaMessageType, s NebulaMessageSubType) string { func SubTypeName(t MessageType, s MessageSubType) string {
if n, ok := subTypeMap[t]; ok { if n, ok := subTypeMap[t]; ok {
if x, ok := (*n)[s]; ok { if x, ok := (*n)[s]; ok {
return x return x
@@ -179,8 +186,8 @@ func SubTypeName(t NebulaMessageType, s NebulaMessageSubType) string {
} }
// NewHeader turns bytes into a header // NewHeader turns bytes into a header
func NewHeader(b []byte) (*Header, error) { func NewHeader(b []byte) (*H, error) {
h := new(Header) h := new(H)
if err := h.Parse(b); err != nil { if err := h.Parse(b); err != nil {
return nil, err return nil, err
} }
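A small encode/parse round trip against the relocated header package as it appears in this diff; the import path is assumed from the other files in this change set:

package main

import (
	"fmt"

	"github.com/slackhq/nebula/header"
)

func main() {
	b := make([]byte, header.Len)

	// Pack version 1, a test message with subtype testRequest, remote index 10 and
	// message counter 9 into the 16-byte wire header.
	out := header.Encode(b, header.Version, header.Test, header.TestRequest, 10, 9)

	h := &header.H{}
	if err := h.Parse(out); err != nil {
		panic(err)
	}
	// e.g. ver=1 type=test subtype=testRequest reserved=0x0 remoteindex=10 messagecounter=9
	fmt.Println(h.String())
}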

header/header_test.go Normal file

@@ -0,0 +1,120 @@
package header
import (
"reflect"
"testing"
"github.com/stretchr/testify/assert"
)
type headerTest struct {
expectedBytes []byte
*H
}
// 0001 0010 00010010
var headerBigEndianTests = []headerTest{{
expectedBytes: []byte{0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9},
// 1010 0000
H: &H{
// 1111 1+2+4+8 = 15
Version: 5,
Type: 4,
Subtype: 0,
Reserved: 0,
RemoteIndex: 10,
MessageCounter: 9,
},
},
}
func TestEncode(t *testing.T) {
for _, tt := range headerBigEndianTests {
b, err := tt.Encode(make([]byte, Len))
if err != nil {
t.Fatal(err)
}
assert.Equal(t, tt.expectedBytes, b)
}
}
func TestParse(t *testing.T) {
for _, tt := range headerBigEndianTests {
b := tt.expectedBytes
parsedHeader := &H{}
parsedHeader.Parse(b)
if !reflect.DeepEqual(tt.H, parsedHeader) {
t.Fatalf("got %#v; want %#v", parsedHeader, tt.H)
}
}
}
func TestTypeName(t *testing.T) {
assert.Equal(t, "test", TypeName(Test))
assert.Equal(t, "test", (&H{Type: Test}).TypeName())
assert.Equal(t, "unknown", TypeName(99))
assert.Equal(t, "unknown", (&H{Type: 99}).TypeName())
}
func TestSubTypeName(t *testing.T) {
assert.Equal(t, "testRequest", SubTypeName(Test, TestRequest))
assert.Equal(t, "testRequest", (&H{Type: Test, Subtype: TestRequest}).SubTypeName())
assert.Equal(t, "unknown", SubTypeName(99, TestRequest))
assert.Equal(t, "unknown", (&H{Type: 99, Subtype: TestRequest}).SubTypeName())
assert.Equal(t, "unknown", SubTypeName(Test, 99))
assert.Equal(t, "unknown", (&H{Type: Test, Subtype: 99}).SubTypeName())
assert.Equal(t, "none", SubTypeName(Message, 0))
assert.Equal(t, "none", (&H{Type: Message, Subtype: 0}).SubTypeName())
}
func TestTypeMap(t *testing.T) {
// Force people to document this stuff
assert.Equal(t, map[MessageType]string{
Handshake: "handshake",
Message: "message",
RecvError: "recvError",
LightHouse: "lightHouse",
Test: "test",
CloseTunnel: "closeTunnel",
Control: "control",
}, typeMap)
assert.Equal(t, map[MessageType]*map[MessageSubType]string{
Message: {
MessageNone: "none",
MessageRelay: "relay",
},
RecvError: &subTypeNoneMap,
LightHouse: &subTypeNoneMap,
Test: &subTypeTestMap,
CloseTunnel: &subTypeNoneMap,
Handshake: {
HandshakeIXPSK0: "ix_psk0",
},
Control: &subTypeNoneMap,
}, subTypeMap)
}
func TestHeader_String(t *testing.T) {
assert.Equal(
t,
"ver=100 type=test subtype=testRequest reserved=0x63 remoteindex=98 messagecounter=97",
(&H{100, Test, TestRequest, 99, 98, 97}).String(),
)
}
func TestHeader_MarshalJSON(t *testing.T) {
b, err := (&H{100, Test, TestRequest, 99, 98, 97}).MarshalJSON()
assert.Nil(t, err)
assert.Equal(
t,
"{\"messageCounter\":97,\"remoteIndex\":98,\"reserved\":99,\"subType\":\"testRequest\",\"type\":\"test\",\"version\":100}",
string(b),
)
}


@@ -1,118 +0,0 @@
package nebula
import (
"github.com/stretchr/testify/assert"
"reflect"
"testing"
)
type headerTest struct {
expectedBytes []byte
*Header
}
// 0001 0010 00010010
var headerBigEndianTests = []headerTest{{
expectedBytes: []byte{0x54, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9},
// 1010 0000
Header: &Header{
// 1111 1+2+4+8 = 15
Version: 5,
Type: 4,
Subtype: 0,
Reserved: 0,
RemoteIndex: 10,
MessageCounter: 9,
},
},
}
func TestEncode(t *testing.T) {
for _, tt := range headerBigEndianTests {
b, err := tt.Encode(make([]byte, HeaderLen))
if err != nil {
t.Fatal(err)
}
assert.Equal(t, tt.expectedBytes, b)
}
}
func TestParse(t *testing.T) {
for _, tt := range headerBigEndianTests {
b := tt.expectedBytes
parsedHeader := &Header{}
parsedHeader.Parse(b)
if !reflect.DeepEqual(tt.Header, parsedHeader) {
t.Fatalf("got %#v; want %#v", parsedHeader, tt.Header)
}
}
}
func TestTypeName(t *testing.T) {
assert.Equal(t, "test", TypeName(test))
assert.Equal(t, "test", (&Header{Type: test}).TypeName())
assert.Equal(t, "unknown", TypeName(99))
assert.Equal(t, "unknown", (&Header{Type: 99}).TypeName())
}
func TestSubTypeName(t *testing.T) {
assert.Equal(t, "testRequest", SubTypeName(test, testRequest))
assert.Equal(t, "testRequest", (&Header{Type: test, Subtype: testRequest}).SubTypeName())
assert.Equal(t, "unknown", SubTypeName(99, testRequest))
assert.Equal(t, "unknown", (&Header{Type: 99, Subtype: testRequest}).SubTypeName())
assert.Equal(t, "unknown", SubTypeName(test, 99))
assert.Equal(t, "unknown", (&Header{Type: test, Subtype: 99}).SubTypeName())
assert.Equal(t, "none", SubTypeName(message, 0))
assert.Equal(t, "none", (&Header{Type: message, Subtype: 0}).SubTypeName())
}
func TestTypeMap(t *testing.T) {
// Force people to document this stuff
assert.Equal(t, map[NebulaMessageType]string{
handshake: "handshake",
message: "message",
recvError: "recvError",
lightHouse: "lightHouse",
test: "test",
closeTunnel: "closeTunnel",
testRemote: "testRemote",
testRemoteReply: "testRemoteReply",
}, typeMap)
assert.Equal(t, map[NebulaMessageType]*map[NebulaMessageSubType]string{
message: &subTypeNoneMap,
recvError: &subTypeNoneMap,
lightHouse: &subTypeNoneMap,
test: &subTypeTestMap,
closeTunnel: &subTypeNoneMap,
handshake: {
handshakeIXPSK0: "ix_psk0",
},
testRemote: &subTypeNoneMap,
testRemoteReply: &subTypeNoneMap,
}, subTypeMap)
}
func TestHeader_String(t *testing.T) {
assert.Equal(
t,
"ver=100 type=test subtype=testRequest reserved=0x63 remoteindex=98 messagecounter=97",
(&Header{100, test, testRequest, 99, 98, 97}).String(),
)
}
func TestHeader_MarshalJSON(t *testing.T) {
b, err := (&Header{100, test, testRequest, 99, 98, 97}).MarshalJSON()
assert.Nil(t, err)
assert.Equal(
t,
"{\"messageCounter\":97,\"remoteIndex\":98,\"reserved\":99,\"subType\":\"testRequest\",\"type\":\"test\",\"version\":100}",
string(b),
)
}

hostmap.go

File diff suppressed because it is too large

@@ -1,166 +1 @@
package nebula package nebula
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
)
/*
func TestHostInfoDestProbe(t *testing.T) {
a, _ := net.ResolveUDPAddr("udp", "1.0.0.1:22222")
d := NewHostInfoDest(a)
// 999 probes that all return should give a 100% success rate
for i := 0; i < 999; i++ {
meh := d.Probe()
d.ProbeReceived(meh)
}
assert.Equal(t, d.Grade(), float64(1))
// 999 probes of which only half return should give a 50% success rate
for i := 0; i < 999; i++ {
meh := d.Probe()
if i%2 == 0 {
d.ProbeReceived(meh)
}
}
assert.Equal(t, d.Grade(), float64(.5))
// 999 probes of which none return should give a 0% success rate
for i := 0; i < 999; i++ {
d.Probe()
}
assert.Equal(t, d.Grade(), float64(0))
// 999 probes of which only 1/4 return should give a 25% success rate
for i := 0; i < 999; i++ {
meh := d.Probe()
if i%4 == 0 {
d.ProbeReceived(meh)
}
}
assert.Equal(t, d.Grade(), float64(.25))
// 999 probes of which only half return and are duplicates should give a 50% success rate
for i := 0; i < 999; i++ {
meh := d.Probe()
if i%2 == 0 {
d.ProbeReceived(meh)
d.ProbeReceived(meh)
}
}
assert.Equal(t, d.Grade(), float64(.5))
// 999 probes of which only way old replies return should give a 0% success rate
for i := 0; i < 999; i++ {
meh := d.Probe()
d.ProbeReceived(meh - 101)
}
assert.Equal(t, d.Grade(), float64(0))
}
*/
func TestHostmap(t *testing.T) {
_, myNet, _ := net.ParseCIDR("10.128.0.0/16")
_, localToMe, _ := net.ParseCIDR("192.168.1.0/24")
myNets := []*net.IPNet{myNet}
preferredRanges := []*net.IPNet{localToMe}
m := NewHostMap("test", myNet, preferredRanges)
a := NewUDPAddrFromString("10.127.0.3:11111")
b := NewUDPAddrFromString("1.0.0.1:22222")
y := NewUDPAddrFromString("10.128.0.3:11111")
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), a)
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), b)
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), y)
info, _ := m.QueryVpnIP(ip2int(net.ParseIP("10.128.1.1")))
// There should be three remotes in the host map
assert.Equal(t, 3, len(info.Remotes))
// Adding an identical remote should not change the count
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), y)
assert.Equal(t, 3, len(info.Remotes))
// Adding a fresh remote should add one
y = NewUDPAddrFromString("10.18.0.3:11111")
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), y)
assert.Equal(t, 4, len(info.Remotes))
// Query and reference remote should get the first one (and not nil)
info, _ = m.QueryVpnIP(ip2int(net.ParseIP("10.128.1.1")))
assert.NotNil(t, info.remote)
// Promotion should ensure that the best remote is chosen (y)
info.ForcePromoteBest(myNets)
assert.True(t, myNet.Contains(udp2ip(info.remote)))
}
func TestHostmapdebug(t *testing.T) {
_, myNet, _ := net.ParseCIDR("10.128.0.0/16")
_, localToMe, _ := net.ParseCIDR("192.168.1.0/24")
preferredRanges := []*net.IPNet{localToMe}
m := NewHostMap("test", myNet, preferredRanges)
a := NewUDPAddrFromString("10.127.0.3:11111")
b := NewUDPAddrFromString("1.0.0.1:22222")
y := NewUDPAddrFromString("10.128.0.3:11111")
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), a)
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), b)
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), y)
//t.Errorf("%s", m.DebugRemotes(1))
}
func TestHostMap_rotateRemote(t *testing.T) {
h := HostInfo{}
// 0 remotes, no panic
h.rotateRemote()
assert.Nil(t, h.remote)
// 1 remote, no panic
h.AddRemote(*NewUDPAddr(ip2int(net.IP{1, 1, 1, 1}), 0))
h.rotateRemote()
assert.Equal(t, udp2ipInt(h.remote), ip2int(net.IP{1, 1, 1, 1}))
h.AddRemote(*NewUDPAddr(ip2int(net.IP{1, 1, 1, 2}), 0))
h.AddRemote(*NewUDPAddr(ip2int(net.IP{1, 1, 1, 3}), 0))
h.AddRemote(*NewUDPAddr(ip2int(net.IP{1, 1, 1, 4}), 0))
// Rotate through those 3
h.rotateRemote()
assert.Equal(t, udp2ipInt(h.remote), ip2int(net.IP{1, 1, 1, 2}))
h.rotateRemote()
assert.Equal(t, udp2ipInt(h.remote), ip2int(net.IP{1, 1, 1, 3}))
h.rotateRemote()
assert.Equal(t, udp2ipInt(h.remote), ip2int(net.IP{1, 1, 1, 4}))
// Finally, we should start over
h.rotateRemote()
assert.Equal(t, udp2ipInt(h.remote), ip2int(net.IP{1, 1, 1, 1}))
}
func BenchmarkHostmappromote2(b *testing.B) {
for n := 0; n < b.N; n++ {
_, myNet, _ := net.ParseCIDR("10.128.0.0/16")
_, localToMe, _ := net.ParseCIDR("192.168.1.0/24")
preferredRanges := []*net.IPNet{localToMe}
m := NewHostMap("test", myNet, preferredRanges)
y := NewUDPAddrFromString("10.128.0.3:11111")
a := NewUDPAddrFromString("10.127.0.3:11111")
g := NewUDPAddrFromString("1.0.0.1:22222")
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), a)
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), g)
m.AddRemote(ip2int(net.ParseIP("10.128.1.1")), y)
}
b.Errorf("hi")
}

inside.go

@@ -5,12 +5,18 @@ import (
"github.com/flynn/noise" "github.com/flynn/noise"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
) )
func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *FirewallPacket, nb, out []byte) { func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache firewall.ConntrackCache) {
err := newPacket(packet, false, fwPacket) err := newPacket(packet, false, fwPacket)
if err != nil { if err != nil {
l.WithField("packet", packet).Debugf("Error while validating outbound packet: %s", err) if f.l.Level >= logrus.DebugLevel {
f.l.WithField("packet", packet).Debugf("Error while validating outbound packet: %s", err)
}
return return
} }
@@ -19,12 +25,35 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *FirewallPacket,
return return
} }
if fwPacket.RemoteIP == f.myVpnIp {
// Immediately forward packets from self to self.
// This should only happen on Darwin-based hosts, which routes packets from
// the Nebula IP to the Nebula IP through the Nebula TUN device.
if immediatelyForwardToSelf {
_, err := f.readers[q].Write(packet)
if err != nil {
f.l.WithError(err).Error("Failed to forward to tun")
}
}
// Otherwise, drop. On linux, we should never see these packets - Linux
// routes packets from the nebula IP to the nebula IP through the loopback device.
return
}
// Ignore broadcast packets // Ignore broadcast packets
if f.dropMulticast && isMulticast(fwPacket.RemoteIP) { if f.dropMulticast && isMulticast(fwPacket.RemoteIP) {
return return
} }
hostinfo := f.getOrHandshake(fwPacket.RemoteIP) hostinfo := f.getOrHandshake(fwPacket.RemoteIP)
if hostinfo == nil {
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("vpnIp", fwPacket.RemoteIP).
WithField("fwPacket", fwPacket).
Debugln("dropping outbound packet, vpnIp not in our CIDR or in unsafe routes")
}
return
}
ci := hostinfo.ConnectionState ci := hostinfo.ConnectionState
if ci.ready == false { if ci.ready == false {
@@ -32,53 +61,60 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *FirewallPacket,
// the packet queue. // the packet queue.
ci.queueLock.Lock() ci.queueLock.Lock()
if !ci.ready { if !ci.ready {
hostinfo.cachePacket(message, 0, packet, f.sendMessageNow) hostinfo.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
ci.queueLock.Unlock() ci.queueLock.Unlock()
return return
} }
ci.queueLock.Unlock() ci.queueLock.Unlock()
} }
if !f.firewall.Drop(packet, *fwPacket, false, hostinfo, trustedCAs) { dropReason := f.firewall.Drop(packet, *fwPacket, false, hostinfo, f.caPool, localCache)
f.send(message, 0, ci, hostinfo, hostinfo.remote, packet, nb, out) if dropReason == nil {
if f.lightHouse != nil && *ci.messageCounter%5000 == 0 { f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, packet, nb, out, q)
f.lightHouse.Query(fwPacket.RemoteIP, f)
}
} else if l.Level >= logrus.DebugLevel { } else if f.l.Level >= logrus.DebugLevel {
l.WithField("vpnIp", IntIp(hostinfo.hostId)).WithField("fwPacket", fwPacket). hostinfo.logger(f.l).
WithField("fwPacket", fwPacket).
WithField("reason", dropReason).
Debugln("dropping outbound packet") Debugln("dropping outbound packet")
} }
} }
func (f *Interface) getOrHandshake(vpnIp uint32) *HostInfo { func (f *Interface) Handshake(vpnIp iputil.VpnIp) {
if f.hostMap.vpnCIDR.Contains(int2ip(vpnIp)) == false { f.getOrHandshake(vpnIp)
vpnIp = f.hostMap.queryUnsafeRoute(vpnIp) }
// getOrHandshake returns nil if the vpnIp is not routable
func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp) *HostInfo {
if !ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, vpnIp) {
vpnIp = f.inside.RouteFor(vpnIp)
if vpnIp == 0 {
return nil
}
} }
hostinfo, err := f.hostMap.PromoteBestQueryVpnIP(vpnIp, f) hostinfo, err := f.hostMap.PromoteBestQueryVpnIp(vpnIp, f)
//if err != nil || hostinfo.ConnectionState == nil { //if err != nil || hostinfo.ConnectionState == nil {
if err != nil { if err != nil {
hostinfo, err = f.handshakeManager.pendingHostMap.QueryVpnIP(vpnIp) hostinfo, err = f.handshakeManager.pendingHostMap.QueryVpnIp(vpnIp)
if err != nil { if err != nil {
hostinfo = f.handshakeManager.AddVpnIP(vpnIp) hostinfo = f.handshakeManager.AddVpnIp(vpnIp, f.initHostInfo)
} }
} }
ci := hostinfo.ConnectionState ci := hostinfo.ConnectionState
if ci != nil && ci.eKey != nil && ci.ready { if ci != nil && ci.eKey != nil && ci.ready {
return hostinfo return hostinfo
} }
if ci == nil { // Handshake is not ready, we need to grab the lock now before we start the handshake process
// if we don't have a connection state, then send a handshake initiation hostinfo.Lock()
ci = f.newConnectionState(true, noise.HandshakeIX, []byte{}, 0) defer hostinfo.Unlock()
// FIXME: Maybe make XX selectable, but probably not since psk makes it nearly pointless for us.
//ci = f.newConnectionState(true, noise.HandshakeXX, []byte{}, 0) // Double check, now that we have the lock
hostinfo.ConnectionState = ci ci = hostinfo.ConnectionState
} else if ci.eKey == nil { if ci != nil && ci.eKey != nil && ci.ready {
// if we don't have any state at all, create it return hostinfo
} }
// If we have already created the handshake packet, we don't want to call the function at all. // If we have already created the handshake packet, we don't want to call the function at all.
@@ -86,41 +122,65 @@ func (f *Interface) getOrHandshake(vpnIp uint32) *HostInfo {
ixHandshakeStage0(f, vpnIp, hostinfo) ixHandshakeStage0(f, vpnIp, hostinfo)
// FIXME: Maybe make XX selectable, but probably not since psk makes it nearly pointless for us. // FIXME: Maybe make XX selectable, but probably not since psk makes it nearly pointless for us.
//xx_handshakeStage0(f, ip, hostinfo) //xx_handshakeStage0(f, ip, hostinfo)
// If this is a static host, we don't need to wait for the HostQueryReply
// We can trigger the handshake right now
if _, ok := f.lightHouse.GetStaticHostList()[vpnIp]; ok {
select {
case f.handshakeManager.trigger <- vpnIp:
default:
}
}
} }
return hostinfo return hostinfo
} }
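getOrHandshake above does a cheap readiness check, takes the hostinfo lock, and then re-checks before starting a handshake. A generic sketch of that double-checked pattern (illustration with hypothetical types, not the repo's code):

package main

import "sync"

type tunnel struct {
	mu    sync.Mutex
	ready bool
}

// ensureStarted runs start at most once, mirroring the check / lock / re-check flow above.
func (t *tunnel) ensureStarted(start func()) {
	if t.ready { // fast path, no lock (same optimistic check as above)
		return
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.ready { // re-check: another goroutine may have won the race
		return
	}
	start()
	t.ready = true
}

func main() {
	t := &tunnel{}
	t.ensureStarted(func() { /* begin handshake once */ })
	t.ensureStarted(func() { panic("never reached") })
}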
func (f *Interface) sendMessageNow(t NebulaMessageType, st NebulaMessageSubType, hostInfo *HostInfo, p, nb, out []byte) {
fp := &FirewallPacket{}
// initHostInfo is the init function to pass to (*HandshakeManager).AddVpnIP that
// will create the initial Noise ConnectionState
func (f *Interface) initHostInfo(hostinfo *HostInfo) {
hostinfo.ConnectionState = f.newConnectionState(f.l, true, noise.HandshakeIX, []byte{}, 0)
}
func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubType, hostInfo *HostInfo, p, nb, out []byte) {
fp := &firewall.Packet{}
err := newPacket(p, false, fp) err := newPacket(p, false, fp)
if err != nil { if err != nil {
l.Warnf("error while parsing outgoing packet for firewall check; %v", err) f.l.Warnf("error while parsing outgoing packet for firewall check; %v", err)
return return
} }
// check if packet is in outbound fw rules // check if packet is in outbound fw rules
if f.firewall.Drop(p, *fp, false, hostInfo, trustedCAs) { dropReason := f.firewall.Drop(p, *fp, false, hostInfo, f.caPool, nil)
l.WithField("fwPacket", fp).Debugln("dropping cached packet") if dropReason != nil {
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("fwPacket", fp).
WithField("reason", dropReason).
Debugln("dropping cached packet")
}
return return
} }
f.send(message, st, hostInfo.ConnectionState, hostInfo, hostInfo.remote, p, nb, out) f.sendNoMetrics(header.Message, st, hostInfo.ConnectionState, hostInfo, nil, p, nb, out, 0)
if f.lightHouse != nil && *hostInfo.ConnectionState.messageCounter%5000 == 0 {
f.lightHouse.Query(fp.RemoteIP, f)
}
} }
// SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp // SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp
func (f *Interface) SendMessageToVpnIp(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte) { func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
hostInfo := f.getOrHandshake(vpnIp) hostInfo := f.getOrHandshake(vpnIp)
if hostInfo == nil {
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("vpnIp", vpnIp).
Debugln("dropping SendMessageToVpnIp, vpnIp not in our CIDR or in unsafe routes")
}
return
}
if !hostInfo.ConnectionState.ready { if !hostInfo.ConnectionState.ready {
// Because we might be sending stored packets, lock here to stop new things going to // Because we might be sending stored packets, lock here to stop new things going to
// the packet queue. // the packet queue.
hostInfo.ConnectionState.queueLock.Lock() hostInfo.ConnectionState.queueLock.Lock()
if !hostInfo.ConnectionState.ready { if !hostInfo.ConnectionState.ready {
hostInfo.cachePacket(t, st, p, f.sendMessageToVpnIp) hostInfo.cachePacket(f.l, t, st, p, f.sendMessageToVpnIp, f.cachedPacketMetrics)
hostInfo.ConnectionState.queueLock.Unlock() hostInfo.ConnectionState.queueLock.Unlock()
return return
} }
@@ -131,70 +191,162 @@ func (f *Interface) SendMessageToVpnIp(t NebulaMessageType, st NebulaMessageSubT
return return
} }
func (f *Interface) sendMessageToVpnIp(t NebulaMessageType, st NebulaMessageSubType, hostInfo *HostInfo, p, nb, out []byte) { func (f *Interface) sendMessageToVpnIp(t header.MessageType, st header.MessageSubType, hostInfo *HostInfo, p, nb, out []byte) {
f.send(t, st, hostInfo.ConnectionState, hostInfo, hostInfo.remote, p, nb, out) f.send(t, st, hostInfo.ConnectionState, hostInfo, p, nb, out)
} }
// SendMessageToAll handles real ip:port lookup and sends to all known addresses for vpnIp func (f *Interface) send(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, p, nb, out []byte) {
func (f *Interface) SendMessageToAll(t NebulaMessageType, st NebulaMessageSubType, vpnIp uint32, p, nb, out []byte) { f.messageMetrics.Tx(t, st, 1)
hostInfo := f.getOrHandshake(vpnIp) f.sendNoMetrics(t, st, ci, hostinfo, nil, p, nb, out, 0)
}
if hostInfo.ConnectionState.ready == false { func (f *Interface) sendTo(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte) {
// Because we might be sending stored packets, lock here to stop new things going to f.messageMetrics.Tx(t, st, 1)
// the packet queue. f.sendNoMetrics(t, st, ci, hostinfo, remote, p, nb, out, 0)
hostInfo.ConnectionState.queueLock.Lock() }
if !hostInfo.ConnectionState.ready {
hostInfo.cachePacket(t, st, p, f.sendMessageToAll) // sendVia sends a payload through a Relay tunnel. No authentication or encryption is done
hostInfo.ConnectionState.queueLock.Unlock() // to the payload for the ultimate target host, making this a useful method for sending
return // handshake messages to peers through relay tunnels.
} // via is the HostInfo through which the message is relayed.
hostInfo.ConnectionState.queueLock.Unlock() // ad is the plaintext data to authenticate, but not encrypt
// nb is a buffer used to store the nonce value, re-used for performance reasons.
// out is a buffer used to store the result of the Encrypt operation
// q indicates which writer to use to send the packet.
func (f *Interface) SendVia(viaIfc interface{},
relayIfc interface{},
ad,
nb,
out []byte,
nocopy bool,
) {
via := viaIfc.(*HostInfo)
relay := relayIfc.(*Relay)
c := atomic.AddUint64(&via.ConnectionState.atomicMessageCounter, 1)
out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
f.connectionManager.Out(via.vpnIp)
// Authenticate the header and payload, but do not encrypt for this message type.
// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
if len(out)+len(ad)+via.ConnectionState.eKey.Overhead() > cap(out) {
via.logger(f.l).
WithField("outCap", cap(out)).
WithField("payloadLen", len(ad)).
WithField("headerLen", len(out)).
WithField("cipherOverhead", via.ConnectionState.eKey.Overhead()).
Error("SendVia out buffer not large enough for relay")
return
} }
f.sendMessageToAll(t, st, hostInfo, p, nb, out) // The header bytes are written to the 'out' slice; Grow the slice to hold the header and associated data payload.
return offset := len(out)
} out = out[:offset+len(ad)]
func (f *Interface) sendMessageToAll(t NebulaMessageType, st NebulaMessageSubType, hostInfo *HostInfo, p, nb, b []byte) { // In one call path, the associated data _is_ already stored in out. In other call paths, the associated data must
for _, r := range hostInfo.RemoteUDPAddrs() { // be copied into 'out'.
f.send(t, st, hostInfo.ConnectionState, hostInfo, r, p, nb, b) if !nocopy {
copy(out[offset:], ad)
}
var err error
out, err = via.ConnectionState.eKey.EncryptDanger(out, out, nil, c, nb)
if err != nil {
via.logger(f.l).WithError(err).Info("Failed to EncryptDanger in sendVia")
return
}
err = f.writers[0].WriteTo(out, via.remote)
if err != nil {
via.logger(f.l).WithError(err).Info("Failed to WriteTo in sendVia")
} }
} }
func (f *Interface) send(t NebulaMessageType, st NebulaMessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udpAddr, p, nb, out []byte) { func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote *udp.Addr, p, nb, out []byte, q int) {
if ci.eKey == nil { if ci.eKey == nil {
//TODO: log warning //TODO: log warning
return return
} }
useRelay := remote == nil && hostinfo.remote == nil
fullOut := out
if useRelay {
if len(out) < header.Len {
// out always has a capacity of mtu, but not always a length greater than the header.Len.
// Grow it to make sure the next operation works.
out = out[:header.Len]
}
// Save a header's worth of data at the front of the 'out' buffer.
out = out[header.Len:]
}
var err error
//TODO: enable if we do more than 1 tun queue //TODO: enable if we do more than 1 tun queue
//ci.writeLock.Lock() //ci.writeLock.Lock()
c := atomic.AddUint64(ci.messageCounter, 1) c := atomic.AddUint64(&ci.atomicMessageCounter, 1)
//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p) //l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
out = HeaderEncode(out, Version, uint8(t), uint8(st), hostinfo.remoteIndexId, c) out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
f.connectionManager.Out(hostinfo.hostId) f.connectionManager.Out(hostinfo.vpnIp)
// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
// all our IPs and enable a faster roaming.
if t != header.CloseTunnel && hostinfo.lastRebindCount != f.rebindCount {
//NOTE: there is an update hole if a tunnel isn't used and exactly 256 rebinds occur before the tunnel is
// finally used again. This tunnel would eventually be torn down and recreated if this action didn't help.
f.lightHouse.QueryServer(hostinfo.vpnIp, f)
hostinfo.lastRebindCount = f.rebindCount
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("vpnIp", hostinfo.vpnIp).Debug("Lighthouse update triggered for punch due to rebind counter")
}
}
var err error
out, err = ci.eKey.EncryptDanger(out, out, p, c, nb) out, err = ci.eKey.EncryptDanger(out, out, p, c, nb)
//TODO: see above note on lock //TODO: see above note on lock
//ci.writeLock.Unlock() //ci.writeLock.Unlock()
if err != nil { if err != nil {
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)). hostinfo.logger(f.l).WithError(err).
WithField("udpAddr", remote).WithField("counter", c). WithField("udpAddr", remote).WithField("counter", c).
WithField("attemptedCounter", ci.messageCounter). WithField("attemptedCounter", c).
Error("Failed to encrypt outgoing packet") Error("Failed to encrypt outgoing packet")
return return
} }
err = f.outside.WriteTo(out, remote) if remote != nil {
if err != nil { err = f.writers[q].WriteTo(out, remote)
l.WithError(err).WithField("vpnIp", IntIp(hostinfo.hostId)). if err != nil {
WithField("udpAddr", remote).Error("Failed to write outgoing packet") hostinfo.logger(f.l).WithError(err).
WithField("udpAddr", remote).Error("Failed to write outgoing packet")
}
} else if hostinfo.remote != nil {
err = f.writers[q].WriteTo(out, hostinfo.remote)
if err != nil {
hostinfo.logger(f.l).WithError(err).
WithField("udpAddr", remote).Error("Failed to write outgoing packet")
}
} else {
// Try to send via a relay
for _, relayIP := range hostinfo.relayState.CopyRelayIps() {
relayHostInfo, err := f.hostMap.QueryVpnIp(relayIP)
if err != nil {
hostinfo.logger(f.l).WithField("relayIp", relayIP).WithError(err).Info("sendNoMetrics failed to find HostInfo")
continue
}
relay, ok := relayHostInfo.relayState.QueryRelayForByIp(hostinfo.vpnIp)
if !ok {
hostinfo.logger(f.l).
WithField("relayIp", relayHostInfo.vpnIp).
WithField("relayTarget", hostinfo.vpnIp).
Info("sendNoMetrics relay missing object for target")
continue
}
f.SendVia(relayHostInfo, relay, out, nb, fullOut[:header.Len+len(out)], true)
break
}
} }
return
} }
func isMulticast(ip uint32) bool { func isMulticast(ip iputil.VpnIp) bool {
// Class D multicast // Class D multicast
if (((ip >> 24) & 0xff) & 0xf0) == 0xe0 { if (((ip >> 24) & 0xff) & 0xf0) == 0xe0 {
return true return true

inside_darwin.go Normal file

@@ -0,0 +1,3 @@
package nebula
const immediatelyForwardToSelf bool = true

inside_generic.go Normal file

@@ -0,0 +1,6 @@
//go:build !darwin
// +build !darwin
package nebula
const immediatelyForwardToSelf bool = false


@@ -1,19 +1,32 @@
package nebula package nebula
import ( import (
"context"
"errors" "errors"
"fmt"
"io"
"net"
"os" "os"
"runtime"
"sync/atomic"
"time" "time"
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/overlay"
"github.com/slackhq/nebula/udp"
) )
const mtu = 9001 const mtu = 9001
type InterfaceConfig struct { type InterfaceConfig struct {
HostMap *HostMap HostMap *HostMap
Outside *udpConn Outside *udp.Conn
Inside *Tun Inside overlay.Device
certState *CertState certState *CertState
Cipher string Cipher string
Firewall *Firewall Firewall *Firewall
@@ -24,13 +37,21 @@ type InterfaceConfig struct {
pendingDeletionInterval int pendingDeletionInterval int
DropLocalBroadcast bool DropLocalBroadcast bool
DropMulticast bool DropMulticast bool
UDPBatchSize int routines int
MessageMetrics *MessageMetrics
version string
caPool *cert.NebulaCAPool
disconnectInvalid bool
relayManager *relayManager
ConntrackCacheTimeout time.Duration
l *logrus.Logger
} }
type Interface struct { type Interface struct {
hostMap *HostMap hostMap *HostMap
outside *udpConn outside *udp.Conn
inside *Tun inside overlay.Device
certState *CertState certState *CertState
cipher string cipher string
firewall *Firewall firewall *Firewall
@@ -39,18 +60,69 @@ type Interface struct {
serveDns bool serveDns bool
createTime time.Time createTime time.Time
lightHouse *LightHouse lightHouse *LightHouse
localBroadcast uint32 localBroadcast iputil.VpnIp
myVpnIp iputil.VpnIp
dropLocalBroadcast bool dropLocalBroadcast bool
dropMulticast bool dropMulticast bool
udpBatchSize int routines int
version string caPool *cert.NebulaCAPool
disconnectInvalid bool
closed int32
relayManager *relayManager
metricRxRecvError metrics.Counter sendRecvErrorConfig sendRecvErrorConfig
metricTxRecvError metrics.Counter
metricHandshakes metrics.Histogram // rebindCount is used to decide if an active tunnel should trigger a punch notification through a lighthouse
rebindCount int8
version string
conntrackCacheTimeout time.Duration
writers []*udp.Conn
readers []io.ReadWriteCloser
metricHandshakes metrics.Histogram
messageMetrics *MessageMetrics
cachedPacketMetrics *cachedPacketMetrics
l *logrus.Logger
} }
func NewInterface(c *InterfaceConfig) (*Interface, error) { type sendRecvErrorConfig uint8
const (
sendRecvErrorAlways sendRecvErrorConfig = iota
sendRecvErrorNever
sendRecvErrorPrivate
)
func (s sendRecvErrorConfig) ShouldSendRecvError(ip net.IP) bool {
switch s {
case sendRecvErrorPrivate:
return ip.IsPrivate()
case sendRecvErrorAlways:
return true
case sendRecvErrorNever:
return false
default:
panic(fmt.Errorf("invalid sendRecvErrorConfig value: %d", s))
}
}
func (s sendRecvErrorConfig) String() string {
switch s {
case sendRecvErrorAlways:
return "always"
case sendRecvErrorNever:
return "never"
case sendRecvErrorPrivate:
return "private"
default:
return fmt.Sprintf("invalid(%d)", s)
}
}
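The three accepted values correspond to the strings returned by String() above. A hypothetical parsing helper (a sketch only; not necessarily how reloadSendRecvError below is implemented) could map a config string onto the enum:

func parseSendRecvError(v string) (sendRecvErrorConfig, error) {
	switch v {
	case "always":
		return sendRecvErrorAlways, nil
	case "never":
		return sendRecvErrorNever, nil
	case "private":
		return sendRecvErrorPrivate, nil
	default:
		return sendRecvErrorAlways, fmt.Errorf("unexpected send/recv error setting: %q", v)
	}
}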
func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
if c.Outside == nil { if c.Outside == nil {
return nil, errors.New("no outside connection") return nil, errors.New("no outside connection")
} }
@@ -64,6 +136,7 @@ func NewInterface(c *InterfaceConfig) (*Interface, error) {
return nil, errors.New("no firewall rules") return nil, errors.New("no firewall rules")
} }
myVpnIp := iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].IP)
ifce := &Interface{ ifce := &Interface{
hostMap: c.HostMap, hostMap: c.HostMap,
outside: c.Outside, outside: c.Outside,
@@ -75,107 +148,152 @@ func NewInterface(c *InterfaceConfig) (*Interface, error) {
handshakeManager: c.HandshakeManager, handshakeManager: c.HandshakeManager,
createTime: time.Now(), createTime: time.Now(),
lightHouse: c.lightHouse, lightHouse: c.lightHouse,
localBroadcast: ip2int(c.certState.certificate.Details.Ips[0].IP) | ^ip2int(c.certState.certificate.Details.Ips[0].Mask), localBroadcast: myVpnIp | ^iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].Mask),
dropLocalBroadcast: c.DropLocalBroadcast, dropLocalBroadcast: c.DropLocalBroadcast,
dropMulticast: c.DropMulticast, dropMulticast: c.DropMulticast,
udpBatchSize: c.UDPBatchSize, routines: c.routines,
version: c.version,
writers: make([]*udp.Conn, c.routines),
readers: make([]io.ReadWriteCloser, c.routines),
caPool: c.caPool,
disconnectInvalid: c.disconnectInvalid,
myVpnIp: myVpnIp,
relayManager: c.relayManager,
metricRxRecvError: metrics.GetOrRegisterCounter("messages.rx.recv_error", nil), conntrackCacheTimeout: c.ConntrackCacheTimeout,
metricTxRecvError: metrics.GetOrRegisterCounter("messages.tx.recv_error", nil),
metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)), metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)),
messageMetrics: c.MessageMetrics,
cachedPacketMetrics: &cachedPacketMetrics{
sent: metrics.GetOrRegisterCounter("hostinfo.cached_packets.sent", nil),
dropped: metrics.GetOrRegisterCounter("hostinfo.cached_packets.dropped", nil),
},
l: c.l,
} }
ifce.connectionManager = newConnectionManager(ifce, c.checkInterval, c.pendingDeletionInterval) ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval)
return ifce, nil return ifce, nil
} }
-func (f *Interface) Run(tunRoutines, udpRoutines int, buildVersion string) {
+// activate creates the interface on the host. After the interface is created, any
+// other services that want to bind listeners to its IP may do so successfully. However,
+// the interface isn't going to process anything until run() is called.
+func (f *Interface) activate() {
	// actually turn on tun dev
-	if err := f.inside.Activate(); err != nil {
-		l.Fatal(err)
+	addr, err := f.outside.LocalAddr()
	if err != nil {
		f.l.WithError(err).Error("Failed to get udp listen address")
	}

-	f.version = buildVersion
-	l.WithField("interface", f.inside.Device).WithField("network", f.inside.Cidr.String()).
-		WithField("build", buildVersion).
+	f.l.WithField("interface", f.inside.Name()).WithField("network", f.inside.Cidr().String()).
+		WithField("build", f.version).WithField("udpAddr", addr).
		Info("Nebula interface is active")

+	metrics.GetOrRegisterGauge("routines", nil).Update(int64(f.routines))
+
+	// Prepare n tun queues
+	var reader io.ReadWriteCloser = f.inside
+	for i := 0; i < f.routines; i++ {
+		if i > 0 {
+			reader, err = f.inside.NewMultiQueueReader()
+			if err != nil {
+				f.l.Fatal(err)
+			}
+		}
+		f.readers[i] = reader
+	}
+
+	if err := f.inside.Activate(); err != nil {
+		f.inside.Close()
+		f.l.Fatal(err)
+	}
+}
+
+func (f *Interface) run() {
	// Launch n queues to read packets from udp
-	for i := 0; i < udpRoutines; i++ {
+	for i := 0; i < f.routines; i++ {
		go f.listenOut(i)
	}

	// Launch n queues to read packets from tun dev
-	for i := 0; i < tunRoutines; i++ {
-		go f.listenIn(i)
+	for i := 0; i < f.routines; i++ {
+		go f.listenIn(f.readers[i], i)
	}
}
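
Note the split above: activate() only brings the tun device up and prepares the multiqueue readers, while run() is what actually starts consuming packets, so anything that needs to bind to the interface's IP can be started in between. The following is a small standalone sketch of that two-phase startup pattern; the names (iface, activate, run) are illustrative only and are not Nebula's actual wiring.

package main

import (
    "fmt"
    "net"
)

type iface struct {
    ln net.Listener // stands in for the tun device
}

// activate: the resource exists and is addressable, but nothing is processed yet.
func (i *iface) activate() error {
    ln, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        return err
    }
    i.ln = ln
    fmt.Println("interface is active on", ln.Addr())
    return nil
}

// run: only now do we start the read loop(s).
func (i *iface) run() {
    for {
        c, err := i.ln.Accept()
        if err != nil {
            return // listener closed, shut the loop down
        }
        c.Close()
    }
}

func main() {
    i := &iface{}
    if err := i.activate(); err != nil {
        panic(err)
    }
    // Between activate() and run(), other services can safely use the address.
    go i.run()
    i.ln.Close()
}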
func (f *Interface) listenOut(i int) {
-	//TODO: handle error
-	addr, err := f.outside.LocalAddr()
-	if err != nil {
-		l.WithError(err).Error("failed to discover udp listening address")
-	}
+	runtime.LockOSThread()

-	var li *udpConn
+	var li *udp.Conn
	// TODO clean this up with a coherent interface for each outside connection
	if i > 0 {
-		//TODO: handle error
-		li, err = NewListener(udp2ip(addr).String(), int(addr.Port), i > 0)
-		if err != nil {
-			l.WithError(err).Error("failed to make a new udp listener")
-		}
+		li = f.writers[i]
	} else {
		li = f.outside
	}

-	li.ListenOut(f)
+	lhh := f.lightHouse.NewRequestHandler()
+	conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
+	li.ListenOut(f.readOutsidePackets, lhh.HandleRequest, conntrackCache, i)
}

-func (f *Interface) listenIn(i int) {
+func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
+	runtime.LockOSThread()

	packet := make([]byte, mtu)
	out := make([]byte, mtu)
-	fwPacket := &FirewallPacket{}
+	fwPacket := &firewall.Packet{}
	nb := make([]byte, 12, 12)

+	conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)

	for {
-		n, err := f.inside.Read(packet)
+		n, err := reader.Read(packet)
		if err != nil {
-			l.WithError(err).Error("Error while reading outbound packet")
+			if errors.Is(err, os.ErrClosed) && atomic.LoadInt32(&f.closed) != 0 {
+				return
+			}
+			f.l.WithError(err).Error("Error while reading outbound packet")
			// This only seems to happen when something fatal happens to the fd, so exit.
			os.Exit(2)
		}

-		f.consumeInsidePacket(packet[:n], fwPacket, nb, out)
+		f.consumeInsidePacket(packet[:n], fwPacket, nb, out, i, conntrackCache.Get(f.l))
	}
}
-func (f *Interface) RegisterConfigChangeCallbacks(c *Config) {
+func (f *Interface) RegisterConfigChangeCallbacks(c *config.C) {
	c.RegisterReloadCallback(f.reloadCA)
	c.RegisterReloadCallback(f.reloadCertKey)
	c.RegisterReloadCallback(f.reloadFirewall)
-	c.RegisterReloadCallback(f.outside.reloadConfig)
+	c.RegisterReloadCallback(f.reloadSendRecvError)
+	for _, udpConn := range f.writers {
+		c.RegisterReloadCallback(udpConn.ReloadConfig)
+	}
}
-func (f *Interface) reloadCA(c *Config) {
+func (f *Interface) reloadCA(c *config.C) {
	// reload and check regardless
	// todo: need mutex?
-	newCAs, err := loadCAFromConfig(c)
+	newCAs, err := loadCAFromConfig(f.l, c)
	if err != nil {
-		l.WithError(err).Error("Could not refresh trusted CA certificates")
+		f.l.WithError(err).Error("Could not refresh trusted CA certificates")
		return
	}

-	trustedCAs = newCAs
-	l.WithField("fingerprints", trustedCAs.GetFingerprints()).Info("Trusted CA certificates refreshed")
+	f.caPool = newCAs
+	f.l.WithField("fingerprints", f.caPool.GetFingerprints()).Info("Trusted CA certificates refreshed")
}
-func (f *Interface) reloadCertKey(c *Config) {
+func (f *Interface) reloadCertKey(c *config.C) {
	// reload and check in all cases
	cs, err := NewCertStateFromConfig(c)
	if err != nil {
-		l.WithError(err).Error("Could not refresh client cert")
+		f.l.WithError(err).Error("Could not refresh client cert")
		return
	}
@@ -183,40 +301,98 @@ func (f *Interface) reloadCertKey(c *Config) {
	oldIPs := f.certState.certificate.Details.Ips
	newIPs := cs.certificate.Details.Ips
	if len(oldIPs) > 0 && len(newIPs) > 0 && oldIPs[0].String() != newIPs[0].String() {
-		l.WithField("new_ip", newIPs[0]).WithField("old_ip", oldIPs[0]).Error("IP in new cert was different from old")
+		f.l.WithField("new_ip", newIPs[0]).WithField("old_ip", oldIPs[0]).Error("IP in new cert was different from old")
		return
	}

	f.certState = cs
-	l.WithField("cert", cs.certificate).Info("Client cert refreshed from disk")
+	f.l.WithField("cert", cs.certificate).Info("Client cert refreshed from disk")
}
-func (f *Interface) reloadFirewall(c *Config) {
+func (f *Interface) reloadFirewall(c *config.C) {
	//TODO: need to trigger/detect if the certificate changed too
	if c.HasChanged("firewall") == false {
-		l.Debug("No firewall config change detected")
+		f.l.Debug("No firewall config change detected")
		return
	}

-	fw, err := NewFirewallFromConfig(f.certState.certificate, c)
+	fw, err := NewFirewallFromConfig(f.l, f.certState.certificate, c)
	if err != nil {
-		l.WithError(err).Error("Error while creating firewall during reload")
+		f.l.WithError(err).Error("Error while creating firewall during reload")
		return
	}

	oldFw := f.firewall
+	conntrack := oldFw.Conntrack
+	conntrack.Lock()
+	defer conntrack.Unlock()
+
+	fw.rulesVersion = oldFw.rulesVersion + 1
+	// If rulesVersion is back to zero, we have wrapped all the way around. Be
+	// safe and just reset conntrack in this case.
+	if fw.rulesVersion == 0 {
+		f.l.WithField("firewallHash", fw.GetRuleHash()).
+			WithField("oldFirewallHash", oldFw.GetRuleHash()).
+			WithField("rulesVersion", fw.rulesVersion).
+			Warn("firewall rulesVersion has overflowed, resetting conntrack")
+	} else {
+		fw.Conntrack = conntrack
+	}

	f.firewall = fw
	oldFw.Destroy()
-	l.WithField("firewallHash", fw.GetRuleHash()).
+	f.l.WithField("firewallHash", fw.GetRuleHash()).
		WithField("oldFirewallHash", oldFw.GetRuleHash()).
+		WithField("rulesVersion", fw.rulesVersion).
		Info("New firewall has been installed")
}
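
The reload path above keeps the existing conntrack table and stamps the rebuilt rule set with rulesVersion+1; the version stamp lets the firewall re-check established flows against the new rules instead of dropping the whole table, and conntrack is only reset when the counter wraps to zero. The following is a rough standalone sketch of that idea; the types (ruleSet, connEntry) are hypothetical and are not Nebula's actual Firewall or conntrack structures.

package main

import "fmt"

type connEntry struct{ rulesVersion uint16 }

type ruleSet struct {
    rulesVersion uint16
    allow        func(flow string) bool
}

// check re-validates a tracked flow the first time it is seen under a newer rule set.
func check(fw *ruleSet, conntrack map[string]*connEntry, flow string) bool {
    e, ok := conntrack[flow]
    if ok && e.rulesVersion == fw.rulesVersion {
        return true // already validated against the current rules
    }
    if !fw.allow(flow) {
        delete(conntrack, flow)
        return false
    }
    if !ok {
        e = &connEntry{}
        conntrack[flow] = e
    }
    e.rulesVersion = fw.rulesVersion
    return true
}

func main() {
    conntrack := map[string]*connEntry{}
    fw := &ruleSet{rulesVersion: 1, allow: func(string) bool { return true }}
    fmt.Println(check(fw, conntrack, "10.0.0.1:443")) // true, entry created

    // "Reload": carry the conntrack map over, bump the version, tighten the rules.
    fw = &ruleSet{rulesVersion: fw.rulesVersion + 1, allow: func(string) bool { return false }}
    fmt.Println(check(fw, conntrack, "10.0.0.1:443")) // false, re-checked under the new rules and evicted
}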
-func (f *Interface) emitStats(i time.Duration) {
-	ticker := time.NewTicker(i)
-	for range ticker.C {
-		f.firewall.EmitStats()
-		f.handshakeManager.EmitStats()
+func (f *Interface) reloadSendRecvError(c *config.C) {
+	if c.InitialLoad() || c.HasChanged("listen.send_recv_error") {
+		stringValue := c.GetString("listen.send_recv_error", "always")
+
+		switch stringValue {
+		case "always":
+			f.sendRecvErrorConfig = sendRecvErrorAlways
+		case "never":
+			f.sendRecvErrorConfig = sendRecvErrorNever
+		case "private":
+			f.sendRecvErrorConfig = sendRecvErrorPrivate
+		default:
+			if c.GetBool("listen.send_recv_error", true) {
+				f.sendRecvErrorConfig = sendRecvErrorAlways
+			} else {
+				f.sendRecvErrorConfig = sendRecvErrorNever
+			}
+		}
+
+		f.l.WithField("sendRecvError", f.sendRecvErrorConfig.String()).
+			Info("Loaded send_recv_error config")
	}
}
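
For reference, the three values parsed above map to: always (the historical behavior of answering unknown tunnels with recv_error), never (suppress the reply entirely), and private (presumably only reply to remotes on private networks, since the reply can otherwise be used to probe for a running Nebula instance). The sketch below shows how the parsed value could gate the decision; shouldSendRecvError is a hypothetical helper, not the function Nebula actually calls on its receive path, and the exact address classes Nebula treats as "private" are not shown in this diff.

package main

import (
    "fmt"
    "net"
)

type sendRecvErrorConfig uint8

const (
    sendRecvErrorAlways sendRecvErrorConfig = iota
    sendRecvErrorNever
    sendRecvErrorPrivate
)

// shouldSendRecvError decides whether to answer an unknown tunnel with recv_error.
func shouldSendRecvError(c sendRecvErrorConfig, remote net.IP) bool {
    switch c {
    case sendRecvErrorNever:
        return false
    case sendRecvErrorPrivate:
        // RFC 1918 / ULA check; Nebula's real definition of "private" may differ.
        return remote.IsPrivate()
    default: // sendRecvErrorAlways
        return true
    }
}

func main() {
    fmt.Println(shouldSendRecvError(sendRecvErrorPrivate, net.ParseIP("192.168.1.10"))) // true
    fmt.Println(shouldSendRecvError(sendRecvErrorPrivate, net.ParseIP("1.2.3.4")))      // false
    fmt.Println(shouldSendRecvError(sendRecvErrorNever, net.ParseIP("192.168.1.10")))   // false
}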
func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
ticker := time.NewTicker(i)
defer ticker.Stop()
udpStats := udp.NewUDPStatsEmitter(f.writers)
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
f.firewall.EmitStats()
f.handshakeManager.EmitStats()
udpStats()
}
}
}
func (f *Interface) Close() error {
atomic.StoreInt32(&f.closed, 1)
// Release the tun device
return f.inside.Close()
}
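
Close() pairs with the read loop in listenIn above: the atomic closed flag is set first, then the tun device is released, so the blocked Read returns os.ErrClosed and the goroutine can tell an intentional shutdown apart from a fatal fd error (which still calls os.Exit(2)). Below is a standalone sketch of the same pattern using an os.Pipe in place of the tun device; it is an illustration of the flag-then-close ordering, not Nebula's actual shutdown code.

package main

import (
    "errors"
    "fmt"
    "os"
    "sync/atomic"
)

func main() {
    r, w, _ := os.Pipe()
    var closed int32
    done := make(chan struct{})

    go func() {
        defer close(done)
        buf := make([]byte, 4)
        for {
            _, err := r.Read(buf)
            if err != nil {
                // Intentional shutdown: the flag was set before the fd was closed.
                if errors.Is(err, os.ErrClosed) && atomic.LoadInt32(&closed) != 0 {
                    fmt.Println("reader exiting cleanly")
                    return
                }
                fmt.Println("unexpected read error:", err)
                return
            }
        }
    }()

    // Mirror Interface.Close(): set the flag first, then release the device.
    atomic.StoreInt32(&closed, 1)
    r.Close()
    w.Close()
    <-done
}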

iputil/util.go (new file)

@@ -0,0 +1,93 @@
package iputil
import (
"encoding/binary"
"fmt"
"net"
"net/netip"
)
type VpnIp uint32
const maxIPv4StringLen = len("255.255.255.255")
func (ip VpnIp) String() string {
b := make([]byte, maxIPv4StringLen)
n := ubtoa(b, 0, byte(ip>>24))
b[n] = '.'
n++
n += ubtoa(b, n, byte(ip>>16&255))
b[n] = '.'
n++
n += ubtoa(b, n, byte(ip>>8&255))
b[n] = '.'
n++
n += ubtoa(b, n, byte(ip&255))
return string(b[:n])
}
func (ip VpnIp) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", ip.String())), nil
}
func (ip VpnIp) ToIP() net.IP {
nip := make(net.IP, 4)
binary.BigEndian.PutUint32(nip, uint32(ip))
return nip
}
func (ip VpnIp) ToNetIpAddr() netip.Addr {
var nip [4]byte
binary.BigEndian.PutUint32(nip[:], uint32(ip))
return netip.AddrFrom4(nip)
}
func Ip2VpnIp(ip []byte) VpnIp {
if len(ip) == 16 {
return VpnIp(binary.BigEndian.Uint32(ip[12:16]))
}
return VpnIp(binary.BigEndian.Uint32(ip))
}
func ToNetIpAddr(ip net.IP) (netip.Addr, error) {
addr, ok := netip.AddrFromSlice(ip)
if !ok {
return netip.Addr{}, fmt.Errorf("invalid net.IP: %v", ip)
}
return addr, nil
}
func ToNetIpPrefix(ipNet net.IPNet) (netip.Prefix, error) {
addr, err := ToNetIpAddr(ipNet.IP)
if err != nil {
return netip.Prefix{}, err
}
ones, bits := ipNet.Mask.Size()
if ones == 0 && bits == 0 {
return netip.Prefix{}, fmt.Errorf("invalid net.IP: %v", ipNet)
}
return netip.PrefixFrom(addr, ones), nil
}
// ubtoa encodes the string form of the integer v to dst[start:] and
// returns the number of bytes written to dst. The caller must ensure
// that dst has sufficient length.
func ubtoa(dst []byte, start int, v byte) int {
if v < 10 {
dst[start] = v + '0'
return 1
} else if v < 100 {
dst[start+1] = v%10 + '0'
dst[start] = v/10 + '0'
return 2
}
dst[start+2] = v%10 + '0'
dst[start+1] = (v/10)%10 + '0'
dst[start] = v/100 + '0'
return 3
}
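
A short usage sketch of the new iputil helpers defined above, assuming the import path github.com/slackhq/nebula/iputil used elsewhere in this diff. Note that net.ParseIP returns a 16-byte slice even for IPv4 addresses, which is why Ip2VpnIp takes the last four bytes in that case, and that VpnIp is a plain uint32 so masks compose with bitwise operators.

package main

import (
    "fmt"
    "net"

    "github.com/slackhq/nebula/iputil"
)

func main() {
    vpnIp := iputil.Ip2VpnIp(net.ParseIP("10.128.0.2"))

    fmt.Println(vpnIp.String())      // "10.128.0.2"
    fmt.Println(vpnIp.ToIP())        // net.IP 10.128.0.2
    fmt.Println(vpnIp.ToNetIpAddr()) // netip.Addr 10.128.0.2

    // Broadcast address of a /24, mirroring the localBroadcast computation in NewInterface.
    mask := iputil.Ip2VpnIp(net.IPMask{255, 255, 255, 0})
    fmt.Println(vpnIp | ^mask) // 10.128.0.255
}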

iputil/util_test.go (new file)

@@ -0,0 +1,17 @@
package iputil
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
)
func TestVpnIp_String(t *testing.T) {
assert.Equal(t, "255.255.255.255", Ip2VpnIp(net.ParseIP("255.255.255.255")).String())
assert.Equal(t, "1.255.255.255", Ip2VpnIp(net.ParseIP("1.255.255.255")).String())
assert.Equal(t, "1.1.255.255", Ip2VpnIp(net.ParseIP("1.1.255.255")).String())
assert.Equal(t, "1.1.1.255", Ip2VpnIp(net.ParseIP("1.1.1.255")).String())
assert.Equal(t, "1.1.1.1", Ip2VpnIp(net.ParseIP("1.1.1.1")).String())
assert.Equal(t, "0.0.0.0", Ip2VpnIp(net.ParseIP("0.0.0.0")).String())
}

File diff suppressed because it is too large.

@@ -1,16 +1,32 @@
package nebula

import (
+	"fmt"
	"net"
	"testing"

-	proto "github.com/golang/protobuf/proto"
+	"github.com/slackhq/nebula/config"
+	"github.com/slackhq/nebula/header"
+	"github.com/slackhq/nebula/iputil"
+	"github.com/slackhq/nebula/test"
+	"github.com/slackhq/nebula/udp"
	"github.com/stretchr/testify/assert"
)
//TODO: Add a test to ensure udpAddr is copied and not reused
func TestOldIPv4Only(t *testing.T) {
// This test ensures our new ipv6 enabled LH protobuf IpAndPorts works with the old style to enable backwards compatibility
b := []byte{8, 129, 130, 132, 80, 16, 10}
var m Ip4AndPort
err := m.Unmarshal(b)
assert.NoError(t, err)
assert.Equal(t, "10.1.1.1", iputil.VpnIp(m.GetIp()).String())
}
func TestNewLhQuery(t *testing.T) {
	myIp := net.ParseIP("192.1.1.1")
-	myIpint := ip2int(myIp)
+	myIpint := iputil.Ip2VpnIp(myIp)

	// Generating a new lh query should work
	a := NewLhQueryByInt(myIpint)
@@ -19,80 +35,395 @@ func TestNewLhQuery(t *testing.T) {
	assert.IsType(t, &NebulaMeta{}, a)

	// It should also Marshal fine
-	b, err := proto.Marshal(a)
+	b, err := a.Marshal()
	assert.Nil(t, err)

	// and then Unmarshal fine
	n := &NebulaMeta{}
-	err = proto.Unmarshal(b, n)
+	err = n.Unmarshal(b)
	assert.Nil(t, err)
}
func TestNewipandportfromudpaddr(t *testing.T) {
blah := NewUDPAddrFromString("1.2.2.3:12345")
meh := NewIpAndPortFromUDPAddr(*blah)
assert.Equal(t, uint32(16908803), meh.Ip)
assert.Equal(t, uint32(12345), meh.Port)
}
func TestNewipandportsfromudpaddrs(t *testing.T) {
blah := NewUDPAddrFromString("1.2.2.3:12345")
blah2 := NewUDPAddrFromString("9.9.9.9:47828")
group := []udpAddr{*blah, *blah2}
hah := NewIpAndPortsFromNetIps(group)
assert.IsType(t, &[]*IpAndPort{}, hah)
//t.Error(reflect.TypeOf(hah))
}
func Test_lhStaticMapping(t *testing.T) {
+	l := test.NewLogger()
+	_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/16")
	lh1 := "10.128.0.2"
-	lh1IP := net.ParseIP(lh1)
-	udpServer, _ := NewListener("0.0.0.0", 0, true)
-	meh := NewLightHouse(true, 1, []uint32{ip2int(lh1IP)}, 10, 10003, udpServer, false)
-	meh.AddRemote(ip2int(lh1IP), NewUDPAddr(ip2int(lh1IP), uint16(4242)), true)
-	err := meh.ValidateLHStaticEntries()
+	c := config.NewC(l)
+	c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1}}
+	c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
+	_, err := NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
	assert.Nil(t, err)

	lh2 := "10.128.0.3"
-	lh2IP := net.ParseIP(lh2)
-	meh = NewLightHouse(true, 1, []uint32{ip2int(lh1IP), ip2int(lh2IP)}, 10, 10003, udpServer, false)
-	meh.AddRemote(ip2int(lh1IP), NewUDPAddr(ip2int(lh1IP), uint16(4242)), true)
-	err = meh.ValidateLHStaticEntries()
-	assert.EqualError(t, err, "Lighthouse 10.128.0.3 does not have a static_host_map entry")
+	c = config.NewC(l)
+	c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1, lh2}}
+	c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"100.1.1.1:4242"}}
+	_, err = NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
+	assert.EqualError(t, err, "lighthouse 10.128.0.3 does not have a static_host_map entry")
}
-//func NewLightHouse(amLighthouse bool, myIp uint32, ips []string, interval int, nebulaPort int, pc *udpConn, punchBack bool) *LightHouse {
-/*
-func TestLHQuery(t *testing.T) {
-	//n := NewLhQueryByIpString("10.128.0.3")
-	_, myNet, _ := net.ParseCIDR("10.128.0.0/16")
-	m := NewHostMap(myNet)
-	y, _ := net.ResolveUDPAddr("udp", "10.128.0.3:11111")
-	m.Add(ip2int(net.ParseIP("127.0.0.1")), y)
-	//t.Errorf("%s", m)
-	_ = m
-	_, n, _ := net.ParseCIDR("127.0.0.1/8")
-	/*udpServer, err := net.ListenUDP("udp", &net.UDPAddr{Port: 10009})
-	if err != nil {
-		t.Errorf("%s", err)
-	}
-	meh := NewLightHouse(n, m, []string{"10.128.0.2"}, false, 10, 10003, 10004)
-	//t.Error(m.Hosts)
-	meh2, err := meh.Query(ip2int(net.ParseIP("10.128.0.3")))
-	t.Error(err)
+func BenchmarkLighthouseHandleRequest(b *testing.B) {
+	l := test.NewLogger()
+	_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/0")
+	c := config.NewC(l)
+	lh, err := NewLightHouseFromConfig(l, c, myVpnNet, nil, nil)
+	if !assert.NoError(b, err) {
+		b.Fatal()
+	}
+	hAddr := udp.NewAddrFromString("4.5.6.7:12345")
+	hAddr2 := udp.NewAddrFromString("4.5.6.7:12346")
+	lh.addrMap[3] = NewRemoteList()
+	lh.addrMap[3].unlockedSetV4(
3,
3,
[]*Ip4AndPort{
NewIp4AndPort(hAddr.IP, uint32(hAddr.Port)),
NewIp4AndPort(hAddr2.IP, uint32(hAddr2.Port)),
},
func(iputil.VpnIp, *Ip4AndPort) bool { return true },
)
rAddr := udp.NewAddrFromString("1.2.2.3:12345")
rAddr2 := udp.NewAddrFromString("1.2.2.3:12346")
lh.addrMap[2] = NewRemoteList()
lh.addrMap[2].unlockedSetV4(
3,
3,
[]*Ip4AndPort{
NewIp4AndPort(rAddr.IP, uint32(rAddr.Port)),
NewIp4AndPort(rAddr2.IP, uint32(rAddr2.Port)),
},
func(iputil.VpnIp, *Ip4AndPort) bool { return true },
)
mw := &mockEncWriter{}
b.Run("notfound", func(b *testing.B) {
lhh := lh.NewRequestHandler()
req := &NebulaMeta{
Type: NebulaMeta_HostQuery,
Details: &NebulaMetaDetails{
VpnIp: 4,
Ip4AndPorts: nil,
},
}
p, err := req.Marshal()
assert.NoError(b, err)
for n := 0; n < b.N; n++ {
lhh.HandleRequest(rAddr, 2, p, mw)
}
})
b.Run("found", func(b *testing.B) {
lhh := lh.NewRequestHandler()
req := &NebulaMeta{
Type: NebulaMeta_HostQuery,
Details: &NebulaMetaDetails{
VpnIp: 3,
Ip4AndPorts: nil,
},
}
p, err := req.Marshal()
assert.NoError(b, err)
for n := 0; n < b.N; n++ {
lhh.HandleRequest(rAddr, 2, p, mw)
}
})
}
func TestLighthouse_Memory(t *testing.T) {
l := test.NewLogger()
myUdpAddr0 := &udp.Addr{IP: net.ParseIP("10.0.0.2"), Port: 4242}
myUdpAddr1 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4242}
myUdpAddr2 := &udp.Addr{IP: net.ParseIP("172.16.0.2"), Port: 4242}
myUdpAddr3 := &udp.Addr{IP: net.ParseIP("100.152.0.2"), Port: 4242}
myUdpAddr4 := &udp.Addr{IP: net.ParseIP("24.15.0.2"), Port: 4242}
myUdpAddr5 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4243}
myUdpAddr6 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4244}
myUdpAddr7 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4245}
myUdpAddr8 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4246}
myUdpAddr9 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4247}
myUdpAddr10 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4248}
myUdpAddr11 := &udp.Addr{IP: net.ParseIP("192.168.0.2"), Port: 4249}
myVpnIp := iputil.Ip2VpnIp(net.ParseIP("10.128.0.2"))
theirUdpAddr0 := &udp.Addr{IP: net.ParseIP("10.0.0.3"), Port: 4242}
theirUdpAddr1 := &udp.Addr{IP: net.ParseIP("192.168.0.3"), Port: 4242}
theirUdpAddr2 := &udp.Addr{IP: net.ParseIP("172.16.0.3"), Port: 4242}
theirUdpAddr3 := &udp.Addr{IP: net.ParseIP("100.152.0.3"), Port: 4242}
theirUdpAddr4 := &udp.Addr{IP: net.ParseIP("24.15.0.3"), Port: 4242}
theirVpnIp := iputil.Ip2VpnIp(net.ParseIP("10.128.0.3"))
c := config.NewC(l)
c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
lh, err := NewLightHouseFromConfig(l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
assert.NoError(t, err)
lhh := lh.NewRequestHandler()
// Test that my first update responds with just that
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr1, myUdpAddr2}, lhh)
r := newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr2)
// Ensure we don't accumulate addresses
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr3}, lhh)
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr3)
// Grow it back to 2
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{myUdpAddr1, myUdpAddr4}, lhh)
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)
// Update a different host and ask about it
newLHHostUpdate(theirUdpAddr0, theirVpnIp, []*udp.Addr{theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4}, lhh)
r = newLHHostRequest(theirUdpAddr0, theirVpnIp, theirVpnIp, lhh)
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4)
// Have both hosts ask about the other
r = newLHHostRequest(theirUdpAddr0, theirVpnIp, myVpnIp, lhh)
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)
r = newLHHostRequest(myUdpAddr0, myVpnIp, theirVpnIp, lhh)
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, theirUdpAddr1, theirUdpAddr2, theirUdpAddr3, theirUdpAddr4)
// Make sure we didn't get changed
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, myUdpAddr1, myUdpAddr4)
// Ensure proper ordering and limiting
// Send 12 addrs, get 10 back, the last 2 removed, allowing the duplicate to remain (clients dedupe)
newLHHostUpdate(
myUdpAddr0,
myVpnIp,
[]*udp.Addr{
myUdpAddr1,
myUdpAddr2,
myUdpAddr3,
myUdpAddr4,
myUdpAddr5,
myUdpAddr5, //Duplicated on purpose
myUdpAddr6,
myUdpAddr7,
myUdpAddr8,
myUdpAddr9,
myUdpAddr10,
myUdpAddr11, // This should get cut
}, lhh)
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
assertIp4InArray(
t,
r.msg.Details.Ip4AndPorts,
myUdpAddr1, myUdpAddr2, myUdpAddr3, myUdpAddr4, myUdpAddr5, myUdpAddr5, myUdpAddr6, myUdpAddr7, myUdpAddr8, myUdpAddr9,
)
// Make sure we won't add ips in our vpn network
bad1 := &udp.Addr{IP: net.ParseIP("10.128.0.99"), Port: 4242}
bad2 := &udp.Addr{IP: net.ParseIP("10.128.0.100"), Port: 4242}
good := &udp.Addr{IP: net.ParseIP("1.128.0.99"), Port: 4242}
newLHHostUpdate(myUdpAddr0, myVpnIp, []*udp.Addr{bad1, bad2, good}, lhh)
r = newLHHostRequest(myUdpAddr0, myVpnIp, myVpnIp, lhh)
assertIp4InArray(t, r.msg.Details.Ip4AndPorts, good)
}
func TestLighthouse_reload(t *testing.T) {
l := test.NewLogger()
c := config.NewC(l)
c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}
lh, err := NewLightHouseFromConfig(l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
assert.NoError(t, err)
c.Settings["static_host_map"] = map[interface{}]interface{}{"10.128.0.2": []interface{}{"1.1.1.1:4242"}}
lh.reload(c, false)
}
func newLHHostRequest(fromAddr *udp.Addr, myVpnIp, queryVpnIp iputil.VpnIp, lhh *LightHouseHandler) testLhReply {
req := &NebulaMeta{
Type: NebulaMeta_HostQuery,
Details: &NebulaMetaDetails{
VpnIp: uint32(queryVpnIp),
},
}
b, err := req.Marshal()
	if err != nil {
panic(err)
}
filter := NebulaMeta_HostQueryReply
w := &testEncWriter{
metaFilter: &filter,
}
lhh.HandleRequest(fromAddr, myVpnIp, b, w)
return w.lastReply
}
func newLHHostUpdate(fromAddr *udp.Addr, vpnIp iputil.VpnIp, addrs []*udp.Addr, lhh *LightHouseHandler) {
req := &NebulaMeta{
Type: NebulaMeta_HostUpdateNotification,
Details: &NebulaMetaDetails{
VpnIp: uint32(vpnIp),
Ip4AndPorts: make([]*Ip4AndPort, len(addrs)),
},
}
for k, v := range addrs {
req.Details.Ip4AndPorts[k] = &Ip4AndPort{Ip: uint32(iputil.Ip2VpnIp(v.IP)), Port: uint32(v.Port)}
}
b, err := req.Marshal()
if err != nil {
panic(err)
}
w := &testEncWriter{}
lhh.HandleRequest(fromAddr, vpnIp, b, w)
}
//TODO: this is a RemoteList test
//func Test_lhRemoteAllowList(t *testing.T) {
// l := NewLogger()
// c := NewConfig(l)
// c.Settings["remoteallowlist"] = map[interface{}]interface{}{
// "10.20.0.0/12": false,
// }
// allowList, err := c.GetAllowList("remoteallowlist", false)
// assert.Nil(t, err)
//
// lh1 := "10.128.0.2"
// lh1IP := net.ParseIP(lh1)
//
// udpServer, _ := NewListener(l, "0.0.0.0", 0, true)
//
// lh := NewLightHouse(l, true, &net.IPNet{IP: net.IP{0, 0, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, []uint32{ip2int(lh1IP)}, 10, 10003, udpServer, false, 1, false)
// lh.SetRemoteAllowList(allowList)
//
// // A disallowed ip should not enter the cache but we should end up with an empty entry in the addrMap
// remote1IP := net.ParseIP("10.20.0.3")
// remotes := lh.unlockedGetRemoteList(ip2int(remote1IP))
// remotes.unlockedPrependV4(ip2int(remote1IP), NewIp4AndPort(remote1IP, 4242))
// assert.NotNil(t, lh.addrMap[ip2int(remote1IP)])
// assert.Empty(t, lh.addrMap[ip2int(remote1IP)].CopyAddrs([]*net.IPNet{}))
//
// // Make sure a good ip enters the cache and addrMap
// remote2IP := net.ParseIP("10.128.0.3")
// remote2UDPAddr := NewUDPAddr(remote2IP, uint16(4242))
// lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remote2UDPAddr.IP, uint32(remote2UDPAddr.Port)), false, false)
// assertUdpAddrInArray(t, lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}), remote2UDPAddr)
//
// // Another good ip gets into the cache, ordering is inverted
// remote3IP := net.ParseIP("10.128.0.4")
// remote3UDPAddr := NewUDPAddr(remote3IP, uint16(4243))
// lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remote3UDPAddr.IP, uint32(remote3UDPAddr.Port)), false, false)
// assertUdpAddrInArray(t, lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}), remote2UDPAddr, remote3UDPAddr)
//
// // If we exceed the length limit we should only have the most recent addresses
// addedAddrs := []*udpAddr{}
// for i := 0; i < 11; i++ {
// remoteUDPAddr := NewUDPAddr(net.IP{10, 128, 0, 4}, uint16(4243+i))
// lh.addRemoteV4(ip2int(remote2IP), ip2int(remote2IP), NewIp4AndPort(remoteUDPAddr.IP, uint32(remoteUDPAddr.Port)), false, false)
// // The first entry here is a duplicate, don't add it to the assert list
// if i != 0 {
// addedAddrs = append(addedAddrs, remoteUDPAddr)
// }
// }
//
// // We should only have the last 10 of what we tried to add
// assert.True(t, len(addedAddrs) >= 10, "We should have tried to add at least 10 addresses")
// assertUdpAddrInArray(
// t,
// lh.addrMap[ip2int(remote2IP)].CopyAddrs([]*net.IPNet{}),
// addedAddrs[0],
// addedAddrs[1],
// addedAddrs[2],
// addedAddrs[3],
// addedAddrs[4],
// addedAddrs[5],
// addedAddrs[6],
// addedAddrs[7],
// addedAddrs[8],
// addedAddrs[9],
// )
//}
func Test_ipMaskContains(t *testing.T) {
assert.True(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32-24, iputil.Ip2VpnIp(net.ParseIP("10.0.0.255"))))
assert.False(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32-24, iputil.Ip2VpnIp(net.ParseIP("10.0.1.1"))))
assert.True(t, ipMaskContains(iputil.Ip2VpnIp(net.ParseIP("10.0.0.1")), 32, iputil.Ip2VpnIp(net.ParseIP("10.0.1.1"))))
}
type testLhReply struct {
nebType header.MessageType
nebSubType header.MessageSubType
vpnIp iputil.VpnIp
msg *NebulaMeta
}
type testEncWriter struct {
lastReply testLhReply
metaFilter *NebulaMeta_MessageType
}
func (tw *testEncWriter) SendVia(via interface{}, relay interface{}, ad, nb, out []byte, nocopy bool) {
}
func (tw *testEncWriter) Handshake(vpnIp iputil.VpnIp) {
}
func (tw *testEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, _, _ []byte) {
msg := &NebulaMeta{}
err := msg.Unmarshal(p)
if tw.metaFilter == nil || msg.Type == *tw.metaFilter {
tw.lastReply = testLhReply{
nebType: t,
nebSubType: st,
vpnIp: vpnIp,
msg: msg,
}
}
if err != nil {
panic(err)
}
}
// assertIp4InArray asserts every address in want is at the same position in have and that the lengths match
func assertIp4InArray(t *testing.T, have []*Ip4AndPort, want ...*udp.Addr) {
	if !assert.Len(t, have, len(want)) {
		return
	}

-	t.Errorf("%s", meh2)
-	t.Errorf("%s", n)
+	for k, w := range want {
if !(have[k].Ip == uint32(iputil.Ip2VpnIp(w.IP)) && have[k].Port == uint32(w.Port)) {
assert.Fail(t, fmt.Sprintf("Response did not contain: %v:%v at %v; %v", w.IP, w.Port, k, translateV4toUdpAddr(have)))
}
}
}
// assertUdpAddrInArray asserts every address in want is at the same position in have and that the lengths match
func assertUdpAddrInArray(t *testing.T, have []*udp.Addr, want ...*udp.Addr) {
if !assert.Len(t, have, len(want)) {
return
}
for k, w := range want {
if !(have[k].IP.Equal(w.IP) && have[k].Port == w.Port) {
assert.Fail(t, fmt.Sprintf("Response did not contain: %v at %v; %v", w, k, have))
}
}
}
func translateV4toUdpAddr(ips []*Ip4AndPort) []*udp.Addr {
addrs := make([]*udp.Addr, len(ips))
for k, v := range ips {
addrs[k] = NewUDPAddrFromLH4(v)
}
return addrs
}
*/

Some files were not shown because too many files have changed in this diff.