diff --git a/Makefile b/Makefile
index 36cfd29..658a799 100644
--- a/Makefile
+++ b/Makefile
@@ -29,7 +29,7 @@ vet:
 .PHONY: staticcheck
 staticcheck:
-	@go install honnef.co/go/tools/cmd/staticcheck@v0.4.0
+	@go install honnef.co/go/tools/cmd/staticcheck@v0.4.6
 	staticcheck ./...
 
 .PHONY: ineffassign
@@ -53,7 +53,6 @@ test:
 .PHONY: generate
 generate:
-	cd proto && buf generate
 	go generate ./...
 
 .PHONY: testcover
diff --git a/README.md b/README.md
index 5a03eb6..faa6529 100644
--- a/README.md
+++ b/README.md
@@ -19,14 +19,14 @@ methods for forward and reverse tunneling.
   A forward tunnel allows for all requests made on the tunnel to be directed
   to the same server. With a typical gRPC client, connecting to a replicated
-  server, requests can round-robin across backends. For typical stateless
-  applications, this is desirable for load balancing and robustness. But some
-  applications that are not stateless may need affinity. The tunnel provides
-  that affinity. Instead of the client making multiple requests, which could
-  all be directed to different backends, the client makes one request to open
-  a tunnel. The resulting tunnel can then be used to create other RPC stubs,
-  so that all requests issued via those stubs are directed to the single
-  backend to which the tunnel was opened.
+  server, requests are typically load balanced across backends. For typical
+  stateless applications, this is desirable for resource utilization and fault
+  tolerance. But some applications that are not stateless may need affinity.
+  The tunnel provides that affinity. Instead of the client making multiple
+  requests, which could all be directed to different backends, the client
+  makes one request to open a tunnel. The resulting tunnel can then be used
+  to create other RPC stubs, so that all requests issued via those stubs are
+  directed to the single backend to which the tunnel was opened.
 
 * **Reverse Tunnel**: A reverse tunnel is the opposite: requests flow in the
   reverse direction of a normal gRPC connection. This means that the gRPC
@@ -145,12 +145,9 @@ if err != nil {
 	log.Fatal(err)
 }
 
-// Create the tunnel.
 tunnelStub := tunnelpb.NewTunnelServiceClient(cc)
-stream, err := tunnelStub.OpenTunnel(context.Background())
-
-// Open a tunnel and return a channel.
-ch, err := grpctunnel.NewChannel(tunnelStub)
+// Open a tunnel and return a channel.
+ch, err := grpctunnel.NewChannel(tunnelStub).Start(context.Background())
 if err != nil {
 	log.Fatal(err)
 }
@@ -158,14 +155,10 @@ if err != nil {
 // TODO: Create stubs using ch to send RPCs through the tunnel.
 ```
 
-Client code should not interact with the stream at all or risk corrupting the
-tunneling protocol. (All interactions with the stream should be done via the
-channel.)
-
 To close the tunnel, use the channel's `Close` method. This will also close the
 underlying stream. If any RPCs are in progress on the channel when it is closed,
-they will be cancelled. The channel is also closed if the context used to create
-the stream is cancelled or times out.
+they will be cancelled. The channel is also closed if the context passed to
+`Start` is cancelled or times out.
 
 To use client interceptors with these channels, wrap them using
 [`grpchan.InterceptClientConn`](https://pkg.go.dev/github.com/fullstorydev/grpchan#InterceptClientConn)
diff --git a/doc.go b/doc.go
index 07cd731..495dbc2 100644
--- a/doc.go
+++ b/doc.go
@@ -13,14 +13,11 @@
 // Forward tunnels allow a client to pin RPCs to a single server since they are
 // all sent over a single stream.
Forward tunnels work like so: // -// - Client issues an RPC that establishes the forward tunnel. The RPC is a -// full-duplex bidirectional stream, so can support all manner of streaming -// RPCs over the tunnel. -// - Client then uses the tunnel to create a new gRPC client connection. (See -// NewChannel). -// - RPC stubs can then be created using this new connection. All RPCs issued -// on this connection are transmitted over the tunnel, on the stream that was -// established in step 1. +// - Client creates a new tunnel by calling NewChannel. This issues an RPC that +// establishes the forward tunnel. The RPC is a full-duplex bidirectional +// stream, so can support all manner of streaming RPCs over the tunnel. +// - RPC stubs can then be created using this tunnel. All RPCs issued on it are +// transmitted over the stream that was established by the RPC mentioned above. // - Closing the tunnel channel also results in the underlying stream // being closed. // diff --git a/flow_control.go b/flow_control.go new file mode 100644 index 0000000..4b8da2d --- /dev/null +++ b/flow_control.go @@ -0,0 +1,324 @@ +package grpctunnel + +//lint:file-ignore U1000 these aren't actually unused, but staticcheck is having trouble +// determining that, likely due to the use of generics + +import ( + "container/list" + "context" + "math" + "sync" + "sync/atomic" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // TODO: make these configurable + initialWindowSize = 65536 + chunkMax = 16384 +) + +var errFlowControlWindowExceeded = status.Errorf(codes.ResourceExhausted, "flow control window exceeded") + +// sender is responsible for sending messages and managing flow control. +type sender interface { + send(data []byte) error + updateWindow(add uint32) +} + +// receiver is responsible for receiving messages and managing flow control. 
+type receiver[T any] interface {
+	accept(item T) error
+	close()
+	cancel()
+	dequeue() (T, bool)
+}
+
+type defaultSender struct {
+	ctx           context.Context
+	sendFunc      func([]byte, uint32, bool) error
+	windowUpdates chan struct{}
+	currentWindow atomic.Uint32
+
+	// does not protect any fields, just used to prevent concurrent calls to send
+	// (so messages are sent FIFO and not incorrectly interleaved)
+	mu sync.Mutex
+}
+
+func newSender(ctx context.Context, initialWindowSize uint32, sendFunc func([]byte, uint32, bool) error) sender {
+	s := &defaultSender{
+		ctx:           ctx,
+		sendFunc:      sendFunc,
+		windowUpdates: make(chan struct{}, 1),
+	}
+	s.currentWindow.Store(initialWindowSize)
+	return s
+}
+
+func (s *defaultSender) updateWindow(add uint32) {
+	if add == 0 {
+		return
+	}
+	prevWindow := s.currentWindow.Add(add) - add
+	if prevWindow == 0 {
+		select {
+		case s.windowUpdates <- struct{}{}:
+		default:
+		}
+	}
+}
+
+func (s *defaultSender) send(data []byte) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if int64(len(data)) > math.MaxUint32 {
+		return status.Errorf(codes.ResourceExhausted, "serialized message is too large: %d bytes > maximum %d bytes", len(data), math.MaxUint32)
+	}
+	size := uint32(len(data))
+	first := true
+	for {
+		windowSz := s.currentWindow.Load()
+
+		if windowSz == 0 {
+			// must wait for window size update before we can send more
+			select {
+			case <-s.windowUpdates:
+			case <-s.ctx.Done():
+				return s.ctx.Err()
+			}
+			continue
+		}
+
+		chunkSz := windowSz
+		if chunkSz > uint32(len(data)) {
+			chunkSz = uint32(len(data))
+		}
+		if chunkSz > chunkMax {
+			chunkSz = chunkMax
+		}
+		if !s.currentWindow.CompareAndSwap(windowSz, windowSz-chunkSz) {
+			continue
+		}
+
+		last := chunkSz == uint32(len(data))
+		if err := s.sendFunc(data[:chunkSz], size, first); err != nil {
+			return err
+		}
+		if last {
+			return nil
+		}
+		first = false
+
+		data = data[chunkSz:]
+	}
+}
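The send loop above is the core of the window accounting: it CAS-loops on the shared window, carves off at most `chunkMax` bytes per frame, and blocks on `windowUpdates` once the window hits zero. A toy trace of that arithmetic under the defaults above (65536-byte window, 16384-byte chunks), assuming no window updates arrive mid-message; this is a standalone sketch, not part of the patch:

```go
package main

import "fmt"

const (
	initialWindowSize = 65536 // mirrors the constants defined above
	chunkMax          = 16384
)

func main() {
	window := uint32(initialWindowSize)
	remaining := uint32(40000) // one 40000-byte serialized message
	for frame := 1; remaining > 0; frame++ {
		sz := window
		if sz > remaining {
			sz = remaining
		}
		if sz > chunkMax {
			sz = chunkMax
		}
		window -= sz
		remaining -= sz
		fmt.Printf("frame %d: %5d bytes, window now %5d\n", frame, sz, window)
	}
	// The message goes out as 16384 + 16384 + 7232 bytes; the window
	// drops to 25536 until the peer's window_update frames replenish it.
}
```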
+// defaultReceiver is a per-stream queue of messages. When we receive a message for
+// a stream over a tunnel, we have to put it into this unbounded queue to prevent
+// deadlock (where one consumer of a stream channel can block all operations on the
+// tunnel).
+//
+// In practice, this does not use unbounded memory because flow control will apply
+// backpressure to senders that are outpacing their respective consumers. A well-behaved
+// sender will respect the flow control window. A misbehaving sender will be detected
+// and messages rejected if the flow control window is exceeded.
+type defaultReceiver[T any] struct {
+	measure      func(T) uint
+	updateWindow func(uint32)
+
+	mu                sync.Mutex
+	cond              sync.Cond
+	closed, cancelled bool
+	items             *list.List
+	currentWindow     uint32
+}
+
+func newReceiver[T any](measure func(T) uint, updateWindow func(uint32), initialWindowSize uint32) receiver[T] {
+	rcvr := &defaultReceiver[T]{
+		measure:       measure,
+		updateWindow:  updateWindow,
+		items:         list.New(),
+		currentWindow: initialWindowSize,
+	}
+	rcvr.cond.L = &rcvr.mu
+	return rcvr
+}
+
+func (r *defaultReceiver[T]) accept(item T) error {
+	sz := r.measure(item)
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.closed {
+		return nil
+	}
+	if sz > uint(r.currentWindow) {
+		return errFlowControlWindowExceeded
+	}
+	r.currentWindow -= uint32(sz)
+	signal := r.items.Len() == 0
+	r.items.PushBack(item)
+	if signal {
+		r.cond.Signal()
+	}
+	return nil
+}
+
+func (r *defaultReceiver[_]) close() {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.handleClosure(&r.closed)
+}
+
+func (r *defaultReceiver[_]) cancel() {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.handleClosure(&r.cancelled)
+	r.items.Init() // clear list to free memory
+}
+
+func (r *defaultReceiver[_]) handleClosure(b *bool) {
+	if *b {
+		return
+	}
+	*b = true
+	if r.items.Len() == 0 {
+		r.cond.Broadcast()
+	}
+}
+
+func (r *defaultReceiver[T]) dequeue() (T, bool) {
+	var windowUpdate uint
+	defer func() {
+		// TODO: Support minimum update size, so we can batch
+		// updates and send fewer messages over the network.
+		if windowUpdate > 0 {
+			r.updateWindow(uint32(windowUpdate))
+		}
+	}()
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	var zero T
+	for {
+		if r.cancelled {
+			return zero, false
+		}
+		element := r.items.Front()
+		if element != nil {
+			item := r.items.Remove(element).(T)
+			sz := r.measure(item)
+			r.currentWindow += uint32(sz)
+			windowUpdate = sz
+			return item, true
+		}
+		if r.closed {
+			return zero, false
+		}
+		r.cond.Wait()
+	}
+}
+
+type noFlowControlSender struct {
+	sendFunc func([]byte, uint32, bool) error
+
+	// does not protect any fields, just used to prevent concurrent calls to send
+	// (so messages are sent FIFO and not incorrectly interleaved)
+	mu sync.Mutex
+}
+
+func newSenderWithoutFlowControl(sendFunc func([]byte, uint32, bool) error) sender {
+	return &noFlowControlSender{sendFunc: sendFunc}
+}
+
+func (s *noFlowControlSender) send(data []byte) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if int64(len(data)) > math.MaxUint32 {
+		return status.Errorf(codes.ResourceExhausted, "serialized message is too large: %d bytes > maximum %d bytes", len(data), math.MaxUint32)
+	}
+	size := uint32(len(data))
+	first := true
+	for {
+		chunkSz := uint32(chunkMax)
+		if chunkSz > uint32(len(data)) {
+			chunkSz = uint32(len(data))
+		}
+
+		last := chunkSz == uint32(len(data))
+		if err := s.sendFunc(data[:chunkSz], size, first); err != nil {
+			return err
+		}
+		if last {
+			return nil
+		}
+		first = false
+
+		data = data[chunkSz:]
+	}
+}
+
+func (s *noFlowControlSender) updateWindow(_ uint32) {
+	// should never actually be called
+}
+
+type noFlowControlReceiver[T any] struct {
+	ctx context.Context
+
+	ingestMu sync.Mutex
+	ch       chan T
+	closed   chan struct{}
+	doClose  sync.Once
+}
+
+func newReceiverWithoutFlowControl[T any](ctx context.Context) receiver[T] {
+	return &noFlowControlReceiver[T]{
+		ctx:    ctx,
+		ch:     make(chan T, 1),
+		closed: make(chan struct{}),
+	}
+}
+
+func (r *noFlowControlReceiver[T]) accept(item T) error {
+	r.ingestMu.Lock()
+	defer r.ingestMu.Unlock()
+
+	// First check closed channel.
If already closed, we can't run select + // below because trying to write to closed channel r.ch will panic. + select { + case <-r.closed: + return nil + default: + } + + select { + case r.ch <- item: + case <-r.closed: + // another thread intends to close; so abort and release the lock + } + return nil +} + +func (r *noFlowControlReceiver[T]) close() { + r.doClose.Do(func() { + // Let any concurrent accepting thread know that we intend + // to close and thus need the lock. + close(r.closed) + // Must close the channel while lock is held to prevent + // panic in accept(). + r.ingestMu.Lock() + defer r.ingestMu.Unlock() + close(r.ch) + }) +} + +func (r *noFlowControlReceiver[T]) cancel() { + r.close() +} + +func (r *noFlowControlReceiver[T]) dequeue() (T, bool) { + t, ok := <-r.ch + return t, ok +} diff --git a/go.mod b/go.mod index 596f3dc..fc389dd 100644 --- a/go.mod +++ b/go.mod @@ -6,8 +6,8 @@ require ( github.com/fullstorydev/grpchan v1.1.1 github.com/stretchr/testify v1.8.0 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e - google.golang.org/genproto v0.0.0-20221013201013-33fc6f83cba4 - google.golang.org/grpc v1.50.0 + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f + google.golang.org/grpc v1.53.0 google.golang.org/protobuf v1.28.1 ) diff --git a/go.sum b/go.sum index 7b688e3..76ad973 100644 --- a/go.sum +++ b/go.sum @@ -32,7 +32,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= @@ -89,21 +89,20 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20221013201013-33fc6f83cba4 h1:nZ28yoLJWNLTcERW43BN+JDsNQOdiZOFB9Dly/IUrjw= -google.golang.org/genproto v0.0.0-20221013201013-33fc6f83cba4/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f 
h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.50.0 h1:fPVVDxY9w++VjTZsYvXWqEf9Rqar/e+9zYfxKK+W+YU= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/service.go b/handler.go similarity index 94% rename from service.go rename to handler.go index caae92b..b351f74 100644 --- a/service.go +++ b/handler.go @@ -14,6 +14,11 @@ import ( "github.com/jhump/grpctunnel/tunnelpb" ) +const ( + grpctunnelNegotiateKey = "grpctunnel-negotiate" + grpctunnelNegotiateVal = "on" +) + // TunnelServiceHandler provides an implementation for TunnelServiceServer. You // can register handlers with it, and it will then expose those handlers for // incoming tunnels. If no handlers are registered, the server will reply to @@ -31,6 +36,7 @@ type TunnelServiceHandler struct { onReverseTunnelConnect func(TunnelChannel) onReverseTunnelDisconnect func(TunnelChannel) affinityKey func(TunnelChannel) any + tunnelOpts tunnelOpts stopping atomic.Bool reverse *reverseChannels @@ -64,6 +70,10 @@ type TunnelServiceHandlerOptions struct { // server interceptors ran when the tunnel was opened, then any values they // store in the context is also available. AffinityKey func(TunnelChannel) any + + // If true, flow control will be disabled, even when the network client + // supports flow control. + DisableFlowControl bool } // NewTunnelServiceHandler creates a new TunnelServiceHandler. The options are @@ -82,6 +92,9 @@ func NewTunnelServiceHandler(options TunnelServiceHandlerOptions) *TunnelService affinityKey: options.AffinityKey, reverse: newReverseChannels(), reverseByKey: map[interface{}]*reverseChannels{}, + tunnelOpts: tunnelOpts{ + disableFlowControl: options.DisableFlowControl, + }, } } @@ -125,11 +138,13 @@ func (s *TunnelServiceHandler) openTunnel(stream tunnelpb.TunnelService_OpenTunn // This gives any server interceptors a chance to run and potentially to send // auth credentials in response headers (since the client will need a way to // authenticate the server, since roles are reversed with reverse tunnels). 
- _ = stream.SendHeader(nil) + _ = stream.SendHeader(metadata.Pairs(grpctunnelNegotiateKey, grpctunnelNegotiateVal)) - stream = &threadSafeOpenTunnelServer{TunnelService_OpenTunnelServer: stream} md, _ := metadata.FromIncomingContext(stream.Context()) - return serveTunnel(stream, md, s.handlers, s.stopping.Load) + vals := md.Get(grpctunnelNegotiateKey) + clientAcceptsSettings := len(vals) > 0 && vals[0] == grpctunnelNegotiateVal + stream = &threadSafeOpenTunnelServer{TunnelService_OpenTunnelServer: stream} + return serveTunnel(stream, md, clientAcceptsSettings, &s.tunnelOpts, s.handlers, s.stopping.Load) } // openReverseTunnel creates a reverse tunnel from this server to the RPC client. @@ -146,9 +161,9 @@ func (s *TunnelServiceHandler) openReverseTunnel(stream tunnelpb.TunnelService_O // This gives any server interceptors a chance to run and potentially to send // auth credentials in response headers (since the client will need a way to // authenticate the server, since roles are reversed with reverse tunnels). - _ = stream.SendHeader(nil) + _ = stream.SendHeader(metadata.Pairs(grpctunnelNegotiateKey, grpctunnelNegotiateVal)) - ch := newReverseChannel(stream, s.unregister) + ch := newReverseChannel(stream, &s.tunnelOpts, s.unregister) defer ch.Close() var key interface{} diff --git a/internal/cmd/tunneltestclient/main.go b/internal/cmd/tunneltestclient/main.go index addb435..8b18b3f 100644 --- a/internal/cmd/tunneltestclient/main.go +++ b/internal/cmd/tunneltestclient/main.go @@ -24,8 +24,17 @@ import ( func main() { serverPort := flag.Int("server-port", 26354, "the port on which the server is listening") + noFlowControl := flag.Bool("no-flow-control", false, "disables flow control") + tunnelType := flag.String("tunnel-type", "both", `type of tunnel to test, can be "forward", "reverse", or "both"`) flag.Parse() + switch *tunnelType { + case "forward", "reverse", "both": + // okay + default: + log.Fatalf(`tunnel type %q is not valid; must be one of "forward", "reverse", or "both"`, *tunnelType) + } + ctx := context.Background() dialCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -39,50 +48,58 @@ func main() { log.Fatal(err) } - // First check the forward tunnel. tunnelClient := tunnelpb.NewTunnelServiceClient(cc) - tc, err := tunnelClient.OpenTunnel(ctx) - if err != nil { - log.Fatal(err) + var tunnelOpts []grpctunnel.TunnelOption + if *noFlowControl { + tunnelOpts = []grpctunnel.TunnelOption{grpctunnel.WithDisableFlowControl()} } - log.Println("Tunnel created.") - tunnel := grpctunnel.NewChannel(tc) - defer tunnel.Close() - var clCounts atomic.Int32 - if err := internal.SendRPCs(ctx, grpchantesting.NewTestServiceClient(withClientCounts(tunnel, &clCounts))); err != nil { - log.Fatal(err) + + // First check the forward tunnel. + if *tunnelType != "reverse" { + tunnel, err := grpctunnel.NewChannel(tunnelClient, tunnelOpts...).Start(ctx) + if err != nil { + log.Fatal(err) + } + log.Println("Tunnel created.") + defer tunnel.Close() + var clCounts atomic.Int32 + if err := internal.SendRPCs(ctx, grpchantesting.NewTestServiceClient(withClientCounts(tunnel, &clCounts))); err != nil { + log.Fatal(err) + } + log.Printf("Issued %d requests over tunnel.", clCounts.Load()) } - log.Printf("Issued %d requests over tunnel.", clCounts.Load()) // Then check the reverse tunnel. 
-	key, err := makeClientKey()
-	if err != nil {
-		log.Fatal(err)
-	}
-	reverseTunnel := grpctunnel.NewReverseTunnelServer(tunnelClient)
-	// Over the tunnel, we just expose this simple test service
-	var svrCounts atomic.Int32
-	grpchantesting.RegisterTestServiceServer(withServerCounts(reverseTunnel, &svrCounts), &grpchantesting.TestServer{})
-	ctx, cancel = context.WithCancel(ctx)
-	done := make(chan struct{})
-	defer func() {
-		cancel()
-		<-done
-	}()
-	go func() {
-		defer close(done)
-		ctx := metadata.AppendToOutgoingContext(ctx, "test-client-key", key)
-		if started, err := reverseTunnel.Serve(ctx); !started {
+	if *tunnelType != "forward" {
+		key, err := makeClientKey()
+		if err != nil {
 			log.Fatal(err)
 		}
-	}()
-	log.Printf("Reverse tunnel started (key = %s).", key)
-	// This tells the server to initiate RPCs via
-	tester := gen.NewTunnelTestServiceClient(cc)
-	if _, err := tester.TriggerTestRPCs(ctx, &gen.TriggerTestRPCsRequest{ClientKey: key}); err != nil {
-		log.Fatal(err)
+		reverseTunnel := grpctunnel.NewReverseTunnelServer(tunnelClient, tunnelOpts...)
+		// Over the tunnel, we just expose this simple test service
+		var svrCounts atomic.Int32
+		grpchantesting.RegisterTestServiceServer(withServerCounts(reverseTunnel, &svrCounts), &grpchantesting.TestServer{})
+		ctx, cancel = context.WithCancel(ctx)
+		done := make(chan struct{})
+		defer func() {
+			cancel()
+			<-done
+		}()
+		go func() {
+			defer close(done)
+			ctx := metadata.AppendToOutgoingContext(ctx, "test-client-key", key)
+			if started, err := reverseTunnel.Serve(ctx); !started {
+				log.Fatal(err)
+			}
+		}()
+		log.Printf("Reverse tunnel started (key = %s).", key)
+		// This tells the server to initiate RPCs via the reverse tunnel.
+		tester := gen.NewTunnelTestServiceClient(cc)
+		if _, err := tester.TriggerTestRPCs(ctx, &gen.TriggerTestRPCsRequest{ClientKey: key}); err != nil {
+			log.Fatal(err)
+		}
+		log.Printf("Served %d requests over reverse tunnel.", svrCounts.Load())
 	}
-	log.Printf("Served %d requests over reverse tunnel.", svrCounts.Load())
 
 	// Success!
 }
diff --git a/internal/cmd/tunneltestsvr/main.go b/internal/cmd/tunneltestsvr/main.go
index b71f0c8..5a0a279 100644
--- a/internal/cmd/tunneltestsvr/main.go
+++ b/internal/cmd/tunneltestsvr/main.go
@@ -20,6 +20,7 @@ import (
 
 func main() {
 	port := flag.Int("port", 26354, "the port on which this server will listen")
+	noFlowControl := flag.Bool("no-flow-control", false, "disables flow control")
 	flag.Parse()
 
 	svr := grpc.NewServer()
@@ -32,6 +33,7 @@ func main() {
 			}
 			return vals[0]
 		},
+		DisableFlowControl: *noFlowControl,
 	})
 	tunnelpb.RegisterTunnelServiceServer(svr, tunnelSvc.Service())
 	gen.RegisterTunnelTestServiceServer(svr, &tunnelTester{tunnelSvc: tunnelSvc})
diff --git a/options.go b/options.go
new file mode 100644
index 0000000..da5196c
--- /dev/null
+++ b/options.go
@@ -0,0 +1,45 @@
+package grpctunnel
+
+import "github.com/jhump/grpctunnel/tunnelpb"
+
+// TunnelOption is an option for configuring the behavior of
+// a tunnel client or tunnel server.
+type TunnelOption interface {
+	apply(*tunnelOpts)
+}
+
+// WithDisableFlowControl returns an option that disables the
+// use of flow control, even when the tunnel peer supports it.
+//
+// NOTE: This should NOT be used in application code. This is
+// intended for test code, to verify that the tunnels work
+// without flow control, to make sure they can interop correctly
+// with older versions of this package, before flow control was
+// introduced.
+// +// Eventually, older versions that do not use flow control will +// not be supported and this option will be removed. +func WithDisableFlowControl() TunnelOption { + return tunnelOptFunc(func(opts *tunnelOpts) { + opts.disableFlowControl = true + }) +} + +type tunnelOpts struct { + disableFlowControl bool +} + +func (t *tunnelOpts) supportedRevisions() []tunnelpb.ProtocolRevision { + if t.disableFlowControl { + return []tunnelpb.ProtocolRevision{tunnelpb.ProtocolRevision_REVISION_ZERO} + } + return []tunnelpb.ProtocolRevision{ + tunnelpb.ProtocolRevision_REVISION_ZERO, tunnelpb.ProtocolRevision_REVISION_ONE, + } +} + +type tunnelOptFunc func(*tunnelOpts) + +func (t tunnelOptFunc) apply(opts *tunnelOpts) { + t(opts) +} diff --git a/proto/grpctunnel/v1/tunnel.proto b/proto/grpctunnel/v1/tunnel.proto index b93473e..930cb33 100644 --- a/proto/grpctunnel/v1/tunnel.proto +++ b/proto/grpctunnel/v1/tunnel.proto @@ -7,6 +7,34 @@ import "google/rpc/status.proto"; option go_package = "github.com/jhump/grpctunnel/tunnelpb"; +// A gRPC service which can tunnel gRPC traffic from the client to the server or +// even the server to the client. A "tunnel" is a gRPC stream which presents +// itself as a gRPC transport -- over which other streams may be multiplexed. +// +// Note that evolution of features in this service relies on the client and +// server negotiating a "protocol revision". This negotiation step was not +// originally present, and was added in revision one, which also includes +// support for flow control, which fixes several classes of deadlock that +// can occur in heavy usage of streams on the tunnel. +// +// Revision one also advertises that clients can accept server settings and +// that servers intend to send them via headers. This is done by sending a +// header named "grpctunnel-negotiate" with a value of "on", both in request +// headers from a network client and in response headers from a network +// server, on the RPC that establishes the tunnel. +// +// Both the client and server must advertise this ability in order for a +// server settings message to actually be sent. Otherwise, clients and servers +// that implement revision one will fall back to a revision zero compatibility +// mode. (Revision zero supports neither server settings messages nor flow +// control.) +// +// As features are added to the protocol in future revisions, they will be +// enabled via the client and server negotiating them via the server settings +// message. The server announces what protocol revisions it supports, so the +// client can fall back to older revisions if the server doesn't support the +// latest. The client indicates the actual revision it will use in messages +// that create new streams. service TunnelService { // OpenTunnel creates a channel to the server which can be used to send // additional RPCs, all of which will be sent to the same server via a @@ -41,13 +69,15 @@ message ClientToServer { // The ID of the stream. Stream IDs must be used in increasing order and // cannot be re-used. Unlike in the HTTP/2 protocol, the stream ID is 64-bit // so overflow in a long-lived channel is excessively unlikely. (If the - // channel were used for a stream every nanosecond, it would take close to - // 300 years to exhaust every ID and reach an overflow situation.) + // channel were used for a new stream every nanosecond, it would take close + // to 300 years to exhaust every ID and reach an overflow situation.) 
int64 stream_id = 1; oneof frame { // Creates a new RPC stream, which includes request header metadata. The - // stream ID must not be an already active stream. + // stream ID must be greater than all previously-used stream IDs for this + // tunnel. It is expected to start at zero for the first stream on the + // tunnel and then one for the next, and so on. NewStream new_stream = 2; // Sends a message on the RPC stream. If the message is larger than 16k, // the rest of the message should be sent in chunks using the @@ -65,6 +95,10 @@ message ClientToServer { // (unless the ID is being re-used after the stream is terminated on the // server side). google.protobuf.Empty cancel = 6; + // Lets the peer know that data has been consumed, so it may be able + // to send more data, based on flow control window sizes. This is only + // used in revision one of the protocol. + uint32 window_update = 7; } } @@ -80,11 +114,24 @@ message ServerToClient { // The ID of the stream. Stream IDs are defined by the client and should be // used in monotonically increasing order. They cannot be re-used. Unlike // HTTP/2, the ID is 64-bit, so overflow/re-use should not be an issue. (If - // the channel were used for a stream every nanosecond, it would take close - // to 300 years to exhaust every ID and reach an overflow situation.) + // the channel were used for a new stream every nanosecond, it would take + // close to 300 years to exhaust every ID and reach an overflow situation.) + // + // The stream ID will be -1 for messages that do not correspond to a single + // stream, but to the whole tunnel. Currently, only a Settings message will + // be sent this way. int64 stream_id = 1; oneof frame { + // This is the very first message sent on a response stream. The tunnel + // client should await this before sending any data as it will contain + // information about the server's initial flow control window size for + // each new stream. This is only used in revision one of the protocol. + // A client that needs to interact with an older server (i.e. revision + // zero) must examine header metadata to decide if it should expect a + // settings message. Similarly, a server must examine header metadata to + // decide if it should send a settings message. + Settings settings = 6; // Sends response headers for this stream. If headers are sent at all, // they must be sent before any response message data. Metadata response_headers = 2; @@ -100,19 +147,48 @@ message ServerToClient { // given stream ID until the ID is re-used (e.g. a NewStream message is // received that creates another stream with the same ID). CloseStream close_stream = 5; + // Lets the peer know that data has been consumed, so it may be able + // to send more data, based on flow control window sizes. + uint32 window_update = 7; } } +message Settings { + // The set of protocol revisions that this server supports. If the + // client does not support any of them, it must hang up. This should + // never be empty, but if that is observed, the client should assume + // the server only supports revision zero. + repeated ProtocolRevision supported_protocol_revisions = 1; + + // The server's initial window size for all newly created streams. + // When a new stream is created, this is the flow control window for + // sending data to the server. The client indicates its own initial + // window size, for receiving data from the server, in the NewStream + // message. + // + // This value will be zero if the only supported protocol revision + // is zero. 
+  uint32 initial_window_size = 2;
+}
+
 message NewStream {
+  // The name of the method being invoked.
   string method_name = 1;
+  // Header metadata for this request.
   Metadata request_headers = 2;
-
+  // The client's initial window size, for receiving data from the
+  // server. This will be zero if protocol_revision is zero.
+  uint32 initial_window_size = 3;
+  // The protocol revision that the client will use for this stream.
+  // If this revision is not supported by the server, the server will
+  // immediately close the stream with an error code.
+  ProtocolRevision protocol_revision = 4;
   // TODO: codec/compressor options?
 }
 
 message MessageData {
   // The full size of the message.
-  int32 size = 1;
+  uint32 size = 1;
   // The message data. This field should not be longer than 16kb (16,384
   // bytes). If the full size of the message is larger, then it should be
   // split into multiple chunks. The chunking is done to allow multiple
@@ -134,3 +210,26 @@ message Metadata {
   }
   map<string, Values> md = 1;
 }
+
+enum ProtocolRevision {
+  // Indicates revision zero. This revision of the protocol did not support
+  // server settings messages or window update messages. Flow control was not
+  // enforced. This version of the protocol did not even know about protocol
+  // revisions, so by default any ProtocolRevision field will be absent (and
+  // thus default to REVISION_ZERO). A server can identify a revision zero client
+  // because it will not send a "grpctunnel-negotiate" request header, and thus
+  // the server knows to not send a settings message and to not use flow control.
+  // A client can identify a revision zero server because it will not send a
+  // "grpctunnel-negotiate" response header, and thus the client knows not to
+  // expect a settings message and to not use flow control.
+  REVISION_ZERO = 0;
+  // Indicates revision one, which requires server-supplied settings before the
+  // tunnel can be used and also supports flow control. This flow control support
+  // eliminates chances of deadlock in streaming-heavy tunnel usage.
+  //
+  // This value will be provided via messages on the tunnel, even though it is
+  // technically redundant with the use of the "grpctunnel-negotiate" header.
+  // It will be used in the future to distinguish between this protocol revision
+  // and later protocol revisions.
+  REVISION_ONE = 1;
+}
diff --git a/reverse_server.go b/reverse_server.go
index 45ed4eb..ec0f07d 100644
--- a/reverse_server.go
+++ b/reverse_server.go
@@ -30,6 +30,7 @@ const (
 // method to actually create a reverse tunnel and handle requests.
 type ReverseTunnelServer struct {
 	stub     tunnelpb.TunnelServiceClient
+	opts     tunnelOpts
 	handlers grpchan.HandlerMap
 
 	mu        sync.Mutex
@@ -40,12 +41,16 @@ type ReverseTunnelServer struct {
 
 // NewReverseTunnelServer creates a new server that uses the given stub to
 // create reverse tunnels.
-func NewReverseTunnelServer(stub tunnelpb.TunnelServiceClient) *ReverseTunnelServer {
-	return &ReverseTunnelServer{
+func NewReverseTunnelServer(stub tunnelpb.TunnelServiceClient, opts ...TunnelOption) *ReverseTunnelServer {
+	r := &ReverseTunnelServer{
 		stub:      stub,
 		handlers:  grpchan.HandlerMap{},
 		instances: map[tunnelpb.TunnelService_OpenReverseTunnelClient]struct{}{},
 	}
+	for _, opt := range opts {
+		opt.apply(&r.opts)
+	}
+	return r
 }
 
 // RegisterService implements grpc.ServiceRegistrar.
This allows you to use this
@@ -82,19 +87,28 @@ func (s *ReverseTunnelServer) RegisterService(desc *grpc.ServiceDesc, srv interf
 // the gRPC server associated with the stub used to create this reverse tunnel
 // server.
 func (s *ReverseTunnelServer) Serve(ctx context.Context, opts ...grpc.CallOption) (started bool, err error) {
+	// TODO: validate options and maybe return an error
+	ctx = metadata.AppendToOutgoingContext(ctx, grpctunnelNegotiateKey, grpctunnelNegotiateVal)
 	stream, err := s.stub.OpenReverseTunnel(ctx, opts...)
 	if err != nil {
 		return false, err
 	}
+	respMD, err := stream.Header()
+	if err != nil {
+		return false, err
+	}
+	vals := respMD.Get(grpctunnelNegotiateKey)
+	clientAcceptsSettings := len(vals) > 0 && vals[0] == grpctunnelNegotiateVal
+
+	// TODO: we don't have a way to access outgoing metadata that gets added by
+	// client interceptors that may be run by the stub.
+	reqMD, _ := metadata.FromOutgoingContext(ctx)
 	stream = &threadSafeOpenReverseTunnelClient{TunnelService_OpenReverseTunnelClient: stream}
 	if err := s.addInstance(stream); err != nil {
 		return false, err
 	}
 	defer s.wg.Done()
-	// TODO: we don't have a way to access outgoing metadata that gets added by
-	// client interceptors that may be run by the stub.
-	md, _ := metadata.FromOutgoingContext(ctx)
-	err = serveTunnel(stream, md, s.handlers, s.isClosing)
+	err = serveTunnel(stream, reqMD, clientAcceptsSettings, &s.opts, s.handlers, s.isClosing)
 	if err == context.Canceled && ctx.Err() == nil && s.isClosed() {
 		// If we get back a cancelled error, but the given context is not
 		// cancelled and this server is closed, then the cancellation was
diff --git a/tunnel_client.go b/tunnel_client.go
index ee805a4..6f0e56f 100644
--- a/tunnel_client.go
+++ b/tunnel_client.go
@@ -5,8 +5,10 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"reflect"
 	"sync"
+	"sync/atomic"
 
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -19,25 +21,56 @@ import (
 	"github.com/jhump/grpctunnel/tunnelpb"
 )
 
-// NewChannel creates a new channel for issues RPCs. The returned channel
-// implements [grpc.ClientConnInterface], so it can be used to create stubs
-// and issue other RPCs, which are all carried over the given stream.
-func NewChannel(stream tunnelpb.TunnelService_OpenTunnelClient) TunnelChannel {
-	stream = &threadSafeOpenTunnelClient{TunnelService_OpenTunnelClient: stream}
-	md, _ := metadata.FromOutgoingContext(stream.Context())
-	return newTunnelChannel(stream, md, func(*tunnelChannel) { _ = stream.CloseSend() })
+// NewChannel creates a new pending channel that, once started, can be used
+// for issuing RPCs.
+func NewChannel(stub tunnelpb.TunnelServiceClient, opts ...TunnelOption) PendingChannel {
+	p := &pendingChannel{stub: stub}
+	for _, opt := range opts {
+		opt.apply(&p.opts)
+	}
+	return p
 }
 
-func newReverseChannel(stream tunnelpb.TunnelService_OpenReverseTunnelServer, onClose func(*tunnelChannel)) *tunnelChannel {
-	stream = &threadSafeOpenReverseTunnelServer{TunnelService_OpenReverseTunnelServer: stream}
-	md, _ := metadata.FromIncomingContext(stream.Context())
-	return newTunnelChannel(stream, md, onClose)
+// PendingChannel is an un-started channel. Calling Start will establish the
+// tunnel and return a value that implements [grpc.ClientConnInterface], so it
+// can be used to create stubs and issue other RPCs that are all carried over a
+// single tunnel stream.
+// +// The given context defines the lifetime of the stream and therefore of the +// channel; if the context times out or is cancelled, the channel will be closed. +type PendingChannel interface { + Start(ctx context.Context, opts ...grpc.CallOption) (TunnelChannel, error) } -type tunnelStreamClient interface { - Context() context.Context - Send(*tunnelpb.ClientToServer) error - Recv() (*tunnelpb.ServerToClient, error) +type pendingChannel struct { + stub tunnelpb.TunnelServiceClient + opts tunnelOpts +} + +func (p *pendingChannel) Start(ctx context.Context, opts ...grpc.CallOption) (TunnelChannel, error) { + // TODO: validate options and maybe return an error + ctx = metadata.AppendToOutgoingContext(ctx, grpctunnelNegotiateKey, grpctunnelNegotiateVal) + stream, err := p.stub.OpenTunnel(ctx, opts...) + if err != nil { + return nil, err + } + respMD, err := stream.Header() + if err != nil { + return nil, err + } + vals := respMD.Get(grpctunnelNegotiateKey) + serverSendsSettings := len(vals) > 0 && vals[0] == grpctunnelNegotiateVal + reqMD, _ := metadata.FromOutgoingContext(stream.Context()) + stream = &threadSafeOpenTunnelClient{TunnelService_OpenTunnelClient: stream} + return newTunnelChannel(stream, reqMD, serverSendsSettings, &p.opts, func(*tunnelChannel) { _ = stream.CloseSend() }), nil +} + +func newReverseChannel(stream tunnelpb.TunnelService_OpenReverseTunnelServer, opts *tunnelOpts, onClose func(*tunnelChannel)) *tunnelChannel { + md, _ := metadata.FromIncomingContext(stream.Context()) + vals := md.Get(grpctunnelNegotiateKey) + serverSendsSettings := len(vals) > 0 && vals[0] == grpctunnelNegotiateVal + stream = &threadSafeOpenReverseTunnelServer{TunnelService_OpenReverseTunnelServer: stream} + return newTunnelChannel(stream, md, serverSendsSettings, opts, onClose) } // TunnelChannel is a special gRPC connection that uses a gRPC stream (a tunnel) @@ -60,7 +93,7 @@ type TunnelChannel interface { // // For forward tunnels, this is a client context. So it will include // outgoing metadata for the request headers that were used to open the - // tunnel. For reverse tunnels, this is a server context. So that request + // tunnel. For reverse tunnels, this is a server context. So that request // metadata will be available as incoming metadata. Context() context.Context // Done returns a channel that can be used to await the channel closing. 
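The net effect of this API change shows up at call sites: opening a tunnel is now a two-step construct-then-Start, and Start blocks until negotiation completes (the "grpctunnel-negotiate" header exchange and, on revision one, the server's settings frame). A minimal end-to-end sketch mirroring the updated README snippet; the dial target is a placeholder and plain-text credentials are just for illustration:

```go
package main

import (
	"context"
	"log"

	"github.com/jhump/grpctunnel"
	"github.com/jhump/grpctunnel/tunnelpb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial a server that exposes TunnelService (placeholder address).
	cc, err := grpc.Dial("127.0.0.1:26354",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	// Construct the pending channel, then Start it. Start returns only
	// after the tunnel stream is open and settings have been received.
	ch, err := grpctunnel.NewChannel(tunnelpb.NewTunnelServiceClient(cc)).
		Start(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// ch implements grpc.ClientConnInterface: pass it to any generated
	// stub constructor; all RPCs then ride the single tunnel stream.
	defer ch.Close()
}
```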
@@ -70,6 +103,12 @@ type TunnelChannel interface { Err() error } +type tunnelStreamClient interface { + Context() context.Context + Send(*tunnelpb.ClientToServer) error + Recv() (*tunnelpb.ServerToClient, error) +} + type threadSafeOpenTunnelClient struct { sendMu sync.Mutex recvMu sync.Mutex @@ -137,11 +176,17 @@ func (h *threadSafeOpenReverseTunnelServer) RecvMsg(msg interface{}) error { } type tunnelChannel struct { - stream tunnelStreamClient - tunnelMetadata metadata.MD - ctx context.Context - cancel context.CancelFunc - tearDown func(*tunnelChannel) + stream tunnelStreamClient + tunnelMetadata metadata.MD + serverSendsSettings bool + tunnelOpts *tunnelOpts + ctx context.Context + cancel context.CancelFunc + tearDown func(*tunnelChannel) + + awaitSettings chan struct{} + settings *tunnelpb.Settings + useRevision tunnelpb.ProtocolRevision mu sync.RWMutex streams map[int64]*tunnelClientStream @@ -153,17 +198,27 @@ type tunnelChannel struct { streamCreation sync.Mutex } -func newTunnelChannel(stream tunnelStreamClient, tunnelMetadata metadata.MD, tearDown func(*tunnelChannel)) *tunnelChannel { +func newTunnelChannel(stream tunnelStreamClient, tunnelMetadata metadata.MD, serverSendsSettings bool, opts *tunnelOpts, tearDown func(*tunnelChannel)) *tunnelChannel { ctx, cancel := context.WithCancel(stream.Context()) c := &tunnelChannel{ - stream: stream, - ctx: ctx, - tunnelMetadata: tunnelMetadata, - cancel: cancel, - tearDown: tearDown, - streams: map[int64]*tunnelClientStream{}, + stream: stream, + tunnelMetadata: tunnelMetadata, + serverSendsSettings: serverSendsSettings, + tunnelOpts: opts, + ctx: ctx, + cancel: cancel, + tearDown: tearDown, + streams: map[int64]*tunnelClientStream{}, + awaitSettings: make(chan struct{}), } go c.recvLoop() + + // make sure we've gotten settings from the server before we return + select { + case <-c.awaitSettings: + case <-ctx.Done(): + } + return c } @@ -213,15 +268,19 @@ func (c *tunnelChannel) Invoke(ctx context.Context, methodName string, req, resp rv := reflect.Indirect(reflect.ValueOf(resp)) extraResp := reflect.New(rv.Type()).Interface() extraErr := str.RecvMsg(extraResp) - switch extraErr { - case nil: - return status.Errorf(codes.Internal, "unary RPC returned >1 response message") - case io.EOF: - // this is what we want: nothing else in the stream + if extraErr == nil { + // Doh! + str.cancel() + extraErr = status.Errorf(codes.Internal, "unary RPC returned >1 response message") + } + // make sure to give thread-safe visibility to any trailers + // recorded via use of the grpc.Trailer call option. 
+ str.Trailer() + + if errors.Is(extraErr, io.EOF) { return nil - default: - return err } + return extraErr } func (c *tunnelChannel) NewStream(ctx context.Context, desc *grpc.StreamDesc, methodName string, opts ...grpc.CallOption) (grpc.ClientStream, error) { @@ -243,8 +302,10 @@ func (c *tunnelChannel) newStream(ctx context.Context, clientStreams, serverStre StreamId: str.streamID, Frame: &tunnelpb.ClientToServer_NewStream{ NewStream: &tunnelpb.NewStream{ - MethodName: methodName, - RequestHeaders: toProto(md), + MethodName: methodName, + RequestHeaders: toProto(md), + ProtocolRevision: c.useRevision, + InitialWindowSize: initialWindowSize, }, }, }) @@ -256,7 +317,7 @@ func (c *tunnelChannel) newStream(ctx context.Context, clientStreams, serverStre // if context gets cancelled, make sure // we shut down the stream <-str.ctx.Done() - str.cancel(str.ctx.Err()) + str.cancelStream(str.ctx.Err()) }() return str, nil } @@ -327,13 +388,12 @@ func (c *tunnelChannel) allocateStream(ctx context.Context, clientStreams, serve } } - ch := make(chan tunnelpb.ServerToClientFrame, 1) ctx, cncl := context.WithCancel(ctx) ctx = context.WithValue(ctx, tunnelMetadataOutgoingContextKey{}, c.tunnelMetadata) ctx = context.WithValue(ctx, tunnelChannelContextKey{}, c) str := &tunnelClientStream{ ctx: ctx, - cncl: cncl, + cancel: cncl, ch: c, streamID: streamID, method: methodName, @@ -342,23 +402,109 @@ func (c *tunnelChannel) allocateStream(ctx context.Context, clientStreams, serve trailersTargets: tlrs, isClientStream: clientStreams, isServerStream: serverStreams, - ingestChan: ch, - readChan: ch, gotHeadersSignal: make(chan struct{}), doneSignal: make(chan struct{}), } + sendData := func(data []byte, totalSize uint32, first bool) error { + if first { + return c.stream.Send(&tunnelpb.ClientToServer{ + StreamId: streamID, + Frame: &tunnelpb.ClientToServer_RequestMessage{ + RequestMessage: &tunnelpb.MessageData{ + Size: totalSize, + Data: data, + }, + }, + }) + } + return c.stream.Send(&tunnelpb.ClientToServer{ + StreamId: streamID, + Frame: &tunnelpb.ClientToServer_MoreRequestData{ + MoreRequestData: data, + }, + }) + } + if c.useRevision == tunnelpb.ProtocolRevision_REVISION_ZERO { + str.sender = newSenderWithoutFlowControl(sendData) + str.receiver = newReceiverWithoutFlowControl[tunnelpb.ServerToClientFrame](ctx) + } else { + str.sender = newSender(ctx, c.settings.InitialWindowSize, sendData) + str.receiver = newReceiver( + func(frame tunnelpb.ServerToClientFrame) uint { + switch frame := frame.(type) { + case *tunnelpb.ServerToClient_ResponseMessage: + return uint(len(frame.ResponseMessage.Data)) + case *tunnelpb.ServerToClient_MoreResponseData: + return uint(len(frame.MoreResponseData)) + default: + return 0 + } + }, + func(windowUpdate uint32) { + if str.loadDone() != nil { + // don't bother with window updates; no more data coming + return + } + _ = c.stream.Send(&tunnelpb.ClientToServer{ + StreamId: streamID, + Frame: &tunnelpb.ClientToServer_WindowUpdate{ + WindowUpdate: windowUpdate, + }, + }) + }, + initialWindowSize, + ) + } + c.streams[streamID] = str return str, md, nil } func (c *tunnelChannel) recvLoop() { + if c.serverSendsSettings { + in, err := c.stream.Recv() + if err != nil { + c.close(fmt.Errorf("failed to read settings from server: %w", err)) + return + } + if in.StreamId != -1 { + c.close(fmt.Errorf("protocol error: settings frame had bad stream ID (%d)", in.StreamId)) + return + } + settings, ok := in.Frame.(*tunnelpb.ServerToClient_Settings) + if !ok { + c.close(fmt.Errorf("protocol 
error: first frame was not settings (instead was %T)", in.Frame))
+			return
+		}
+		supportedRevisions := c.tunnelOpts.supportedRevisions()
+		var supported bool
+		for _, rev := range settings.Settings.SupportedProtocolRevisions {
+			switch {
+			case inSlice(rev, supportedRevisions):
+				if rev > c.useRevision {
+					// use the highest version that both server and client support
+					c.useRevision = rev
+				}
+				supported = true
+			}
+		}
+		if !supported {
+			c.close(fmt.Errorf("protocol error: server supports revisions %v, but client supports revisions %v",
+				settings.Settings.SupportedProtocolRevisions, supportedRevisions))
+			return
+		}
+		c.settings = settings.Settings
+	}
+	close(c.awaitSettings)
+
 	for {
 		in, err := c.stream.Recv()
 		if err != nil {
 			c.close(err)
 			return
 		}
+
 		str, err := c.getStream(in.StreamId)
 		if err != nil {
 			c.close(err)
@@ -368,6 +514,15 @@ func (c *tunnelChannel) recvLoop() {
 	}
 }
 
+func inSlice[S ~[]T, T comparable](find T, slice S) bool {
+	for _, elem := range slice {
+		if elem == find {
+			return true
+		}
+	}
+	return false
+}
+
 func (c *tunnelChannel) getStream(streamID int64) (*tunnelClientStream, error) {
 	c.mu.RLock()
 	defer c.mu.RUnlock()
@@ -413,7 +568,7 @@ func (c *tunnelChannel) close(err error) bool {
 	}
 	c.err = err
 	for _, st := range c.streams {
-		st.cncl()
+		st.cancel()
 	}
 	c.streams = nil
 	return true
@@ -421,7 +576,7 @@
 
 type tunnelClientStream struct {
 	ctx  context.Context
-	cncl context.CancelFunc
+	cancel context.CancelFunc
 	ch       *tunnelChannel
 	streamID int64
 	method   string
@@ -433,20 +588,21 @@ type tunnelClientStream struct {
 	isClientStream bool
 	isServerStream bool
 
-	// for "ingesting" frames into channel, from receive loop
-	ingestMu   sync.Mutex
-	ingestChan chan<- tunnelpb.ServerToClientFrame
+	sender   sender
+	receiver receiver[tunnelpb.ServerToClientFrame]
+	done     atomic.Pointer[errHolder]
+
+	// for processing metadata frames, from receive loop
+	metaMu           sync.Mutex
 	gotHeaders       bool
 	gotHeadersSignal chan struct{}
 	headers          metadata.MD
-	done             error
 	doneSignal       chan struct{}
 	trailers         metadata.MD
 
 	// for reading frames from channel, to read message data
-	readMu   sync.Mutex
-	readChan <-chan tunnelpb.ServerToClientFrame
-	readErr  error
+	readMu  sync.Mutex
+	readErr error
 
 	// for sending frames to server
 	writeMu sync.Mutex
@@ -493,7 +649,7 @@ func (st *tunnelClientStream) CloseSend() error {
 
 	select {
 	case <-st.doneSignal:
-		return st.done
+		return st.loadDone()
 	default:
 		// don't block since we are holding writeMu
 	}
@@ -510,6 +666,13 @@ if err != nil {
 	})
 }
 
+func (st *tunnelClientStream) loadDone() error {
+	if val := st.done.Load(); val != nil {
+		return val.error
+	}
+	return nil
+}
+
 func (st *tunnelClientStream) Context() context.Context {
 	return st.ctx
 }
@@ -528,57 +691,18 @@ func (st *tunnelClientStream) SendMsg(m interface{}) error {
 	if err != nil {
 		return err
 	}
-
-	i := 0
-	for {
-		if err := st.err(); err != nil {
-			return io.EOF
-		}
-
-		chunk := b
-		if len(b) > maxChunkSize {
-			chunk = b[:maxChunkSize]
-		}
-
-		if i == 0 {
-			err = st.stream.Send(&tunnelpb.ClientToServer{
-				StreamId: st.streamID,
-				Frame: &tunnelpb.ClientToServer_RequestMessage{
-					RequestMessage: &tunnelpb.MessageData{
-						Size: int32(len(b)),
-						Data: chunk,
-					},
-				},
-			})
-		} else {
-			err = st.stream.Send(&tunnelpb.ClientToServer{
-				StreamId: st.streamID,
-				Frame: &tunnelpb.ClientToServer_MoreRequestData{
-					MoreRequestData: chunk,
-				},
-			})
-		}
-
-		if err != nil {
-			return err
-		}
-
-		if len(b) <= maxChunkSize {
-			break
-		}
-
-		b = 
b[maxChunkSize:] - i++ + if int64(len(b)) > math.MaxUint32 { + return status.Errorf(codes.ResourceExhausted, "serialized message is too large: %d bytes > maximum %d bytes", len(b), math.MaxUint32) } - return nil + return st.sender.send(b) } func (st *tunnelClientStream) RecvMsg(m interface{}) error { data, ok, err := st.readMsg() if err != nil { if !ok { - st.cancel(err) + st.cancelStream(err) } return err } @@ -622,17 +746,15 @@ func (st *tunnelClientStream) readMsgLocked() (data []byte, ok bool, err error) msgLen := -1 var b []byte for { - in, ok := <-st.readChan + in, ok := st.receiver.dequeue() if !ok { - // don't need lock to read st.done; observing - // input channel close provides safe visibility - return nil, true, st.done + return nil, true, st.loadDone() } switch in := in.(type) { case *tunnelpb.ServerToClient_ResponseMessage: if msgLen != -1 { - return nil, false, status.Errorf(codes.Internal, "server sent redundant response message envelope") + return nil, false, status.Errorf(codes.Internal, "server sent response message envelope before previous message finished (%d/%d)", len(b), msgLen) } msgLen = int(in.ResponseMessage.Size) b = in.ResponseMessage.Data @@ -661,15 +783,6 @@ func (st *tunnelClientStream) readMsgLocked() (data []byte, ok bool, err error) } } -func (st *tunnelClientStream) err() error { - select { - case <-st.doneSignal: - return st.done - default: - return st.ctx.Err() - } -} - func (st *tunnelClientStream) acceptServerFrame(frame tunnelpb.ServerToClientFrame) { if st == nil { // can happen if client decided that the stream ID was recently used @@ -680,9 +793,12 @@ func (st *tunnelClientStream) acceptServerFrame(frame tunnelpb.ServerToClientFra } switch frame := frame.(type) { + case *tunnelpb.ServerToClient_Settings: + st.finishStream(errors.New("protocol error: unexpected settings frame"), nil) + case *tunnelpb.ServerToClient_ResponseHeaders: - st.ingestMu.Lock() - defer st.ingestMu.Unlock() + st.metaMu.Lock() + defer st.metaMu.Unlock() if st.gotHeaders { // TODO: cancel RPC and fail locally with internal error? return @@ -693,51 +809,62 @@ func (st *tunnelClientStream) acceptServerFrame(frame tunnelpb.ServerToClientFra *hdrs = st.headers } close(st.gotHeadersSignal) - return case *tunnelpb.ServerToClient_CloseStream: trailers := fromProto(frame.CloseStream.ResponseTrailers) err := status.FromProto(frame.CloseStream.Status).Err() st.finishStream(err, trailers) - } - st.ingestMu.Lock() - defer st.ingestMu.Unlock() + case *tunnelpb.ServerToClient_WindowUpdate: + st.sender.updateWindow(frame.WindowUpdate) - if st.done != nil { - return - } + case nil: + st.finishStream(errors.New("protocol error: unrecognized frame type"), nil) - select { - case st.ingestChan <- frame: - case <-st.ctx.Done(): + default: + if err := st.receiver.accept(frame); err != nil { + st.finishStream(err, nil) + } } } -func (st *tunnelClientStream) cancel(err error) { - st.finishStream(err, nil) - // let server know - st.writeMu.Lock() - defer st.writeMu.Unlock() - _ = st.stream.Send(&tunnelpb.ClientToServer{ - StreamId: st.streamID, - Frame: &tunnelpb.ClientToServer_Cancel{ - Cancel: &emptypb.Empty{}, - }, - }) +func (st *tunnelClientStream) cancelStream(err error) { + if !st.finishStream(err, nil) { + // stream already closed + return + } + st.receiver.cancel() + // Let server know, too. 
+ go func() { + _ = st.stream.Send(&tunnelpb.ClientToServer{ + StreamId: st.streamID, + Frame: &tunnelpb.ClientToServer_Cancel{ + Cancel: &emptypb.Empty{}, + }, + }) + }() } -func (st *tunnelClientStream) finishStream(err error, trailers metadata.MD) { +func (st *tunnelClientStream) finishStream(err error, trailers metadata.MD) bool { + switch err { + case nil: + err = io.EOF + case context.DeadlineExceeded: + err = status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + err = status.Error(codes.Canceled, err.Error()) + } + if !st.done.CompareAndSwap(nil, &errHolder{err}) { + // done already set? then RPC already finished + return false + } + defer st.cancel() st.ch.removeStream(st.streamID) - defer st.cncl() + st.receiver.close() - st.ingestMu.Lock() - defer st.ingestMu.Unlock() + st.metaMu.Lock() + defer st.metaMu.Unlock() - if st.done != nil { - // RPC already finished! just ignore... - return - } st.trailers = trailers for _, tlrs := range st.trailersTargets { *tlrs = trailers @@ -746,16 +873,7 @@ func (st *tunnelClientStream) finishStream(err error, trailers metadata.MD) { st.gotHeaders = true close(st.gotHeadersSignal) } - switch err { - case nil: - err = io.EOF - case context.DeadlineExceeded: - err = status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - err = status.Error(codes.Canceled, err.Error()) - } - st.done = err - - close(st.ingestChan) close(st.doneSignal) + + return true } diff --git a/tunnel_server.go b/tunnel_server.go index 0659f28..d0f201b 100644 --- a/tunnel_server.go +++ b/tunnel_server.go @@ -5,8 +5,12 @@ import ( "errors" "fmt" "io" + "math" + "strconv" "strings" "sync" + "sync/atomic" + "time" "github.com/fullstorydev/grpchan" "google.golang.org/grpc" @@ -18,15 +22,15 @@ import ( "github.com/jhump/grpctunnel/tunnelpb" ) -const maxChunkSize = 16384 - -func serveTunnel(stream tunnelStreamServer, tunnelMetadata metadata.MD, handlers grpchan.HandlerMap, isClosing func() bool) error { +func serveTunnel(stream tunnelStreamServer, tunnelMetadata metadata.MD, clientAcceptsSettings bool, opts *tunnelOpts, handlers grpchan.HandlerMap, isClosing func() bool) error { svr := &tunnelServer{ - stream: stream, - services: handlers, - isClosing: isClosing, - streams: map[int64]*tunnelServerStream{}, - lastSeen: -1, + stream: stream, + services: handlers, + clientAcceptsSettings: clientAcceptsSettings, + tunnelOpts: opts, + isClosing: isClosing, + streams: map[int64]*tunnelServerStream{}, + lastSeen: -1, } return svr.serve(tunnelMetadata) } @@ -38,9 +42,11 @@ type tunnelStreamServer interface { } type tunnelServer struct { - stream tunnelStreamServer - services grpchan.HandlerMap - isClosing func() bool + stream tunnelStreamServer + services grpchan.HandlerMap + clientAcceptsSettings bool + tunnelOpts *tunnelOpts + isClosing func() bool mu sync.RWMutex streams map[int64]*tunnelServerStream @@ -48,9 +54,24 @@ type tunnelServer struct { } func (s *tunnelServer) serve(tunnelMetadata metadata.MD) error { + if s.clientAcceptsSettings { + go func() { + _ = s.stream.Send(&tunnelpb.ServerToClient{ + StreamId: -1, + Frame: &tunnelpb.ServerToClient_Settings{ + Settings: &tunnelpb.Settings{ + InitialWindowSize: initialWindowSize, + SupportedProtocolRevisions: s.tunnelOpts.supportedRevisions(), + }, + }, + }) + }() + } + ctx := context.WithValue(s.stream.Context(), tunnelMetadataIncomingContextKey{}, tunnelMetadata) ctx, cancel := context.WithCancel(ctx) defer cancel() + for { in, err := s.stream.Recv() if err != nil { @@ -65,15 +86,19 @@ func (s 
*tunnelServer) serve(tunnelMetadata metadata.MD) error { if !ok { return err } - st, _ := status.FromError(err) - _ = s.stream.Send(&tunnelpb.ServerToClient{ - StreamId: in.StreamId, - Frame: &tunnelpb.ServerToClient_CloseStream{ - CloseStream: &tunnelpb.CloseStream{ - Status: st.Proto(), + // we don't want to stall the receive loop as that could lead to + // flow control deadlock, so send on a different goroutine + go func() { + st, _ := status.FromError(err) + _ = s.stream.Send(&tunnelpb.ServerToClient{ + StreamId: in.StreamId, + Frame: &tunnelpb.ServerToClient_CloseStream{ + CloseStream: &tunnelpb.CloseStream{ + Status: st.Proto(), + }, }, - }, - }) + }) + }() } continue } @@ -86,11 +111,23 @@ func (s *tunnelServer) serve(tunnelMetadata metadata.MD) error { } } +// createStream creates a new stream with the given ID. It returns false if this frame represents +// a protocol error, in which case the tunnel channel will be aborted with the returned error. If +// it returns true, then the frame represents a valid protocol frame. If it returns true, but also +// a non-nil error, the stream will be immediately closed with the returned error, but the tunnel +// itself is still valid for subsequent RPCs. This will be the case, for example, if the requested +// method name is not implemented by the server. func (s *tunnelServer) createStream(ctx context.Context, streamID int64, frame *tunnelpb.NewStream) (bool, error) { if s.isClosing() { return true, status.Errorf(codes.Unavailable, "server is shutting down") } + if frame.ProtocolRevision != tunnelpb.ProtocolRevision_REVISION_ZERO && + frame.ProtocolRevision != tunnelpb.ProtocolRevision_REVISION_ONE { + return true, status.Errorf(codes.Unavailable, "server does not support protocol revision %d", frame.ProtocolRevision) + } + noFlowControl := frame.ProtocolRevision == tunnelpb.ProtocolRevision_REVISION_ZERO + s.mu.Lock() defer s.mu.Unlock() @@ -124,26 +161,113 @@ func (s *tunnelServer) createStream(ctx context.Context, streamID int64, frame * if md == nil { return true, status.Errorf(codes.Unimplemented, "%s not implemented", frame.MethodName) } - ctx = metadata.NewIncomingContext(ctx, fromProto(frame.RequestHeaders)) - - ch := make(chan tunnelpb.ClientToServerFrame, 1) + headers := fromProto(frame.RequestHeaders) + ctx = metadata.NewIncomingContext(ctx, headers) + var cancel context.CancelFunc + if timeout, ok := timeoutFromHeaders(headers); ok { + ctx, cancel = context.WithTimeout(ctx, timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } str := &tunnelServerStream{ ctx: ctx, + cancel: cancel, svr: s, streamID: streamID, method: frame.MethodName, stream: s.stream, isClientStream: isClientStream, isServerStream: isServerStream, - readChan: ch, - ingestChan: ch, } + sendFunc := func(data []byte, totalSize uint32, first bool) error { + if first { + return s.stream.Send(&tunnelpb.ServerToClient{ + StreamId: streamID, + Frame: &tunnelpb.ServerToClient_ResponseMessage{ + ResponseMessage: &tunnelpb.MessageData{ + Size: totalSize, + Data: data, + }, + }, + }) + } + return s.stream.Send(&tunnelpb.ServerToClient{ + StreamId: streamID, + Frame: &tunnelpb.ServerToClient_MoreResponseData{ + MoreResponseData: data, + }, + }) + } + if noFlowControl { + str.sender = newSenderWithoutFlowControl(sendFunc) + str.receiver = newReceiverWithoutFlowControl[tunnelpb.ClientToServerFrame](ctx) + } else { + str.sender = newSender(ctx, frame.InitialWindowSize, sendFunc) + str.receiver = newReceiver( + func(m tunnelpb.ClientToServerFrame) uint { + switch m := 
m.(type) { + case *tunnelpb.ClientToServer_RequestMessage: + return uint(len(m.RequestMessage.Data)) + case *tunnelpb.ClientToServer_MoreRequestData: + return uint(len(m.MoreRequestData)) + default: + return 0 + } + }, + func(windowUpdate uint32) { + if str.loadHalfClosed() != nil { + // stream already half-closed, no more data coming + return + } + _ = s.stream.Send(&tunnelpb.ServerToClient{ + StreamId: streamID, + Frame: &tunnelpb.ServerToClient_WindowUpdate{ + WindowUpdate: windowUpdate, + }, + }) + }, + initialWindowSize, + ) + } + s.streams[streamID] = str str.ctx = grpc.NewContextWithServerTransportStream(str.ctx, (*tunnelServerTransportStream)(str)) go str.serveStream(md, svc) return true, nil } +func timeoutFromHeaders(headers metadata.MD) (time.Duration, bool) { + vals := headers.Get("grpc-timeout") + if len(vals) == 0 { + return 0, false + } + timeoutStr := vals[len(vals)-1] + if len(timeoutStr) < 2 { + return 0, false + } + timeout, err := strconv.Atoi(timeoutStr[:len(timeoutStr)-1]) + if err != nil { + return 0, false + } + duration := time.Duration(timeout) + switch timeoutStr[len(timeoutStr)-1] { + case 'H': + return duration * time.Hour, true + case 'M': + return duration * time.Minute, true + case 'S': + return duration * time.Second, true + case 'm': + return duration * time.Millisecond, true + case 'u': + return duration * time.Microsecond, true + case 'n': + return duration * time.Nanosecond, true + default: + return 0, false + } +} + func (s *tunnelServer) getStream(streamID int64) (*tunnelServerStream, error) { s.mu.RLock() defer s.mu.RUnlock() @@ -181,8 +305,13 @@ func findMethod(sd *grpc.ServiceDesc, method string) interface{} { return nil } +type errHolder struct { + error +} + type tunnelServerStream struct { ctx context.Context + cancel context.CancelFunc svr *tunnelServer streamID int64 method string @@ -191,15 +320,13 @@ type tunnelServerStream struct { isClientStream bool isServerStream bool - // for "ingesting" frames into channel, from receive loop - ingestMu sync.Mutex - ingestChan chan<- tunnelpb.ClientToServerFrame - halfClosed error + sender sender + receiver receiver[tunnelpb.ClientToServerFrame] + halfClosed atomic.Pointer[errHolder] // for reading frames from channel, to read message data - readMu sync.Mutex - readChan <-chan tunnelpb.ClientToServerFrame - readErr error + readMu sync.Mutex + readErr error // for sending frames to client writeMu sync.Mutex @@ -219,27 +346,24 @@ func (st *tunnelServerStream) acceptClientFrame(frame tunnelpb.ClientToServerFra return } - switch frame.(type) { + switch frame := frame.(type) { + // NewStream handled in caller case *tunnelpb.ClientToServer_HalfClose: st.halfClose(io.EOF) - return case *tunnelpb.ClientToServer_Cancel: st.finishStream(context.Canceled) - return - } - st.ingestMu.Lock() - defer st.ingestMu.Unlock() + case *tunnelpb.ClientToServer_WindowUpdate: + st.sender.updateWindow(frame.WindowUpdate) - if st.halfClosed != nil { - // stream is half closed -- ignore subsequent messages - return - } + case nil: + st.finishStream(errors.New("protocol error: unrecognized frame type")) - select { - case st.ingestChan <- frame: - case <-st.ctx.Done(): + default: + if err := st.receiver.accept(frame); err != nil { + st.finishStream(err) + } } } @@ -313,6 +437,13 @@ func (st *tunnelServerStream) setTrailer(md metadata.MD) error { return nil } +func (st *tunnelServerStream) loadHalfClosed() error { + if val := st.halfClosed.Load(); val != nil { + return val.error + } + return nil +} + func (st *tunnelServerStream) 
Context() context.Context { return st.ctx } @@ -337,50 +468,11 @@ func (st *tunnelServerStream) SendMsg(m interface{}) error { if err != nil { return err } - - i := 0 - for { - if err := st.ctx.Err(); err != nil { - return err - } - - chunk := b - if len(b) > maxChunkSize { - chunk = b[:maxChunkSize] - } - - if i == 0 { - err = st.stream.Send(&tunnelpb.ServerToClient{ - StreamId: st.streamID, - Frame: &tunnelpb.ServerToClient_ResponseMessage{ - ResponseMessage: &tunnelpb.MessageData{ - Size: int32(len(b)), - Data: chunk, - }, - }, - }) - } else { - err = st.stream.Send(&tunnelpb.ServerToClient{ - StreamId: st.streamID, - Frame: &tunnelpb.ServerToClient_MoreResponseData{ - MoreResponseData: chunk, - }, - }) - } - - if err != nil { - return err - } - - if len(b) <= maxChunkSize { - break - } - - b = b[maxChunkSize:] - i++ + if int64(len(b)) > math.MaxUint32 { + return status.Errorf(codes.ResourceExhausted, "serialized message is too large: %d bytes > maximum %d bytes", len(b), math.MaxUint32) } - return nil + return st.sender.send(b) } func (st *tunnelServerStream) RecvMsg(m interface{}) error { @@ -436,48 +528,43 @@ func (st *tunnelServerStream) readMsgLocked() (data []byte, ok bool, err error) return nil, true, err } - // otherwise, try to read request data, but interrupt if - // stream is canceled or half-closed - select { - case <-st.ctx.Done(): - return nil, true, st.ctx.Err() - - case in, ok := <-st.readChan: - if !ok { - // don't need lock to read st.halfClosed; observing - // input channel close provides safe visibility - return nil, true, st.halfClosed + in, ok := st.receiver.dequeue() + if !ok { + var err error + if halfClosedErr := st.halfClosed.Load(); halfClosedErr != nil { + err = halfClosedErr.error } + return nil, true, err + } - switch in := in.(type) { - case *tunnelpb.ClientToServer_RequestMessage: - if msgLen != -1 { - return nil, false, status.Errorf(codes.InvalidArgument, "received redundant request message envelope") - } - msgLen = int(in.RequestMessage.Size) - b = in.RequestMessage.Data - if len(b) > msgLen { - return nil, false, status.Errorf(codes.InvalidArgument, "received more data than indicated by request message envelope") - } - if len(b) == msgLen { - return b, true, nil - } - - case *tunnelpb.ClientToServer_MoreRequestData: - if msgLen == -1 { - return nil, false, status.Errorf(codes.InvalidArgument, "never received envelope for request message") - } - b = append(b, in.MoreRequestData...) - if len(b) > msgLen { - return nil, false, status.Errorf(codes.InvalidArgument, "received more data than indicated by request message envelope") - } - if len(b) == msgLen { - return b, true, nil - } + switch in := in.(type) { + case *tunnelpb.ClientToServer_RequestMessage: + if msgLen != -1 { + return nil, false, status.Errorf(codes.InvalidArgument, "received request message envelope before previous message finished (%d/%d)", len(b), msgLen) + } + msgLen = int(in.RequestMessage.Size) + b = in.RequestMessage.Data + if len(b) > msgLen { + return nil, false, status.Errorf(codes.InvalidArgument, "received more data than indicated by request message envelope") + } + if len(b) == msgLen { + return b, true, nil + } - default: - return nil, false, status.Errorf(codes.InvalidArgument, "unrecognized frame type: %T", in) + case *tunnelpb.ClientToServer_MoreRequestData: + if msgLen == -1 { + return nil, false, status.Errorf(codes.InvalidArgument, "never received envelope for request message") + } + b = append(b, in.MoreRequestData...) 
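+			// Accumulate chunks until the total reaches the size declared in
+			// the message envelope; receiving more than the declared size is a
+			// protocol error, handled just below.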
+			if len(b) > msgLen {
+				return nil, false, status.Errorf(codes.InvalidArgument, "received more data than indicated by request message envelope")
 			}
+			if len(b) == msgLen {
+				return b, true, nil
+			}
+
+		default:
+			return nil, false, status.Errorf(codes.InvalidArgument, "unrecognized frame type: %T", in)
 		}
 	}
 }
@@ -492,6 +579,12 @@ func (st *tunnelServerStream) serveStream(md interface{}, srv interface{}) {
 		}
 		st.finishStream(err)
 	}()
+	go func() {
+		// In case context closes asynchronously via timeout,
+		// we need to make sure receiver is closed promptly.
+		<-st.ctx.Done()
+		st.receiver.cancel()
+	}()
 
 	switch md := md.(type) {
 	case *grpc.MethodDesc:
@@ -510,8 +603,8 @@ func (st *tunnelServerStream) serveStream(md interface{}, srv interface{}) {
 }
 
 func (st *tunnelServerStream) finishStream(err error) {
+	st.cancel()
 	st.svr.removeStream(st.streamID)
-
 	st.halfClose(err)
 
 	st.writeMu.Lock()
@@ -521,39 +614,51 @@ func (st *tunnelServerStream) finishStream(err error) {
 		return
 	}
 
-	if !st.sentHeaders {
-		_ = st.sendHeadersLocked()
-	}
-
 	stat, _ := status.FromError(err)
-	_ = st.stream.Send(&tunnelpb.ServerToClient{
-		StreamId: st.streamID,
-		Frame: &tunnelpb.ServerToClient_CloseStream{
-			CloseStream: &tunnelpb.CloseStream{
-				Status:           stat.Proto(),
-				ResponseTrailers: toProto(st.trailers),
+
+	headers := st.headers
+	sendHeaders := !st.sentHeaders
+	if sendHeaders {
+		st.sentHeaders = true
+		st.headers = nil
+	}
+	// we don't want to block here because we can be called from the
+	// receive loop and we're also holding a mutex, so send the close
+	// message from a different goroutine
+	trailers := st.trailers
+	go func() {
+		if sendHeaders {
+			_ = st.stream.Send(&tunnelpb.ServerToClient{
+				StreamId: st.streamID,
+				Frame: &tunnelpb.ServerToClient_ResponseHeaders{
+					ResponseHeaders: toProto(headers),
+				},
+			})
+		}
+		_ = st.stream.Send(&tunnelpb.ServerToClient{
+			StreamId: st.streamID,
+			Frame: &tunnelpb.ServerToClient_CloseStream{
+				CloseStream: &tunnelpb.CloseStream{
+					Status:           stat.Proto(),
+					ResponseTrailers: toProto(trailers),
+				},
 			},
-		},
-	})
+		})
+	}()
 
 	st.closed = true
 	st.trailers = nil
 }
 
 func (st *tunnelServerStream) halfClose(err error) {
-	st.ingestMu.Lock()
-	defer st.ingestMu.Unlock()
-
-	if st.halfClosed != nil {
-		// already closed
-		return
-	}
-
 	if err == nil {
 		err = io.EOF
 	}
-	st.halfClosed = err
-	close(st.ingestChan)
+	if !st.halfClosed.CompareAndSwap(nil, &errHolder{err}) {
+		// already closed
+		return
+	}
+	st.receiver.close()
 }
 
 type tunnelServerTransportStream tunnelServerStream
diff --git a/tunnel_test.go b/tunnel_test.go
index c969528..795e155 100644
--- a/tunnel_test.go
+++ b/tunnel_test.go
@@ -1,6 +1,7 @@
 package grpctunnel
 
 import (
+	"bytes"
 	"context"
 	"net"
 	"runtime"
@@ -12,6 +13,7 @@ import (
 	"github.com/fullstorydev/grpchan/grpchantesting"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/metadata"
@@ -23,86 +25,90 @@ func TestTunnelServiceHandler(t *testing.T) {
 	// Basic tests of the tunnel service as a gRPC channel
 
 	var svr grpchantesting.TestServer
-
-	ts := NewTunnelServiceHandler(TunnelServiceHandlerOptions{
-		AffinityKey: func(t TunnelChannel) any {
-			md, _ := metadata.FromIncomingContext(t.Context())
-			vals := md.Get("nesting-level")
-			if len(vals) == 0 {
-				return ""
-			}
-			return vals[0]
+	flowControlCases := []struct {
+		name     string
+		disabled bool
+	}{
+		{
+ name: "with-flow-control", + disabled: false, }, - }) - grpchantesting.RegisterTestServiceServer(ts, &svr) - // recursive: tunnels can be run on top of tunnels - // (not realistic, but fun exercise to verify soundness of protocol) - tunnelpb.RegisterTunnelServiceServer(ts, ts.Service()) - - l, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err, "failed to listen") - gs := grpc.NewServer() - tunnelpb.RegisterTunnelServiceServer(gs, ts.Service()) - serveDone := make(chan struct{}) - go func() { - defer close(serveDone) - assert.NoError(t, gs.Serve(l), "error from grpc server") - }() - defer func() { - gs.Stop() - <-serveDone - }() - - cc, err := grpc.Dial(l.Addr().String(), grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials())) - require.NoError(t, err, "failed top create client") - defer func() { - err := cc.Close() - require.NoError(t, err, "failed to close client conn") - }() - - cli := tunnelpb.NewTunnelServiceClient(cc) + { + name: "without-flow-control", + disabled: true, + }, + } + for _, flowControlCase := range flowControlCases { + t.Run(flowControlCase.name, func(t *testing.T) { + cli, ts := setupServer(t, &svr, flowControlCase.disabled) + runTests(context.Background(), t, modeRunNested, cli, ts, &svr, + func(_ context.Context, t *testing.T, ch grpc.ClientConnInterface) { + grpchantesting.RunChannelTestCases(t, ch, true) + }) + }) + } +} - // Make sure any goroutines used by the client and server created above have started. That - // way, we don't incorrectly think they are leaked goroutines. - time.Sleep(500 * time.Millisecond) +func TestTunnelServiceHandler_Deadlocks(t *testing.T) { + var svr grpchantesting.TestServer + cli, ts := setupServer(t, &svr, false) - runTests(context.Background(), t, false, cli, ts, &svr) + runTests(context.Background(), t, modeRunNested, cli, ts, &svr, + func(ctx context.Context, t *testing.T, ch grpc.ClientConnInterface) { + runDeadlockTests(ctx, t, ch) + }) } -func runTests(ctx context.Context, t *testing.T, nested bool, cli tunnelpb.TunnelServiceClient, ts *TunnelServiceHandler, testSvr *grpchantesting.TestServer) { - if nested { - ctx = metadata.AppendToOutgoingContext(ctx, "nesting-level", "1") - } +type nestingMode int +const ( + modeDoNotRunNested = nestingMode(iota) + modeRunNested + modeIsNested +) + +func runTests( + ctx context.Context, + t *testing.T, + mode nestingMode, + cl tunnelpb.TunnelServiceClient, + ts *TunnelServiceHandler, + testSvr *grpchantesting.TestServer, + testFunc func(ctx context.Context, t *testing.T, ch grpc.ClientConnInterface), +) { prefix := "" - if nested { + if mode == modeIsNested { prefix = "nested-" + ctx = metadata.AppendToOutgoingContext(ctx, "nesting-level", "1") } + t.Run(prefix+"forward", func(t *testing.T) { checkForGoroutineLeak(t, func() { - tunnel, err := cli.OpenTunnel(ctx) + ch, err := NewChannel(cl).Start(ctx) require.NoError(t, err, "failed to open tunnel") - ch := NewChannel(tunnel) defer func() { ch.Close() <-ch.Done() assert.NoError(t, ch.Err(), "channel ended with error") }() - grpchantesting.RunChannelTestCases(t, ch, true) + testFunc(ctx, t, ch) - if !nested { + if mode == modeRunNested { // nested/recursive test - runTests(ch.Context(), t, true, tunnelpb.NewTunnelServiceClient(ch), ts, testSvr) + runTests(ch.Context(), t, modeIsNested, + tunnelpb.NewTunnelServiceClient(ch), + ts, testSvr, testFunc, + ) } }) }) t.Run(prefix+"reverse", func(t *testing.T) { checkForGoroutineLeak(t, func() { - revSvr := NewReverseTunnelServer(cli) - if !nested { + revSvr := 
NewReverseTunnelServer(cl) + if mode == modeRunNested { // we need this to run the nested/recursive tunnel test tunnelpb.RegisterTunnelServiceServer(revSvr, ts.Service()) } @@ -121,7 +127,7 @@ func runTests(ctx context.Context, t *testing.T, nested bool, cli tunnelpb.Tunne // make sure server has registered client, so we can issue RPCs to it var ch ReverseClientConnInterface - if nested { + if mode == modeIsNested { ch = ts.KeyAsChannel("1") } else { ch = ts.AsChannel() @@ -131,11 +137,15 @@ func runTests(ctx context.Context, t *testing.T, nested bool, cli tunnelpb.Tunne err := ch.WaitForReady(timedCtx) require.NoError(t, err, "reverse channel never became ready") - grpchantesting.RunChannelTestCases(t, ch, true) + testFunc(ctx, t, ch) - if !nested { + if mode == modeRunNested { // nested/recursive test - runTests(ctx, t, true, tunnelpb.NewTunnelServiceClient(ch), ts, testSvr) + runTests( + ctx, t, modeIsNested, + tunnelpb.NewTunnelServiceClient(ch), + ts, testSvr, testFunc, + ) } for i, rt := range ts.AllReverseTunnels() { @@ -145,13 +155,186 @@ func runTests(ctx context.Context, t *testing.T, nested bool, cli tunnelpb.Tunne }) } +func runDeadlockTests(ctx context.Context, t *testing.T, ch grpc.ClientConnInterface) { + stub := grpchantesting.NewTestServiceClient(ch) + ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + defer cancel() + slowOneDone := make(chan struct{}) + defer func() { + cancel() + <-slowOneDone + }() + slowCtx := ctx + go func() { + // the slow one + defer close(slowOneDone) + + stream, err := stub.BidiStream(slowCtx) + require.NoError(t, err) + for i := 0; i < 2; i++ { + err := stream.Send(&grpchantesting.Message{ + DelayMillis: 1000, + Payload: bytes.Repeat([]byte{0, 1, 2, 3}, 10_000), + }) + if err != nil { + require.Error(t, ctx.Err()) + break + } + } + }() + time.Sleep(100 * time.Millisecond) // make sure the slow one has had time to issue its RPC + + grp, ctx := errgroup.WithContext(ctx) + for i := 0; i < 10; i++ { + grp.Go(func() error { + // this should proceed just fine, regardless of the slow one + stream, err := stub.ClientStream(ctx) + if err != nil { + return err + } + for j := 0; j < 20; j++ { + err := stream.Send(&grpchantesting.Message{ + Payload: bytes.Repeat([]byte{0, 1, 2, 3}, 5_000), + }) + if err != nil { + return err + } + } + _, err = stream.CloseAndRecv() + return err + }) + } + err := grp.Wait() + require.NoError(t, err) +} + func TestTunnelServiceHandler_Concurrency(t *testing.T) { - // Basic tests of the tunnel service as a gRPC channel + flowControlCases := []struct { + name string + disabled bool + }{ + { + name: "with-flow-control", + disabled: false, + }, + { + name: "without-flow-control", + disabled: true, + }, + } + for _, flowControlCase := range flowControlCases { + t.Run(flowControlCase.name, func(t *testing.T) { + var svr grpchantesting.TestServer + tunnelCli, ts := setupServer(t, &svr, flowControlCase.disabled) - var svr grpchantesting.TestServer + forwardCh, err := NewChannel(tunnelCli).Start(context.Background()) + require.NoError(t, err) + defer func() { + forwardCh.Close() + <-forwardCh.Done() + require.NoError(t, forwardCh.Err()) + }() + + revSvr := NewReverseTunnelServer(tunnelCli) + grpchantesting.RegisterTestServiceServer(revSvr, &svr) + serveDone := make(chan struct{}) + go func() { + defer close(serveDone) + started, err := revSvr.Serve(context.Background()) + assert.True(t, started, "ReverseTunnelServer.Serve returned false") + assert.NoError(t, err, "ReverseTunnelServer.Serve returned error") + }() + defer func() { 
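+				// Stop the reverse tunnel server and wait for its Serve
+				// goroutine to exit before the test case returns.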
+				revSvr.Stop()
+				<-serveDone
+			}()
 
-	ts := NewTunnelServiceHandler(TunnelServiceHandlerOptions{})
-	grpchantesting.RegisterTestServiceServer(ts, &svr)
+			// make sure server has registered client, so we can issue RPCs to it
+			reverseCh := ts.AsChannel()
+			timedCtx, cancel := context.WithTimeout(context.Background(), time.Second)
+			defer cancel()
+			err = reverseCh.WaitForReady(timedCtx)
+			require.NoError(t, err, "reverse channel never became ready")
+
+			// Make sure any goroutines used by the client and server created above have started. That
+			// way, we don't incorrectly think they are leaked goroutines.
+			time.Sleep(200 * time.Millisecond)
+
+			testCases := []struct {
+				name string
+				ch   grpc.ClientConnInterface
+			}{
+				{
+					name: "forward",
+					ch:   forwardCh,
+				},
+				{
+					name: "reverse",
+					ch:   reverseCh,
+				},
+			}
+
+			for _, testCase := range testCases {
+				cli := grpchantesting.NewTestServiceClient(testCase.ch)
+				t.Run(testCase.name, func(t *testing.T) {
+					done := make(chan struct{})
+					var count int32
+					runOneThread := func() {
+						for {
+							select {
+							case <-done:
+								return
+							default:
+							}
+							_, err := cli.Unary(context.Background(), &grpchantesting.Message{})
+							require.NoError(t, err)
+							atomic.AddInt32(&count, 1)
+						}
+					}
+
+					// Ten goroutines all using the same tunnel, hoping to catch data races or
+					// other concurrency-related bugs.
+					checkForGoroutineLeak(t, func() {
+						var wg sync.WaitGroup
+						for i := 0; i < 10; i++ {
+							wg.Add(1)
+							go func() {
+								defer wg.Done()
+								runOneThread()
+							}()
+						}
+						// all threads sending concurrent requests for 2 seconds
+						time.Sleep(2 * time.Second)
+						close(done)
+						wg.Wait()
+					})
+
+					t.Logf("RPCs sent: %d", atomic.LoadInt32(&count))
+				})
+			}
+		})
+	}
+}
+
+// TODO: also need more tests around channel lifecycle, and ensuring it
+// properly respects things like context cancellations, etc
+
+func setupServer(t *testing.T, svc grpchantesting.TestServiceServer, disableFlowControl bool) (tunnelpb.TunnelServiceClient, *TunnelServiceHandler) {
+	ts := NewTunnelServiceHandler(TunnelServiceHandlerOptions{
+		AffinityKey: func(t TunnelChannel) any {
+			md, _ := metadata.FromIncomingContext(t.Context())
+			vals := md.Get("nesting-level")
+			if len(vals) == 0 {
+				return ""
+			}
+			return vals[0]
+		},
+		DisableFlowControl: disableFlowControl,
+	})
+	grpchantesting.RegisterTestServiceServer(ts, svc)
+	// recursive: tunnels can be run on top of tunnels
+	// (not realistic, but fun exercise to verify soundness of implementation)
+	tunnelpb.RegisterTunnelServiceServer(ts, ts.Service())
 
 	l, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err, "failed to listen")
@@ -162,70 +345,25 @@ func TestTunnelServiceHandler_Concurrency(t *testing.T) {
 		defer close(serveDone)
 		assert.NoError(t, gs.Serve(l), "error from grpc server")
 	}()
-	defer func() {
+	t.Cleanup(func() {
 		gs.Stop()
 		<-serveDone
-	}()
+	})
 
 	cc, err := grpc.Dial(l.Addr().String(), grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()))
-	require.NoError(t, err, "failed top create client")
+	require.NoError(t, err, "failed to create client")
-	defer func() {
+	t.Cleanup(func() {
 		err := cc.Close()
 		require.NoError(t, err, "failed to close client conn")
-	}()
-
-	tc, err := tunnelpb.NewTunnelServiceClient(cc).OpenTunnel(context.Background())
-	require.NoError(t, err)
-	ch := NewChannel(tc)
-	defer func() {
-		ch.Close()
-		<-ch.Done()
-		require.NoError(t, ch.Err())
-	}()
-	cli := grpchantesting.NewTestServiceClient(ch)
-
-	done := make(chan struct{})
-	var count int32
-	runOneThread := func() {
-		for {
-			select {
-			case 
<-done:
-				return
-			default:
-			}
-			_, err := cli.Unary(context.Background(), &grpchantesting.Message{})
-			require.NoError(t, err)
-			atomic.AddInt32(&count, 1)
-		}
-	}
+	})
 
 	// Make sure any goroutines used by the client and server created above have started. That
 	// way, we don't incorrectly think they are leaked goroutines.
-	time.Sleep(500 * time.Millisecond)
-
-	// Ten goroutines all using the same tunnel, hoping to catch data races or
-	// other concurrency-related bugs.
-	checkForGoroutineLeak(t, func() {
-		var wg sync.WaitGroup
-		for i := 0; i < 10; i++ {
-			wg.Add(1)
-			go func() {
-				defer wg.Done()
-				runOneThread()
-			}()
-		}
-		// all threads sending concurrent requests for 5 seconds
-		time.Sleep(5 * time.Second)
-		close(done)
-		wg.Wait()
-	})
+	time.Sleep(200 * time.Millisecond)
 
-	t.Logf("RPCs sent: %d", atomic.LoadInt32(&count))
+	return tunnelpb.NewTunnelServiceClient(cc), ts
 }
 
-// TODO: also need more tests around channel lifecycle, and ensuring it
-// properly respects things like context cancellations, etc
-
 func checkForGoroutineLeak(t *testing.T, fn func()) {
 	before := runtime.NumGoroutine()
diff --git a/tunnelpb/tunnel.pb.go b/tunnelpb/tunnel.pb.go
index 8b1fa56..61d789b 100644
--- a/tunnelpb/tunnel.pb.go
+++ b/tunnelpb/tunnel.pb.go
@@ -22,6 +22,70 @@ const (
 	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
 )
 
+type ProtocolRevision int32
+
+const (
+	// Indicates revision zero. This revision of the protocol did not support
+	// server settings messages or window update messages. Flow control was not
+	// enforced. This version of the protocol did not even know about protocol
+	// revisions, so by default any ProtocolRevision field will be absent (and
+	// thus default to REVISION_ZERO). A server can identify a revision zero client
+	// because it will not send a "grpctunnel-negotiate" request header, and thus
+	// the server knows to not send a settings message and to not use flow control.
+	// A client can identify a revision zero server because it will not send a
+	// "grpctunnel-negotiate" response header, and thus the client knows not to
+	// expect a settings message and to not use flow control.
+	ProtocolRevision_REVISION_ZERO ProtocolRevision = 0
+	// Indicates revision one, which requires server-supplied settings before the
+	// tunnel can be used and also supports flow control. This flow control support
+	// eliminates chances of deadlock in streaming-heavy tunnel usage.
+	//
+	// This value will be provided via messages on the tunnel, even though it is
+	// technically redundant with the use of the "grpctunnel-negotiate" header.
+	// It will be used in the future to distinguish between this protocol revision
+	// and later protocol revisions.
+	ProtocolRevision_REVISION_ONE ProtocolRevision = 1
+)
+
+// Enum value maps for ProtocolRevision. 
+var ( + ProtocolRevision_name = map[int32]string{ + 0: "REVISION_ZERO", + 1: "REVISION_ONE", + } + ProtocolRevision_value = map[string]int32{ + "REVISION_ZERO": 0, + "REVISION_ONE": 1, + } +) + +func (x ProtocolRevision) Enum() *ProtocolRevision { + p := new(ProtocolRevision) + *p = x + return p +} + +func (x ProtocolRevision) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ProtocolRevision) Descriptor() protoreflect.EnumDescriptor { + return file_grpctunnel_v1_tunnel_proto_enumTypes[0].Descriptor() +} + +func (ProtocolRevision) Type() protoreflect.EnumType { + return &file_grpctunnel_v1_tunnel_proto_enumTypes[0] +} + +func (x ProtocolRevision) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ProtocolRevision.Descriptor instead. +func (ProtocolRevision) EnumDescriptor() ([]byte, []int) { + return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{0} +} + // ClientToServer is the message a client sends to a server. // // For a single stream ID, the first such message must include the new_stream @@ -43,8 +107,8 @@ type ClientToServer struct { // The ID of the stream. Stream IDs must be used in increasing order and // cannot be re-used. Unlike in the HTTP/2 protocol, the stream ID is 64-bit // so overflow in a long-lived channel is excessively unlikely. (If the - // channel were used for a stream every nanosecond, it would take close to - // 300 years to exhaust every ID and reach an overflow situation.) + // channel were used for a new stream every nanosecond, it would take close + // to 300 years to exhaust every ID and reach an overflow situation.) StreamId int64 `protobuf:"varint,1,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` // Types that are assignable to Frame: // @@ -53,6 +117,7 @@ type ClientToServer struct { // *ClientToServer_MoreRequestData // *ClientToServer_HalfClose // *ClientToServer_Cancel + // *ClientToServer_WindowUpdate Frame isClientToServer_Frame `protobuf_oneof:"frame"` } @@ -137,13 +202,22 @@ func (x *ClientToServer) GetCancel() *emptypb.Empty { return nil } +func (x *ClientToServer) GetWindowUpdate() uint32 { + if x, ok := x.GetFrame().(*ClientToServer_WindowUpdate); ok { + return x.WindowUpdate + } + return 0 +} + type isClientToServer_Frame interface { isClientToServer_Frame() } type ClientToServer_NewStream struct { // Creates a new RPC stream, which includes request header metadata. The - // stream ID must not be an already active stream. + // stream ID must be greater than all previously-used stream IDs for this + // tunnel. It is expected to start at zero for the first stream on the + // tunnel and then one for the next, and so on. NewStream *NewStream `protobuf:"bytes,2,opt,name=new_stream,json=newStream,proto3,oneof"` } @@ -175,6 +249,13 @@ type ClientToServer_Cancel struct { Cancel *emptypb.Empty `protobuf:"bytes,6,opt,name=cancel,proto3,oneof"` } +type ClientToServer_WindowUpdate struct { + // Lets the peer know that data has been consumed, so it may be able + // to send more data, based on flow control window sizes. This is only + // used in revision one of the protocol. 
+ WindowUpdate uint32 `protobuf:"varint,7,opt,name=window_update,json=windowUpdate,proto3,oneof"` +} + func (*ClientToServer_NewStream) isClientToServer_Frame() {} func (*ClientToServer_RequestMessage) isClientToServer_Frame() {} @@ -185,6 +266,8 @@ func (*ClientToServer_HalfClose) isClientToServer_Frame() {} func (*ClientToServer_Cancel) isClientToServer_Frame() {} +func (*ClientToServer_WindowUpdate) isClientToServer_Frame() {} + // ServerToClient is the message a server sends to a client. // // For a single stream ID, the first such message should include the @@ -201,15 +284,21 @@ type ServerToClient struct { // The ID of the stream. Stream IDs are defined by the client and should be // used in monotonically increasing order. They cannot be re-used. Unlike // HTTP/2, the ID is 64-bit, so overflow/re-use should not be an issue. (If - // the channel were used for a stream every nanosecond, it would take close - // to 300 years to exhaust every ID and reach an overflow situation.) + // the channel were used for a new stream every nanosecond, it would take + // close to 300 years to exhaust every ID and reach an overflow situation.) + // + // The stream ID will be -1 for messages that do not correspond to a single + // stream, but to the whole tunnel. Currently, only a Settings message will + // be sent this way. StreamId int64 `protobuf:"varint,1,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` // Types that are assignable to Frame: // + // *ServerToClient_Settings // *ServerToClient_ResponseHeaders // *ServerToClient_ResponseMessage // *ServerToClient_MoreResponseData // *ServerToClient_CloseStream + // *ServerToClient_WindowUpdate Frame isServerToClient_Frame `protobuf_oneof:"frame"` } @@ -259,6 +348,13 @@ func (m *ServerToClient) GetFrame() isServerToClient_Frame { return nil } +func (x *ServerToClient) GetSettings() *Settings { + if x, ok := x.GetFrame().(*ServerToClient_Settings); ok { + return x.Settings + } + return nil +} + func (x *ServerToClient) GetResponseHeaders() *Metadata { if x, ok := x.GetFrame().(*ServerToClient_ResponseHeaders); ok { return x.ResponseHeaders @@ -287,10 +383,29 @@ func (x *ServerToClient) GetCloseStream() *CloseStream { return nil } +func (x *ServerToClient) GetWindowUpdate() uint32 { + if x, ok := x.GetFrame().(*ServerToClient_WindowUpdate); ok { + return x.WindowUpdate + } + return 0 +} + type isServerToClient_Frame interface { isServerToClient_Frame() } +type ServerToClient_Settings struct { + // This is the very first message sent on a response stream. The tunnel + // client should await this before sending any data as it will contain + // information about the server's initial flow control window size for + // each new stream. This is only used in revision one of the protocol. + // A client that needs to interact with an older server (i.e. revision + // zero) must examine header metadata to decide if it should expect a + // settings message. Similarly, a server must examine header metadata to + // decide if it should send a settings message. + Settings *Settings `protobuf:"bytes,6,opt,name=settings,proto3,oneof"` +} + type ServerToClient_ResponseHeaders struct { // Sends response headers for this stream. If headers are sent at all, // they must be sent before any response message data. 
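The window_update frames defined above, together with the initial window sizes exchanged in the Settings and NewStream messages, describe a credit-based flow-control scheme: each sender starts with the peer's advertised window and may only transmit while it holds credit, and the receiver returns credit as the application consumes data. A minimal sketch of that discipline, assuming only what these comments state (`flowWindow` and its methods are illustrative names, not this library's sender/receiver API):

```go
package main

import (
	"fmt"
	"sync"
)

// flowWindow tracks available send credit, in bytes.
type flowWindow struct {
	mu        sync.Mutex
	cond      *sync.Cond
	available uint32
}

func newFlowWindow(initial uint32) *flowWindow {
	w := &flowWindow{available: initial}
	w.cond = sync.NewCond(&w.mu)
	return w
}

// acquire blocks until n bytes of credit are available, then consumes them.
func (w *flowWindow) acquire(n uint32) {
	w.mu.Lock()
	defer w.mu.Unlock()
	for w.available < n {
		w.cond.Wait()
	}
	w.available -= n
}

// update returns credit to the window; this models receiving a
// window_update frame from the peer.
func (w *flowWindow) update(n uint32) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.available += n
	w.cond.Broadcast()
}

func main() {
	w := newFlowWindow(65536) // e.g. the peer's advertised initial_window_size
	w.acquire(16384)          // reserve credit before sending a 16KB chunk
	w.update(16384)           // peer signals it has consumed the data
	fmt.Println("window exchange complete")
}
```

Blocking in acquire until the peer returns credit is what bounds per-stream buffering, which is how revision one eliminates the deadlock risk that revision zero had with streaming-heavy workloads.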
@@ -318,6 +433,14 @@ type ServerToClient_CloseStream struct { CloseStream *CloseStream `protobuf:"bytes,5,opt,name=close_stream,json=closeStream,proto3,oneof"` } +type ServerToClient_WindowUpdate struct { + // Lets the peer know that data has been consumed, so it may be able + // to send more data, based on flow control window sizes. + WindowUpdate uint32 `protobuf:"varint,7,opt,name=window_update,json=windowUpdate,proto3,oneof"` +} + +func (*ServerToClient_Settings) isServerToClient_Frame() {} + func (*ServerToClient_ResponseHeaders) isServerToClient_Frame() {} func (*ServerToClient_ResponseMessage) isServerToClient_Frame() {} @@ -326,19 +449,97 @@ func (*ServerToClient_MoreResponseData) isServerToClient_Frame() {} func (*ServerToClient_CloseStream) isServerToClient_Frame() {} +func (*ServerToClient_WindowUpdate) isServerToClient_Frame() {} + +type Settings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of protocol revisions that this server supports. If the + // client does not support any of them, it must hang up. This should + // never be empty, but if that is observed, the client should assume + // the server only supports revision zero. + SupportedProtocolRevisions []ProtocolRevision `protobuf:"varint,1,rep,packed,name=supported_protocol_revisions,json=supportedProtocolRevisions,proto3,enum=grpctunnel.v1.ProtocolRevision" json:"supported_protocol_revisions,omitempty"` + // The server's initial window size for all newly created streams. + // When a new stream is created, this is the flow control window for + // sending data to the server. The client indicates its own initial + // window size, for receiving data from the server, in the NewStream + // message. + // + // This value will be zero if the only supported protocol revision + // is zero. + InitialWindowSize uint32 `protobuf:"varint,2,opt,name=initial_window_size,json=initialWindowSize,proto3" json:"initial_window_size,omitempty"` +} + +func (x *Settings) Reset() { + *x = Settings{} + if protoimpl.UnsafeEnabled { + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Settings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Settings) ProtoMessage() {} + +func (x *Settings) ProtoReflect() protoreflect.Message { + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Settings.ProtoReflect.Descriptor instead. +func (*Settings) Descriptor() ([]byte, []int) { + return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{2} +} + +func (x *Settings) GetSupportedProtocolRevisions() []ProtocolRevision { + if x != nil { + return x.SupportedProtocolRevisions + } + return nil +} + +func (x *Settings) GetInitialWindowSize() uint32 { + if x != nil { + return x.InitialWindowSize + } + return 0 +} + type NewStream struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - MethodName string `protobuf:"bytes,1,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` + // The name of the method being invoked. + MethodName string `protobuf:"bytes,1,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` + // Header metadata for this request. 
RequestHeaders *Metadata `protobuf:"bytes,2,opt,name=request_headers,json=requestHeaders,proto3" json:"request_headers,omitempty"` + // The client's initial window size, for receiving data from the + // server. This will be zero if protocol_revision is zero. + InitialWindowSize uint32 `protobuf:"varint,3,opt,name=initial_window_size,json=initialWindowSize,proto3" json:"initial_window_size,omitempty"` + // The protocol revision that the client will use for this stream. + // If this revision is not supported by the server, the server will + // immediately close the stream with an error code. + ProtocolRevision ProtocolRevision `protobuf:"varint,4,opt,name=protocol_revision,json=protocolRevision,proto3,enum=grpctunnel.v1.ProtocolRevision" json:"protocol_revision,omitempty"` // TODO: codec/compressor options? } func (x *NewStream) Reset() { *x = NewStream{} if protoimpl.UnsafeEnabled { - mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[2] + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -351,7 +552,7 @@ func (x *NewStream) String() string { func (*NewStream) ProtoMessage() {} func (x *NewStream) ProtoReflect() protoreflect.Message { - mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[2] + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -364,7 +565,7 @@ func (x *NewStream) ProtoReflect() protoreflect.Message { // Deprecated: Use NewStream.ProtoReflect.Descriptor instead. func (*NewStream) Descriptor() ([]byte, []int) { - return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{2} + return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{3} } func (x *NewStream) GetMethodName() string { @@ -381,13 +582,27 @@ func (x *NewStream) GetRequestHeaders() *Metadata { return nil } +func (x *NewStream) GetInitialWindowSize() uint32 { + if x != nil { + return x.InitialWindowSize + } + return 0 +} + +func (x *NewStream) GetProtocolRevision() ProtocolRevision { + if x != nil { + return x.ProtocolRevision + } + return ProtocolRevision_REVISION_ZERO +} + type MessageData struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The full size of the message. - Size int32 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` + Size uint32 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` // The message data. This field should not be longer than 16kb (16,384 // bytes). If the full size of the message is larger then it should be // split into multiple chunks. 
The chunking is done to allow multiple @@ -401,7 +616,7 @@ type MessageData struct { func (x *MessageData) Reset() { *x = MessageData{} if protoimpl.UnsafeEnabled { - mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[3] + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -414,7 +629,7 @@ func (x *MessageData) String() string { func (*MessageData) ProtoMessage() {} func (x *MessageData) ProtoReflect() protoreflect.Message { - mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[3] + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -427,10 +642,10 @@ func (x *MessageData) ProtoReflect() protoreflect.Message { // Deprecated: Use MessageData.ProtoReflect.Descriptor instead. func (*MessageData) Descriptor() ([]byte, []int) { - return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{3} + return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{4} } -func (x *MessageData) GetSize() int32 { +func (x *MessageData) GetSize() uint32 { if x != nil { return x.Size } @@ -456,7 +671,7 @@ type CloseStream struct { func (x *CloseStream) Reset() { *x = CloseStream{} if protoimpl.UnsafeEnabled { - mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[4] + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -469,7 +684,7 @@ func (x *CloseStream) String() string { func (*CloseStream) ProtoMessage() {} func (x *CloseStream) ProtoReflect() protoreflect.Message { - mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[4] + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -482,7 +697,7 @@ func (x *CloseStream) ProtoReflect() protoreflect.Message { // Deprecated: Use CloseStream.ProtoReflect.Descriptor instead. func (*CloseStream) Descriptor() ([]byte, []int) { - return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{4} + return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{5} } func (x *CloseStream) GetResponseTrailers() *Metadata { @@ -510,7 +725,7 @@ type Metadata struct { func (x *Metadata) Reset() { *x = Metadata{} if protoimpl.UnsafeEnabled { - mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[5] + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -523,7 +738,7 @@ func (x *Metadata) String() string { func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { - mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[5] + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -536,7 +751,7 @@ func (x *Metadata) ProtoReflect() protoreflect.Message { // Deprecated: Use Metadata.ProtoReflect.Descriptor instead. 
func (*Metadata) Descriptor() ([]byte, []int) { - return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{5} + return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{6} } func (x *Metadata) GetMd() map[string]*Metadata_Values { @@ -557,7 +772,7 @@ type Metadata_Values struct { func (x *Metadata_Values) Reset() { *x = Metadata_Values{} if protoimpl.UnsafeEnabled { - mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[6] + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -570,7 +785,7 @@ func (x *Metadata_Values) String() string { func (*Metadata_Values) ProtoMessage() {} func (x *Metadata_Values) ProtoReflect() protoreflect.Message { - mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[6] + mi := &file_grpctunnel_v1_tunnel_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -583,7 +798,7 @@ func (x *Metadata_Values) ProtoReflect() protoreflect.Message { // Deprecated: Use Metadata_Values.ProtoReflect.Descriptor instead. func (*Metadata_Values) Descriptor() ([]byte, []int) { - return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{5, 0} + return file_grpctunnel_v1_tunnel_proto_rawDescGZIP(), []int{6, 0} } func (x *Metadata_Values) GetVal() []string { @@ -602,7 +817,7 @@ var file_grpctunnel_v1_tunnel_proto_rawDesc = []byte{ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x53, 0x65, + 0x6f, 0x22, 0xf8, 0x02, 0x0a, 0x0e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, @@ -622,71 +837,101 @@ var file_grpctunnel_v1_tunnel_proto_rawDesc = []byte{ 0x09, 0x68, 0x61, 0x6c, 0x66, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x06, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x48, 0x00, 0x52, 0x06, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x42, 0x07, 0x0a, 0x05, - 0x66, 0x72, 0x61, 0x6d, 0x65, 0x22, 0xb6, 0x02, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x54, 0x6f, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x44, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x47, 0x0a, 0x10, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, - 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, - 0x61, 0x48, 0x00, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x6d, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, - 0x48, 0x00, 0x52, 0x10, 0x6d, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x44, 0x61, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x0c, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x07, 0x0a, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x22, 0x6e, - 0x0a, 0x09, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, - 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0e, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0x35, - 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, - 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x7f, 0x0a, 0x0b, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x12, 0x44, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x10, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xae, 0x01, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x02, 0x6d, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x02, 0x6d, 0x64, 0x1a, 0x1a, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x10, - 0x0a, 0x03, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x76, 0x61, 0x6c, - 0x1a, 0x55, 0x0a, 0x07, 0x4d, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67,
- 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74,
- 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xb6, 0x01, 0x0a, 0x0d, 0x54, 0x75, 0x6e, 0x6e,
- 0x65, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0a, 0x4f, 0x70, 0x65,
- 0x6e, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75,
- 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f,
- 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x1a, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e,
- 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6f, 0x43,
- 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x28, 0x01, 0x30, 0x01, 0x12, 0x55, 0x0a, 0x11, 0x4f, 0x70, 0x65,
- 0x6e, 0x52, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x1d,
+ 0x74, 0x79, 0x48, 0x00, 0x52, 0x06, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x25, 0x0a, 0x0d,
+ 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x22, 0x94, 0x03, 0x0a,
+ 0x0e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6f, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12,
+ 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x08,
+ 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17,
  0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x53,
- 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6f, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1d, 0x2e,
- 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c,
- 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x28, 0x01, 0x30, 0x01,
- 0x42, 0x26, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a,
- 0x68, 0x75, 0x6d, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2f,
- 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x48, 0x00, 0x52, 0x08, 0x73, 0x65, 0x74, 0x74, 0x69,
+ 0x6e, 0x67, 0x73, 0x12, 0x44, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f,
+ 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x47, 0x0a, 0x10, 0x72, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x48,
+ 0x00, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x6d, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00,
+ 0x52, 0x10, 0x6d, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x61,
+ 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x0c, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65,
+ 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74,
+ 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0d, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69,
+ 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x66, 0x72,
+ 0x61, 0x6d, 0x65, 0x22, 0x9d, 0x01, 0x0a, 0x08, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x12, 0x61, 0x0a, 0x1c, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52,
+ 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x1a, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
+ 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69,
+ 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x77,
+ 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x11, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53,
+ 0x69, 0x7a, 0x65, 0x22, 0xec, 0x01, 0x0a, 0x09, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61,
+ 0x64, 0x61, 0x74, 0x61, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f,
+ 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x11, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77,
+ 0x53, 0x69, 0x7a, 0x65, 0x12, 0x4c, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
+ 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69,
+ 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x44, 0x61, 0x74,
+ 0x61, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x7f, 0x0a, 0x0b, 0x43, 0x6c, 0x6f,
+ 0x73, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x44, 0x0a, 0x11, 0x72, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x10, 0x72, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x2a,
+ 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xae, 0x01, 0x0a, 0x08, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x02, 0x6d, 0x64, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x64, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x02, 0x6d, 0x64, 0x1a, 0x1a, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x03, 0x76, 0x61, 0x6c, 0x1a, 0x55, 0x0a, 0x07, 0x4d, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31,
+ 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x37, 0x0a, 0x10, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x11, 0x0a, 0x0d, 0x52, 0x45, 0x56, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x5a, 0x45, 0x52, 0x4f,
+ 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x56, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x4f,
+ 0x4e, 0x45, 0x10, 0x01, 0x32, 0xb6, 0x01, 0x0a, 0x0d, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0a, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x75,
+ 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x53, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x1a, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6f, 0x43, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x28, 0x01, 0x30, 0x01, 0x12, 0x55, 0x0a, 0x11, 0x4f, 0x70, 0x65, 0x6e, 0x52, 0x65,
+ 0x76, 0x65, 0x72, 0x73, 0x65, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x1d, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x54, 0x6f, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x54, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x28, 0x01, 0x30, 0x01, 0x42, 0x26, 0x5a,
+ 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x68, 0x75, 0x6d,
+ 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x2f, 0x74, 0x75, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
@@ -701,41 +946,47 @@ func file_grpctunnel_v1_tunnel_proto_rawDescGZIP() []byte {
 	return file_grpctunnel_v1_tunnel_proto_rawDescData
 }
 
-var file_grpctunnel_v1_tunnel_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_grpctunnel_v1_tunnel_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_grpctunnel_v1_tunnel_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
 var file_grpctunnel_v1_tunnel_proto_goTypes = []interface{}{
-	(*ClientToServer)(nil),  // 0: grpctunnel.v1.ClientToServer
-	(*ServerToClient)(nil),  // 1: grpctunnel.v1.ServerToClient
-	(*NewStream)(nil),       // 2: grpctunnel.v1.NewStream
-	(*MessageData)(nil),     // 3: grpctunnel.v1.MessageData
-	(*CloseStream)(nil),     // 4: grpctunnel.v1.CloseStream
-	(*Metadata)(nil),        // 5: grpctunnel.v1.Metadata
-	(*Metadata_Values)(nil), // 6: grpctunnel.v1.Metadata.Values
-	nil,                     // 7: grpctunnel.v1.Metadata.MdEntry
-	(*emptypb.Empty)(nil),   // 8: google.protobuf.Empty
-	(*status.Status)(nil),   // 9: google.rpc.Status
+	(ProtocolRevision)(0),   // 0: grpctunnel.v1.ProtocolRevision
+	(*ClientToServer)(nil),  // 1: grpctunnel.v1.ClientToServer
+	(*ServerToClient)(nil),  // 2: grpctunnel.v1.ServerToClient
+	(*Settings)(nil),        // 3: grpctunnel.v1.Settings
+	(*NewStream)(nil),       // 4: grpctunnel.v1.NewStream
+	(*MessageData)(nil),     // 5: grpctunnel.v1.MessageData
+	(*CloseStream)(nil),     // 6: grpctunnel.v1.CloseStream
+	(*Metadata)(nil),        // 7: grpctunnel.v1.Metadata
+	(*Metadata_Values)(nil), // 8: grpctunnel.v1.Metadata.Values
+	nil,                     // 9: grpctunnel.v1.Metadata.MdEntry
+	(*emptypb.Empty)(nil),   // 10: google.protobuf.Empty
+	(*status.Status)(nil),   // 11: google.rpc.Status
 }
 var file_grpctunnel_v1_tunnel_proto_depIdxs = []int32{
-	2,  // 0: grpctunnel.v1.ClientToServer.new_stream:type_name -> grpctunnel.v1.NewStream
-	3,  // 1: grpctunnel.v1.ClientToServer.request_message:type_name -> grpctunnel.v1.MessageData
-	8,  // 2: grpctunnel.v1.ClientToServer.half_close:type_name -> google.protobuf.Empty
-	8,  // 3: grpctunnel.v1.ClientToServer.cancel:type_name -> google.protobuf.Empty
-	5,  // 4: grpctunnel.v1.ServerToClient.response_headers:type_name -> grpctunnel.v1.Metadata
-	3,  // 5: grpctunnel.v1.ServerToClient.response_message:type_name -> grpctunnel.v1.MessageData
-	4,  // 6: grpctunnel.v1.ServerToClient.close_stream:type_name -> grpctunnel.v1.CloseStream
-	5,  // 7: grpctunnel.v1.NewStream.request_headers:type_name -> grpctunnel.v1.Metadata
-	5,  // 8: grpctunnel.v1.CloseStream.response_trailers:type_name -> grpctunnel.v1.Metadata
-	9,  // 9: grpctunnel.v1.CloseStream.status:type_name -> google.rpc.Status
-	7,  // 10: grpctunnel.v1.Metadata.md:type_name -> grpctunnel.v1.Metadata.MdEntry
-	6,  // 11: grpctunnel.v1.Metadata.MdEntry.value:type_name -> grpctunnel.v1.Metadata.Values
-	0,  // 12: grpctunnel.v1.TunnelService.OpenTunnel:input_type -> grpctunnel.v1.ClientToServer
-	1,  // 13: grpctunnel.v1.TunnelService.OpenReverseTunnel:input_type -> grpctunnel.v1.ServerToClient
-	1,  // 14: grpctunnel.v1.TunnelService.OpenTunnel:output_type -> grpctunnel.v1.ServerToClient
-	0,  // 15: grpctunnel.v1.TunnelService.OpenReverseTunnel:output_type -> grpctunnel.v1.ClientToServer
-	14, // [14:16] is the sub-list for method output_type
-	12, // [12:14] is the sub-list for method input_type
-	12, // [12:12] is the sub-list for extension type_name
-	12, // [12:12] is the sub-list for extension extendee
-	0,  // [0:12] is the sub-list for field type_name
+	4,  // 0: grpctunnel.v1.ClientToServer.new_stream:type_name -> grpctunnel.v1.NewStream
+	5,  // 1: grpctunnel.v1.ClientToServer.request_message:type_name -> grpctunnel.v1.MessageData
+	10, // 2: grpctunnel.v1.ClientToServer.half_close:type_name -> google.protobuf.Empty
+	10, // 3: grpctunnel.v1.ClientToServer.cancel:type_name -> google.protobuf.Empty
+	3,  // 4: grpctunnel.v1.ServerToClient.settings:type_name -> grpctunnel.v1.Settings
+	7,  // 5: grpctunnel.v1.ServerToClient.response_headers:type_name -> grpctunnel.v1.Metadata
+	5,  // 6: grpctunnel.v1.ServerToClient.response_message:type_name -> grpctunnel.v1.MessageData
+	6,  // 7: grpctunnel.v1.ServerToClient.close_stream:type_name -> grpctunnel.v1.CloseStream
+	0,  // 8: grpctunnel.v1.Settings.supported_protocol_revisions:type_name -> grpctunnel.v1.ProtocolRevision
+	7,  // 9: grpctunnel.v1.NewStream.request_headers:type_name -> grpctunnel.v1.Metadata
+	0,  // 10: grpctunnel.v1.NewStream.protocol_revision:type_name -> grpctunnel.v1.ProtocolRevision
+	7,  // 11: grpctunnel.v1.CloseStream.response_trailers:type_name -> grpctunnel.v1.Metadata
+	11, // 12: grpctunnel.v1.CloseStream.status:type_name -> google.rpc.Status
+	9,  // 13: grpctunnel.v1.Metadata.md:type_name -> grpctunnel.v1.Metadata.MdEntry
+	8,  // 14: grpctunnel.v1.Metadata.MdEntry.value:type_name -> grpctunnel.v1.Metadata.Values
+	1,  // 15: grpctunnel.v1.TunnelService.OpenTunnel:input_type -> grpctunnel.v1.ClientToServer
+	2,  // 16: grpctunnel.v1.TunnelService.OpenReverseTunnel:input_type -> grpctunnel.v1.ServerToClient
+	2,  // 17: grpctunnel.v1.TunnelService.OpenTunnel:output_type -> grpctunnel.v1.ServerToClient
+	1,  // 18: grpctunnel.v1.TunnelService.OpenReverseTunnel:output_type -> grpctunnel.v1.ClientToServer
+	17, // [17:19] is the sub-list for method output_type
+	15, // [15:17] is the sub-list for method input_type
+	15, // [15:15] is the sub-list for extension type_name
+	15, // [15:15] is the sub-list for extension extendee
+	0,  // [0:15] is the sub-list for field type_name
 }
 
 func init() { file_grpctunnel_v1_tunnel_proto_init() }
@@ -769,7 +1020,7 @@ func file_grpctunnel_v1_tunnel_proto_init() {
 			}
 		}
 		file_grpctunnel_v1_tunnel_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*NewStream); i {
+			switch v := v.(*Settings); i {
 			case 0:
 				return &v.state
 			case 1:
@@ -781,7 +1032,7 @@ func file_grpctunnel_v1_tunnel_proto_init() {
 			}
 		}
 		file_grpctunnel_v1_tunnel_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*MessageData); i {
+			switch v := v.(*NewStream); i {
 			case 0:
 				return &v.state
 			case 1:
@@ -793,7 +1044,7 @@ func file_grpctunnel_v1_tunnel_proto_init() {
 			}
 		}
 		file_grpctunnel_v1_tunnel_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*CloseStream); i {
+			switch v := v.(*MessageData); i {
 			case 0:
 				return &v.state
 			case 1:
@@ -805,7 +1056,7 @@ func file_grpctunnel_v1_tunnel_proto_init() {
 			}
 		}
 		file_grpctunnel_v1_tunnel_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*Metadata); i {
+			switch v := v.(*CloseStream); i {
 			case 0:
 				return &v.state
 			case 1:
@@ -817,6 +1068,18 @@ func file_grpctunnel_v1_tunnel_proto_init() {
 			}
 		}
 		file_grpctunnel_v1_tunnel_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Metadata); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpctunnel_v1_tunnel_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*Metadata_Values); i {
 			case 0:
 				return &v.state
@@ -835,25 +1098,29 @@ func file_grpctunnel_v1_tunnel_proto_init() {
 		(*ClientToServer_MoreRequestData)(nil),
 		(*ClientToServer_HalfClose)(nil),
 		(*ClientToServer_Cancel)(nil),
+		(*ClientToServer_WindowUpdate)(nil),
 	}
 	file_grpctunnel_v1_tunnel_proto_msgTypes[1].OneofWrappers = []interface{}{
+		(*ServerToClient_Settings)(nil),
 		(*ServerToClient_ResponseHeaders)(nil),
 		(*ServerToClient_ResponseMessage)(nil),
 		(*ServerToClient_MoreResponseData)(nil),
 		(*ServerToClient_CloseStream)(nil),
+		(*ServerToClient_WindowUpdate)(nil),
 	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_grpctunnel_v1_tunnel_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   8,
+			NumEnums:      1,
+			NumMessages:   9,
 			NumExtensions: 0,
 			NumServices:   1,
 		},
 		GoTypes:           file_grpctunnel_v1_tunnel_proto_goTypes,
 		DependencyIndexes: file_grpctunnel_v1_tunnel_proto_depIdxs,
+		EnumInfos:         file_grpctunnel_v1_tunnel_proto_enumTypes,
 		MessageInfos:      file_grpctunnel_v1_tunnel_proto_msgTypes,
 	}.Build()
 	File_grpctunnel_v1_tunnel_proto = out.File
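The readable ASCII embedded in the regenerated raw descriptor above shows the schema changes this file encodes: a new `window_update` frame (field 7, a `uint32`) in the `frame` oneof of both `ClientToServer` and `ServerToClient`, a new `Settings` message carrying `supported_protocol_revisions` and `initial_window_size`, new `initial_window_size` and `protocol_revision` fields on `NewStream`, and a new `ProtocolRevision` enum with values `REVISION_ZERO` and `REVISION_ONE`. The sketch below is hypothetical and not part of this change; it shows how the regenerated `tunnelpb` types might be populated, assuming standard protoc-gen-go field naming (the oneof wrapper types `ServerToClient_Settings` and `ClientToServer_WindowUpdate` are confirmed by the `OneofWrappers` lists in this diff).

```go
// Hypothetical usage sketch, not part of the generated file or this diff.
package main

import (
	"fmt"

	"github.com/jhump/grpctunnel/tunnelpb"
)

func main() {
	// A server advertising which protocol revisions it supports and its
	// initial per-stream flow-control window, via the new settings frame.
	settings := &tunnelpb.ServerToClient{
		Frame: &tunnelpb.ServerToClient_Settings{
			Settings: &tunnelpb.Settings{
				SupportedProtocolRevisions: []tunnelpb.ProtocolRevision{
					tunnelpb.ProtocolRevision_REVISION_ZERO,
					tunnelpb.ProtocolRevision_REVISION_ONE,
				},
				InitialWindowSize: 65536, // example window size
			},
		},
	}

	// A client granting the peer more send budget on stream 1 after
	// consuming received data, via the new window_update frame.
	update := &tunnelpb.ClientToServer{
		StreamId: 1,
		Frame:    &tunnelpb.ClientToServer_WindowUpdate{WindowUpdate: 16384},
	}

	fmt.Println(settings, update)
}
```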
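Because `Settings.supported_protocol_revisions` is a repeated enum while `NewStream` carries a single `protocol_revision`, the descriptor suggests a negotiation in which the server advertises what it supports and the client records its choice per stream. The helper below is purely illustrative of one way a client could make that choice; it is an assumption, not a library API.

```go
// Illustrative revision-selection sketch, not part of grpctunnel.
package main

import (
	"fmt"

	"github.com/jhump/grpctunnel/tunnelpb"
)

// pickRevision chooses the highest revision the server advertises that this
// client also implements, defaulting to REVISION_ZERO.
func pickRevision(advertised []tunnelpb.ProtocolRevision) tunnelpb.ProtocolRevision {
	best := tunnelpb.ProtocolRevision_REVISION_ZERO
	for _, rev := range advertised {
		// This client understands revisions zero and one; ignore anything newer.
		if rev <= tunnelpb.ProtocolRevision_REVISION_ONE && rev > best {
			best = rev
		}
	}
	return best
}

func main() {
	chosen := pickRevision([]tunnelpb.ProtocolRevision{
		tunnelpb.ProtocolRevision_REVISION_ZERO,
		tunnelpb.ProtocolRevision_REVISION_ONE,
	})
	fmt.Println(chosen) // REVISION_ONE
}
```

The chosen revision would presumably then travel in `NewStream.protocol_revision` alongside the stream's `initial_window_size`, which is consistent with the new fields visible in the descriptor above.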