/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpclb

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	durationpb "github.com/golang/protobuf/ptypes/duration"
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
	lbgrpc "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
	lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	_ "google.golang.org/grpc/grpclog/glogger"
	"google.golang.org/grpc/internal/leakcheck"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
	"google.golang.org/grpc/status"
	testpb "google.golang.org/grpc/test/grpc_testing"
)

var (
	lbServerName = "bar.com"
	beServerName = "foo.com"
	lbToken      = "iamatoken"

	// Resolver replaces localhost with fakeName in Next().
	// Dialer replaces fakeName with localhost when dialing.
	// This will test that custom dialer is passed from Dial to grpclb.
	fakeName = "fake.Name"
)

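// serverNameCheckCreds is a test-only TransportCredentials: the server side
// writes its configured server name to the raw connection, and the client
// side reads it back and checks it against the expected name.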
type serverNameCheckCreds struct {
	mu       sync.Mutex
	sn       string
	expected string
}

func (c *serverNameCheckCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	if _, err := io.WriteString(rawConn, c.sn); err != nil {
		fmt.Printf("Failed to write the server name %s to the client %v", c.sn, err)
		return nil, nil, err
	}
	return rawConn, nil, nil
}

func (c *serverNameCheckCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	b := make([]byte, len(c.expected))
	errCh := make(chan error, 1)
	go func() {
		_, err := rawConn.Read(b)
		errCh <- err
	}()
	select {
	case err := <-errCh:
		if err != nil {
			fmt.Printf("Failed to read the server name from the server %v", err)
			return nil, nil, err
		}
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	}
	if c.expected != string(b) {
		fmt.Printf("Read the server name %s want %s", string(b), c.expected)
		return nil, nil, errors.New("received unexpected server name")
	}
	return rawConn, nil, nil
}

func (c *serverNameCheckCreds) Info() credentials.ProtocolInfo {
	c.mu.Lock()
	defer c.mu.Unlock()
	return credentials.ProtocolInfo{}
}

func (c *serverNameCheckCreds) Clone() credentials.TransportCredentials {
	c.mu.Lock()
	defer c.mu.Unlock()
	return &serverNameCheckCreds{
		expected: c.expected,
	}
}

func (c *serverNameCheckCreds) OverrideServerName(s string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.expected = s
	return nil
}

// fakeNameDialer replaces fakeName with localhost when dialing.
// This will test that custom dialer is passed from Dial to grpclb.
func fakeNameDialer(ctx context.Context, addr string) (net.Conn, error) {
	addr = strings.Replace(addr, fakeName, "localhost", 1)
	return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
}

// merge merges the new client stats into current stats.
//
// It's a test-only method. rpcStats is defined in grpclb_picker.
func (s *rpcStats) merge(cs *lbpb.ClientStats) {
	atomic.AddInt64(&s.numCallsStarted, cs.NumCallsStarted)
	atomic.AddInt64(&s.numCallsFinished, cs.NumCallsFinished)
	atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, cs.NumCallsFinishedWithClientFailedToSend)
	atomic.AddInt64(&s.numCallsFinishedKnownReceived, cs.NumCallsFinishedKnownReceived)
	s.mu.Lock()
	for _, perToken := range cs.CallsFinishedWithDrop {
		s.numCallsDropped[perToken.LoadBalanceToken] += perToken.NumCalls
	}
	s.mu.Unlock()
}

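// mapsEqual reports whether the two maps contain exactly the same key/value
// pairs.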
func mapsEqual(a, b map[string]int64) bool {
	if len(a) != len(b) {
		return false
	}
	for k, v1 := range a {
		if v2, ok := b[k]; !ok || v1 != v2 {
			return false
		}
	}
	return true
}

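// atomicEqual compares two int64 counters using atomic loads.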
func atomicEqual(a, b *int64) bool {
	return atomic.LoadInt64(a) == atomic.LoadInt64(b)
}

// equal compares two rpcStats.
//
// It's a test-only method. rpcStats is defined in grpclb_picker.
func (s *rpcStats) equal(o *rpcStats) bool {
	if !atomicEqual(&s.numCallsStarted, &o.numCallsStarted) {
		return false
	}
	if !atomicEqual(&s.numCallsFinished, &o.numCallsFinished) {
		return false
	}
	if !atomicEqual(&s.numCallsFinishedWithClientFailedToSend, &o.numCallsFinishedWithClientFailedToSend) {
		return false
	}
	if !atomicEqual(&s.numCallsFinishedKnownReceived, &o.numCallsFinishedKnownReceived) {
		return false
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	o.mu.Lock()
	defer o.mu.Unlock()
	return mapsEqual(s.numCallsDropped, o.numCallsDropped)
}

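// remoteBalancer is a fake grpclb remote balancer. It serves the BalanceLoad
// stream: server lists pushed onto sls are forwarded to the client, and client
// stats reports are merged into stats.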
type remoteBalancer struct {
	sls       chan *lbpb.ServerList
	statsDura time.Duration
	done      chan struct{}
	stats     *rpcStats
}

func newRemoteBalancer(intervals []time.Duration) *remoteBalancer {
	return &remoteBalancer{
		sls:   make(chan *lbpb.ServerList, 1),
		done:  make(chan struct{}),
		stats: newRPCStats(),
	}
}

func (b *remoteBalancer) stop() {
	close(b.sls)
	close(b.done)
}

func (b *remoteBalancer) BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServer) error {
	req, err := stream.Recv()
	if err != nil {
		return err
	}
	initReq := req.GetInitialRequest()
	if initReq.Name != beServerName {
		return status.Errorf(codes.InvalidArgument, "invalid service name: %v", initReq.Name)
	}
	resp := &lbpb.LoadBalanceResponse{
		LoadBalanceResponseType: &lbpb.LoadBalanceResponse_InitialResponse{
			InitialResponse: &lbpb.InitialLoadBalanceResponse{
				ClientStatsReportInterval: &durationpb.Duration{
					Seconds: int64(b.statsDura.Seconds()),
					Nanos:   int32(b.statsDura.Nanoseconds() - int64(b.statsDura.Seconds())*1e9),
				},
			},
		},
	}
	if err := stream.Send(resp); err != nil {
		return err
	}
	go func() {
		for {
			var (
				req *lbpb.LoadBalanceRequest
				err error
			)
			if req, err = stream.Recv(); err != nil {
				return
			}
			b.stats.merge(req.GetClientStats())
		}
	}()
	for {
		select {
		case v := <-b.sls:
			resp = &lbpb.LoadBalanceResponse{
				LoadBalanceResponseType: &lbpb.LoadBalanceResponse_ServerList{
					ServerList: v,
				},
			}
		case <-stream.Context().Done():
			return stream.Context().Err()
		}
		if err := stream.Send(resp); err != nil {
			return err
		}
	}
}

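// testServer implements the test service. Non-fallback servers verify that
// the grpclb token is present in the incoming metadata.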
type testServer struct {
	testpb.TestServiceServer

	addr     string
	fallback bool
}

const testmdkey = "testmd"

func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return nil, status.Error(codes.Internal, "failed to receive metadata")
	}
	if !s.fallback && (md == nil || md["lb-token"][0] != lbToken) {
		return nil, status.Errorf(codes.Internal, "received unexpected metadata: %v", md)
	}
	grpc.SetTrailer(ctx, metadata.Pairs(testmdkey, s.addr))
	return &testpb.Empty{}, nil
}

func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
	return nil
}

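// startBackends starts one test-service gRPC server per listener, each using
// serverNameCheckCreds with the given server name.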
func startBackends(sn string, fallback bool, lis ...net.Listener) (servers []*grpc.Server) {
	for _, l := range lis {
		creds := &serverNameCheckCreds{
			sn: sn,
		}
		s := grpc.NewServer(grpc.Creds(creds))
		testpb.RegisterTestServiceServer(s, &testServer{addr: l.Addr().String(), fallback: fallback})
		servers = append(servers, s)
		go func(s *grpc.Server, l net.Listener) {
			s.Serve(l)
		}(s, l)
	}
	return
}

func stopBackends(servers []*grpc.Server) {
	for _, s := range servers {
		s.Stop()
	}
}

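// testServers bundles the fake remote balancer, its gRPC server, the
// backends, and their listeners for a single test.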
type testServers struct {
	lbAddr   string
	ls       *remoteBalancer
	lb       *grpc.Server
	backends []*grpc.Server
	beIPs    []net.IP
	bePorts  []int

	lbListener  net.Listener
	beListeners []net.Listener
}

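// newLoadBalancer starts numberOfBackends backends and a fake remote
// balancer, all on restartable localhost listeners, and returns them together
// with a cleanup function.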
func newLoadBalancer(numberOfBackends int) (tss *testServers, cleanup func(), err error) {
	var (
		beListeners []net.Listener
		ls          *remoteBalancer
		lb          *grpc.Server
		beIPs       []net.IP
		bePorts     []int
	)
	for i := 0; i < numberOfBackends; i++ {
		// Start a backend.
		beLis, e := net.Listen("tcp", "localhost:0")
		if e != nil {
			err = fmt.Errorf("failed to listen: %v", e)
			return
		}
		beIPs = append(beIPs, beLis.Addr().(*net.TCPAddr).IP)
		bePorts = append(bePorts, beLis.Addr().(*net.TCPAddr).Port)

		beListeners = append(beListeners, newRestartableListener(beLis))
	}
	backends := startBackends(beServerName, false, beListeners...)

	// Start a load balancer.
	lbLis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		err = fmt.Errorf("failed to create the listener for the load balancer: %v", err)
		return
	}
	lbLis = newRestartableListener(lbLis)
	lbCreds := &serverNameCheckCreds{
		sn: lbServerName,
	}
	lb = grpc.NewServer(grpc.Creds(lbCreds))
	ls = newRemoteBalancer(nil)
	lbgrpc.RegisterLoadBalancerServer(lb, ls)
	go func() {
		lb.Serve(lbLis)
	}()

	tss = &testServers{
		lbAddr:   net.JoinHostPort(fakeName, strconv.Itoa(lbLis.Addr().(*net.TCPAddr).Port)),
		ls:       ls,
		lb:       lb,
		backends: backends,
		beIPs:    beIPs,
		bePorts:  bePorts,

		lbListener:  lbLis,
		beListeners: beListeners,
	}
	cleanup = func() {
		defer stopBackends(backends)
		defer func() {
			ls.stop()
			lb.Stop()
		}()
	}
	return
}

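// TestGRPCLB sends a single backend address through the fake remote balancer
// and verifies that an RPC through the grpclb balancer succeeds.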
func TestGRPCLB(t *testing.T) {
	defer leakcheck.Check(t)

	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()

	tss, cleanup, err := newLoadBalancer(1)
	if err != nil {
		t.Fatalf("failed to create new load balancer: %v", err)
	}
	defer cleanup()

	be := &lbpb.Server{
		IpAddress:        tss.beIPs[0],
		Port:             int32(tss.bePorts[0]),
		LoadBalanceToken: lbToken,
	}
	var bes []*lbpb.Server
	bes = append(bes, be)
	sl := &lbpb.ServerList{
		Servers: bes,
	}
	tss.ls.sls <- sl
	creds := serverNameCheckCreds{
		expected: beServerName,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
		grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer))
	if err != nil {
		t.Fatalf("Failed to dial to the backend %v", err)
	}
	defer cc.Close()
	testC := testpb.NewTestServiceClient(cc)

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{
		Addr:       tss.lbAddr,
		Type:       resolver.GRPCLB,
		ServerName: lbServerName,
	}}})

	if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
	}
}

// The remote balancer sends a response with duplicates to the grpclb client.
func TestGRPCLBWeighted(t *testing.T) {
	defer leakcheck.Check(t)

	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()

	tss, cleanup, err := newLoadBalancer(2)
	if err != nil {
		t.Fatalf("failed to create new load balancer: %v", err)
	}
	defer cleanup()

	beServers := []*lbpb.Server{{
		IpAddress:        tss.beIPs[0],
		Port:             int32(tss.bePorts[0]),
		LoadBalanceToken: lbToken,
	}, {
		IpAddress:        tss.beIPs[1],
		Port:             int32(tss.bePorts[1]),
		LoadBalanceToken: lbToken,
	}}
	portsToIndex := make(map[int]int)
	for i := range beServers {
		portsToIndex[tss.bePorts[i]] = i
	}

	creds := serverNameCheckCreds{
		expected: beServerName,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
		grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer))
	if err != nil {
		t.Fatalf("Failed to dial to the backend %v", err)
	}
	defer cc.Close()
	testC := testpb.NewTestServiceClient(cc)

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{
		Addr:       tss.lbAddr,
		Type:       resolver.GRPCLB,
		ServerName: lbServerName,
	}}})

	sequences := []string{"00101", "00011"}
	for _, seq := range sequences {
		var (
			bes    []*lbpb.Server
			p      peer.Peer
			result string
		)
		for _, s := range seq {
			bes = append(bes, beServers[s-'0'])
		}
		tss.ls.sls <- &lbpb.ServerList{Servers: bes}

		for i := 0; i < 1000; i++ {
			if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
				t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
			}
			result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port])
		}
		// The generated result will be in a format like "0010100101".
		if !strings.Contains(result, strings.Repeat(seq, 2)) {
			t.Errorf("got result sequence %q, want pattern %q", result, seq)
		}
	}
}

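// TestDropRequest verifies that picks landing on a server-list entry with
// Drop set fail with codes.Unavailable, while the remaining picks still reach
// the backends in round-robin order.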
func TestDropRequest(t *testing.T) {
	defer leakcheck.Check(t)

	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()

	tss, cleanup, err := newLoadBalancer(2)
	if err != nil {
		t.Fatalf("failed to create new load balancer: %v", err)
	}
	defer cleanup()
	tss.ls.sls <- &lbpb.ServerList{
		Servers: []*lbpb.Server{{
			IpAddress:        tss.beIPs[0],
			Port:             int32(tss.bePorts[0]),
			LoadBalanceToken: lbToken,
			Drop:             false,
		}, {
			IpAddress:        tss.beIPs[1],
			Port:             int32(tss.bePorts[1]),
			LoadBalanceToken: lbToken,
			Drop:             false,
		}, {
			Drop: true,
		}},
	}
	creds := serverNameCheckCreds{
		expected: beServerName,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
		grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer))
	if err != nil {
		t.Fatalf("Failed to dial to the backend %v", err)
	}
	defer cc.Close()
	testC := testpb.NewTestServiceClient(cc)

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{
		Addr:       tss.lbAddr,
		Type:       resolver.GRPCLB,
		ServerName: lbServerName,
	}}})

	// Wait for the first non-fail-fast RPC to succeed, so that both backend
	// connections are up before the round-robin/drop pattern below is checked.
	var i int
	for i = 0; i < 1000; i++ {
		if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err == nil {
			break
		}
		time.Sleep(time.Millisecond)
	}
	if i >= 1000 {
		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
	}
	select {
	case <-ctx.Done():
		t.Fatal("timed out", ctx.Err())
	default:
	}
	for _, failfast := range []bool{true, false} {
		for i := 0; i < 3; i++ {
			// The 1st RPC picks the second item in the server list. It should
			// succeed since it chooses a non-drop backend according to the
			// round robin policy.
			if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(!failfast)); err != nil {
				t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
			}
			// The 2nd RPC should fail, because it picks the last item in the
			// server list, which has Drop set to true.
			if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(!failfast)); status.Code(err) != codes.Unavailable {
				t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.Unavailable)
			}
			// The 3rd RPC picks the first item in the server list. It should
			// succeed since it chooses a non-drop backend according to the
			// round robin policy.
			if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(!failfast)); err != nil {
				t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
			}
		}
	}
	tss.backends[0].Stop()
	// The last pick was backend 0. Closing backend 0 doesn't reset the drop
	// index (for level 1 picking), so the following picks will be (backend1,
	// drop, backend1) instead of (backend1, backend1, drop), as they would be
	// if the drop index were reset.
	time.Sleep(time.Second)
	for i := 0; i < 3; i++ {
		var p peer.Peer
		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
			t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		if want := tss.bePorts[1]; p.Addr.(*net.TCPAddr).Port != want {
			t.Errorf("got peer: %v, want peer port: %v", p.Addr, want)
		}

		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.Unavailable {
			t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.Unavailable)
		}

		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
			t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		if want := tss.bePorts[1]; p.Addr.(*net.TCPAddr).Port != want {
			t.Errorf("got peer: %v, want peer port: %v", p.Addr, want)
		}
	}
}

// When the balancer in use disconnects, grpclb should connect to the next
// address from the resolved balancer address list.
func TestBalancerDisconnects(t *testing.T) {
	defer leakcheck.Check(t)

	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()

	var (
		tests []*testServers
		lbs   []*grpc.Server
	)
	for i := 0; i < 2; i++ {
		tss, cleanup, err := newLoadBalancer(1)
		if err != nil {
			t.Fatalf("failed to create new load balancer: %v", err)
		}
		defer cleanup()

		be := &lbpb.Server{
			IpAddress:        tss.beIPs[0],
			Port:             int32(tss.bePorts[0]),
			LoadBalanceToken: lbToken,
		}
		var bes []*lbpb.Server
		bes = append(bes, be)
		sl := &lbpb.ServerList{
			Servers: bes,
		}
		tss.ls.sls <- sl

		tests = append(tests, tss)
		lbs = append(lbs, tss.lb)
	}

	creds := serverNameCheckCreds{
		expected: beServerName,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
		grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer))
	if err != nil {
		t.Fatalf("Failed to dial to the backend %v", err)
	}
	defer cc.Close()
	testC := testpb.NewTestServiceClient(cc)

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{
		Addr:       tests[0].lbAddr,
		Type:       resolver.GRPCLB,
		ServerName: lbServerName,
	}, {
		Addr:       tests[1].lbAddr,
		Type:       resolver.GRPCLB,
		ServerName: lbServerName,
	}}})

	var p peer.Peer
	if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
	}
	if p.Addr.(*net.TCPAddr).Port != tests[0].bePorts[0] {
		t.Fatalf("got peer: %v, want peer port: %v", p.Addr, tests[0].bePorts[0])
	}

	lbs[0].Stop()
	// Stop balancer[0]; balancer[1] should be used by grpclb.
	// Check the peer address to see if that happened.
	for i := 0; i < 1000; i++ {
		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
			t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		if p.Addr.(*net.TCPAddr).Port == tests[1].bePorts[0] {
			return
		}
		time.Sleep(time.Millisecond)
	}
	t.Fatalf("No RPC sent to second backend after 1 second")
}

func TestFallback(t *testing.T) {
	balancer.Register(newLBBuilderWithFallbackTimeout(100 * time.Millisecond))
	defer balancer.Register(newLBBuilder())

	defer leakcheck.Check(t)

	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()

	tss, cleanup, err := newLoadBalancer(1)
	if err != nil {
		t.Fatalf("failed to create new load balancer: %v", err)
	}
	defer cleanup()

	// Start a standalone backend.
	beLis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to listen %v", err)
	}
	defer beLis.Close()
	standaloneBEs := startBackends(beServerName, true, beLis)
	defer stopBackends(standaloneBEs)

	be := &lbpb.Server{
		IpAddress:        tss.beIPs[0],
		Port:             int32(tss.bePorts[0]),
		LoadBalanceToken: lbToken,
	}
	var bes []*lbpb.Server
	bes = append(bes, be)
	sl := &lbpb.ServerList{
		Servers: bes,
	}
	tss.ls.sls <- sl
	creds := serverNameCheckCreds{
		expected: beServerName,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
		grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer))
	if err != nil {
		t.Fatalf("Failed to dial to the backend %v", err)
	}
	defer cc.Close()
	testC := testpb.NewTestServiceClient(cc)

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{
		Addr:       "invalid.address",
		Type:       resolver.GRPCLB,
		ServerName: lbServerName,
	}, {
		Addr:       beLis.Addr().String(),
		Type:       resolver.Backend,
		ServerName: beServerName,
	}}})

	var p peer.Peer
	if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
		t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, <nil>", err)
	}
	if p.Addr.String() != beLis.Addr().String() {
		t.Fatalf("got peer: %v, want peer: %v", p.Addr, beLis.Addr())
	}

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{
		Addr:       tss.lbAddr,
		Type:       resolver.GRPCLB,
		ServerName: lbServerName,
	}, {
		Addr:       beLis.Addr().String(),
		Type:       resolver.Backend,
		ServerName: beServerName,
	}}})

	var backendUsed bool
	for i := 0; i < 1000; i++ {
		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
			t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] {
			backendUsed = true
			break
		}
		time.Sleep(time.Millisecond)
	}
	if !backendUsed {
		t.Fatalf("No RPC sent to backend behind remote balancer after 1 second")
	}

	// Close the backend and remote balancer connections; the client should
	// switch to the fallback backend.
	tss.beListeners[0].(*restartableListener).stopPreviousConns()
	tss.lbListener.(*restartableListener).stopPreviousConns()
	time.Sleep(time.Second)

	var fallbackUsed bool
	for i := 0; i < 1000; i++ {
		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
			t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		if p.Addr.String() == beLis.Addr().String() {
			fallbackUsed = true
			break
		}
		time.Sleep(time.Millisecond)
	}
	if !fallbackUsed {
		t.Fatalf("No RPC sent to fallback after 1 second")
	}

	// Restart the backend and the remote balancer; the client should stop
	// using the fallback and switch back to the backend behind the remote
	// balancer.
	tss.beListeners[0].(*restartableListener).restart()
	tss.lbListener.(*restartableListener).restart()
	tss.ls.sls <- sl

	time.Sleep(time.Second)

	var backendUsed2 bool
	for i := 0; i < 1000; i++ {
		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
			t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] {
			backendUsed2 = true
			break
		}
		time.Sleep(time.Millisecond)
	}
	if !backendUsed2 {
		t.Fatalf("No RPC sent to backend behind remote balancer after 1 second")
	}
}

// TestGRPCLBPickFirst configures grpclb to use pick_first for its backend
// connections and verifies that consecutive RPCs all land on a single
// backend, changing only when the server list changes.
func TestGRPCLBPickFirst(t *testing.T) {
	balancer.Register(newLBBuilderWithPickFirst())
	defer balancer.Register(newLBBuilder())

	defer leakcheck.Check(t)

	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()

	tss, cleanup, err := newLoadBalancer(3)
	if err != nil {
		t.Fatalf("failed to create new load balancer: %v", err)
	}
	defer cleanup()

	beServers := []*lbpb.Server{{
		IpAddress:        tss.beIPs[0],
		Port:             int32(tss.bePorts[0]),
		LoadBalanceToken: lbToken,
	}, {
		IpAddress:        tss.beIPs[1],
		Port:             int32(tss.bePorts[1]),
		LoadBalanceToken: lbToken,
	}, {
		IpAddress:        tss.beIPs[2],
		Port:             int32(tss.bePorts[2]),
		LoadBalanceToken: lbToken,
	}}
	portsToIndex := make(map[int]int)
	for i := range beServers {
		portsToIndex[tss.bePorts[i]] = i
	}

	creds := serverNameCheckCreds{
		expected: beServerName,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
		grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer))
	if err != nil {
		t.Fatalf("Failed to dial to the backend %v", err)
	}
	defer cc.Close()
	testC := testpb.NewTestServiceClient(cc)

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{
		Addr:       tss.lbAddr,
		Type:       resolver.GRPCLB,
		ServerName: lbServerName,
	}}})

	var p peer.Peer

	portPicked1 := 0
	tss.ls.sls <- &lbpb.ServerList{Servers: beServers[1:2]}
	for i := 0; i < 1000; i++ {
		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
			t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, <nil>", err)
		}
		if portPicked1 == 0 {
			portPicked1 = p.Addr.(*net.TCPAddr).Port
			continue
		}
		if portPicked1 != p.Addr.(*net.TCPAddr).Port {
			t.Fatalf("Different backends are picked for RPCs: %v vs %v", portPicked1, p.Addr.(*net.TCPAddr).Port)
		}
	}

	portPicked2 := portPicked1
	tss.ls.sls <- &lbpb.ServerList{Servers: beServers[:1]}
	for i := 0; i < 1000; i++ {
		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
			t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, <nil>", err)
		}
		if portPicked2 == portPicked1 {
			portPicked2 = p.Addr.(*net.TCPAddr).Port
			continue
		}
		if portPicked2 != p.Addr.(*net.TCPAddr).Port {
			t.Fatalf("Different backends are picked for RPCs: %v vs %v", portPicked2, p.Addr.(*net.TCPAddr).Port)
		}
	}

	portPicked := portPicked2
	tss.ls.sls <- &lbpb.ServerList{Servers: beServers[1:]}
	for i := 0; i < 1000; i++ {
		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil {
			t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, <nil>", err)
		}
		if portPicked == portPicked2 {
			portPicked = p.Addr.(*net.TCPAddr).Port
			continue
		}
		if portPicked != p.Addr.(*net.TCPAddr).Port {
			t.Fatalf("Different backends are picked for RPCs: %v vs %v", portPicked, p.Addr.(*net.TCPAddr).Port)
		}
	}
}

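// failPreRPCCred is a per-RPC credential that fails GetRequestMetadata for any
// RPC whose URI contains failtosendURI, so such RPCs fail before being sent.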
type failPreRPCCred struct{}

func (failPreRPCCred) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	if strings.Contains(uri[0], failtosendURI) {
		return nil, fmt.Errorf("rpc should fail to send")
	}
	return nil, nil
}

func (failPreRPCCred) RequireTransportSecurity() bool {
	return false
}

func checkStats(stats, expected *rpcStats) error {
	if !stats.equal(expected) {
		return fmt.Errorf("stats not equal: got %+v, want %+v", stats, expected)
	}
	return nil
}

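// runAndGetStats starts one backend behind a fake remote balancer (optionally
// adding a drop entry to the server list), runs the given RPCs against it,
// and returns the load-report stats collected by the balancer.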
func runAndGetStats(t *testing.T, drop bool, runRPCs func(*grpc.ClientConn)) *rpcStats {
	defer leakcheck.Check(t)

	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()

	tss, cleanup, err := newLoadBalancer(1)
	if err != nil {
		t.Fatalf("failed to create new load balancer: %v", err)
	}
	defer cleanup()
	servers := []*lbpb.Server{{
		IpAddress:        tss.beIPs[0],
		Port:             int32(tss.bePorts[0]),
		LoadBalanceToken: lbToken,
	}}
	if drop {
		servers = append(servers, &lbpb.Server{
			LoadBalanceToken: lbToken,
			Drop:             drop,
		})
	}
	tss.ls.sls <- &lbpb.ServerList{Servers: servers}
	tss.ls.statsDura = 100 * time.Millisecond
	creds := serverNameCheckCreds{expected: beServerName}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
		grpc.WithTransportCredentials(&creds),
		grpc.WithPerRPCCredentials(failPreRPCCred{}),
		grpc.WithContextDialer(fakeNameDialer))
	if err != nil {
		t.Fatalf("Failed to dial to the backend %v", err)
	}
	defer cc.Close()

	r.UpdateState(resolver.State{Addresses: []resolver.Address{{
		Addr:       tss.lbAddr,
		Type:       resolver.GRPCLB,
		ServerName: lbServerName,
	}}})

	runRPCs(cc)
	time.Sleep(1 * time.Second)
	stats := tss.ls.stats
	return stats
}

const (
	countRPC      = 40
	failtosendURI = "failtosend"
)

func TestGRPCLBStatsUnarySuccess(t *testing.T) {
	defer leakcheck.Check(t)
	stats := runAndGetStats(t, false, func(cc *grpc.ClientConn) {
		testC := testpb.NewTestServiceClient(cc)
		// The first non-failfast RPC succeeds, all connections are up.
		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
			t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		for i := 0; i < countRPC-1; i++ {
			testC.EmptyCall(context.Background(), &testpb.Empty{})
		}
	})

	if err := checkStats(stats, &rpcStats{
		numCallsStarted:               int64(countRPC),
		numCallsFinished:              int64(countRPC),
		numCallsFinishedKnownReceived: int64(countRPC),
	}); err != nil {
		t.Fatal(err)
	}
}

func TestGRPCLBStatsUnaryDrop(t *testing.T) {
	defer leakcheck.Check(t)
	stats := runAndGetStats(t, true, func(cc *grpc.ClientConn) {
		testC := testpb.NewTestServiceClient(cc)
		// The first non-failfast RPC succeeds, all connections are up.
		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
			t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		for i := 0; i < countRPC-1; i++ {
			testC.EmptyCall(context.Background(), &testpb.Empty{})
		}
	})

	if err := checkStats(stats, &rpcStats{
		numCallsStarted:               int64(countRPC),
		numCallsFinished:              int64(countRPC),
		numCallsFinishedKnownReceived: int64(countRPC) / 2,
		numCallsDropped:               map[string]int64{lbToken: int64(countRPC) / 2},
	}); err != nil {
		t.Fatal(err)
	}
}

func TestGRPCLBStatsUnaryFailedToSend(t *testing.T) {
	defer leakcheck.Check(t)
	stats := runAndGetStats(t, false, func(cc *grpc.ClientConn) {
		testC := testpb.NewTestServiceClient(cc)
		// The first non-failfast RPC succeeds, all connections are up.
		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
			t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		for i := 0; i < countRPC-1; i++ {
			cc.Invoke(context.Background(), failtosendURI, &testpb.Empty{}, nil)
		}
	})

	if err := checkStats(stats, &rpcStats{
		numCallsStarted:                        int64(countRPC),
		numCallsFinished:                       int64(countRPC),
		numCallsFinishedWithClientFailedToSend: int64(countRPC - 1),
		numCallsFinishedKnownReceived:          1,
	}); err != nil {
		t.Fatal(err)
	}
}

func TestGRPCLBStatsStreamingSuccess(t *testing.T) {
	defer leakcheck.Check(t)
	stats := runAndGetStats(t, false, func(cc *grpc.ClientConn) {
		testC := testpb.NewTestServiceClient(cc)
		// The first non-failfast RPC succeeds, all connections are up.
		stream, err := testC.FullDuplexCall(context.Background(), grpc.WaitForReady(true))
		if err != nil {
			t.Fatalf("%v.FullDuplexCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		for {
			if _, err = stream.Recv(); err == io.EOF {
				break
			}
		}
		for i := 0; i < countRPC-1; i++ {
			stream, err = testC.FullDuplexCall(context.Background())
			if err == nil {
				// Wait for stream to end if err is nil.
				for {
					if _, err = stream.Recv(); err == io.EOF {
						break
					}
				}
			}
		}
	})

	if err := checkStats(stats, &rpcStats{
		numCallsStarted:               int64(countRPC),
		numCallsFinished:              int64(countRPC),
		numCallsFinishedKnownReceived: int64(countRPC),
	}); err != nil {
		t.Fatal(err)
	}
}

func TestGRPCLBStatsStreamingDrop(t *testing.T) {
	defer leakcheck.Check(t)
	stats := runAndGetStats(t, true, func(cc *grpc.ClientConn) {
		testC := testpb.NewTestServiceClient(cc)
		// The first non-failfast RPC succeeds, all connections are up.
		stream, err := testC.FullDuplexCall(context.Background(), grpc.WaitForReady(true))
		if err != nil {
			t.Fatalf("%v.FullDuplexCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		for {
			if _, err = stream.Recv(); err == io.EOF {
				break
			}
		}
		for i := 0; i < countRPC-1; i++ {
			stream, err = testC.FullDuplexCall(context.Background())
			if err == nil {
				// Wait for stream to end if err is nil.
				for {
					if _, err = stream.Recv(); err == io.EOF {
						break
					}
				}
			}
		}
	})

	if err := checkStats(stats, &rpcStats{
		numCallsStarted:               int64(countRPC),
		numCallsFinished:              int64(countRPC),
		numCallsFinishedKnownReceived: int64(countRPC) / 2,
		numCallsDropped:               map[string]int64{lbToken: int64(countRPC) / 2},
	}); err != nil {
		t.Fatal(err)
	}
}

func TestGRPCLBStatsStreamingFailedToSend(t *testing.T) {
	defer leakcheck.Check(t)
	stats := runAndGetStats(t, false, func(cc *grpc.ClientConn) {
		testC := testpb.NewTestServiceClient(cc)
		// The first non-failfast RPC succeeds, all connections are up.
		stream, err := testC.FullDuplexCall(context.Background(), grpc.WaitForReady(true))
		if err != nil {
			t.Fatalf("%v.FullDuplexCall(_, _) = _, %v, want _, <nil>", testC, err)
		}
		for {
			if _, err = stream.Recv(); err == io.EOF {
				break
			}
		}
		for i := 0; i < countRPC-1; i++ {
			cc.NewStream(context.Background(), &grpc.StreamDesc{}, failtosendURI)
		}
	})

	if err := checkStats(stats, &rpcStats{
		numCallsStarted:                        int64(countRPC),
		numCallsFinished:                       int64(countRPC),
		numCallsFinishedWithClientFailedToSend: int64(countRPC - 1),
		numCallsFinishedKnownReceived:          1,
	}); err != nil {
		t.Fatal(err)
	}
}