From f89a049907cda52cb975777e7032365ded1f7a96 Mon Sep 17 00:00:00 2001
From: Knut Ahlers
Date: Sun, 27 Jan 2019 15:48:43 +0100
Subject: [PATCH] Lint: Fix linter errors, improve error handling

Signed-off-by: Knut Ahlers
---
 influx.go     | 14 ++++++--------
 main.go       |  2 +-
 ping.go       | 10 +++++++---
 spark.go      | 20 ++++++++++++--------
 throughput.go | 25 ++++++++++++++-----------
 5 files changed, 40 insertions(+), 31 deletions(-)

diff --git a/influx.go b/influx.go
index f5c1ad9..33edcab 100644
--- a/influx.go
+++ b/influx.go
@@ -21,7 +21,7 @@ type metricsSender struct {
 	influxDB string
 }
 
-func NewMetricsSender(influxHost, influxUser, influxPass, influxDatabase string) (*metricsSender, error) {
+func newMetricsSender(influxHost, influxUser, influxPass, influxDatabase string) (*metricsSender, error) {
 	out := &metricsSender{
 		errs:     make(chan error, 10),
 		influxDB: influxDatabase,
@@ -40,7 +40,7 @@ func (m *metricsSender) ForceTransmit() error {
 func (m *metricsSender) RecordPoint(name string, tags map[string]string, fields map[string]interface{}) error {
 	pt, err := influx.NewPoint(name, tags, fields, time.Now())
 	if err != nil {
-		return err
+		return errors.Wrap(err, "Unable to create point")
 	}
 
 	m.batchLock.Lock()
@@ -56,7 +56,7 @@ func (m *metricsSender) resetBatch() error {
 	})
 
 	if err != nil {
-		return err
+		return errors.Wrap(err, "Unable to create new points batch")
 	}
 
 	m.batch = b
@@ -80,9 +80,7 @@ func (m *metricsSender) transmit() error {
 	if err := m.client.Write(m.batch); err != nil {
 		return errors.Wrap(err, "Unable to write recorded points")
 	}
-	m.resetBatch()
-
-	return nil
+	return errors.Wrap(m.resetBatch(), "Unable to reset batch")
 }
 
 func (m *metricsSender) initialize(influxHost, influxUser, influxPass string) error {
@@ -94,12 +92,12 @@ func (m *metricsSender) initialize(influxHost, influxUser, influxPass string) er
 	})
 
 	if err != nil {
-		return err
+		return errors.Wrap(err, "Unable to create InfluxDB HTTP client")
 	}
 	m.client = influxClient
 
 	if err := m.resetBatch(); err != nil {
-		return err
+		return errors.Wrap(err, "Unable to reset batch")
 	}
 
 	go m.sendLoop()
diff --git a/main.go b/main.go
index de23eba..aa682a0 100644
--- a/main.go
+++ b/main.go
@@ -53,7 +53,7 @@ func main() {
 	var err error
 
 	if cfg.InfluxDB != "" {
-		if metrics, err = NewMetricsSender(cfg.InfluxHost, cfg.InfluxUser, cfg.InfluxPass, cfg.InfluxDB); err != nil {
+		if metrics, err = newMetricsSender(cfg.InfluxHost, cfg.InfluxUser, cfg.InfluxPass, cfg.InfluxDB); err != nil {
 			log.WithError(err).Fatalf("Unable to initialize InfluxDB sender")
 		}
 	}
diff --git a/ping.go b/ping.go
index e820233..159cebe 100644
--- a/ping.go
+++ b/ping.go
@@ -4,6 +4,8 @@ import (
 	"math"
 	"sort"
 	"time"
+
+	"github.com/pkg/errors"
 )
 
 type pingHistory []int64
@@ -12,18 +14,20 @@ func (s *sparkClient) ExecutePingTest(t *testResult) error {
 	ph := pingHistory{}
 
 	if err := s.connect(); err != nil {
-		return err
+		return errors.Wrap(err, "Unable to connect")
 	}
 
 	if err := s.writeCommand("ECO"); err != nil {
-		return err
+		return errors.Wrap(err, "Unable to send ECO command")
 	}
 
 	buf := make([]byte, 1)
 
 	for i := 0; i < numPings; i++ {
 		start := time.Now()
-		s.conn.Write([]byte{46})
+		if _, err := s.conn.Write([]byte{46}); err != nil {
+			return err
+		}
 
 		if _, err := s.conn.Read(buf); err != nil {
 			return err
diff --git a/spark.go b/spark.go
index 46ef37c..5192853 100644
--- a/spark.go
+++ b/spark.go
@@ -7,6 +7,7 @@ import (
 	"net"
 	"strings"
 
+	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 )
 
@@ -74,7 +75,7 @@ func newSparkClient(hostname string, port int) *sparkClient {
 func (s *sparkClient) dial() error {
 	c, err := net.Dial("tcp", s.remote)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "Unable to dial")
 	}
 
 	s.conn = c
@@ -83,13 +84,13 @@ func (s *sparkClient) dial() error {
 
 func (s *sparkClient) connect() error {
 	if err := s.dial(); err != nil {
-		return fmt.Errorf("Unable to connect to sparkyfish-server %q: %s", s.remote, err)
+		return errors.Wrapf(err, "Unable to connect to sparkyfish-server %q", s.remote)
 	}
 
 	s.reader = bufio.NewReader(s.conn)
 
 	if err := s.writeCommand(fmt.Sprintf("HELO%d", protocolVersion)); err != nil {
-		return err
+		return errors.Wrap(err, "Unable to send HELO command")
 	}
 
 	return s.readGreeting()
@@ -97,19 +98,19 @@ func (s *sparkClient) connect() error {
 
 func (s *sparkClient) writeCommand(command string) error {
 	if _, err := fmt.Fprintf(s.conn, "%s\r\n", command); err != nil {
-		return fmt.Errorf("Unable to send command %q: %s", command, err)
+		return errors.Wrapf(err, "Unable to send command %q", command)
 	}
 	return nil
 }
 
 func (s *sparkClient) readGreeting() error {
 	if helo, err := s.reader.ReadString('\n'); err != nil || strings.TrimSpace(helo) != "HELO" {
-		return fmt.Errorf("Unexpected response to greeting")
+		return errors.New("Unexpected response to greeting")
 	}
 
 	cn, err := s.reader.ReadString('\n')
 	if err != nil {
-		return err
+		return errors.Wrap(err, "Unable to read string")
 	}
 	cn = strings.TrimSpace(cn)
 
@@ -119,11 +120,14 @@ func (s *sparkClient) readGreeting() error {
 
 	loc, err := s.reader.ReadString('\n')
 	if err != nil {
-		return err
+		return errors.Wrap(err, "Unable to read string")
 	}
 	loc = strings.TrimSpace(loc)
 
-	log.Debugf("Connected to %q in location %q", cn, loc)
+	log.WithFields(log.Fields{
+		"cn":       cn,
+		"location": loc,
+	}).Debug("Connected to server")
 
 	return nil
 }
diff --git a/throughput.go b/throughput.go
index e782321..69bd23f 100644
--- a/throughput.go
+++ b/throughput.go
@@ -3,36 +3,37 @@ package main
 import (
 	"bytes"
 	"crypto/rand"
-	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
 	"net"
 	"syscall"
 	"time"
+
+	"github.com/pkg/errors"
 )
 
 func (s *sparkClient) ExecuteThroughputTest(t *testResult) error {
 	if err := s.runSendTest(t); err != nil {
-		return err
+		return errors.Wrap(err, "Send-test failed")
 	}
-	return s.runRecvTest(t)
+	return errors.Wrap(s.runRecvTest(t), "Recv-test failed")
 }
 
 func (s *sparkClient) runSendTest(t *testResult) error {
 	data := make([]byte, 1024*blockSize)
 	if _, err := rand.Read(data); err != nil {
-		return fmt.Errorf("Was unable to gather random data: %s", err)
+		return errors.Wrap(err, "Unable to gather random data")
 	}
 	dataReader := bytes.NewReader(data)
 
 	if err := s.connect(); err != nil {
-		return err
+		return errors.Wrap(err, "Unable to connect")
 	}
 	defer s.conn.Close()
 
 	if err := s.writeCommand("RCV"); err != nil {
-		return err
+		return errors.Wrap(err, "Unable to send RCV command")
 	}
 
 	var (
@@ -57,7 +58,7 @@ func (s *sparkClient) runSendTest(t *testResult) error {
 				break
 			}
 
-			return fmt.Errorf("Error copying: %s", err)
+			return errors.Wrap(err, "Unable to copy data")
 		}
 
 		bps := float64(1024*blockSize*8) / (float64(time.Since(start).Nanoseconds()) / float64(time.Second.Nanoseconds()))
@@ -69,7 +70,9 @@ func (s *sparkClient) runSendTest(t *testResult) error {
 		}
 		blockCount++
 
-		dataReader.Seek(0, 0)
+		if _, err := dataReader.Seek(0, 0); err != nil {
+			return errors.Wrap(err, "Unable to seek")
+		}
 
 		if time.Since(totalStart) > time.Duration(throughputTestLength)*time.Second {
 			break
@@ -84,12 +87,12 @@ func (s *sparkClient) runSendTest(t *testResult) error {
 
 func (s *sparkClient) runRecvTest(t *testResult) error {
 	if err := s.connect(); err != nil {
-		return err
+		return errors.Wrap(err, "Unable to connect")
 	}
 	defer s.conn.Close()
 
 	if err := s.writeCommand("SND"); err != nil {
-		return err
+		return errors.Wrap(err, "Unable to send SND command")
 	}
 
 	var (
@@ -111,7 +114,7 @@ func (s *sparkClient) runRecvTest(t *testResult) error {
 				break
 			}
 
-			return fmt.Errorf("Error copying: %s", err)
+			return errors.Wrap(err, "Unable to copy data")
 		}
 
 		bps := float64(1024*blockSize*8) / (float64(time.Since(start).Nanoseconds()) / float64(time.Second.Nanoseconds()))
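
Note on the transmit() change in influx.go above: errors.Wrap from
github.com/pkg/errors returns nil when the error passed to it is nil, so the
single-line return of the wrapped resetBatch() result still yields nil on
success and only annotates actual failures. A minimal standalone sketch of
that behavior (not part of this patch; the reset and transmit names below are
illustrative only):

	package main

	import (
		"fmt"

		"github.com/pkg/errors"
	)

	// reset stands in for a call like metricsSender.resetBatch and succeeds.
	func reset() error { return nil }

	// transmit mirrors the pattern used in the patch: wrapping a nil error
	// yields nil, so the success path is unchanged while real errors gain
	// an explanatory prefix.
	func transmit() error {
		return errors.Wrap(reset(), "Unable to reset batch")
	}

	func main() {
		fmt.Println(transmit() == nil) // prints "true"
	}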