From 54fb8b21f9d64a29dd21985f668adabd9c977bf6 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Sat, 25 May 2019 21:51:11 +0300
Subject: [PATCH] all: fix misspellings

---
 app/vmselect/promql/eval.go              | 2 +-
 lib/bytesutil/bytebuffer.go              | 2 +-
 lib/encoding/encoding.go                 | 2 +-
 lib/mergeset/table.go                    | 2 +-
 lib/mergeset/table_search_timing_test.go | 3 +++
 lib/storage/index_db.go                  | 2 +-
 lib/storage/partition.go                 | 2 +-
 lib/storage/raw_row.go                   | 2 +-
 lib/storage/storage.go                   | 4 ++--
 9 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/app/vmselect/promql/eval.go b/app/vmselect/promql/eval.go
index 7a26d81f1d..952e8d5724 100644
--- a/app/vmselect/promql/eval.go
+++ b/app/vmselect/promql/eval.go
@@ -19,7 +19,7 @@ var (
 	maxPointsPerTimeseries = flag.Int("search.maxPointsPerTimeseries", 10e3, "The maximum points per a single timeseries returned from the search")
 )
 
-// The minumum number of points per timeseries for enabling time rounding.
+// The minimum number of points per timeseries for enabling time rounding.
 // This improves cache hit ratio for frequently requested queries over
 // big time ranges.
 const minTimeseriesPointsForTimeRounding = 50
diff --git a/lib/bytesutil/bytebuffer.go b/lib/bytesutil/bytebuffer.go
index 6ae31c5b1e..4c294d5f29 100644
--- a/lib/bytesutil/bytebuffer.go
+++ b/lib/bytesutil/bytebuffer.go
@@ -44,7 +44,7 @@ func (bb *ByteBuffer) ReadAt(p []byte, offset int64) {
 		logger.Panicf("BUG: too big offset=%d; cannot exceed len(bb.B)=%d", offset, len(bb.B))
 	}
 	if n := copy(p, bb.B[offset:]); n < len(p) {
-		logger.Panicf("BUG: EOF occured after reading %d bytes out of %d bytes at offset %d", n, len(p), offset)
+		logger.Panicf("BUG: EOF occurred after reading %d bytes out of %d bytes at offset %d", n, len(p), offset)
 	}
 }
diff --git a/lib/encoding/encoding.go b/lib/encoding/encoding.go
index 5cb9347f4d..3b44c89818 100644
--- a/lib/encoding/encoding.go
+++ b/lib/encoding/encoding.go
@@ -117,7 +117,7 @@ func marshalInt64Array(dst []byte, a []int64, precisionBits uint8) (result []byt
 	bb := bbPool.Get()
 
 	if isGauge(a) {
-		// Guage values are better compressed with delta encoding.
+		// Gauge values are better compressed with delta encoding.
 		mt = MarshalTypeZSTDNearestDelta
 		pb := precisionBits
 		if pb < 6 {
diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go
index ad5142236a..3450acaba8 100644
--- a/lib/mergeset/table.go
+++ b/lib/mergeset/table.go
@@ -208,7 +208,7 @@ func (tb *Table) MustClose() {
 	logger.Infof("%d inmemory parts have been flushed to files in %s on %q", len(pws), time.Since(startTime), tb.path)
 
 	// Remove references to parts from the tb, so they may be eventually closed
-	// after all the seraches are done.
+	// after all the searches are done.
 	tb.partsLock.Lock()
 	parts := tb.parts
 	tb.parts = nil
diff --git a/lib/mergeset/table_search_timing_test.go b/lib/mergeset/table_search_timing_test.go
index 0ab2514d1a..ddafc9728c 100644
--- a/lib/mergeset/table_search_timing_test.go
+++ b/lib/mergeset/table_search_timing_test.go
@@ -33,6 +33,9 @@ func benchmarkTableSearch(b *testing.B, itemsCount int) {
 	// Force finishing pending merges
 	tb.MustClose()
 	tb, err = OpenTable(path)
+	if err != nil {
+		b.Fatalf("unexpected error when re-opening table %q: %s", path, err)
+	}
 	defer tb.MustClose()
 
 	keys := make([][]byte, len(items))
diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go
index c1150a1e11..d6aa7e5146 100644
--- a/lib/storage/index_db.go
+++ b/lib/storage/index_db.go
@@ -945,7 +945,7 @@ func (is *indexSearch) getTSIDByMetricName(dst *TSID, metricName []byte) error {
 	if len(dmis) > 0 {
 		// Verify whether the dst is marked as deleted.
 		if _, deleted := dmis[dst.MetricID]; deleted {
-			// The dst is deleted. Continue seraching.
+			// The dst is deleted. Continue searching.
 			continue
 		}
 	}
diff --git a/lib/storage/partition.go b/lib/storage/partition.go
index 181c734c72..8dc827d060 100644
--- a/lib/storage/partition.go
+++ b/lib/storage/partition.go
@@ -599,7 +599,7 @@ func (pt *partition) MustClose() {
 	logger.Infof("%d inmemory parts have been flushed to files in %s on %q", len(pws), time.Since(startTime), pt.smallPartsPath)
 
 	// Remove references to smallParts from the pt, so they may be eventually closed
-	// after all the seraches are done.
+	// after all the searches are done.
 	pt.partsLock.Lock()
 	smallParts := pt.smallParts
 	pt.smallParts = nil
diff --git a/lib/storage/raw_row.go b/lib/storage/raw_row.go
index 3d58c8e5e1..c738e29f4f 100644
--- a/lib/storage/raw_row.go
+++ b/lib/storage/raw_row.go
@@ -19,7 +19,7 @@ type rawRow struct {
 	// Value is time series value for the given timestamp.
 	Value float64
 
-	// PrecisionBits is the number of the siginificant bits in the Value
+	// PrecisionBits is the number of the significant bits in the Value
 	// to store. Possible values are [1..64].
 	// 1 means max. 50% error, 2 - 25%, 3 - 12.5%, 64 means no error, i.e.
 	// Value stored without information loss.
diff --git a/lib/storage/storage.go b/lib/storage/storage.go
index b68d5db56b..394191fb64 100644
--- a/lib/storage/storage.go
+++ b/lib/storage/storage.go
@@ -30,7 +30,7 @@ type Storage struct {
 	cachePath       string
 	retentionMonths int
 
-	// lock file for excluse access to the storage on the given path.
+	// lock file for exclusive access to the storage on the given path.
 	flockF *os.File
 
 	idbCurr atomic.Value
@@ -656,7 +656,7 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra
 	errors = s.updateDateMetricIDCache(rows, errors)
 	if len(errors) > 0 {
 		// Return only the first error, since it has no sense in returning all errors.
-		return rows, fmt.Errorf("errors occured during rows addition: %s", errors[0])
+		return rows, fmt.Errorf("errors occurred during rows addition: %s", errors[0])
 	}
 	return rows, nil
 }
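
Notes on the code paths this patch touches:

The minTimeseriesPointsForTimeRounding constant in eval.go guards a cache
optimization: once a query selects enough points per series, its time range
can be rounded to a coarser granularity, so that repeated queries over big,
sliding time ranges map to the same cached result. A minimal sketch of the
idea in Go (the helper name and the fixed step are illustrative assumptions,
not VictoriaMetrics' actual implementation):

    package main

    import "fmt"

    // roundTimeRange aligns start down and end up to step (all values in
    // milliseconds), so that two queries whose ranges differ by less than
    // step resolve to the same cache key. Assumes non-negative timestamps.
    func roundTimeRange(start, end, step int64) (int64, int64) {
        start -= start % step
        if rem := end % step; rem != 0 {
            end += step - rem
        }
        return start, end
    }

    func main() {
        // Two queries issued a few seconds apart map to one cached range.
        fmt.Println(roundTimeRange(1558803000123, 1558889400456, 60000))
        fmt.Println(roundTimeRange(1558803004789, 1558889404321, 60000))
    }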
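
The comment fixed in marshalInt64Array concerns delta encoding: a gauge
drifts in small steps, so storing the first value plus successive
differences yields many small integers that compress well (judging by the
surrounding code, the MarshalTypeZSTDNearestDelta path additionally reduces
precision and compresses the result). A hedged sketch of plain delta
encoding, not the library's actual marshaling code:

    package main

    import "fmt"

    // deltaEncode keeps the first value as-is and stores every subsequent
    // value as the difference from its predecessor. Slowly-moving gauges
    // produce small deltas, which compress far better than the raw values.
    func deltaEncode(a []int64) (first int64, deltas []int64) {
        if len(a) == 0 {
            return 0, nil
        }
        first = a[0]
        deltas = make([]int64, 0, len(a)-1)
        for i := 1; i < len(a); i++ {
            deltas = append(deltas, a[i]-a[i-1])
        }
        return first, deltas
    }

    func main() {
        first, deltas := deltaEncode([]int64{1000, 1002, 1001, 1005})
        fmt.Println(first, deltas) // 1000 [2 -1 4]
    }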
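
The rawRow.PrecisionBits comment states a simple bound: keeping p
significant mantissa bits limits the relative rounding error to 2^-p, hence
max. 50% error for 1 bit, 25% for 2, 12.5% for 3, and lossless storage at
the top of the range. A self-contained illustration of such rounding (the
function is made up for demonstration and is not the storage engine's code):

    package main

    import (
        "fmt"
        "math"
    )

    // roundToPrecisionBits keeps only the top p significant bits of the
    // mantissa of v. Rounding the scaled mantissa changes it by at most
    // 0.5 while its magnitude is at least 2^(p-1), so the relative error
    // is bounded by 2^-p: 50% for p=1, 25% for p=2, 12.5% for p=3, etc.
    func roundToPrecisionBits(v float64, p uint8) float64 {
        if p >= 53 || v == 0 {
            return v // a float64 mantissa has 53 bits; nothing to drop
        }
        frac, exp := math.Frexp(v) // v = frac * 2^exp, 0.5 <= |frac| < 1
        scaled := math.Round(frac * float64(uint64(1)<<p))
        return math.Ldexp(scaled, exp-int(p))
    }

    func main() {
        v := 1234.5678
        for _, p := range []uint8{1, 2, 3, 8} {
            r := roundToPrecisionBits(v, p)
            fmt.Printf("p=%d: %g (relative error %.2f%%)\n", p, r, 100*math.Abs(r-v)/v)
        }
    }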