all: fix misspellings

Aliaksandr Valialkin 2019-05-25 21:51:11 +03:00
parent d6523ffe90
commit 54fb8b21f9
9 changed files with 12 additions and 9 deletions

@@ -19,7 +19,7 @@ var (
maxPointsPerTimeseries = flag.Int("search.maxPointsPerTimeseries", 10e3, "The maximum points per a single timeseries returned from the search")
)
-// The minumum number of points per timeseries for enabling time rounding.
+// The minimum number of points per timeseries for enabling time rounding.
// This improves cache hit ratio for frequently requested queries over
// big time ranges.
const minTimeseriesPointsForTimeRounding = 50
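
The comment above ties time rounding to cache hit ratio, presumably so that repeated queries with slightly shifted time ranges map to the same cache entry. A minimal sketch of that idea, using a hypothetical helper rather than the actual VictoriaMetrics implementation:

package sketch

// roundToStep rounds start down and end up to multiples of step
// (all in milliseconds), so near-identical queries over big time
// ranges produce identical, cacheable ranges.
func roundToStep(start, end, step int64) (int64, int64) {
	if step <= 0 {
		return start, end
	}
	start -= start % step
	end += step - end%step
	return start, end
}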

@@ -44,7 +44,7 @@ func (bb *ByteBuffer) ReadAt(p []byte, offset int64) {
logger.Panicf("BUG: too big offset=%d; cannot exceed len(bb.B)=%d", offset, len(bb.B))
}
if n := copy(p, bb.B[offset:]); n < len(p) {
logger.Panicf("BUG: EOF occured after reading %d bytes out of %d bytes at offset %d", n, len(p), offset)
logger.Panicf("BUG: EOF occurred after reading %d bytes out of %d bytes at offset %d", n, len(p), offset)
}
}

@@ -117,7 +117,7 @@ func marshalInt64Array(dst []byte, a []int64, precisionBits uint8) (result []byt
bb := bbPool.Get()
if isGauge(a) {
-// Guage values are better compressed with delta encoding.
+// Gauge values are better compressed with delta encoding.
mt = MarshalTypeZSTDNearestDelta
pb := precisionBits
if pb < 6 {
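
The comment above motivates delta encoding for gauges: slowly changing values turn into runs of small deltas, which compress much better than the raw values. A minimal illustrative sketch of the delta idea only, not the actual MarshalTypeZSTDNearestDelta encoder:

package sketch

// deltaEncode replaces each value with the difference from its
// predecessor; the first delta is taken against zero. Slowly changing
// gauges yield small deltas, which a general-purpose compressor
// handles well.
func deltaEncode(a []int64) []int64 {
	deltas := make([]int64, len(a))
	var prev int64
	for i, v := range a {
		deltas[i] = v - prev
		prev = v
	}
	return deltas
}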

@@ -208,7 +208,7 @@ func (tb *Table) MustClose() {
logger.Infof("%d inmemory parts have been flushed to files in %s on %q", len(pws), time.Since(startTime), tb.path)
// Remove references to parts from the tb, so they may be eventually closed
-// after all the seraches are done.
+// after all the searches are done.
tb.partsLock.Lock()
parts := tb.parts
tb.parts = nil
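
The pattern above (also used for partitions further down) detaches the parts from the table under the lock and releases them afterwards, so searches that still hold their own references keep the parts alive until they finish. A hedged sketch of that reference-counting idea, with hypothetical types that only mirror the shape of the real code:

package sketch

import "sync"

type part struct{ /* files, mmaps, ... */ }

type refCountedPart struct {
	mu   sync.Mutex
	refs int
	p    *part
}

func (rp *refCountedPart) decRef() {
	rp.mu.Lock()
	rp.refs--
	mustClose := rp.refs == 0
	rp.mu.Unlock()
	if mustClose {
		// The last reference holder would close rp.p here.
	}
}

type table struct {
	partsLock sync.Mutex
	parts     []*refCountedPart
}

func (tb *table) detachAndReleaseParts() {
	tb.partsLock.Lock()
	parts := tb.parts
	tb.parts = nil
	tb.partsLock.Unlock()

	// In-flight searches may still hold references, so the actual
	// close happens only when the last reference is dropped.
	for _, rp := range parts {
		rp.decRef()
	}
}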

@@ -33,6 +33,9 @@ func benchmarkTableSearch(b *testing.B, itemsCount int) {
// Force finishing pending merges
tb.MustClose()
tb, err = OpenTable(path)
+if err != nil {
+b.Fatalf("unexpected error when re-opening table %q: %s", path, err)
+}
defer tb.MustClose()
keys := make([][]byte, len(items))

@@ -945,7 +945,7 @@ func (is *indexSearch) getTSIDByMetricName(dst *TSID, metricName []byte) error {
if len(dmis) > 0 {
// Verify whether the dst is marked as deleted.
if _, deleted := dmis[dst.MetricID]; deleted {
-// The dst is deleted. Continue seraching.
+// The dst is deleted. Continue searching.
continue
}
}
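
The check above relies on a set of deleted metric IDs (dmis) stored as map keys; a tiny sketch of the same idiom, with hypothetical names:

package sketch

// isDeleted reports whether metricID has been marked as deleted.
// Deleted IDs live in a set-like map, so the check is a single
// map lookup.
func isDeleted(dmis map[uint64]struct{}, metricID uint64) bool {
	_, deleted := dmis[metricID]
	return deleted
}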

@@ -599,7 +599,7 @@ func (pt *partition) MustClose() {
logger.Infof("%d inmemory parts have been flushed to files in %s on %q", len(pws), time.Since(startTime), pt.smallPartsPath)
// Remove references to smallParts from the pt, so they may be eventually closed
-// after all the seraches are done.
+// after all the searches are done.
pt.partsLock.Lock()
smallParts := pt.smallParts
pt.smallParts = nil

@@ -19,7 +19,7 @@ type rawRow struct {
// Value is time series value for the given timestamp.
Value float64
-// PrecisionBits is the number of the siginificant bits in the Value
+// PrecisionBits is the number of the significant bits in the Value
// to store. Possible values are [1..64].
// 1 means max. 50% error, 2 - 25%, 3 - 12.5%, 64 means no error, i.e.
// Value stored without information loss.
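
The error figures in the comment above follow a halving pattern: with p significant bits the worst-case relative error is roughly 2^-p (50% for 1 bit, 25% for 2, 12.5% for 3), while 64 bits keep the value exact. A small worked sketch, assuming that pattern extends to intermediate values of precisionBits:

package sketch

import "math"

// maxRelativeError returns the approximate worst-case relative error
// for the given number of significant bits: each extra bit halves it.
func maxRelativeError(precisionBits uint8) float64 {
	if precisionBits >= 64 {
		return 0 // stored without information loss
	}
	return math.Pow(2, -float64(precisionBits))
}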

@@ -30,7 +30,7 @@ type Storage struct {
cachePath string
retentionMonths int
-// lock file for excluse access to the storage on the given path.
+// lock file for exclusive access to the storage on the given path.
flockF *os.File
idbCurr atomic.Value
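
The flockF field above is a lock file used for exclusive access to the storage directory. A hedged sketch of how such a lock is commonly acquired on Unix with syscall.Flock, illustrative only and not necessarily the exact VictoriaMetrics code:

package sketch

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// acquireFlock creates a lock file inside the storage directory and
// takes a non-blocking exclusive lock on it, so a second process
// opening the same storage fails fast instead of corrupting data.
func acquireFlock(storagePath string) (*os.File, error) {
	lockPath := filepath.Join(storagePath, "flock.lock")
	f, err := os.Create(lockPath)
	if err != nil {
		return nil, fmt.Errorf("cannot create lock file %q: %s", lockPath, err)
	}
	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		f.Close()
		return nil, fmt.Errorf("cannot acquire exclusive lock on %q: %s", lockPath, err)
	}
	return f, nil
}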
@@ -656,7 +656,7 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra
errors = s.updateDateMetricIDCache(rows, errors)
if len(errors) > 0 {
// Return only the first error, since it has no sense in returning all errors.
return rows, fmt.Errorf("errors occured during rows addition: %s", errors[0])
return rows, fmt.Errorf("errors occurred during rows addition: %s", errors[0])
}
return rows, nil
}