app/vmagent: do not allow sending unpacked requests with sizes exceeding -maxInsertRequestSize

Aliaksandr Valialkin 2020-02-25 19:34:35 +02:00
parent 6386aeb1e0
commit 4e24839a2c
2 changed files with 6 additions and 6 deletions


@@ -160,17 +160,17 @@ func pushWriteRequest(wr *prompbmarshal.WriteRequest, pushBlock func(block []byt
     }
     bb := writeRequestBufPool.Get()
     bb.B = prompbmarshal.MarshalWriteRequest(bb.B[:0], wr)
-    zb := snappyBufPool.Get()
-    zb.B = snappy.Encode(zb.B[:cap(zb.B)], bb.B)
-    writeRequestBufPool.Put(bb)
-    if len(zb.B) <= persistentqueue.MaxBlockSize {
+    if len(bb.B) <= persistentqueue.MaxBlockSize {
+        zb := snappyBufPool.Get()
+        zb.B = snappy.Encode(zb.B[:cap(zb.B)], bb.B)
+        writeRequestBufPool.Put(bb)
         pushBlock(zb.B)
         blockSizeRows.Update(float64(len(wr.Timeseries)))
         blockSizeBytes.Update(float64(len(zb.B)))
         snappyBufPool.Put(zb)
         return
     }
-    snappyBufPool.Put(zb)
+    writeRequestBufPool.Put(bb)
     // Too big block. Recursively split it into smaller parts.
     timeseries := wr.Timeseries
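
For context, the reordered logic can be sketched outside of vmagent as follows. This is a minimal, self-contained illustration, not the vmagent code: maxBlockSize, marshal, compress and pushBlock below are hypothetical stand-ins for persistentqueue.MaxBlockSize, prompbmarshal.MarshalWriteRequest, snappy.Encode and the real pushBlock callback. It demonstrates the ordering this hunk introduces: the unpacked (marshaled) size is checked before compression, and too-big batches are split recursively.

package main

import (
	"bytes"
	"compress/gzip" // stdlib stand-in for snappy, to keep the sketch dependency-free
	"fmt"
	"strings"
)

// Hypothetical limit playing the role of persistentqueue.MaxBlockSize.
const maxBlockSize = 32

// marshal stands in for prompbmarshal.MarshalWriteRequest.
func marshal(series []string) []byte {
	return []byte(strings.Join(series, "\n"))
}

// compress stands in for snappy.Encode.
func compress(b []byte) []byte {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	_, _ = zw.Write(b)
	_ = zw.Close()
	return buf.Bytes()
}

// pushBlock stands in for the real pushBlock callback.
func pushBlock(b []byte) {
	fmt.Printf("pushed block: %d compressed bytes\n", len(b))
}

// push checks the unpacked (marshaled) size against the limit before
// compressing; oversized batches are recursively split into smaller parts.
func push(series []string) {
	b := marshal(series)
	if len(b) <= maxBlockSize {
		pushBlock(compress(b))
		return
	}
	if len(series) == 1 {
		fmt.Println("dropping a single series that alone exceeds the limit")
		return
	}
	// Too big block. Recursively split it into smaller parts.
	n := len(series) / 2
	push(series[:n])
	push(series[n:])
}

func main() {
	push([]string{"metric_a 1", "metric_b 2", "metric_c 3", "metric_d 4"})
}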


@@ -112,7 +112,7 @@ func readSnappy(dst []byte, r io.Reader) ([]byte, error) {
         return dst, err
     }
     if len(buf) > *maxInsertRequestSize {
-        return dst, fmt.Errorf("too big unpacked request; mustn't exceed `-maxInsertRequestSize=%d` bytes", *maxInsertRequestSize)
+        return dst, fmt.Errorf("too big unpacked request; mustn't exceed `-maxInsertRequestSize=%d` bytes; got %d bytes", *maxInsertRequestSize, len(buf))
     }
     if len(buf) > 0 && len(dst) < cap(dst) && &buf[0] == &dst[len(dst):cap(dst)][0] {
         dst = dst[:len(dst)+len(buf)]
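
And a rough illustration of the second change: rejecting a request whose unpacked size exceeds the limit and reporting the observed size in the error. This is not the vmagent readSnappy implementation; maxInsertRequestSize below is a plain variable standing in for the value of the -maxInsertRequestSize flag, and the sketch decodes the whole body in one shot via github.com/golang/snappy rather than streaming.

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

// Stand-in for the value of the -maxInsertRequestSize flag.
var maxInsertRequestSize = 16

// decodeSnappy unpacks a snappy-encoded request body and refuses to return
// payloads whose unpacked size exceeds maxInsertRequestSize, including the
// actual size in the error as this commit does.
func decodeSnappy(body []byte) ([]byte, error) {
	buf, err := snappy.Decode(nil, body)
	if err != nil {
		return nil, fmt.Errorf("cannot decode snappy-encoded request: %w", err)
	}
	if len(buf) > maxInsertRequestSize {
		return nil, fmt.Errorf("too big unpacked request; mustn't exceed `-maxInsertRequestSize=%d` bytes; got %d bytes", maxInsertRequestSize, len(buf))
	}
	return buf, nil
}

func main() {
	payload := bytes.Repeat([]byte("sample "), 10) // 70 bytes once unpacked
	if _, err := decodeSnappy(snappy.Encode(nil, payload)); err != nil {
		fmt.Println(err) // the oversized request is rejected with the observed size
	}
}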