all: rename ReadAt* to MustReadAt* in order to not clash with io.ReaderAt

This commit is contained in:
Aliaksandr Valialkin 2020-01-30 13:22:15 +02:00
parent d68546aa4a
commit ad8af629bb
7 changed files with 34 additions and 34 deletions

View file

@ -11,9 +11,9 @@ import (
var ( var (
// Verify ByteBuffer implements the given interfaces. // Verify ByteBuffer implements the given interfaces.
_ io.Writer = &ByteBuffer{} _ io.Writer = &ByteBuffer{}
_ fs.ReadAtCloser = &ByteBuffer{} _ fs.MustReadAtCloser = &ByteBuffer{}
_ io.ReaderFrom = &ByteBuffer{} _ io.ReaderFrom = &ByteBuffer{}
// Verify reader implement filestream.ReadCloser interface. // Verify reader implement filestream.ReadCloser interface.
_ filestream.ReadCloser = &reader{} _ filestream.ReadCloser = &reader{}
@ -36,8 +36,8 @@ func (bb *ByteBuffer) Write(p []byte) (int, error) {
return len(p), nil return len(p), nil
} }
// ReadAt reads len(p) bytes starting from the given offset. // MustReadAt reads len(p) bytes starting from the given offset.
func (bb *ByteBuffer) ReadAt(p []byte, offset int64) { func (bb *ByteBuffer) MustReadAt(p []byte, offset int64) {
if offset < 0 { if offset < 0 {
logger.Panicf("BUG: cannot read at negative offset=%d", offset) logger.Panicf("BUG: cannot read at negative offset=%d", offset)
} }

View file

@ -218,7 +218,7 @@ func TestByteBufferRead(t *testing.T) {
} }
} }
func TestByteBufferReadAt(t *testing.T) { func TestByteBufferMustReadAt(t *testing.T) {
testStr := "foobar baz" testStr := "foobar baz"
var bb ByteBuffer var bb ByteBuffer
@ -232,7 +232,7 @@ func TestByteBufferReadAt(t *testing.T) {
t.Fatalf("expecting non-nil error when reading at negative offset") t.Fatalf("expecting non-nil error when reading at negative offset")
} }
}() }()
bb.ReadAt(p, -1) bb.MustReadAt(p, -1)
}() }()
// Try reading past the end of buffer // Try reading past the end of buffer
@ -242,18 +242,18 @@ func TestByteBufferReadAt(t *testing.T) {
t.Fatalf("expecting non-nil error when reading past the end of buffer") t.Fatalf("expecting non-nil error when reading past the end of buffer")
} }
}() }()
bb.ReadAt(p, int64(len(testStr))+1) bb.MustReadAt(p, int64(len(testStr))+1)
}() }()
// Try reading the first byte // Try reading the first byte
n := len(p) n := len(p)
bb.ReadAt(p, 0) bb.MustReadAt(p, 0)
if string(p) != testStr[:n] { if string(p) != testStr[:n] {
t.Fatalf("unexpected value read: %q; want %q", p, testStr[:n]) t.Fatalf("unexpected value read: %q; want %q", p, testStr[:n])
} }
// Try reading the last byte // Try reading the last byte
bb.ReadAt(p, int64(len(testStr))-1) bb.MustReadAt(p, int64(len(testStr))-1)
if string(p) != testStr[len(testStr)-1:] { if string(p) != testStr[len(testStr)-1:] {
t.Fatalf("unexpected value read: %q; want %q", p, testStr[len(testStr)-1:]) t.Fatalf("unexpected value read: %q; want %q", p, testStr[len(testStr)-1:])
} }
@ -266,18 +266,18 @@ func TestByteBufferReadAt(t *testing.T) {
} }
}() }()
p := make([]byte, 10) p := make([]byte, 10)
bb.ReadAt(p, int64(len(testStr))-3) bb.MustReadAt(p, int64(len(testStr))-3)
}() }()
// Try reading multiple bytes from the middle // Try reading multiple bytes from the middle
p = make([]byte, 3) p = make([]byte, 3)
bb.ReadAt(p, 2) bb.MustReadAt(p, 2)
if string(p) != testStr[2:2+len(p)] { if string(p) != testStr[2:2+len(p)] {
t.Fatalf("unexpected value read: %q; want %q", p, testStr[2:2+len(p)]) t.Fatalf("unexpected value read: %q; want %q", p, testStr[2:2+len(p)])
} }
} }
func TestByteBufferReadAtParallel(t *testing.T) { func TestByteBufferMustReadAtParallel(t *testing.T) {
ch := make(chan error, 10) ch := make(chan error, 10)
var bb ByteBuffer var bb ByteBuffer
bb.B = []byte("foo bar baz adsf adsf dsakjlkjlkj2l34324") bb.B = []byte("foo bar baz adsf adsf dsakjlkjlkj2l34324")
@ -285,7 +285,7 @@ func TestByteBufferReadAtParallel(t *testing.T) {
go func() { go func() {
p := make([]byte, 3) p := make([]byte, 3)
for i := 0; i < len(bb.B)-len(p); i++ { for i := 0; i < len(bb.B)-len(p); i++ {
bb.ReadAt(p, int64(i)) bb.MustReadAt(p, int64(i))
} }
ch <- nil ch <- nil
}() }()

View file

@ -14,10 +14,10 @@ import (
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
// ReadAtCloser is rand-access read interface. // MustReadAtCloser is rand-access read interface.
type ReadAtCloser interface { type MustReadAtCloser interface {
// ReadAt must read len(p) bytes from offset off to p. // MustReadAt must read len(p) bytes from offset off to p.
ReadAt(p []byte, off int64) MustReadAt(p []byte, off int64)
// MustClose must close the reader. // MustClose must close the reader.
MustClose() MustClose()
@ -28,8 +28,8 @@ type ReaderAt struct {
f *os.File f *os.File
} }
// ReadAt reads len(p) bytes from off to p. // MustReadAt reads len(p) bytes from off to p.
func (ra *ReaderAt) ReadAt(p []byte, off int64) { func (ra *ReaderAt) MustReadAt(p []byte, off int64) {
if len(p) == 0 { if len(p) == 0 {
return return
} }

View file

@ -53,9 +53,9 @@ type part struct {
mrs []metaindexRow mrs []metaindexRow
indexFile fs.ReadAtCloser indexFile fs.MustReadAtCloser
itemsFile fs.ReadAtCloser itemsFile fs.MustReadAtCloser
lensFile fs.ReadAtCloser lensFile fs.MustReadAtCloser
idxbCache *indexBlockCache idxbCache *indexBlockCache
ibCache *inmemoryBlockCache ibCache *inmemoryBlockCache
@ -107,7 +107,7 @@ func openFilePart(path string) (*part, error) {
return newPart(&ph, path, size, metaindexFile, indexFile, itemsFile, lensFile) return newPart(&ph, path, size, metaindexFile, indexFile, itemsFile, lensFile)
} }
func newPart(ph *partHeader, path string, size uint64, metaindexReader filestream.ReadCloser, indexFile, itemsFile, lensFile fs.ReadAtCloser) (*part, error) { func newPart(ph *partHeader, path string, size uint64, metaindexReader filestream.ReadCloser, indexFile, itemsFile, lensFile fs.MustReadAtCloser) (*part, error) {
var errors []error var errors []error
mrs, err := unmarshalMetaindexRows(nil, metaindexReader) mrs, err := unmarshalMetaindexRows(nil, metaindexReader)
if err != nil { if err != nil {

View file

@ -311,7 +311,7 @@ func (ps *partSearch) getIndexBlock(mr *metaindexRow) (*indexBlock, bool, error)
func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) { func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
ps.compressedIndexBuf = bytesutil.Resize(ps.compressedIndexBuf, int(mr.indexBlockSize)) ps.compressedIndexBuf = bytesutil.Resize(ps.compressedIndexBuf, int(mr.indexBlockSize))
ps.p.indexFile.ReadAt(ps.compressedIndexBuf, int64(mr.indexBlockOffset)) ps.p.indexFile.MustReadAt(ps.compressedIndexBuf, int64(mr.indexBlockOffset))
var err error var err error
ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf) ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf)
@ -355,10 +355,10 @@ func (ps *partSearch) readInmemoryBlock(bh *blockHeader) (*inmemoryBlock, error)
ps.sb.Reset() ps.sb.Reset()
ps.sb.itemsData = bytesutil.Resize(ps.sb.itemsData, int(bh.itemsBlockSize)) ps.sb.itemsData = bytesutil.Resize(ps.sb.itemsData, int(bh.itemsBlockSize))
ps.p.itemsFile.ReadAt(ps.sb.itemsData, int64(bh.itemsBlockOffset)) ps.p.itemsFile.MustReadAt(ps.sb.itemsData, int64(bh.itemsBlockOffset))
ps.sb.lensData = bytesutil.Resize(ps.sb.lensData, int(bh.lensBlockSize)) ps.sb.lensData = bytesutil.Resize(ps.sb.lensData, int(bh.lensBlockSize))
ps.p.lensFile.ReadAt(ps.sb.lensData, int64(bh.lensBlockOffset)) ps.p.lensFile.MustReadAt(ps.sb.lensData, int64(bh.lensBlockOffset))
ib := getInmemoryBlock() ib := getInmemoryBlock()
if err := ib.UnmarshalData(&ps.sb, bh.firstItem, bh.commonPrefix, bh.itemsCount, bh.marshalType); err != nil { if err := ib.UnmarshalData(&ps.sb, bh.firstItem, bh.commonPrefix, bh.itemsCount, bh.marshalType); err != nil {

View file

@ -40,9 +40,9 @@ type part struct {
// Total size in bytes of part data. // Total size in bytes of part data.
size uint64 size uint64
timestampsFile fs.ReadAtCloser timestampsFile fs.MustReadAtCloser
valuesFile fs.ReadAtCloser valuesFile fs.MustReadAtCloser
indexFile fs.ReadAtCloser indexFile fs.MustReadAtCloser
metaindex []metaindexRow metaindex []metaindexRow
@ -100,7 +100,7 @@ func openFilePart(path string) (*part, error) {
// //
// The returned part calls MustClose on all the files passed to newPart // The returned part calls MustClose on all the files passed to newPart
// when calling part.MustClose. // when calling part.MustClose.
func newPart(ph *partHeader, path string, size uint64, metaindexReader filestream.ReadCloser, timestampsFile, valuesFile, indexFile fs.ReadAtCloser) (*part, error) { func newPart(ph *partHeader, path string, size uint64, metaindexReader filestream.ReadCloser, timestampsFile, valuesFile, indexFile fs.MustReadAtCloser) (*part, error) {
var errors []error var errors []error
metaindex, err := unmarshalMetaindexRows(nil, metaindexReader) metaindex, err := unmarshalMetaindexRows(nil, metaindexReader)
if err != nil { if err != nil {

View file

@ -229,7 +229,7 @@ func skipSmallMetaindexRows(metaindex []metaindexRow, tsid *TSID) []metaindexRow
func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) { func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
ps.compressedIndexBuf = bytesutil.Resize(ps.compressedIndexBuf[:0], int(mr.IndexBlockSize)) ps.compressedIndexBuf = bytesutil.Resize(ps.compressedIndexBuf[:0], int(mr.IndexBlockSize))
ps.p.indexFile.ReadAt(ps.compressedIndexBuf, int64(mr.IndexBlockOffset)) ps.p.indexFile.MustReadAt(ps.compressedIndexBuf, int64(mr.IndexBlockOffset))
var err error var err error
ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf) ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf)
@ -302,8 +302,8 @@ func (ps *partSearch) readBlock(bh *blockHeader) {
} }
ps.Block.timestampsData = bytesutil.Resize(ps.Block.timestampsData[:0], int(bh.TimestampsBlockSize)) ps.Block.timestampsData = bytesutil.Resize(ps.Block.timestampsData[:0], int(bh.TimestampsBlockSize))
ps.p.timestampsFile.ReadAt(ps.Block.timestampsData, int64(bh.TimestampsBlockOffset)) ps.p.timestampsFile.MustReadAt(ps.Block.timestampsData, int64(bh.TimestampsBlockOffset))
ps.Block.valuesData = bytesutil.Resize(ps.Block.valuesData[:0], int(bh.ValuesBlockSize)) ps.Block.valuesData = bytesutil.Resize(ps.Block.valuesData[:0], int(bh.ValuesBlockSize))
ps.p.valuesFile.ReadAt(ps.Block.valuesData, int64(bh.ValuesBlockOffset)) ps.p.valuesFile.MustReadAt(ps.Block.valuesData, int64(bh.ValuesBlockOffset))
} }