package netstorage

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/metrics"
)

// InitTmpBlocksDir initializes the directory for storing temporary search results.
//
// It stores data in the system-defined temporary directory if tmpDirPath is empty.
func InitTmpBlocksDir(tmpDirPath string) {
	if len(tmpDirPath) == 0 {
		tmpDirPath = os.TempDir()
	}
	tmpBlocksDir = filepath.Join(tmpDirPath, "searchResults")
	fs.MustRemoveAll(tmpBlocksDir)
	fs.MustMkdirIfNotExist(tmpBlocksDir)
}

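// tmpBlocksDir is the directory for storing temporary search results.
// It is set by InitTmpBlocksDir.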
var tmpBlocksDir string

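// maxInmemoryTmpBlocksFile returns the maximum size in bytes of the in-memory buffer
// for a single tmpBlocksFile. The limit is derived from the allowed memory
// and is clamped to the [64KB, 4MB] range.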
func maxInmemoryTmpBlocksFile() int {
	mem := memory.Allowed()
	maxLen := mem / 1024
	if maxLen < 64*1024 {
		return 64 * 1024
	}
	if maxLen > 4*1024*1024 {
		return 4 * 1024 * 1024
	}
	return maxLen
}

var _ = metrics.NewGauge(`vm_tmp_blocks_max_inmemory_file_size_bytes`, func() float64 {
	return float64(maxInmemoryTmpBlocksFile())
})

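// tmpBlocksFile stores temporary blocks of search results.
//
// Small results are kept in the in-memory buf; once the buffer capacity is exceeded,
// the data is spilled to a temporary file in tmpBlocksDir.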
type tmpBlocksFile struct {
	buf []byte

	f *os.File
	r *fs.ReaderAt

	offset uint64
}

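// getTmpBlocksFile returns a tmpBlocksFile from the pool
// or creates a new one with an in-memory buffer of maxInmemoryTmpBlocksFile() capacity.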
func getTmpBlocksFile() *tmpBlocksFile {
	v := tmpBlocksFilePool.Get()
	if v == nil {
		return &tmpBlocksFile{
			buf: make([]byte, 0, maxInmemoryTmpBlocksFile()),
		}
	}
	return v.(*tmpBlocksFile)
}

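// putTmpBlocksFile closes tbf, resets its state and returns it to the pool for reuse.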
func putTmpBlocksFile(tbf *tmpBlocksFile) {
	tbf.MustClose()
	tbf.buf = tbf.buf[:0]
	tbf.f = nil
	tbf.r = nil
	tbf.offset = 0
	tmpBlocksFilePool.Put(tbf)
}

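// tmpBlocksFilePool reduces allocations by reusing tmpBlocksFile objects.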
var tmpBlocksFilePool sync.Pool

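// tmpBlockAddr is the location of a block written via WriteBlockData:
// the index of the tmpBlocksFile plus the offset and size of the block inside it.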
type tmpBlockAddr struct {
	offset uint64
	size   int
	tbfIdx uint
}

func (addr tmpBlockAddr) String() string {
	return fmt.Sprintf("offset %d, size %d, tbfIdx %d", addr.offset, addr.size, addr.tbfIdx)
}

var (
	tmpBlocksFilesCreated = metrics.NewCounter(`vm_tmp_blocks_files_created_total`)
	_                     = metrics.NewGauge(`vm_tmp_blocks_files_directory_free_bytes`, func() float64 {
		return float64(fs.MustGetFreeSpace(tmpBlocksDir))
	})
)

// WriteBlockData writes b to tbf and returns the address of the written block.
//
// It returns an error, since the write may fail on disk space shortage,
// and callers must handle such errors.
func (tbf *tmpBlocksFile) WriteBlockData(b []byte, tbfIdx uint) (tmpBlockAddr, error) {
	var addr tmpBlockAddr
	addr.tbfIdx = tbfIdx
	addr.offset = tbf.offset
	addr.size = len(b)
	tbf.offset += uint64(addr.size)
	if len(tbf.buf)+len(b) <= cap(tbf.buf) {
		// Fast path - the data fits tbf.buf.
		tbf.buf = append(tbf.buf, b...)
		return addr, nil
	}

	// Slow path: flush the data from tbf.buf to file.
	if tbf.f == nil {
		f, err := os.CreateTemp(tmpBlocksDir, "")
		if err != nil {
			return addr, err
		}
		tbf.f = f
		tmpBlocksFilesCreated.Inc()
	}
	_, err := tbf.f.Write(tbf.buf)
	tbf.buf = append(tbf.buf[:0], b...)
	if err != nil {
		return addr, fmt.Errorf("cannot write block to %q: %w", tbf.f.Name(), err)
	}
	return addr, nil
}

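// The function below is a minimal, illustrative sketch and is not called anywhere in the package.
// It shows the tmpBlocksFile lifecycle implied by the methods above: write blocks, finalize,
// read the blocks back by address, then return the object to the pool. The function name and its
// argument are hypothetical; the input is assumed to contain blocks in the format expected by
// storage.UnmarshalBlock.
func exampleTmpBlocksFileLifecycle(marshaledBlocks [][]byte) error {
	tbf := getTmpBlocksFile()
	defer putTmpBlocksFile(tbf)

	// Write the blocks. WriteBlockData may spill buffered data to a temporary file on disk,
	// so its error must be checked.
	addrs := make([]tmpBlockAddr, 0, len(marshaledBlocks))
	for _, b := range marshaledBlocks {
		addr, err := tbf.WriteBlockData(b, 0)
		if err != nil {
			return fmt.Errorf("cannot write block: %w", err)
		}
		addrs = append(addrs, addr)
	}

	// Switch tbf from write mode to read mode.
	if err := tbf.Finalize(); err != nil {
		return fmt.Errorf("cannot finalize tmpBlocksFile: %w", err)
	}

	// Read the blocks back via the addresses returned by WriteBlockData.
	// Real code would process each unmarshaled block here.
	for _, addr := range addrs {
		var block storage.Block
		tbf.MustReadBlockAt(&block, addr)
	}
	return nil
}
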
// Len returns the size of tbf data in bytes.
func (tbf *tmpBlocksFile) Len() uint64 {
	return tbf.offset
}

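// Finalize flushes the remaining buffered data to the underlying file, if one was created,
// and switches tbf to read mode. It should be called after the last WriteBlockData call
// and before the first MustReadBlockAt call.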
func (tbf *tmpBlocksFile) Finalize() error {
	if tbf.f == nil {
		return nil
	}
	fname := tbf.f.Name()
	if _, err := tbf.f.Write(tbf.buf); err != nil {
		return fmt.Errorf("cannot write the remaining %d bytes to %q: %w", len(tbf.buf), fname, err)
	}
	tbf.buf = tbf.buf[:0]
	r := fs.NewReaderAt(tbf.f)

	// Hint the OS that the file is read almost sequentially.
	// This should reduce the number of disk seeks, which is important for HDDs.
	r.MustFadviseSequentialRead(true)

	// Collect local stats in order to improve performance on systems with a big number of CPU cores.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3966
	r.SetUseLocalStats()

	tbf.r = r
	tbf.f = nil
	return nil
}

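// MustReadBlockAt reads the block stored at addr into dst,
// either from the in-memory buffer or from the backing file.
//
// It logs a fatal error if the data at addr cannot be unmarshaled.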
func (tbf *tmpBlocksFile) MustReadBlockAt(dst *storage.Block, addr tmpBlockAddr) {
	var buf []byte
	if tbf.r == nil {
		buf = tbf.buf[addr.offset : addr.offset+uint64(addr.size)]
	} else {
		bb := tmpBufPool.Get()
		defer tmpBufPool.Put(bb)
		bb.B = bytesutil.ResizeNoCopyMayOverallocate(bb.B, addr.size)
		tbf.r.MustReadAt(bb.B, int64(addr.offset))
		buf = bb.B
	}
	tail, err := storage.UnmarshalBlock(dst, buf)
	if err != nil {
		logger.Panicf("FATAL: cannot unmarshal data at %s: %s", addr, err)
	}
	if len(tail) > 0 {
		logger.Panicf("FATAL: unexpected non-empty tail left after unmarshaling data at %s; len(tail)=%d", addr, len(tail))
	}
}

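// tmpBufPool reduces allocations for temporary buffers used by MustReadBlockAt
// when reading blocks from the backing file.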
var tmpBufPool bytesutil.ByteBufferPool

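// MustClose closes tbf and removes its underlying temporary file if it exists.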
func (tbf *tmpBlocksFile) MustClose() {
	if tbf.f != nil {
		// tbf.f could be non-nil if Finalize wasn't called.
		// In this case tbf.r must be nil.
		if tbf.r != nil {
			logger.Panicf("BUG: tbf.r must be nil when tbf.f!=nil")
		}

		// Try removing the file before closing it in order to prevent the in-memory data
		// in the page cache from being flushed to disk, saving disk write IO.
		// This may fail on non-posix systems such as Windows.
		// Gracefully handle this case by attempting to remove the file after closing it.
		fname := tbf.f.Name()
		errRemove := os.Remove(fname)
		if err := tbf.f.Close(); err != nil {
			logger.Panicf("FATAL: cannot close %q: %s", fname, err)
		}
		if errRemove != nil {
			if err := os.Remove(fname); err != nil {
				logger.Panicf("FATAL: cannot remove %q: %s", fname, err)
			}
		}
		tbf.f = nil
		return
	}

	if tbf.r == nil {
		// Nothing to do.
		return
	}

	// Try removing the file before closing it in order to prevent the in-memory data
	// in the page cache from being flushed to disk, saving disk write IO.
	// This may fail on non-posix systems such as Windows.
	// Gracefully handle this case by attempting to remove the file after closing it.
	fname := tbf.r.Path()
	errRemove := os.Remove(fname)
	tbf.r.MustClose()
	if errRemove != nil {
		if err := os.Remove(fname); err != nil {
			logger.Panicf("FATAL: cannot remove %q: %s", fname, err)
		}
	}
	tbf.r = nil
}