Merge branch 'public-single-node' into victorialogs-wip

Aliaksandr Valialkin 2024-05-27 16:50:04 +02:00
commit 66e294edfb
20 changed files with 23 additions and 23 deletions

View file

@@ -43,7 +43,7 @@ services:
# storing logs and serving read queries.
victorialogs:
container_name: victorialogs
-image: docker.io/victoriametrics/victoria-logs:v0.12.0-victorialogs
+image: docker.io/victoriametrics/victoria-logs:v0.12.1-victorialogs
command:
- "--storageDataPath=/vlogs"
- "--httpListenAddr=:9428"

View file

@@ -22,7 +22,7 @@ services:
- -beat.uri=http://filebeat-victorialogs:5066
victorialogs:
-image: docker.io/victoriametrics/victoria-logs:v0.12.0-victorialogs
+image: docker.io/victoriametrics/victoria-logs:v0.12.1-victorialogs
volumes:
- victorialogs-filebeat-docker-vl:/vlogs
ports:

View file

@@ -13,7 +13,7 @@ services:
- "5140:5140"
victorialogs:
-image: docker.io/victoriametrics/victoria-logs:v0.12.0-victorialogs
+image: docker.io/victoriametrics/victoria-logs:v0.12.1-victorialogs
volumes:
- victorialogs-filebeat-syslog-vl:/vlogs
ports:

View file

@@ -11,7 +11,7 @@ services:
- "5140:5140"
victorialogs:
-image: docker.io/victoriametrics/victoria-logs:v0.12.0-victorialogs
+image: docker.io/victoriametrics/victoria-logs:v0.12.1-victorialogs
volumes:
- victorialogs-fluentbit-vl:/vlogs
ports:

View file

@@ -14,7 +14,7 @@ services:
- "5140:5140"
victorialogs:
-image: docker.io/victoriametrics/victoria-logs:v0.12.0-victorialogs
+image: docker.io/victoriametrics/victoria-logs:v0.12.1-victorialogs
volumes:
- victorialogs-logstash-vl:/vlogs
ports:

View file

@@ -12,7 +12,7 @@ services:
- "5140:5140"
vlogs:
-image: docker.io/victoriametrics/victoria-logs:v0.12.0-victorialogs
+image: docker.io/victoriametrics/victoria-logs:v0.12.1-victorialogs
volumes:
- victorialogs-promtail-docker:/vlogs
ports:

View file

@@ -22,7 +22,7 @@ services:
condition: service_healthy
victorialogs:
-image: docker.io/victoriametrics/victoria-logs:v0.12.0-victorialogs
+image: docker.io/victoriametrics/victoria-logs:v0.12.1-victorialogs
volumes:
- victorialogs-vector-docker-vl:/vlogs
ports:

View file

@@ -3,7 +3,7 @@ version: '3'
services:
# Run `make package-victoria-logs` to build victoria-logs image
vlogs:
-image: docker.io/victoriametrics/victoria-logs:v0.12.0-victorialogs
+image: docker.io/victoriametrics/victoria-logs:v0.12.1-victorialogs
volumes:
- vlogs:/vlogs
ports:

View file

@@ -580,7 +580,7 @@ See also:
### Range comparison filter
LogsQL supports `field:>X`, `field:>=X`, `field:<X` and `field:<=X` filters, where `field` is the name of [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model)
-and `X` is either [numeric value](#numeric-values) or a string. For example, the following query returns logs containing numeric values for the `response_size` field bigger than 10*1024:
+and `X` is either [numeric value](#numeric-values) or a string. For example, the following query returns logs containing numeric values for the `response_size` field bigger than `10*1024`:
```logsql
response_size:>10KiB

View file

@@ -34,8 +34,8 @@ Just download archive for the needed Operating system and architecture, unpack i
For example, the following commands download VictoriaLogs archive for Linux/amd64, unpack and run it:
```sh
-curl -L -O https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v0.12.0-victorialogs/victoria-logs-linux-amd64-v0.12.0-victorialogs.tar.gz
-tar xzf victoria-logs-linux-amd64-v0.12.0-victorialogs.tar.gz
+curl -L -O https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v0.12.1-victorialogs/victoria-logs-linux-amd64-v0.12.1-victorialogs.tar.gz
+tar xzf victoria-logs-linux-amd64-v0.12.1-victorialogs.tar.gz
./victoria-logs-prod
```
@@ -59,7 +59,7 @@ Here is the command to run VictoriaLogs in a Docker container:
```sh
docker run --rm -it -p 9428:9428 -v ./victoria-logs-data:/victoria-logs-data \
-docker.io/victoriametrics/victoria-logs:v0.12.0-victorialogs
+docker.io/victoriametrics/victoria-logs:v0.12.1-victorialogs
```
See also:

View file

@@ -58,7 +58,7 @@ func (pc *pipeCopy) hasFilterInWithQuery() bool {
return false
}
-func (pc *pipeCopy) initFilterInValues(cache map[string][]string, getFieldValuesFunc getFieldValuesFunc) (pipe, error) {
+func (pc *pipeCopy) initFilterInValues(_ map[string][]string, _ getFieldValuesFunc) (pipe, error) {
return pc, nil
}
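
The change above, repeated across the other pipe_*.go files below, renames parameters that these no-op implementations never read to the blank identifier `_`, keeping the method signature unchanged (presumably as required by the pipe interface). A minimal sketch of the idiom, using hypothetical names not taken from the repository:

```go
package main

import "fmt"

// double ignores its first parameter: the blank identifier documents that the
// value is intentionally unused while keeping the two-parameter signature a
// caller (or an interface) may require.
func double(_ string, n int) int {
	return n * 2
}

func main() {
	fmt.Println(double("ignored", 21)) // prints 42
}
```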

View file

@@ -40,7 +40,7 @@ func (pd *pipeDelete) hasFilterInWithQuery() bool {
return false
}
-func (pd *pipeDelete) initFilterInValues(cache map[string][]string, getFieldValuesFunc getFieldValuesFunc) (pipe, error) {
+func (pd *pipeDelete) initFilterInValues(_ map[string][]string, _ getFieldValuesFunc) (pipe, error) {
return pd, nil
}

View file

@@ -45,7 +45,7 @@ func (pf *pipeFieldNames) hasFilterInWithQuery() bool {
return false
}
-func (pf *pipeFieldNames) initFilterInValues(cache map[string][]string, getFieldValuesFunc getFieldValuesFunc) (pipe, error) {
+func (pf *pipeFieldNames) initFilterInValues(_ map[string][]string, _ getFieldValuesFunc) (pipe, error) {
return pf, nil
}

View file

@@ -57,7 +57,7 @@ func (pf *pipeFields) hasFilterInWithQuery() bool {
return false
}
-func (pf *pipeFields) initFilterInValues(cache map[string][]string, getFieldValuesFunc getFieldValuesFunc) (pipe, error) {
+func (pf *pipeFields) initFilterInValues(_ map[string][]string, _ getFieldValuesFunc) (pipe, error) {
return pf, nil
}

View file

@@ -28,7 +28,7 @@ func (pl *pipeLimit) hasFilterInWithQuery() bool {
return false
}
-func (pl *pipeLimit) initFilterInValues(cache map[string][]string, getFieldValuesFunc getFieldValuesFunc) (pipe, error) {
+func (pl *pipeLimit) initFilterInValues(_ map[string][]string, _ getFieldValuesFunc) (pipe, error) {
return pl, nil
}

View file

@@ -28,7 +28,7 @@ func (po *pipeOffset) hasFilterInWithQuery() bool {
return false
}
-func (po *pipeOffset) initFilterInValues(cache map[string][]string, getFieldValuesFunc getFieldValuesFunc) (pipe, error) {
+func (po *pipeOffset) initFilterInValues(_ map[string][]string, _ getFieldValuesFunc) (pipe, error) {
return po, nil
}

View file

@@ -42,7 +42,7 @@ func (pp *pipePackJSON) hasFilterInWithQuery() bool {
return false
}
-func (pp *pipePackJSON) initFilterInValues(cache map[string][]string, getFieldValuesFunc getFieldValuesFunc) (pipe, error) {
+func (pp *pipePackJSON) initFilterInValues(_ map[string][]string, _ getFieldValuesFunc) (pipe, error) {
return pp, nil
}

View file

@@ -75,7 +75,7 @@ func (ps *pipeSort) hasFilterInWithQuery() bool {
return false
}
-func (ps *pipeSort) initFilterInValues(cache map[string][]string, getFieldValuesFunc getFieldValuesFunc) (pipe, error) {
+func (ps *pipeSort) initFilterInValues(_ map[string][]string, _ getFieldValuesFunc) (pipe, error) {
return ps, nil
}

View file

@@ -59,7 +59,7 @@ func (pu *pipeUniq) hasFilterInWithQuery() bool {
return false
}
-func (pu *pipeUniq) initFilterInValues(cache map[string][]string, getFieldValuesFunc getFieldValuesFunc) (pipe, error) {
+func (pu *pipeUniq) initFilterInValues(_ map[string][]string, _ getFieldValuesFunc) (pipe, error) {
return pu, nil
}

View file

@@ -144,7 +144,7 @@ func (pup *pipeUnrollProcessor) writeBlock(workerID uint, br *blockResult) {
if needStop(pup.stopCh) {
return
}
-shard.writeUnrolledFields(br, pu.fields, columnValues, rowIdx)
+shard.writeUnrolledFields(pu.fields, columnValues, rowIdx)
} else {
fields = fields[:0]
for i, f := range pu.fields {
@@ -163,7 +163,7 @@ func (pup *pipeUnrollProcessor) writeBlock(workerID uint, br *blockResult) {
shard.a.reset()
}
-func (shard *pipeUnrollProcessorShard) writeUnrolledFields(br *blockResult, fieldNames []string, columnValues [][]string, rowIdx int) {
+func (shard *pipeUnrollProcessorShard) writeUnrolledFields(fieldNames []string, columnValues [][]string, rowIdx int) {
// unroll values at rowIdx row
shard.unrolledValues = slicesutil.SetLength(shard.unrolledValues, len(columnValues))
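
The two hunks above drop the unused `br *blockResult` argument from `writeUnrolledFields` and adjust its call site accordingly. A minimal sketch of the same kind of refactor on hypothetical code (the names below are not from the repository):

```go
package main

import "fmt"

// formatRow previously also took a buffer-like argument that it never read;
// dropping the unused parameter means each call site simply passes one
// argument less, with no change in behavior.
func formatRow(fields []string, rowIdx int) string {
	return fmt.Sprintf("row %d: %v", rowIdx, fields)
}

func main() {
	fields := []string{"level", "_msg"}
	fmt.Println(formatRow(fields, 0))
}
```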