Merge branch 'public-single-node' into victorialogs-wip

This commit is contained in:
Aliaksandr Valialkin 2024-06-04 01:52:19 +02:00
commit a9d06710c1
No known key found for this signature in database
GPG key ID: 52C003EE2BCDB9EB
15 changed files with 39 additions and 33 deletions

View file

@ -362,14 +362,14 @@ func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID,
return nil, err
}
if len(rows) <= limit {
// Fast path - the requested time range contains up to limit rows
// Fast path - the requested time range contains up to limit rows.
sortRowsByTime(rows)
return rows, nil
}
// Slow path - search for the time range with the requested limit rows.
start, end := q.GetFilterTimeRange()
d := (end - start) / 2
d := end/2 - start/2
start += d
qOrig := q
@ -381,10 +381,10 @@ func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID,
return nil, err
}
if len(rows) == limit || d < 1e6 {
if len(rows) == limit || len(rows) > limit && d < 10e6 || d == 0 {
sortRowsByTime(rows)
if len(rows) > limit {
rows = rows[:limit]
rows = rows[len(rows)-limit:]
}
return rows, nil
}

View file

@ -43,7 +43,7 @@ services:
# storing logs and serving read queries.
victorialogs:
container_name: victorialogs
image: docker.io/victoriametrics/victoria-logs:v0.15.0-victorialogs
image: docker.io/victoriametrics/victoria-logs:v0.16.0-victorialogs
command:
- "--storageDataPath=/vlogs"
- "--httpListenAddr=:9428"

View file

@ -22,7 +22,7 @@ services:
- -beat.uri=http://filebeat-victorialogs:5066
victorialogs:
image: docker.io/victoriametrics/victoria-logs:v0.15.0-victorialogs
image: docker.io/victoriametrics/victoria-logs:v0.16.0-victorialogs
volumes:
- victorialogs-filebeat-docker-vl:/vlogs
ports:

View file

@ -13,7 +13,7 @@ services:
- "5140:5140"
victorialogs:
image: docker.io/victoriametrics/victoria-logs:v0.15.0-victorialogs
image: docker.io/victoriametrics/victoria-logs:v0.16.0-victorialogs
volumes:
- victorialogs-filebeat-syslog-vl:/vlogs
ports:

View file

@ -11,7 +11,7 @@ services:
- "5140:5140"
victorialogs:
image: docker.io/victoriametrics/victoria-logs:v0.15.0-victorialogs
image: docker.io/victoriametrics/victoria-logs:v0.16.0-victorialogs
volumes:
- victorialogs-fluentbit-vl:/vlogs
ports:

View file

@ -14,7 +14,7 @@ services:
- "5140:5140"
victorialogs:
image: docker.io/victoriametrics/victoria-logs:v0.15.0-victorialogs
image: docker.io/victoriametrics/victoria-logs:v0.16.0-victorialogs
volumes:
- victorialogs-logstash-vl:/vlogs
ports:

View file

@ -12,7 +12,7 @@ services:
- "5140:5140"
vlogs:
image: docker.io/victoriametrics/victoria-logs:v0.15.0-victorialogs
image: docker.io/victoriametrics/victoria-logs:v0.16.0-victorialogs
volumes:
- victorialogs-promtail-docker:/vlogs
ports:

View file

@ -22,7 +22,7 @@ services:
condition: service_healthy
victorialogs:
image: docker.io/victoriametrics/victoria-logs:v0.15.0-victorialogs
image: docker.io/victoriametrics/victoria-logs:v0.16.0-victorialogs
volumes:
- victorialogs-vector-docker-vl:/vlogs
ports:

View file

@ -3,7 +3,7 @@ version: '3'
services:
# Run `make package-victoria-logs` to build victoria-logs image
vlogs:
image: docker.io/victoriametrics/victoria-logs:v0.15.0-victorialogs
image: docker.io/victoriametrics/victoria-logs:v0.16.0-victorialogs
volumes:
- vlogs:/vlogs
ports:

View file

@ -34,8 +34,8 @@ Just download the archive for the needed operating system and architecture, unpack it
For example, the following commands download VictoriaLogs archive for Linux/amd64, unpack and run it:
```sh
curl -L -O https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v0.15.0-victorialogs/victoria-logs-linux-amd64-v0.15.0-victorialogs.tar.gz
tar xzf victoria-logs-linux-amd64-v0.15.0-victorialogs.tar.gz
curl -L -O https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v0.16.0-victorialogs/victoria-logs-linux-amd64-v0.16.0-victorialogs.tar.gz
tar xzf victoria-logs-linux-amd64-v0.16.0-victorialogs.tar.gz
./victoria-logs-prod
```
@ -59,7 +59,7 @@ Here is the command to run VictoriaLogs in a Docker container:
```sh
docker run --rm -it -p 9428:9428 -v ./victoria-logs-data:/victoria-logs-data \
docker.io/victoriametrics/victoria-logs:v0.15.0-victorialogs
docker.io/victoriametrics/victoria-logs:v0.16.0-victorialogs
```
See also:

View file

@ -377,3 +377,23 @@ over logs for the last 5 minutes:
```logsql
_time:5m | uniq by (host, path)
```
## How to return last N logs for the given query?
Use [`sort` pipe with limit](https://docs.victoriametrics.com/victorialogs/logsql/#sort-pipe). For example, the following query returns the last 10 logs with the `error`
[word](https://docs.victoriametrics.com/victorialogs/logsql/#word) in the [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field)
over the logs for the last 5 minutes:
```logsql
_time:5m error | sort by (_time desc) limit 10
```
It sorts the matching logs by [`_time` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#time-field) in descending order and then selects
the first 10 logs with the highest values for the `_time` field.
If the query is sent to [`/select/logsql/query` HTTP API](https://docs.victoriametrics.com/victorialogs/querying/#querying-logs), then the `limit=N` query arg
can be passed to it in order to return up to `N` of the latest log entries. For example, the following command returns up to 10 of the latest log entries containing the `error` word:
```sh
curl http://localhost:9428/select/logsql/query -d 'query=error' -d 'limit=10'
```

View file

@ -114,16 +114,14 @@ func (fa *filterAnd) getByFieldTokens() []fieldTokens {
func (fa *filterAnd) initByFieldTokens() {
m := make(map[string]map[string]struct{})
byFieldFilters := make(map[string]int)
var fieldNames []string
mergeFieldTokens := func(fieldName string, tokens []string) {
fieldName = getCanonicalColumnName(fieldName)
byFieldFilters[fieldName]++
if len(tokens) == 0 {
return
}
fieldName = getCanonicalColumnName(fieldName)
mTokens, ok := m[fieldName]
if !ok {
fieldNames = append(fieldNames, fieldName)
@ -165,11 +163,6 @@ func (fa *filterAnd) initByFieldTokens() {
var byFieldTokens []fieldTokens
for _, fieldName := range fieldNames {
if byFieldFilters[fieldName] < 2 {
// It is faster to perform bloom filter match inline when visiting the corresponding column
continue
}
mTokens := m[fieldName]
tokens := make([]string, 0, len(mTokens))
for token := range mTokens {

View file

@ -5,7 +5,7 @@ type filterNoop struct {
}
func (fn *filterNoop) String() string {
return ""
return "*"
}
func (fn *filterNoop) updateNeededFields(_ fieldsSet) {

View file

@ -127,16 +127,14 @@ func (fo *filterOr) getByFieldTokens() []fieldTokens {
func (fo *filterOr) initByFieldTokens() {
m := make(map[string][][]string)
byFieldFilters := make(map[string]int)
var fieldNames []string
mergeFieldTokens := func(fieldName string, tokens []string) {
fieldName = getCanonicalColumnName(fieldName)
byFieldFilters[fieldName]++
if len(tokens) == 0 {
return
}
fieldName = getCanonicalColumnName(fieldName)
if _, ok := m[fieldName]; !ok {
fieldNames = append(fieldNames, fieldName)
}
@ -173,11 +171,6 @@ func (fo *filterOr) initByFieldTokens() {
var byFieldTokens []fieldTokens
for _, fieldName := range fieldNames {
if byFieldFilters[fieldName] < 2 {
// It is faster to perform bloom filter match inline when visiting the corresponding column
continue
}
commonTokens := getCommonTokens(m[fieldName])
if len(commonTokens) > 0 {
byFieldTokens = append(byFieldTokens, fieldTokens{

View file

@ -1031,7 +1031,7 @@ func TestParseQuerySuccess(t *testing.T) {
sum(duration) if (host:in('foo.com', 'bar.com') and path:/foobar) as bar`,
`* | stats by (_time:1d offset -2h, f2) count(*) if (is_admin:true or "foo bar"*) as foo, sum(duration) if (host:in(foo.com,bar.com) path:"/foobar") as bar`)
f(`* | stats count(x) if (error ip:in(_time:1d | fields ip)) rows`, `* | stats count(x) if (error ip:in(_time:1d | fields ip)) as rows`)
f(`* | stats count() if () rows`, `* | stats count(*) if () as rows`)
f(`* | stats count() if () rows`, `* | stats count(*) if (*) as rows`)
// sort pipe
f(`* | sort`, `* | sort`)