Aliaksandr Valialkin 2024-06-03 16:58:47 +02:00
parent 001f8969f8
commit 28cee4e9db
14 changed files with 299 additions and 84 deletions


@@ -298,11 +298,36 @@ func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Req
 		httpserver.Errorf(w, r, "%s", err)
 		return
 	}
-	if limit > 0 {
-		q.AddPipeLimit(uint64(limit))
-	}
 	bw := getBufferedWriter(w)
+	defer func() {
+		bw.FlushIgnoreErrors()
+		putBufferedWriter(bw)
+	}()
+
+	w.Header().Set("Content-Type", "application/stream+json")
+
+	if limit > 0 {
+		if q.CanReturnLastNResults() {
+			rows, err := getLastNQueryResults(ctx, tenantIDs, q, limit)
+			if err != nil {
+				httpserver.Errorf(w, r, "%s", err)
+				return
+			}
+			bb := blockResultPool.Get()
+			b := bb.B
+			for i := range rows {
+				b = logstorage.MarshalFieldsToJSON(b[:0], rows[i].fields)
+				b = append(b, '\n')
+				bw.WriteIgnoreErrors(b)
+			}
+			bb.B = b
+			blockResultPool.Put(bb)
+			return
+		}
+
+		q.AddPipeLimit(uint64(limit))
+		q.Optimize()
+	}

 	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
 		if len(columns) == 0 || len(columns[0].Values) == 0 {
@@ -317,20 +342,103 @@ func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Req
 		blockResultPool.Put(bb)
 	}
-	w.Header().Set("Content-Type", "application/stream+json")
-	q.Optimize()
-	err = vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock)
-
-	bw.FlushIgnoreErrors()
-	putBufferedWriter(bw)
-	if err != nil {
+	if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
 		httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
 	}
 }

 var blockResultPool bytesutil.ByteBufferPool

+type row struct {
+	timestamp int64
+	fields    []logstorage.Field
+}
+
+func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
+	q.AddPipeLimit(uint64(limit + 1))
+	q.Optimize()
+	rows, err := getQueryResultsWithLimit(ctx, tenantIDs, q, limit+1)
+	if err != nil {
+		return nil, err
+	}
+	if len(rows) <= limit {
+		// Fast path - the requested time range contains up to limit rows
+		sortRowsByTime(rows)
+		return rows, nil
+	}
+
+	// Slow path - search for the time range with the requested limit rows.
+	start, end := q.GetFilterTimeRange()
+	d := (end - start) / 2
+	start += d
+	qOrig := q
+	for {
+		q = qOrig.Clone()
+		q.AddTimeFilter(start, end)
+		rows, err := getQueryResultsWithLimit(ctx, tenantIDs, q, limit+1)
+		if err != nil {
+			return nil, err
+		}
+		if len(rows) == limit || d == 0 {
+			sortRowsByTime(rows)
+			if len(rows) > limit {
+				rows = rows[:limit]
+			}
+			return rows, nil
+		}
+		lastBit := d & 1
+		d /= 2
+		if len(rows) > limit {
+			start += d
+		} else {
+			start -= d + lastBit
+		}
+	}
+}
+
+func sortRowsByTime(rows []row) {
+	sort.Slice(rows, func(i, j int) bool {
+		return rows[i].timestamp < rows[j].timestamp
+	})
+}
+
+func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
+	ctxWithCancel, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var rows []row
+	var rowsLock sync.Mutex
+	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
+		rowsLock.Lock()
+		defer rowsLock.Unlock()
+		for i, timestamp := range timestamps {
+			fields := make([]logstorage.Field, len(columns))
+			for j := range columns {
+				f := &fields[j]
+				f.Name = strings.Clone(columns[j].Name)
+				f.Value = strings.Clone(columns[j].Values[i])
+			}
+			rows = append(rows, row{
+				timestamp: timestamp,
+				fields:    fields,
+			})
+		}
+		if len(rows) >= limit {
+			cancel()
+		}
+	}
+	if err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, q, writeBlock); err != nil {
+		return nil, err
+	}
+
+	return rows, nil
+}
+
 func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID, error) {
 	// Extract tenantID
 	tenantID, err := logstorage.GetTenantIDFromRequest(r)
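Note: `getQueryResultsWithLimit` above stops the storage query early by cancelling the context passed to `vlstorage.RunQuery` once `limit` rows have been collected. A minimal standalone sketch of that cancellation pattern follows; `collectUpTo` and the producer are hypothetical stand-ins, not the VictoriaLogs API:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// collectUpTo gathers at most limit items from a producer and cancels the
// producer's context as soon as the limit is reached - the same pattern
// getQueryResultsWithLimit uses with vlstorage.RunQuery.
func collectUpTo(ctx context.Context, limit int, produce func(ctx context.Context, emit func(int))) []int {
	ctxWithCancel, cancel := context.WithCancel(ctx)
	defer cancel()

	var mu sync.Mutex
	var items []int
	produce(ctxWithCancel, func(v int) {
		mu.Lock()
		defer mu.Unlock()
		if len(items) >= limit {
			return
		}
		items = append(items, v)
		if len(items) >= limit {
			cancel() // stop the producer early; any further work would be wasted
		}
	})
	return items
}

func main() {
	produce := func(ctx context.Context, emit func(int)) {
		for i := 0; ; i++ {
			select {
			case <-ctx.Done():
				return // the consumer has seen enough
			default:
				emit(i)
			}
		}
	}
	fmt.Println(collectUpTo(context.Background(), 5, produce)) // [0 1 2 3 4]
}
```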
@@ -373,10 +481,10 @@ func getTimeNsec(r *http.Request, argName string) (int64, bool, error) {
 	if s == "" {
 		return 0, false, nil
 	}
-	currentTimestamp := float64(time.Now().UnixNano()) / 1e9
-	secs, err := promutils.ParseTimeAt(s, currentTimestamp)
+	currentTimestamp := time.Now().UnixNano()
+	nsecs, err := promutils.ParseTimeAt(s, currentTimestamp)
 	if err != nil {
 		return 0, false, fmt.Errorf("cannot parse %s=%s: %w", argName, s, err)
 	}
-	return int64(secs * 1e9), true, nil
+	return nsecs, true, nil
 }
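Note: `getLastNQueryResults` above locates the time range holding the newest `limit` rows by shifting the range start with a halving step `d` until a probe query returns exactly `limit` rows. The sketch below applies the same search to an in-memory sorted slice; `lastNStart` and its `count` helper are hypothetical simplifications of the probe queries:

```go
package main

import "fmt"

// lastNStart mirrors the halving search in getLastNQueryResults: it moves the
// range start by a shrinking step d until [start, end] holds exactly limit
// timestamps, settling for the current position once d reaches 0.
func lastNStart(timestamps []int64, end int64, limit int) int64 {
	count := func(start int64) int { // stands in for one probe query
		n := 0
		for _, ts := range timestamps {
			if ts >= start && ts <= end {
				n++
			}
		}
		return n
	}
	start := timestamps[0] // timestamps are assumed sorted in ascending order
	d := (end - start) / 2
	start += d
	for {
		if n := count(start); n == limit || d == 0 {
			return start
		} else {
			lastBit := d & 1
			d /= 2
			if n > limit {
				start += d // too many rows: move the range start forward
			} else {
				start -= d + lastBit // too few rows: move it back
			}
		}
	}
}

func main() {
	ts := []int64{10, 20, 30, 40, 50, 60, 70, 80, 90, 100}
	fmt.Println(lastNStart(ts, 100, 3)) // 77: the range [77, 100] holds the last 3 timestamps
}
```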


@@ -19,6 +19,7 @@ according to [these docs](https://docs.victoriametrics.com/victorialogs/quicksta
 ## tip

+* FEATURE: return the last `N` matching logs from [`/select/logsql/query` HTTP API](https://docs.victoriametrics.com/victorialogs/querying/#querying-logs) with the maximum timestamps if `limit=N` query arg is passed to it. Previously a random subset of matching logs could be returned, which could complicate investigation of the returned logs.
 * FEATURE: add [`drop_empty_fields` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#drop_empty_fields-pipe) for dropping [log fields](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) with empty values.

 ## [v0.15.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v0.15.0-victorialogs)


@@ -58,12 +58,14 @@ By default the `/select/logsql/query` returns all the log entries matching the g
 - By closing the response stream at any time. VictoriaLogs stops query execution and frees all the resources occupied by the request as soon as it detects closed client connection.
   So it is safe running [`*` query](https://docs.victoriametrics.com/victorialogs/logsql/#any-value-filter), which selects all the logs, even if trillions of logs are stored in VictoriaLogs.
-- By specifying the maximum number of log entries, which can be returned in the response via `limit` query arg. For example, the following request returns
-  up to 10 matching log entries:
+- By specifying the maximum number of log entries, which can be returned in the response via `limit` query arg. For example, the following command returns
+  up to 10 most recently added log entries with the `error` [word](https://docs.victoriametrics.com/victorialogs/logsql/#word)
+  in the [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field):

   ```sh
   curl http://localhost:9428/select/logsql/query -d 'query=error' -d 'limit=10'
   ```

-- By adding [`limit` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#limit-pipe) to the query. For example:
+- By adding [`limit` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#limit-pipe) to the query. For example, the following command returns up to 10 **random** log entries
+  with the `error` [word](https://docs.victoriametrics.com/victorialogs/logsql/#word) in the [`_msg` field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field):

   ```sh
   curl http://localhost:9428/select/logsql/query -d 'query=error | limit 10'
   ```
@@ -87,8 +89,11 @@ This allows post-processing the returned lines at the client side with the usua
 without worrying about resource usage at VictoriaLogs side. See [these docs](#command-line) for more details.

 The returned lines aren't sorted by default, since sorting disables the ability to send matching log entries to response stream as soon as they are found.
-Query results can be sorted either at VictoriaLogs side via [`sort` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#sort-pipe)
-or at client side with the usual `sort` command according to [these docs](#command-line).
+Query results can be sorted in the following ways:
+
+- By passing `limit=N` query arg to `/select/logsql/query`. Up to `N` most recent matching log entries are returned in the response.
+- By adding [`sort` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#sort-pipe) to the query.
+- By using Unix `sort` command at client side according to [these docs](#command-line).

 By default the `(AccountID=0, ProjectID=0)` [tenant](https://docs.victoriametrics.com/victorialogs/#multitenancy) is queried.
 If you need to query another tenant, then specify it via `AccountID` and `ProjectID` http request headers. For example, the following query searches


@@ -12,7 +12,7 @@ func TestLogfmtParser(t *testing.T) {
 		defer putLogfmtParser(p)

 		p.parse(s)
-		result := marshalFieldsToJSON(nil, p.fields)
+		result := MarshalFieldsToJSON(nil, p.fields)
 		if string(result) != resultExpected {
 			t.Fatalf("unexpected result when parsing [%s]; got\n%s\nwant\n%s\n", s, result, resultExpected)
 		}


@@ -279,6 +279,38 @@ func (q *Query) AddCountByTimePipe(step, off int64, fields []string) {
 	}
 }

+// Clone returns a copy of q.
+func (q *Query) Clone() *Query {
+	qStr := q.String()
+	qCopy, err := ParseQuery(qStr)
+	if err != nil {
+		logger.Panicf("BUG: cannot parse %q: %s", qStr, err)
+	}
+	return qCopy
+}
+
+// CanReturnLastNResults returns true if time range filter at q can be adjusted for returning the last N results.
+func (q *Query) CanReturnLastNResults() bool {
+	for _, p := range q.pipes {
+		switch p.(type) {
+		case *pipeFieldNames,
+			*pipeFieldValues,
+			*pipeLimit,
+			*pipeOffset,
+			*pipeSort,
+			*pipeStats,
+			*pipeUniq:
+			return false
+		}
+	}
+	return true
+}
+
+// GetFilterTimeRange returns filter time range for the given q.
+func (q *Query) GetFilterTimeRange() (int64, int64) {
+	return getFilterTimeRange(q.f)
+}
+
 // AddTimeFilter adds global filter _time:[start ... end] to q.
 func (q *Query) AddTimeFilter(start, end int64) {
 	startStr := marshalTimestampRFC3339NanoString(nil, start)
@@ -1394,12 +1426,12 @@ func parseFilterTime(lex *lexer) (*filterTime, error) {
 	sLower := strings.ToLower(s)
 	if sLower == "now" || startsWithYear(s) {
 		// Parse '_time:YYYY-MM-DD', which transforms to '_time:[YYYY-MM-DD, YYYY-MM-DD+1)'
-		t, err := promutils.ParseTimeAt(s, float64(lex.currentTimestamp)/1e9)
+		nsecs, err := promutils.ParseTimeAt(s, lex.currentTimestamp)
 		if err != nil {
 			return nil, fmt.Errorf("cannot parse _time filter: %w", err)
 		}
 		// Round to milliseconds
-		startTime := int64(math.Round(t*1e3)) * 1e6
+		startTime := nsecs
 		endTime := getMatchingEndTime(startTime, s)
 		ft := &filterTime{
 			minTimestamp: startTime,
@@ -1549,12 +1581,11 @@ func parseTime(lex *lexer) (int64, string, error) {
 	if err != nil {
 		return 0, "", err
 	}
-	t, err := promutils.ParseTimeAt(s, float64(lex.currentTimestamp)/1e9)
+	nsecs, err := promutils.ParseTimeAt(s, lex.currentTimestamp)
 	if err != nil {
 		return 0, "", err
 	}
-	// round to milliseconds
-	return int64(math.Round(t*1e3)) * 1e6, s, nil
+	return nsecs, s, nil
 }

 func quoteStringTokenIfNeeded(s string) string {
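Note: the three new `Query` methods are designed to compose: `Clone` re-parses `q.String()` so a probe never mutates the original query, while `GetFilterTimeRange` and `AddTimeFilter` bound the probe. A small usage sketch (the query string is illustrative; import path as used in this repository):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

func main() {
	q, err := logstorage.ParseQuery("_time:2024-05-31 error")
	if err != nil {
		panic(err)
	}
	start, end := q.GetFilterTimeRange()

	probe := q.Clone()                            // Clone re-parses q.String(), so q itself stays untouched
	probe.AddTimeFilter(start+(end-start)/2, end) // probe only the upper half of the range

	fmt.Println(q)     // original query, unchanged
	fmt.Println(probe) // clone carrying the extra _time filter
}
```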


@@ -1832,3 +1832,72 @@ func TestQueryGetNeededColumns(t *testing.T) {
 	f(`* | unroll (a, b) | count() r1`, `a,b`, ``)
 	f(`* | unroll if (q:w p:a) (a, b) | count() r1`, `a,b,p,q`, ``)
 }
+
+func TestQueryClone(t *testing.T) {
+	f := func(qStr string) {
+		t.Helper()
+
+		q, err := ParseQuery(qStr)
+		if err != nil {
+			t.Fatalf("cannot parse [%s]: %s", qStr, err)
+		}
+		qCopy := q.Clone()
+		qCopyStr := qCopy.String()
+		if qStr != qCopyStr {
+			t.Fatalf("unexpected cloned query\ngot\n%s\nwant\n%s", qCopyStr, qStr)
+		}
+	}
+
+	f("*")
+	f("error")
+	f("_time:5m error | fields foo, bar")
+	f("ip:in(foo | fields user_ip) bar | stats by (x:1h, y) count(*) if (user_id:in(q:w | fields abc)) as ccc")
+}
+
+func TestQueryGetFilterTimeRange(t *testing.T) {
+	f := func(qStr string, startExpected, endExpected int64) {
+		t.Helper()
+
+		q, err := ParseQuery(qStr)
+		if err != nil {
+			t.Fatalf("cannot parse [%s]: %s", qStr, err)
+		}
+		start, end := q.GetFilterTimeRange()
+		if start != startExpected || end != endExpected {
+			t.Fatalf("unexpected filter time range; got [%d, %d]; want [%d, %d]", start, end, startExpected, endExpected)
+		}
+	}
+
+	f("*", -9223372036854775808, 9223372036854775807)
+	f("_time:2024-05-31T10:20:30.456789123Z", 1717150830456789123, 1717150830456789123)
+	f("_time:2024-05-31", 1717113600000000000, 1717199999999999999)
+}
+
+func TestQueryCanReturnLastNResults(t *testing.T) {
+	f := func(qStr string, resultExpected bool) {
+		t.Helper()
+
+		q, err := ParseQuery(qStr)
+		if err != nil {
+			t.Fatalf("cannot parse [%s]: %s", qStr, err)
+		}
+		result := q.CanReturnLastNResults()
+		if result != resultExpected {
+			t.Fatalf("unexpected result for CanReturnLastNResults(%q); got %v; want %v", qStr, result, resultExpected)
+		}
+	}
+
+	f("*", true)
+	f("error", true)
+	f("error | fields foo | filter foo:bar", true)
+	f("error | extract '<foo>bar<baz>'", true)
+	f("* | rm x", true)
+	f("* | stats count() rows", false)
+	f("* | sort by (x)", false)
+	f("* | limit 10", false)
+	f("* | offset 10", false)
+	f("* | uniq (x)", false)
+	f("* | field_names", false)
+	f("* | field_values x", false)
+}


@@ -126,7 +126,7 @@ func (ppp *pipePackJSONProcessor) writeBlock(workerID uint, br *blockResult) {
 			}

 			bufLen := len(buf)
-			buf = marshalFieldsToJSON(buf, fields)
+			buf = MarshalFieldsToJSON(buf, fields)
 			v := bytesutil.ToUnsafeString(buf[bufLen:])
 			shard.rc.addValue(v)
 		}


@@ -64,7 +64,8 @@ func (f *Field) marshalToJSON(dst []byte) []byte {
 	return dst
 }

-func marshalFieldsToJSON(dst []byte, fields []Field) []byte {
+// MarshalFieldsToJSON appends JSON-marshaled fields to dst and returns the result.
+func MarshalFieldsToJSON(dst []byte, fields []Field) []byte {
 	dst = append(dst, '{')
 	if len(fields) > 0 {
 		dst = fields[0].marshalToJSON(dst)
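Note: with `MarshalFieldsToJSON` exported, callers outside the `logstorage` package, such as the `/select/logsql/query` handler above, can render fields directly. A minimal usage sketch (the field values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

func main() {
	fields := []logstorage.Field{
		{Name: "_msg", Value: "cannot open file"},
		{Name: "level", Value: "error"},
	}
	// MarshalFieldsToJSON appends to the given buffer, so passing nil
	// allocates a fresh one, while a pooled buffer can be reused across calls.
	b := logstorage.MarshalFieldsToJSON(nil, fields)
	fmt.Printf("%s\n", b) // {"_msg":"cannot open file","level":"error"}
}
```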


@@ -99,7 +99,7 @@ func (sap *statsRowAnyProcessor) updateState(br *blockResult, rowIdx int) int {
 }

 func (sap *statsRowAnyProcessor) finalizeStats() string {
 	bb := bbPool.Get()
-	bb.B = marshalFieldsToJSON(bb.B, sap.fields)
+	bb.B = MarshalFieldsToJSON(bb.B, sap.fields)
 	result := string(bb.B)
 	bbPool.Put(bb)


@@ -206,7 +206,7 @@ func (smp *statsRowMaxProcessor) updateState(v string, br *blockResult, rowIdx i
 }

 func (smp *statsRowMaxProcessor) finalizeStats() string {
 	bb := bbPool.Get()
-	bb.B = marshalFieldsToJSON(bb.B, smp.fields)
+	bb.B = MarshalFieldsToJSON(bb.B, smp.fields)
 	result := string(bb.B)
 	bbPool.Put(bb)


@@ -206,7 +206,7 @@ func (smp *statsRowMinProcessor) updateState(v string, br *blockResult, rowIdx i
 }

 func (smp *statsRowMinProcessor) finalizeStats() string {
 	bb := bbPool.Get()
-	bb.B = marshalFieldsToJSON(bb.B, smp.fields)
+	bb.B = MarshalFieldsToJSON(bb.B, smp.fields)
 	result := string(bb.B)
 	bbPool.Put(bb)


@@ -969,7 +969,7 @@ func TestParseStreamFieldsSuccess(t *testing.T) {
 		if err != nil {
 			t.Fatalf("unexpected error: %s", err)
 		}
-		result := marshalFieldsToJSON(nil, labels)
+		result := MarshalFieldsToJSON(nil, labels)
 		if string(result) != resultExpected {
 			t.Fatalf("unexpected result\ngot\n%s\nwant\n%s", result, resultExpected)
 		}


@@ -14,12 +14,12 @@ import (
 //
 // It returns unix timestamp in milliseconds.
 func ParseTimeMsec(s string) (int64, error) {
-	currentTimestamp := float64(time.Now().UnixNano()) / 1e9
-	secs, err := ParseTimeAt(s, currentTimestamp)
+	currentTimestamp := time.Now().UnixNano()
+	nsecs, err := ParseTimeAt(s, currentTimestamp)
 	if err != nil {
 		return 0, err
 	}
-	msecs := int64(math.Round(secs * 1000))
+	msecs := int64(math.Round(float64(nsecs) / 1e6))
 	return msecs, nil
 }
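Note: `ParseTimeMsec` now derives milliseconds from the nanosecond result in a single rounding step. The `float64` detour is safe here, since its error at unix-nanosecond magnitudes stays far below one millisecond, as the following sketch with an arbitrary timestamp shows:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// 678901234 ns within the second rounds to 679 ms; the float64
	// conversion error at this magnitude is on the order of hundreds of
	// nanoseconds, well under the 0.5 ms rounding threshold.
	nsecs := int64(1562529662678901234)
	msecs := int64(math.Round(float64(nsecs) / 1e6))
	fmt.Println(msecs) // 1562529662679
}
```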
@@ -33,13 +33,13 @@ const (
 //
 // See https://docs.victoriametrics.com/single-server-victoriametrics/#timestamp-formats
 //
-// It returns unix timestamp in seconds.
-func ParseTimeAt(s string, currentTimestamp float64) (float64, error) {
+// It returns unix timestamp in nanoseconds.
+func ParseTimeAt(s string, currentTimestamp int64) (int64, error) {
 	if s == "now" {
 		return currentTimestamp, nil
 	}
 	sOrig := s
-	tzOffset := float64(0)
+	tzOffset := int64(0)
 	if len(sOrig) > 6 {
 		// Try parsing timezone offset
 		tz := sOrig[len(sOrig)-6:]
@@ -53,7 +53,7 @@ func ParseTimeAt(s string, currentTimestamp float64) (float64, error) {
 		if err != nil {
 			return 0, fmt.Errorf("cannot parse minute from timezone offset %q: %w", tz, err)
 		}
-		tzOffset = float64(hour*3600 + minute*60)
+		tzOffset = int64(hour*3600+minute*60) * 1e9
 		if isPlus {
 			tzOffset = -tzOffset
 		}
@@ -71,7 +71,7 @@ func ParseTimeAt(s string, currentTimestamp float64) (float64, error) {
 		if d > 0 {
 			d = -d
 		}
-		return currentTimestamp + float64(d)/1e9, nil
+		return currentTimestamp + int64(d), nil
 	}
 	if len(s) == 4 {
 		// Parse YYYY
@@ -83,7 +83,7 @@ func ParseTimeAt(s string, currentTimestamp float64) (float64, error) {
 		if y > maxValidYear || y < minValidYear {
 			return 0, fmt.Errorf("cannot parse year from %q: year must be in range [%d, %d]", s, minValidYear, maxValidYear)
 		}
-		return tzOffset + float64(t.UnixNano())/1e9, nil
+		return tzOffset + t.UnixNano(), nil
 	}
 	if !strings.Contains(sOrig, "-") {
 		// Parse the timestamp in seconds or in milliseconds
@@ -95,7 +95,7 @@ func ParseTimeAt(s string, currentTimestamp float64) (float64, error) {
 			// The timestamp is in milliseconds. Convert it to seconds.
 			ts /= 1000
 		}
-		return ts, nil
+		return int64(math.Round(ts*1e3)) * 1e6, nil
 	}
 	if len(s) == 7 {
 		// Parse YYYY-MM
@@ -103,7 +103,7 @@ func ParseTimeAt(s string, currentTimestamp float64) (float64, error) {
 		if err != nil {
 			return 0, err
 		}
-		return tzOffset + float64(t.UnixNano())/1e9, nil
+		return tzOffset + t.UnixNano(), nil
 	}
 	if len(s) == 10 {
 		// Parse YYYY-MM-DD
@@ -111,7 +111,7 @@ func ParseTimeAt(s string, currentTimestamp float64) (float64, error) {
 		if err != nil {
 			return 0, err
 		}
-		return tzOffset + float64(t.UnixNano())/1e9, nil
+		return tzOffset + t.UnixNano(), nil
 	}
 	if len(s) == 13 {
 		// Parse YYYY-MM-DDTHH
@@ -119,7 +119,7 @@ func ParseTimeAt(s string, currentTimestamp float64) (float64, error) {
 		if err != nil {
 			return 0, err
 		}
-		return tzOffset + float64(t.UnixNano())/1e9, nil
+		return tzOffset + t.UnixNano(), nil
 	}
 	if len(s) == 16 {
 		// Parse YYYY-MM-DDTHH:MM
@@ -127,7 +127,7 @@ func ParseTimeAt(s string, currentTimestamp float64) (float64, error) {
 		if err != nil {
 			return 0, err
 		}
-		return tzOffset + float64(t.UnixNano())/1e9, nil
+		return tzOffset + t.UnixNano(), nil
 	}
 	if len(s) == 19 {
 		// Parse YYYY-MM-DDTHH:MM:SS
@@ -135,12 +135,12 @@ func ParseTimeAt(s string, currentTimestamp float64) (float64, error) {
 		if err != nil {
 			return 0, err
 		}
-		return tzOffset + float64(t.UnixNano())/1e9, nil
+		return tzOffset + t.UnixNano(), nil
 	}
 	// Parse RFC3339
 	t, err := time.Parse(time.RFC3339, sOrig)
 	if err != nil {
 		return 0, err
 	}
-	return float64(t.UnixNano()) / 1e9, nil
+	return t.UnixNano(), nil
 }
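Note: after this change both absolute timestamps and relative expressions resolve to unix nanoseconds against the supplied current time. A usage sketch (import path as used in this repository; the expected values match the tests below):

```go
package main

import (
	"fmt"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

func main() {
	now := time.Now().UnixNano()

	// Absolute timestamps come back as unix nanoseconds...
	nsecs, err := promutils.ParseTimeAt("2023-05-20T04:57:43.123Z", now)
	if err != nil {
		panic(err)
	}
	fmt.Println(nsecs) // 1684558663123000000

	// ...and relative expressions are resolved against the supplied current time.
	nsecs, err = promutils.ParseTimeAt("now-1h5m", now)
	if err != nil {
		panic(err)
	}
	fmt.Println(now - nsecs) // 3900000000000, i.e. 1h5m in nanoseconds
}
```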


@@ -6,7 +6,7 @@ import (
 )

 func TestParseTimeAtSuccess(t *testing.T) {
-	f := func(s string, currentTime, resultExpected float64) {
+	f := func(s string, currentTime, resultExpected int64) {
 		t.Helper()
 		result, err := ParseTimeAt(s, currentTime)
 		if err != nil {
@@ -17,65 +17,65 @@ func TestParseTimeAtSuccess(t *testing.T) {
 		}
 	}

-	now := float64(time.Now().UnixNano()) / 1e9
+	now := time.Now().UnixNano()

 	// unix timestamp in seconds
-	f("1562529662", now, 1562529662)
-	f("1562529662.678", now, 1562529662.678)
+	f("1562529662", now, 1562529662*1e9)
+	f("1562529662.678", now, 1562529662678*1e6)

 	// unix timestamp in milliseconds
-	f("1562529662678", now, 1562529662.678)
+	f("1562529662678", now, 1562529662678*1e6)

 	// duration relative to the current time
 	f("now", now, now)
-	f("1h5s", now, now-3605)
+	f("1h5s", now, now-3605*1e9)

 	// negative duration relative to the current time
-	f("-5m", now, now-5*60)
-	f("-123", now, now-123)
-	f("-123.456", now, now-123.456)
-	f("now-1h5m", now, now-(3600+5*60))
+	f("-5m", now, now-5*60*1e9)
+	f("-123", now, now-123*1e9)
+	f("-123.456", now, now-123456*1e6)
+	f("now-1h5m", now, now-(3600+5*60)*1e9)

 	// Year
-	f("2023", now, 1.6725312e+09)
-	f("2023Z", now, 1.6725312e+09)
-	f("2023+02:00", now, 1.672524e+09)
-	f("2023-02:00", now, 1.6725384e+09)
+	f("2023", now, 1.6725312e+09*1e9)
+	f("2023Z", now, 1.6725312e+09*1e9)
+	f("2023+02:00", now, 1.672524e+09*1e9)
+	f("2023-02:00", now, 1.6725384e+09*1e9)

 	// Year and month
-	f("2023-05", now, 1.6828992e+09)
-	f("2023-05Z", now, 1.6828992e+09)
-	f("2023-05+02:00", now, 1.682892e+09)
-	f("2023-05-02:00", now, 1.6829064e+09)
+	f("2023-05", now, 1.6828992e+09*1e9)
+	f("2023-05Z", now, 1.6828992e+09*1e9)
+	f("2023-05+02:00", now, 1.682892e+09*1e9)
+	f("2023-05-02:00", now, 1.6829064e+09*1e9)

 	// Year, month and day
-	f("2023-05-20", now, 1.6845408e+09)
-	f("2023-05-20Z", now, 1.6845408e+09)
-	f("2023-05-20+02:30", now, 1.6845318e+09)
-	f("2023-05-20-02:30", now, 1.6845498e+09)
+	f("2023-05-20", now, 1.6845408e+09*1e9)
+	f("2023-05-20Z", now, 1.6845408e+09*1e9)
+	f("2023-05-20+02:30", now, 1.6845318e+09*1e9)
+	f("2023-05-20-02:30", now, 1.6845498e+09*1e9)

 	// Year, month, day and hour
-	f("2023-05-20T04", now, 1.6845552e+09)
-	f("2023-05-20T04Z", now, 1.6845552e+09)
-	f("2023-05-20T04+02:30", now, 1.6845462e+09)
-	f("2023-05-20T04-02:30", now, 1.6845642e+09)
+	f("2023-05-20T04", now, 1.6845552e+09*1e9)
+	f("2023-05-20T04Z", now, 1.6845552e+09*1e9)
+	f("2023-05-20T04+02:30", now, 1.6845462e+09*1e9)
+	f("2023-05-20T04-02:30", now, 1.6845642e+09*1e9)

 	// Year, month, day, hour and minute
-	f("2023-05-20T04:57", now, 1.68455862e+09)
-	f("2023-05-20T04:57Z", now, 1.68455862e+09)
-	f("2023-05-20T04:57+02:30", now, 1.68454962e+09)
-	f("2023-05-20T04:57-02:30", now, 1.68456762e+09)
+	f("2023-05-20T04:57", now, 1.68455862e+09*1e9)
+	f("2023-05-20T04:57Z", now, 1.68455862e+09*1e9)
+	f("2023-05-20T04:57+02:30", now, 1.68454962e+09*1e9)
+	f("2023-05-20T04:57-02:30", now, 1.68456762e+09*1e9)

 	// Year, month, day, hour, minute and second
-	f("2023-05-20T04:57:43", now, 1.684558663e+09)
-	f("2023-05-20T04:57:43Z", now, 1.684558663e+09)
-	f("2023-05-20T04:57:43+02:30", now, 1.684549663e+09)
-	f("2023-05-20T04:57:43-02:30", now, 1.684567663e+09)
+	f("2023-05-20T04:57:43", now, 1.684558663e+09*1e9)
+	f("2023-05-20T04:57:43Z", now, 1.684558663e+09*1e9)
+	f("2023-05-20T04:57:43+02:30", now, 1.684549663e+09*1e9)
+	f("2023-05-20T04:57:43-02:30", now, 1.684567663e+09*1e9)

 	// milliseconds
-	f("2023-05-20T04:57:43.123Z", now, 1.6845586631230001e+09)
-	f("2023-05-20T04:57:43.123456789+02:30", now, 1.6845496631234567e+09)
-	f("2023-05-20T04:57:43.123456789-02:30", now, 1.6845676631234567e+09)
+	f("2023-05-20T04:57:43.123Z", now, 1684558663123000000)
+	f("2023-05-20T04:57:43.123456789+02:30", now, 1684549663123456789)
+	f("2023-05-20T04:57:43.123456789-02:30", now, 1684567663123456789)
 }

 func TestParseTimeMsecFailure(t *testing.T) {