diff --git a/app/vmselect/main.go b/app/vmselect/main.go
index 0af87b18f1..cd7cfaec9f 100644
--- a/app/vmselect/main.go
+++ b/app/vmselect/main.go
@@ -501,12 +501,11 @@ func sendPrometheusError(w http.ResponseWriter, r *http.Request, err error) {
 	}
 	w.WriteHeader(statusCode)
 
-	var ure promql.UserReadableError
+	var ure *promql.UserReadableError
 	if errors.As(err, &ure) {
-		prometheus.WriteErrorResponse(w, statusCode, ure.Err)
+		prometheus.WriteErrorResponse(w, statusCode, ure)
 		return
 	}
-
 	prometheus.WriteErrorResponse(w, statusCode, err)
 }
 
diff --git a/app/vmselect/promql/eval.go b/app/vmselect/promql/eval.go
index f21b404307..7f3a5808aa 100644
--- a/app/vmselect/promql/eval.go
+++ b/app/vmselect/promql/eval.go
@@ -294,7 +294,9 @@ func evalExprInternal(qt *querytracer.Tracer, ec *EvalConfig, e metricsql.Expr)
 func evalTransformFunc(qt *querytracer.Tracer, ec *EvalConfig, fe *metricsql.FuncExpr) ([]*timeseries, error) {
 	tf := getTransformFunc(fe.Name)
 	if tf == nil {
-		return nil, UserReadableError{Err: fmt.Errorf(`unknown func %q`, fe.Name)}
+		return nil, &UserReadableError{
+			Err: fmt.Errorf(`unknown func %q`, fe.Name),
+		}
 	}
 	args, err := evalExprs(qt, ec, fe.Args)
 	if err != nil {
@@ -336,7 +338,9 @@ func evalAggrFunc(qt *querytracer.Tracer, ec *EvalConfig, ae *metricsql.AggrFunc
 	}
 	af := getAggrFunc(ae.Name)
 	if af == nil {
-		return nil, UserReadableError{Err: fmt.Errorf(`unknown func %q`, ae.Name)}
+		return nil, &UserReadableError{
+			Err: fmt.Errorf(`unknown func %q`, ae.Name),
+		}
 	}
 	afa := &aggrFuncArg{
 		ae:   ae,
@@ -679,10 +683,14 @@ func evalRollupFunc(qt *querytracer.Tracer, ec *EvalConfig, funcName string, rf
 	}
 	tssAt, err := evalExpr(qt, ec, re.At)
 	if err != nil {
-		return nil, UserReadableError{Err: fmt.Errorf("cannot evaluate `@` modifier: %w", err)}
+		return nil, &UserReadableError{
+			Err: fmt.Errorf("cannot evaluate `@` modifier: %w", err),
+		}
 	}
 	if len(tssAt) != 1 {
-		return nil, UserReadableError{Err: fmt.Errorf("`@` modifier must return a single series; it returns %d series instead", len(tssAt))}
+		return nil, &UserReadableError{
+			Err: fmt.Errorf("`@` modifier must return a single series; it returns %d series instead", len(tssAt)),
+		}
 	}
 	atTimestamp := int64(tssAt[0].Values[0] * 1000)
 	ecNew := copyEvalConfig(ec)
@@ -742,7 +750,9 @@ func evalRollupFuncWithoutAt(qt *querytracer.Tracer, ec *EvalConfig, funcName st
 		rvs, err = evalRollupFuncWithSubquery(qt, ecNew, funcName, rf, expr, re)
 	}
 	if err != nil {
-		return nil, UserReadableError{Err: err}
+		return nil, &UserReadableError{
+			Err: err,
+		}
 	}
 	if funcName == "absent_over_time" {
 		rvs = aggregateAbsentOverTime(ec, re.Expr, rvs)
@@ -964,7 +974,9 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
 	sq := storage.NewSearchQuery(minTimestamp, ec.End, tfss, ec.MaxSeries)
 	rss, err := netstorage.ProcessSearchQuery(qt, sq, ec.Deadline)
 	if err != nil {
-		return nil, UserReadableError{Err: err}
+		return nil, &UserReadableError{
+			Err: err,
+		}
 	}
 	rssLen := rss.Len()
 	if rssLen == 0 {
@@ -1000,12 +1012,14 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
 	rml := getRollupMemoryLimiter()
 	if !rml.Get(uint64(rollupMemorySize)) {
 		rss.Cancel()
-		return nil, UserReadableError{Err: fmt.Errorf("not enough memory for processing %d data points across %d time series with %d points in each time series; "+
-			"total available memory for concurrent requests: %d bytes; "+
-			"requested memory: %d bytes; "+
-			"possible solutions are: reducing the number of matching time series; switching to node with more RAM; "+
-			"increasing -memory.allowedPercent; increasing `step` query arg (%gs)",
-			rollupPoints, timeseriesLen*len(rcs), pointsPerTimeseries, rml.MaxSize, uint64(rollupMemorySize), float64(ec.Step)/1e3)}
+		return nil, &UserReadableError{
+			Err: fmt.Errorf("not enough memory for processing %d data points across %d time series with %d points in each time series; "+
+				"total available memory for concurrent requests: %d bytes; "+
+				"requested memory: %d bytes; "+
+				"possible solutions are: reducing the number of matching time series; switching to node with more RAM; "+
+				"increasing -memory.allowedPercent; increasing `step` query arg (%gs)",
+				rollupPoints, timeseriesLen*len(rcs), pointsPerTimeseries, rml.MaxSize, uint64(rollupMemorySize), float64(ec.Step)/1e3),
+		}
 	}
 	defer rml.Put(uint64(rollupMemorySize))
 
@@ -1018,7 +1032,9 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
 		tss, err = evalRollupNoIncrementalAggregate(qt, funcName, keepMetricNames, rss, rcs, preFunc, sharedTimestamps)
 	}
 	if err != nil {
-		return nil, UserReadableError{Err: err}
+		return nil, &UserReadableError{
+			Err: err,
+		}
 	}
 	tss = mergeTimeseries(tssCached, tss, start, ec)
 	rollupResultCacheV.Put(qt, ec, expr, window, tss)
diff --git a/app/vmselect/promql/exec.go b/app/vmselect/promql/exec.go
index d8d2fe1c99..a605157ef4 100644
--- a/app/vmselect/promql/exec.go
+++ b/app/vmselect/promql/exec.go
@@ -26,14 +26,21 @@ var (
 		`This option is DEPRECATED in favor of {__graphite__="a.*.c"} syntax for selecting metrics matching the given Graphite metrics filter`)
 )
 
-// UserReadableError is a type of error which supposed
-// to be returned to the user without additional context.
+// UserReadableError is a type of error which is supposed to be returned to the user without additional context.
 type UserReadableError struct {
+	// Err is the error which needs to be returned to the user.
 	Err error
 }
 
+// Unwrap returns ure.Err.
+//
+// This is used by the standard errors package. See https://golang.org/pkg/errors
+func (ure *UserReadableError) Unwrap() error {
+	return ure.Err
+}
+
 // Error satisfies Error interface
-func (ure UserReadableError) Error() string {
+func (ure *UserReadableError) Error() string {
 	return ure.Err.Error()
 }
 
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index e95f5ec389..0eec544550 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -19,6 +19,7 @@ The following tip changes can be tested by building VictoriaMetrics components f
 
 * SECURITY: [vmalert](https://docs.victoriametrics.com/vmalert.html): do not expose `-remoteWrite.url`, `-remoteRead.url` and `-datasource.url` command-line flag values in logs and at `http://vmalert:8880/flags` page by default, since they may contain sensitive data such as auth keys. This aligns `vmalert` behaviour with [vmagent](https://docs.victoriametrics.com/vmagent.html), which doesn't expose `-remoteWrite.url` command-line flag value in logs and at `http://vmagent:8429/flags` page by default. Specify `-remoteWrite.showURL`, `-remoteRead.showURL` and `-datasource.showURL` command-line flags for showing values for the corresponding `-*.url` flags in logs. Thanks to @mble for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2965).
 
+* FEATURE: return shorter error messages to Grafana and to other clients requesting [/api/v1/query](https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries) and [/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries) endpoints. This makes these errors easier for humans to read. The long error message with full context is still written to logs.
 * FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve performance for heavy queries on systems with many CPU cores.
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for MX record types in [dns_sd_configs](https://docs.victoriametrics.com/sd_configs.html#dns_sd_configs) in the same way as Prometheus 2.38 [does](https://github.com/prometheus/prometheus/pull/10099).
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `__meta_kubernetes_service_port_number` meta-label for `role: service` in [kubernetes_sd_configs](https://docs.victoriametrics.com/sd_configs.html#kubernetes_sd_configs) in the same way as Prometheus 2.38 [does](https://github.com/prometheus/prometheus/pull/11002).