[chore]: Bump github.com/gin-gonic/gin from 1.9.0 to 1.9.1 (#1855)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
dependabot[bot] authored 2023-06-01 22:20:16 +01:00, committed by GitHub
parent 23705b31e4
commit 55aacaf4b0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
138 changed files with 27543 additions and 25484 deletions

14
go.mod
View File

@ -24,7 +24,7 @@ require (
github.com/gin-contrib/cors v1.4.0
github.com/gin-contrib/gzip v0.0.6
github.com/gin-contrib/sessions v0.0.5
github.com/gin-gonic/gin v1.9.0
github.com/gin-gonic/gin v1.9.1
github.com/go-fed/httpsig v1.1.0
github.com/go-playground/form/v4 v4.2.0
github.com/go-playground/validator/v10 v10.14.0
@ -82,7 +82,7 @@ require (
codeberg.org/gruf/go-maps v1.0.3 // indirect
codeberg.org/gruf/go-pools v1.1.0 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
github.com/bytedance/sonic v1.8.0 // indirect
github.com/bytedance/sonic v1.9.1 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/cilium/ebpf v0.9.1 // indirect
@ -108,7 +108,7 @@ require (
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850 // indirect
github.com/goccy/go-json v0.10.0 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
@ -132,13 +132,13 @@ require (
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
@ -154,7 +154,7 @@ require (
github.com/tdewolff/parse/v2 v2.6.5 // indirect
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.9 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/uptrace/opentelemetry-go-extra/otelsql v0.1.21 // indirect
github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
@ -162,7 +162,7 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 // indirect
go.opentelemetry.io/otel/metric v0.37.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/tools v0.6.0 // indirect

27
go.sum
View File

@ -102,8 +102,8 @@ github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd3
github.com/buckket/go-blurhash v1.1.0 h1:X5M6r0LIvwdvKiUtiNcRL2YlmOfMzYobI3VCKCZc9Do=
github.com/buckket/go-blurhash v1.1.0/go.mod h1:aT2iqo5W9vu9GpyoLErKfTHwgODsZp3bQfXjXJUxNb8=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.8.0 h1:ea0Xadu+sHlu7x5O3gKhRpQ1IKiMrSiHttPF0ybECuA=
github.com/bytedance/sonic v1.8.0/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@ -201,8 +201,8 @@ github.com/gin-contrib/sessions v0.0.5/go.mod h1:vYAuaUPqie3WUSsft6HUlCjlwwoJQs9
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8=
github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.0.2/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=
github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=
@ -239,8 +239,8 @@ github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850 h1:PSPmmucxGiFBtbQcttHTUc4LQ3P09AW+ldO2qspyKdY=
github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.0 h1:mXKd9Qw4NuzShiRlOXKews24ufknHO7gx30lsDyokKA=
github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
@ -440,8 +440,8 @@ github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/microcosm-cc/bluemonday v1.0.24 h1:NGQoPtwGVcbGkKfvyYk1yRqknzBuoMiUrO6R7uFTPlw=
github.com/microcosm-cc/bluemonday v1.0.24/go.mod h1:ArQySAMps0790cHSkdPEJ7bGkF2VePWH773hsJNSHf8=
@ -476,8 +476,8 @@ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/
github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e h1:s2RNOM/IGdY0Y6qfTeUKhDawdHDpK9RGBdx80qN4Ttw=
github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e/go.mod h1:nBdnFKj15wFbf94Rwfq4m30eAcyY9V/IyKAGQFtqkW0=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@ -588,8 +588,8 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.9 h1:rmenucSohSTiyL09Y+l2OCk+FrMxGMzho2+tjr5ticU=
github.com/ugorji/go/codec v1.2.9/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA=
github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI=
github.com/uptrace/bun v1.1.13 h1:IrxlIJHzCHFwmIzx66A9vi6qx8rHsHFiiT9LqlafHZw=
@ -671,8 +671,9 @@ go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670 h1:18EFjUmQOcUvxNYSkA6jO9VAiXCnxFY6NyDX0bHDmkU=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=

View File

@ -1,48 +0,0 @@
# Introduction to Sonic
## Background
According to overall profiling of production services at ByteDance, we found that the overhead of JSON serialization and deserialization is unexpectedly high: in total it is close to 10% of CPU time, and in extreme cases it accounts for more than 40%. Therefore, **the performance of the JSON library is a key factor in improving machine utilization**.
## Research
We conducted a series of surveys and benchmarks of open-source JSON libraries for Golang, but the result was disappointing: **there is no silver bullet**. First, none of them stays among the top three across the various business scenarios. Even the most widely used [json-iterator](https://github.com/json-iterator/go) degrades severely on generic (no-schema) or large-volume JSON serialization and deserialization. Second, compared with JSON libraries written in other languages, the Go ones are generally much slower; for example, [Simdjson-go](https://github.com/minio/simdjson-go) has roughly 50% lower decoding performance than [simdjson](https://github.com/simdjson/simdjson). What's more, we barely found any JSON library that provides APIs to modify the underlying values.
Therefore, we decided to **develop a brand-new JSON library with high performance as well as wide applicability**.
## Thinking
Before starting our design, we need to figure out some questions:
### Why is Json-iterator faster than Standard Library?
First of all, the **schema-based processing mechanism** used by the standard library is commendable: the parser can obtain meta information in advance while scanning, thereby shortening the time spent on branch selection. However, the original implementation did not make good use of this mechanism; instead, **it spent a lot of time using reflection to obtain the schema's meta info**. json-iterator's approach is to interpret a structure as field-by-field encoding and decoding functions, then assemble and cache them, minimizing the performance loss caused by reflection. But does that settle the matter once and for all? No. In practical tests we found that **the deeper and larger the input JSON got, the smaller the gap between json-iterator and other libraries became**, until json-iterator was eventually surpassed:
![Scalability](introduction-1.png)
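For concreteness, here is a minimal sketch of the idea described above: interpret a struct as a list of per-field encoding closures, assemble them once, and cache them per type. (This is an illustration only, with invented names, not json-iterator's actual code.)
```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
	"sync"
)

// fieldEncoder appends one encoded struct field to buf.
type fieldEncoder func(buf []byte, v reflect.Value) []byte

// codecCache maps reflect.Type -> []fieldEncoder, so reflection runs once per type.
var codecCache sync.Map

func compile(t reflect.Type) []fieldEncoder {
	if cached, ok := codecCache.Load(t); ok {
		return cached.([]fieldEncoder)
	}
	var encs []fieldEncoder
	for i := 0; i < t.NumField(); i++ {
		i, name := i, t.Field(i).Name
		encs = append(encs, func(buf []byte, v reflect.Value) []byte {
			buf = append(buf, '"')
			buf = append(buf, name...)
			buf = append(buf, '"', ':')
			switch f := v.Field(i); f.Kind() {
			case reflect.Int:
				return strconv.AppendInt(buf, f.Int(), 10)
			case reflect.String:
				return strconv.AppendQuote(buf, f.String())
			default:
				return append(buf, "null"...)
			}
		})
	}
	codecCache.Store(t, encs)
	return encs
}

func encode(v interface{}) []byte {
	rv := reflect.ValueOf(v)
	buf := []byte{'{'}
	for i, enc := range compile(rv.Type()) {
		if i > 0 {
			buf = append(buf, ',')
		}
		buf = enc(buf, rv)
	}
	return append(buf, '}')
}

func main() {
	type user struct {
		ID   int
		Name string
	}
	fmt.Println(string(encode(user{ID: 1, Name: "ada"}))) // {"ID":1,"Name":"ada"}
}
```
Every value still goes through a closure, an indirect call that cannot be inlined, which is exactly the overhead discussed next.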
The reason is that **this implementation turns into a large number of interface wrappers and function calls**, which bring function-call overhead:
1. **Calling through an interface involves dynamic addressing of the itab**
2. **Assembly functions cannot be inlined**, and Golang's function-call performance is poor (parameters are not passed in registers)
#### Is there a way to avoid the function-call overhead of dynamic assembly?
The first thing we considered was code generation, as in [easyjson](https://github.com/mailru/easyjson), but it comes with **a schema dependency and a loss of convenience**. To achieve a true drop-in replacement for the standard library, we turned to another technology: **[JIT](https://en.wikipedia.org/wiki/Jit) (just-in-time compiling)**. The compiled codec is a single integrated function, which greatly reduces function calls while preserving flexibility.
### Why is Simdjson-go not fast enough?
[SIMD](https://en.wikipedia.org/wiki/SIMD) (Single-Instruction-Multiple-Data) is a special set of CPU instructions for the parallel processing of vectorized data. At present it is supported by most CPUs and widely used in image processing and big-data computing. Undoubtedly, SIMD is useful in JSON processing (itoa, char-search and so on are all suitable scenarios). We can see that simdjson-go is very competitive for large JSON (>100KB). However, for extremely small or irregular strings, **the extra load operations required by SIMD lead to performance degradation**. Therefore, we need to decide carefully, via branch prediction, which scenarios should use SIMD and which should not (for example, when the string length is less than 16 bytes).
The second problem comes from the Go compiler itself. In order to ensure the compilation speed, **Golang does very little optimization work during the compilation phase** and cannot directly use compiler backends such as [LLVM](https://en.wikipedia.org/wiki/LLVM) (Low-Level Virtual Machine) for optimization.
So, **can some crucial calculation functions be written in another language with higher execution efficiency**?
C/Clang is an ideal compilation tool (it integrates LLVM internally), but the key question is how to embed the optimized assembly into Golang.
### How to use Gjson well?
We also found that [gjson](https://github.com/tidwall/gjson) has a huge advantage in single-key lookup scenarios. This is because its lookup is implemented with a **lazy-load mechanism**, which subtly skips over the values it passes by and thus avoids a lot of unnecessary parsing. Practical use has shown that exploiting this feature in production does bring benefits. But when it comes to multi-key lookup, gjson performs even worse than the standard library, which is a side effect of its skipping mechanism: **searching for the same path repeatedly leads to repeated parsing** (skipping is itself a lightweight form of parsing). Therefore, accurately adapting to the practical scenario is the key.
## Design
Based on the answers to the above questions, our design follows naturally:
1. To eliminate the function-call overhead caused by dynamically assembled codecs, **`JIT` techniques are used to assemble opcodes (asm) corresponding to the schema at runtime**, which are finally cached in off-heap memory in the form of Golang functions.
2. For practical scenarios where big data and small data coexist, we **use pre-conditional judgment** (string size, floating precision, etc.) **to combine `SIMD` with scalar instructions** to achieve the best adaptation.
3. To compensate for the Go compiler's limited optimization, we decided to **write and compile the core computational functions in `C/Clang`**, and **developed a set of [asm2asm](https://github.com/chenzhuoyu/asm2asm) tools to translate the fully optimized x86 assembly into plan9** and finally load it into the Golang runtime.
4. Given the big speed gap between parsing and skipping, the **`lazy-load` mechanism** is of course used in our AST parser, but in **a more adaptive and efficient way to reduce the overhead of multi-key queries**.
![design](introduction-2.png)
In detail, we conducted some further optimization:
1. Since native-asm functions cannot be inlined in Golang, we found that their call cost even exceeded the improvement brought by C compiler optimizations. So we reimplemented a set of lightweight function calls in our JIT:
- `Global-function-table + static offset` for calling instruction
- **Pass parameters using registers**
2. `sync.Map` was used to cache the codecs at first, but for our **quasi-static** (reads far outnumber writes), **few-element** (usually no more than a few dozen) scenario its performance is not optimal, so we reimplemented a high-performance, concurrency-safe cache with `open-addressing-hash + RCU` techniques.
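A minimal sketch of the read-mostly, copy-on-write caching idea (readers take one atomic load with no locks; the rare writers copy the map and swap it). This only illustrates the RCU-style approach under simplified assumptions and is not sonic's actual cache:
```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// rcuCache is a quasi-static cache: reads vastly outnumber writes.
type rcuCache struct {
	m  atomic.Value // holds a map[string]int that is treated as immutable
	mu sync.Mutex   // serializes the rare writers
}

func newRCUCache() *rcuCache {
	c := &rcuCache{}
	c.m.Store(map[string]int{})
	return c
}

// Get is lock-free: a single atomic load plus a map lookup.
func (c *rcuCache) Get(k string) (int, bool) {
	v, ok := c.m.Load().(map[string]int)[k]
	return v, ok
}

// Put copies the current map, adds the entry, then publishes the new map.
func (c *rcuCache) Put(k string, v int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	old := c.m.Load().(map[string]int)
	next := make(map[string]int, len(old)+1)
	for key, val := range old {
		next[key] = val
	}
	next[k] = v
	c.m.Store(next) // readers atomically switch to the new snapshot
}

func main() {
	c := newRCUCache()
	c.Put("codec:User", 42)
	fmt.Println(c.Get("codec:User")) // 42 true
}
```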

View File

@ -1,5 +1,7 @@
# Sonic
English | [中文](README_ZH_CN.md)
A blazingly fast JSON serializing & deserializing library, accelerated by JIT (just-in-time compiling) and SIMD (single-instruction-multiple-data).
## Requirement
@ -76,14 +78,14 @@ BenchmarkSetOne_Parallel_Sjson-16 18194 ns/op 715.7
BenchmarkSetOne_Parallel_Jsoniter-16 33560 ns/op 388.05 MB/s 45892 B/op 964 allocs/op
```
- [Small](https://github.com/bytedance/sonic/blob/main/testdata/small.go) (400B, 11 keys, 3 layers)
![small benchmarks](bench-small.png)
![small benchmarks](./docs/imgs/bench-small.png)
- [Large](https://github.com/bytedance/sonic/blob/main/testdata/twitter.json) (635KB, 10000+ key, 6 layers)
![large benchmarks](bench-large.png)
![large benchmarks](./docs/imgs/bench-large.png)
See [bench.sh](https://github.com/bytedance/sonic/blob/main/bench.sh) for benchmark codes.
## How it works
See [INTRODUCTION.md](INTRODUCTION.md).
See [INTRODUCTION.md](./docs/INTRODUCTION.md).
## Usage
@ -104,28 +106,29 @@ err := sonic.Unmarshal(output, &data)
Sonic supports decoding json from an `io.Reader` or encoding objects into an `io.Writer`, aiming to handle multiple values as well as reduce memory consumption.
- encoder
```go
import "github.com/bytedance/sonic/encoder"
var o1 = map[string]interface{}{
"a": "b",
}
var o2 = 1
var w = bytes.NewBuffer(nil)
var enc = encoder.NewStreamEncoder(w)
var enc = sonic.ConfigDefault.NewEncoder(w)
enc.Encode(o1)
enc.Encode(o2)
println(w.String()) // "{"a":"b"}\n1"
fmt.Println(w.String())
// Output:
// {"a":"b"}
// 1
```
- decoder
```go
import "github.com/bytedance/sonic/decoder"
var o = map[string]interface{}{}
var r = strings.NewReader(`{"a":"b"}{"1":"2"}`)
var dec = decoder.NewStreamDecoder(r)
var dec = sonic.ConfigDefault.NewDecoder(r)
dec.Decode(&o)
dec.Decode(&o)
fmt.Printf("%+v", o) // map[1:2 a:b]
fmt.Printf("%+v", o)
// Output:
// map[1:2 a:b]
```
### Use Number/Use Int64

382
vendor/github.com/bytedance/sonic/README_ZH_CN.md generated vendored Normal file
View File

@ -0,0 +1,382 @@
# Sonic
[English](README.md) | Chinese
A blazingly fast JSON serializing & deserializing library, accelerated by JIT (just-in-time compiling) and SIMD (single-instruction-multiple-data).
## Requirement
- Go 1.15~1.20
- Linux/MacOS/Windows
- Amd64 ARCH
## Features
- Runtime object binding without code generation
- Complete APIs for JSON value manipulation
- Fast, fast, fast!
## Benchmarks
For **all sizes** of json and **all usage scenarios**, **Sonic performs best**.
- [Medium](https://github.com/bytedance/sonic/blob/main/decoder/testdata_test.go#L19) (13KB, 300+ keys, 6 layers)
```powershell
goversion: 1.17.1
goos: darwin
goarch: amd64
cpu: Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz
BenchmarkEncoder_Generic_Sonic-16 32393 ns/op 402.40 MB/s 11965 B/op 4 allocs/op
BenchmarkEncoder_Generic_Sonic_Fast-16 21668 ns/op 601.57 MB/s 10940 B/op 4 allocs/op
BenchmarkEncoder_Generic_JsonIter-16 42168 ns/op 309.12 MB/s 14345 B/op 115 allocs/op
BenchmarkEncoder_Generic_GoJson-16 65189 ns/op 199.96 MB/s 23261 B/op 16 allocs/op
BenchmarkEncoder_Generic_StdLib-16 106322 ns/op 122.60 MB/s 49136 B/op 789 allocs/op
BenchmarkEncoder_Binding_Sonic-16 6269 ns/op 2079.26 MB/s 14173 B/op 4 allocs/op
BenchmarkEncoder_Binding_Sonic_Fast-16 5281 ns/op 2468.16 MB/s 12322 B/op 4 allocs/op
BenchmarkEncoder_Binding_JsonIter-16 20056 ns/op 649.93 MB/s 9488 B/op 2 allocs/op
BenchmarkEncoder_Binding_GoJson-16 8311 ns/op 1568.32 MB/s 9481 B/op 1 allocs/op
BenchmarkEncoder_Binding_StdLib-16 16448 ns/op 792.52 MB/s 9479 B/op 1 allocs/op
BenchmarkEncoder_Parallel_Generic_Sonic-16 6681 ns/op 1950.93 MB/s 12738 B/op 4 allocs/op
BenchmarkEncoder_Parallel_Generic_Sonic_Fast-16 4179 ns/op 3118.99 MB/s 10757 B/op 4 allocs/op
BenchmarkEncoder_Parallel_Generic_JsonIter-16 9861 ns/op 1321.84 MB/s 14362 B/op 115 allocs/op
BenchmarkEncoder_Parallel_Generic_GoJson-16 18850 ns/op 691.52 MB/s 23278 B/op 16 allocs/op
BenchmarkEncoder_Parallel_Generic_StdLib-16 45902 ns/op 283.97 MB/s 49174 B/op 789 allocs/op
BenchmarkEncoder_Parallel_Binding_Sonic-16 1480 ns/op 8810.09 MB/s 13049 B/op 4 allocs/op
BenchmarkEncoder_Parallel_Binding_Sonic_Fast-16 1209 ns/op 10785.23 MB/s 11546 B/op 4 allocs/op
BenchmarkEncoder_Parallel_Binding_JsonIter-16 6170 ns/op 2112.58 MB/s 9504 B/op 2 allocs/op
BenchmarkEncoder_Parallel_Binding_GoJson-16 3321 ns/op 3925.52 MB/s 9496 B/op 1 allocs/op
BenchmarkEncoder_Parallel_Binding_StdLib-16 3739 ns/op 3486.49 MB/s 9480 B/op 1 allocs/op
BenchmarkDecoder_Generic_Sonic-16 66812 ns/op 195.10 MB/s 57602 B/op 723 allocs/op
BenchmarkDecoder_Generic_Sonic_Fast-16 54523 ns/op 239.07 MB/s 49786 B/op 313 allocs/op
BenchmarkDecoder_Generic_StdLib-16 124260 ns/op 104.90 MB/s 50869 B/op 772 allocs/op
BenchmarkDecoder_Generic_JsonIter-16 91274 ns/op 142.81 MB/s 55782 B/op 1068 allocs/op
BenchmarkDecoder_Generic_GoJson-16 88569 ns/op 147.17 MB/s 66367 B/op 973 allocs/op
BenchmarkDecoder_Binding_Sonic-16 32557 ns/op 400.38 MB/s 28302 B/op 137 allocs/op
BenchmarkDecoder_Binding_Sonic_Fast-16 28649 ns/op 455.00 MB/s 24999 B/op 34 allocs/op
BenchmarkDecoder_Binding_StdLib-16 111437 ns/op 116.97 MB/s 10576 B/op 208 allocs/op
BenchmarkDecoder_Binding_JsonIter-16 35090 ns/op 371.48 MB/s 14673 B/op 385 allocs/op
BenchmarkDecoder_Binding_GoJson-16 28738 ns/op 453.59 MB/s 22039 B/op 49 allocs/op
BenchmarkDecoder_Parallel_Generic_Sonic-16 12321 ns/op 1057.91 MB/s 57233 B/op 723 allocs/op
BenchmarkDecoder_Parallel_Generic_Sonic_Fast-16 10644 ns/op 1224.64 MB/s 49362 B/op 313 allocs/op
BenchmarkDecoder_Parallel_Generic_StdLib-16 57587 ns/op 226.35 MB/s 50874 B/op 772 allocs/op
BenchmarkDecoder_Parallel_Generic_JsonIter-16 38666 ns/op 337.12 MB/s 55789 B/op 1068 allocs/op
BenchmarkDecoder_Parallel_Generic_GoJson-16 30259 ns/op 430.79 MB/s 66370 B/op 974 allocs/op
BenchmarkDecoder_Parallel_Binding_Sonic-16 5965 ns/op 2185.28 MB/s 27747 B/op 137 allocs/op
BenchmarkDecoder_Parallel_Binding_Sonic_Fast-16 5170 ns/op 2521.31 MB/s 24715 B/op 34 allocs/op
BenchmarkDecoder_Parallel_Binding_StdLib-16 27582 ns/op 472.58 MB/s 10576 B/op 208 allocs/op
BenchmarkDecoder_Parallel_Binding_JsonIter-16 13571 ns/op 960.51 MB/s 14685 B/op 385 allocs/op
BenchmarkDecoder_Parallel_Binding_GoJson-16 10031 ns/op 1299.51 MB/s 22111 B/op 49 allocs/op
BenchmarkGetOne_Sonic-16 3276 ns/op 3975.78 MB/s 24 B/op 1 allocs/op
BenchmarkGetOne_Gjson-16 9431 ns/op 1380.81 MB/s 0 B/op 0 allocs/op
BenchmarkGetOne_Jsoniter-16 51178 ns/op 254.46 MB/s 27936 B/op 647 allocs/op
BenchmarkGetOne_Parallel_Sonic-16 216.7 ns/op 60098.95 MB/s 24 B/op 1 allocs/op
BenchmarkGetOne_Parallel_Gjson-16 1076 ns/op 12098.62 MB/s 0 B/op 0 allocs/op
BenchmarkGetOne_Parallel_Jsoniter-16 17741 ns/op 734.06 MB/s 27945 B/op 647 allocs/op
BenchmarkSetOne_Sonic-16 9571 ns/op 1360.61 MB/s 1584 B/op 17 allocs/op
BenchmarkSetOne_Sjson-16 36456 ns/op 357.22 MB/s 52180 B/op 9 allocs/op
BenchmarkSetOne_Jsoniter-16 79475 ns/op 163.86 MB/s 45862 B/op 964 allocs/op
BenchmarkSetOne_Parallel_Sonic-16 850.9 ns/op 15305.31 MB/s 1584 B/op 17 allocs/op
BenchmarkSetOne_Parallel_Sjson-16 18194 ns/op 715.77 MB/s 52247 B/op 9 allocs/op
BenchmarkSetOne_Parallel_Jsoniter-16 33560 ns/op 388.05 MB/s 45892 B/op 964 allocs/op
```
- [Small](https://github.com/bytedance/sonic/blob/main/testdata/small.go) (400B, 11 keys, 3 layers)
![small benchmarks](./docs/imgs/bench-small.png)
- [Large](https://github.com/bytedance/sonic/blob/main/testdata/twitter.json) (635KB, 10000+ keys, 6 layers)
![large benchmarks](./docs/imgs/bench-large.png)
See [bench.sh](https://github.com/bytedance/sonic/blob/main/bench.sh) for the benchmark code.
## How it works
See [INTRODUCTION_ZH_CN.md](./docs/INTRODUCTION_ZH_CN.md).
## Usage
### Marshal/Unmarshal
The default behavior is mostly consistent with `encoding/json`, except that HTML escaping (see [Escape HTML](https://github.com/bytedance/sonic/blob/main/README.md#escape-html)) and the `SortKeys` feature (see [Sort Keys](https://github.com/bytedance/sonic/blob/main/README.md#sort-keys)) do **not** follow [RFC8259](https://datatracker.ietf.org/doc/html/rfc8259).
```go
import "github.com/bytedance/sonic"
var data YourSchema
// Marshal
output, err := sonic.Marshal(&data)
// Unmarshal
err := sonic.Unmarshal(output, &data)
```
### Streaming IO
Sonic supports decoding json from an `io.Reader` or encoding objects into an `io.Writer`, in order to handle multiple values and reduce memory consumption.
- encoder
```go
var o1 = map[string]interface{}{
"a": "b",
}
var o2 = 1
var w = bytes.NewBuffer(nil)
var enc = sonic.ConfigDefault.NewEncoder(w)
enc.Encode(o1)
enc.Encode(o2)
fmt.Println(w.String())
// Output:
// {"a":"b"}
// 1
```
- decoder
```go
var o = map[string]interface{}{}
var r = strings.NewReader(`{"a":"b"}{"1":"2"}`)
var dec = sonic.ConfigDefault.NewDecoder(r)
dec.Decode(&o)
dec.Decode(&o)
fmt.Printf("%+v", o)
// Output:
// map[1:2 a:b]
```
### Use Number / Use Int64
```go
import "github.com/bytedance/sonic/decoder"
var input = `1`
var data interface{}
// default float64
dc := decoder.NewDecoder(input)
dc.Decode(&data) // data == float64(1)
// use json.Number
dc = decoder.NewDecoder(input)
dc.UseNumber()
dc.Decode(&data) // data == json.Number("1")
// use int64
dc = decoder.NewDecoder(input)
dc.UseInt64()
dc.Decode(&data) // data == int64(1)
root, err := sonic.GetFromString(input)
// Get json.Number
jn := root.Number()
jm := root.InterfaceUseNumber().(json.Number) // jn == jm
// Get float64
fn := root.Float64()
fm := root.Interface().(float64) // jn == jm
```
### Sort Keys
Considering the performance loss of sorting (around 10%), sonic does not enable this feature by default. If your component depends on this behavior (like [zstd](https://github.com/facebook/zstd)), you can follow the example below:
```go
import "github.com/bytedance/sonic"
import "github.com/bytedance/sonic/encoder"
// Binding map only
m := map[string]interface{}{}
v, err := encoder.Encode(m, encoder.SortMapKeys)
// Or ast.Node.SortKeys() before marshal
root, err := sonic.Get(JSON)
err = root.SortKeys()
```
### Escape HTML
Considering the performance loss (around 15%), sonic does not enable this feature by default. You can use the `encoder.EscapeHTML` option to turn it on (consistent with the behavior of `encoding/json.HTMLEscape`).
```go
import "github.com/bytedance/sonic"
v := map[string]string{"&&":"<>"}
ret, err := Encode(v, EscapeHTML) // ret == `{"\u0026\u0026":{"X":"\u003c\u003e"}}`
```
### Compact Format
Sonic encodes primitive objects (`struct`, `map`, etc.) as compact-format JSON by default, except when encoding via `json.RawMessage` or `json.Marshaler`: sonic ensures that the output JSON is valid, but, for performance reasons, does **not** compact it. We provide the option `encoder.CompactMarshaler` to add this step.
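For illustration, a small hedged sketch of the option (the `Wrapper` type is invented; the expected outputs follow from the description above):
```go
import (
	"encoding/json"

	"github.com/bytedance/sonic/encoder"
)

type Wrapper struct {
	Data json.RawMessage
}

v := Wrapper{Data: json.RawMessage(`{ "a" : 1 }`)}

// Default: the RawMessage is validated but its whitespace is kept.
loose, _ := encoder.Encode(v, 0) // {"Data":{ "a" : 1 }}

// With CompactMarshaler the embedded JSON is compacted as well.
compact, _ := encoder.Encode(v, encoder.CompactMarshaler) // {"Data":{"a":1}}

_, _ = loose, compact
```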
### Print Error
If the input JSON contains invalid syntax, sonic returns a `decoder.SyntaxError`, which supports pretty-printing of the error position.
```go
import "github.com/bytedance/sonic"
import "github.com/bytedance/sonic/decoder"
var data interface{}
err := sonic.UnmarshalString("[[[}]]", &data)
if err != nil {
/* One line by default */
println(err.Error()) // "Syntax error at index 3: invalid char\n\n\t[[[}]]\n\t...^..\n"
/* Pretty print */
if e, ok := err.(decoder.SyntaxError); ok {
/*Syntax error at index 3: invalid char
[[[}]]
...^..
*/
print(e.Description())
} else if me, ok := err.(*decoder.MismatchTypeError); ok {
// decoder.MismatchTypeError is new to Sonic v1.6.0
print(me.Description())
}
}
```
#### Mismatched Types [Sonic v1.6.0]
If there is a value with a **mismatched type** for a given key, sonic returns a `decoder.MismatchTypeError` (if there are several, only the last one is reported), but it still skips the wrong value and decodes the next JSON.
```go
import "github.com/bytedance/sonic"
import "github.com/bytedance/sonic/decoder"
var data = struct{
A int
B int
}{}
err := UnmarshalString(`{"A":"1","B":1}`, &data)
println(err.Error()) // Mismatch type int with value string "at index 5: mismatched type with value\n\n\t{\"A\":\"1\",\"B\":1}\n\t.....^.........\n"
fmt.Printf("%+v", data) // {A:0 B:1}
```
### `Ast.Node`
Sonic/ast.Node is a completely self-contained JSON abstract syntax tree library. It implements serialization and deserialization, and provides robust APIs for obtaining and modifying generic data.
#### Get/Index
Search a JSON fragment by the given path; each path element must be a non-negative integer, a string, or `nil`:
```go
import "github.com/bytedance/sonic"
input := []byte(`{"key1":[{},{"key2":{"key3":[1,2,3]}}]}`)
// no path, returns entire json
root, err := sonic.Get(input)
raw := root.Raw() // == string(input)
// multiple paths
root, err := sonic.Get(input, "key1", 1, "key2")
sub := root.Get("key3").Index(2).Int64() // == 3
```
**Note**: Because `Index()` uses an offset to locate data, it is much faster than `Get()`, which scans, so use `Index()` whenever possible. Sonic also provides another API, `IndexOrGet()`, which starts from the offset but also verifies that the key matches (see the sketch below).
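A short, hedged sketch of the difference (illustrative only; `IndexOrGet()` is listed in the APIs section below):
```go
import "github.com/bytedance/sonic"

input := []byte(`{"key1":[{},{"key2":{"key3":[1,2,3]}}]}`)
root, _ := sonic.Get(input, "key1", 1)

// Index(0) locates the child purely by offset: fastest, but it does not
// check which key actually sits at that position.
fast := root.Index(0)

// IndexOrGet(0, "key2") starts from the same offset but also verifies that
// the key is "key2", falling back to a key search when it is not.
safe := root.IndexOrGet(0, "key2")

_, _ = fast, safe
```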
#### Set/Unset
Use `Set()` / `Unset()` to modify the json content:
```go
import "github.com/bytedance/sonic"
// Set
exist, err := root.Set("key4", NewBool(true)) // exist == false
alias1 := root.Get("key4")
println(alias1.Valid()) // true
alias2 := root.Index(1)
println(alias1 == alias2) // true
// Unset
exist, err := root.UnsetByIndex(1) // exist == true
println(root.Get("key4").Check()) // "value not exist"
```
#### Serialize
To encode an `ast.Node` as json, use `MarshalJson()` or `json.Marshal()` (a pointer to the node must be passed):
```go
import (
"encoding/json"
"github.com/bytedance/sonic"
)
buf, err := root.MarshalJson()
println(string(buf)) // {"key1":[{},{"key2":{"key3":[1,2,3]}}]}
exp, err := json.Marshal(&root) // WARN: use pointer
println(string(buf) == string(exp)) // true
```
#### APIs
- validity check: `Check()`, `Error()`, `Valid()`, `Exist()`
- indexing: `Index()`, `Get()`, `IndexPair()`, `IndexOrGet()`, `GetByPath()`
- conversion to go built-in types: `Int64()`, `Float64()`, `String()`, `Number()`, `Bool()`, `Map[UseNumber|UseNode]()`, `Array[UseNumber|UseNode]()`, `Interface[UseNumber|UseNode]()`
- packing go values: `NewRaw()`, `NewNumber()`, `NewNull()`, `NewBool()`, `NewString()`, `NewObject()`, `NewArray()`
- iteration: `Values()`, `Properties()`, `ForEach()`, `SortKeys()`
- modification: `Set()`, `SetByIndex()`, `Add()`
## Compatibility
Because developing high-performance code is hard, Sonic does **not** guarantee support for every environment. For developers who build applications with Sonic in different environments, we have the following recommendations:
- Developing on **Mac M1**: make sure Rosetta 2 is installed on your machine and set `GOARCH=amd64` when building. Rosetta 2 can automatically translate x86 binaries to arm64 binaries and run x86 applications on Mac M1.
- Developing on **Linux arm64**: you can install qemu and use the `qemu-x86_64 -cpu max` command to translate x86 binaries to arm64; qemu achieves a translation effect similar to Rosetta 2 on Mac M1.
For developers who want to use sonic without qemu, or who need JSON handling strictly consistent with `encoding/json`, we provide some compatibility APIs as `sonic.API` (see the sketch after this list):
- `ConfigDefault`: sonic's default config (`EscapeHTML=false`, `SortKeys=false`, etc.) on environments where sonic is supported. Elsewhere it behaves like `encoding/json` with the corresponding config, and some options, such as `SortKeys=false`, will be invalid.
- `ConfigStd`: a std-compatible config (`EscapeHTML=true`, `SortKeys=true`, etc.) on environments where sonic is supported. Elsewhere it behaves the same as `encoding/json`.
- `ConfigFastest`: the fastest config (`NoQuoteTextMarshaler=true`) on environments where sonic is supported. Elsewhere it behaves like `encoding/json` with the corresponding config, and some options will be invalid.
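A brief sketch of picking one of these configs (a hedged example; the `User` type is invented):
```go
import "github.com/bytedance/sonic"

type User struct {
	Name string `json:"name"`
}

// ConfigStd mirrors encoding/json (EscapeHTML, SortKeys, ...);
// ConfigFastest trades those guarantees for speed.
stdOut, _ := sonic.ConfigStd.Marshal(User{Name: "<ada>"})
fastOut, _ := sonic.ConfigFastest.Marshal(User{Name: "<ada>"})

var u User
_ = sonic.ConfigDefault.Unmarshal(stdOut, &u)
_ = fastOut
```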
## Tips
### Pretouch
Since Sonic uses [golang-asm](https://github.com/twitchyliquid64/golang-asm) as its JIT assembler, which is not really suitable for runtime compiling, the first run on a large schema may cause request timeouts or even process OOM. For better stability, we suggest calling `Pretouch()` before running `Marshal()/Unmarshal()` for large schemas or in memory-limited applications.
```go
import (
"reflect"
"github.com/bytedance/sonic"
"github.com/bytedance/sonic/option"
)
func init() {
var v HugeStruct
// For most large types (nesting depth <= option.DefaultMaxInlineDepth)
err := sonic.Pretouch(reflect.TypeOf(v))
// with more CompileOption...
err := sonic.Pretouch(reflect.TypeOf(v),
// If the type is too deep nesting (nesting depth > option.DefaultMaxInlineDepth),
// you can set compile recursive loops in Pretouch for better stability in JIT.
option.WithCompileRecursiveDepth(loop),
// For a large nested struct, try to set a smaller depth to reduce compiling time.
option.WithCompileMaxInlineDepth(depth),
)
}
```
### Copy String
When decoding **strings without escape characters**, sonic references the original JSON buffer instead of copying into a new one. This helps CPU performance, but it may keep the whole JSON buffer in memory while the decoded objects are still in use. In practice we found that the extra memory introduced by referencing the JSON buffer is usually 20% to 80% of the decoded objects; once an application holds those objects for a long time (for example, caching them for reuse), the server's memory usage may grow. We provide the option `decoder.CopyString()` for users who prefer not to reference the JSON buffer, which may reduce CPU performance to some extent.
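A small sketch of opting into copying (using the `decoder.CopyString()` switch described above; `hugeJSON` stands for your own input string):
```go
import "github.com/bytedance/sonic/decoder"

var val map[string]interface{}

dc := decoder.NewDecoder(hugeJSON) // hugeJSON: your large input string
dc.CopyString()                    // copy string values instead of referencing hugeJSON
err := dc.Decode(&val)
```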
### Pass string or []byte?
To stay consistent with `encoding/json`, we provide APIs that take `[]byte` as the argument, but for safety a string-to-bytes copy is performed at the same time, which can hurt performance when the original JSON is very large. Therefore you can use `UnmarshalString()` and `GetFromString()` to pass a string, as long as your original data is a string or a **zero-copy type conversion** is safe for your `[]byte`. We also provide the `MarshalString()` API for a **zero-copy type conversion** of the encoded JSON `[]byte`, which is safe because sonic's output bytes are always duplicated and unique.
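For example (a hedged sketch; `YourSchema` is the same placeholder type used earlier):
```go
import "github.com/bytedance/sonic"

var data YourSchema

// Pass strings directly to avoid the defensive string-to-bytes copy.
err := sonic.UnmarshalString(`{"a":"b"}`, &data)

// MarshalString returns a string built from sonic's freshly allocated output
// bytes, so the zero-copy conversion behind it is safe.
out, err2 := sonic.MarshalString(&data)
_, _, _ = err, err2, out
```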
### Accelerate `encoding.TextMarshaler`
To ensure data safety, `sonic.Encoder` quotes and escapes strings coming from the `encoding.TextMarshaler` interface by default, which can cause a large performance loss if most of your data is of that form. We provide the `encoder.NoQuoteTextMarshaler` option to skip these operations, but you **must** guarantee that their output strings are already escaped and quoted according to [RFC8259](https://datatracker.ietf.org/doc/html/rfc8259).
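A hedged sketch (the `Tag` type is invented; its `MarshalText` must already produce quoted, RFC8259-escaped output for the option to be safe):
```go
import "github.com/bytedance/sonic/encoder"

// Tag's MarshalText output is already a quoted JSON string, so sonic may
// embed it verbatim when NoQuoteTextMarshaler is set.
type Tag string

func (t Tag) MarshalText() ([]byte, error) {
	return []byte(`"` + string(t) + `"`), nil // t is assumed to need no escaping
}

out, err := encoder.Encode(map[string]Tag{"lang": "go"}, encoder.NoQuoteTextMarshaler)
_, _ = out, err
```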
### Better performance for generic data
In **fully-parsed** scenarios, `Unmarshal()` performs better than `Get()`+`Node.Interface()`. But if you only have a partial schema for a specific part of the JSON, you can combine `Get()` and `Unmarshal()`:
```go
import "github.com/bytedance/sonic"
node, err := sonic.GetFromString(_TwitterJson, "statuses", 3, "user")
var user User // your partial schema...
err = sonic.UnmarshalString(node.Raw(), &user)
```
And if you have no schema at all, you can use `ast.Node` instead of `map` or `interface` as the container for generic data:
```go
import "github.com/bytedance/sonic"
root, err := sonic.GetFromString(_TwitterJson)
user := root.GetByPath("statuses", 3, "user") // === root.Get("status").Index(3).Get("user")
err = user.Check()
// err = user.LoadAll() // only call this when you want to use 'user' concurrently...
go someFunc(user)
```
Why? Because `ast.Node` uses an `array` to store its children:
- `Array` performs **much better** than `Map` when inserting (deserializing) and scanning (serializing) data;
- **Hashing** (`map[x]`) is not as efficient as **indexing** (`array[x]`), and `ast.Node` can use indexes on both arrays and objects;
- Using `Interface()` / `Map()` means sonic must parse all the underlying values, while `ast.Node` can parse them **on demand**.
**Note**: Because of `ast.Node`'s lazy-load design, it **cannot** directly guarantee concurrency safety, but you can call `Node.Load()` / `Node.LoadAll()` to make it concurrency-safe. This may bring some performance loss, but it is still more efficient than converting to `map` or `interface{}`.
## Community
Sonic is a subproject of [CloudWeGo](https://www.cloudwego.io/). We are committed to building a cloud-native ecosystem.

View File

@ -169,9 +169,10 @@ func UnmarshalString(buf string, val interface{}) error {
// and returns its representing ast.Node.
//
// Each path arg must be integer or string:
// - Integer means searching current node as array
// - String means searching current node as object
// - Integer is target index(>=0), means searching current node as array.
// - String is target key, means searching current node as object.
//
//
// Note, the api expects the json is well-formed at least,
// otherwise it may return unexpected result.
func Get(src []byte, path ...interface{}) (ast.Node, error) {

View File

@ -1,5 +1,20 @@
// +build amd64,go1.15,!go1.21
/*
* Copyright 2022 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ast
@ -17,10 +32,12 @@
var typeByte = rt.UnpackEface(byte(0)).Type
//go:nocheckptr
func quote(buf *[]byte, val string) {
*buf = append(*buf, '"')
if len(val) == 0 {
*buf = append(*buf, '"')
return
}
sp := rt.IndexChar(val, 0)
@ -99,7 +116,9 @@ func (self *Parser) skipFast() (int, types.ParsingError) {
}
func (self *Parser) getByPath(path ...interface{}) (int, types.ParsingError) {
start := native.GetByPath(&self.s, &self.p, &path)
fsm := types.NewStateMachine()
start := native.GetByPath(&self.s, &self.p, &path, fsm)
types.FreeStateMachine(fsm)
runtime.KeepAlive(path)
if start < 0 {
return self.p, types.ParsingError(-start)
@ -107,7 +126,6 @@ func (self *Parser) getByPath(path ...interface{}) (int, types.ParsingError) {
return start, 0
}
func (self *Searcher) GetByPath(path ...interface{}) (Node, error) {
var err types.ParsingError
var start int
@ -115,6 +133,13 @@ func (self *Searcher) GetByPath(path ...interface{}) (Node, error) {
self.parser.p = 0
start, err = self.parser.getByPath(path...)
if err != 0 {
// for compatibility with old version
if err == types.ERR_NOT_FOUND {
return Node{}, ErrNotExist
}
if err == types.ERR_UNSUPPORT_TYPE {
panic("path must be either int(>=0) or string")
}
return Node{}, self.parser.syntaxError(err)
}

View File

@ -1,5 +1,21 @@
// +build !amd64 go1.21
/*
* Copyright 2022 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ast
import (
@ -24,8 +40,6 @@ func unquote(src string) (string, types.ParsingError) {
return rt.Mem2Str(out), 0
}
func decodeBase64(src string) ([]byte, error) {
return base64.StdEncoding.DecodeString(src)
}
@ -53,7 +67,12 @@ func (self *Parser) skip() (int, types.ParsingError) {
}
func (self *Parser) skipFast() (int, types.ParsingError) {
return self.skip()
e, s := skipValueFast(self.s, self.p)
if e < 0 {
return self.p, types.ParsingError(-e)
}
self.p = e
return s, 0
}
func (self *Node) encodeInterface(buf *[]byte) error {
@ -70,17 +89,16 @@ func (self *Searcher) GetByPath(path ...interface{}) (Node, error) {
var err types.ParsingError
for _, p := range path {
switch p := p.(type) {
case int:
if err = self.parser.searchIndex(p); err != 0 {
if idx, ok := p.(int); ok && idx >= 0 {
if err = self.parser.searchIndex(idx); err != 0 {
return Node{}, self.parser.ExportError(err)
}
case string:
if err = self.parser.searchKey(p); err != 0 {
} else if key, ok := p.(string); ok {
if err = self.parser.searchKey(key); err != 0 {
return Node{}, self.parser.ExportError(err)
}
default:
panic("path must be either int or string")
} else {
panic("path must be either int(>=0) or string")
}
}

View File

@ -1,3 +1,19 @@
/*
* Copyright 2022 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ast
import (
@ -24,6 +40,7 @@ func isSpace(c byte) bool {
return (int(1<<c) & _blankCharsMask) != 0
}
//go:nocheckptr
func skipBlank(src string, pos int) int {
se := uintptr(rt.IndexChar(src, len(src)))
sp := uintptr(rt.IndexChar(src, pos))
@ -77,6 +94,7 @@ func decodeFalse(src string, pos int) (ret int) {
return -int(types.ERR_INVALID_CHAR)
}
//go:nocheckptr
func decodeString(src string, pos int) (ret int, v string) {
ret, ep := skipString(src, pos)
if ep == -1 {
@ -112,6 +130,7 @@ func isDigit(c byte) bool {
return c >= '0' && c <= '9'
}
//go:nocheckptr
func decodeInt64(src string, pos int) (ret int, v int64, err error) {
sp := uintptr(rt.IndexChar(src, pos))
ss := uintptr(sp)
@ -161,6 +180,7 @@ func isNumberChars(c byte) bool {
return (c >= '0' && c <= '9') || c == '+' || c == '-' || c == 'e' || c == 'E' || c == '.'
}
//go:nocheckptr
func decodeFloat64(src string, pos int) (ret int, v float64, err error) {
sp := uintptr(rt.IndexChar(src, pos))
ss := uintptr(sp)
@ -255,6 +275,7 @@ func decodeValue(src string, pos int) (ret int, v types.JsonState) {
}
}
//go:nocheckptr
func skipNumber(src string, pos int) (ret int) {
sp := uintptr(rt.IndexChar(src, pos))
se := uintptr(rt.IndexChar(src, len(src)))
@ -281,7 +302,7 @@ func skipNumber(src string, pos int) (ret int) {
} else if nextNeedDigit {
return -int(types.ERR_INVALID_CHAR)
} else if c == '.' {
if !lastIsDigit || pointer || sp == ss {
if !lastIsDigit || pointer || exponent || sp == ss {
return -int(types.ERR_INVALID_CHAR)
}
pointer = true
@ -319,6 +340,7 @@ func skipNumber(src string, pos int) (ret int) {
return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr))
}
//go:nocheckptr
func skipString(src string, pos int) (ret int, ep int) {
if pos+1 >= len(src) {
return -int(types.ERR_EOF), -1
@ -327,6 +349,7 @@ func skipString(src string, pos int) (ret int, ep int) {
sp := uintptr(rt.IndexChar(src, pos))
se := uintptr(rt.IndexChar(src, len(src)))
// not start with quote
if *(*byte)(unsafe.Pointer(sp)) != '"' {
return -int(types.ERR_INVALID_CHAR), -1
}
@ -344,18 +367,16 @@ func skipString(src string, pos int) (ret int, ep int) {
}
sp += 1
if c == '"' {
break
return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)), ep
}
}
if sp > se {
return -int(types.ERR_EOF), -1
}
runtime.KeepAlive(src)
return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)), ep
// not found the closed quote until EOF
return -int(types.ERR_EOF), -1
}
//go:nocheckptr
func skipPair(src string, pos int, lchar byte, rchar byte) (ret int) {
if pos+1 >= len(src) {
return -int(types.ERR_EOF)
@ -403,7 +424,7 @@ func skipPair(src string, pos int, lchar byte, rchar byte) (ret int) {
return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr))
}
func skipValue(src string, pos int) (ret int, start int) {
func skipValueFast(src string, pos int) (ret int, start int) {
pos = skipBlank(src, pos)
if pos < 0 {
return pos, -1
@ -428,3 +449,127 @@ func skipValue(src string, pos int) (ret int, start int) {
}
return ret, pos
}
func skipValue(src string, pos int) (ret int, start int) {
pos = skipBlank(src, pos)
if pos < 0 {
return pos, -1
}
switch c := src[pos]; c {
case 'n':
ret = decodeNull(src, pos)
case '"':
ret, _ = skipString(src, pos)
case '{':
ret, _ = skipObject(src, pos)
case '[':
ret, _ = skipArray(src, pos)
case 't':
ret = decodeTrue(src, pos)
case 'f':
ret = decodeFalse(src, pos)
case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
ret = skipNumber(src, pos)
default:
ret = -int(types.ERR_INVALID_CHAR)
}
return ret, pos
}
func skipObject(src string, pos int) (ret int, start int) {
start = skipBlank(src, pos)
if start < 0 {
return start, -1
}
if src[start] != '{' {
return -int(types.ERR_INVALID_CHAR), -1
}
pos = start + 1
pos = skipBlank(src, pos)
if pos < 0 {
return pos, -1
}
if src[pos] == '}' {
return pos + 1, start
}
for {
pos, _ = skipString(src, pos)
if pos < 0 {
return pos, -1
}
pos = skipBlank(src, pos)
if pos < 0 {
return pos, -1
}
if src[pos] != ':' {
return -int(types.ERR_INVALID_CHAR), -1
}
pos++
pos, _ = skipValue(src, pos)
if pos < 0 {
return pos, -1
}
pos = skipBlank(src, pos)
if pos < 0 {
return pos, -1
}
if src[pos] == '}' {
return pos + 1, start
}
if src[pos] != ',' {
return -int(types.ERR_INVALID_CHAR), -1
}
pos++
pos = skipBlank(src, pos)
if pos < 0 {
return pos, -1
}
}
}
func skipArray(src string, pos int) (ret int, start int) {
start = skipBlank(src, pos)
if start < 0 {
return start, -1
}
if src[start] != '[' {
return -int(types.ERR_INVALID_CHAR), -1
}
pos = start + 1
pos = skipBlank(src, pos)
if pos < 0 {
return pos, -1
}
if src[pos] == ']' {
return pos + 1, start
}
for {
pos, _ = skipValue(src, pos)
if pos < 0 {
return pos, -1
}
pos = skipBlank(src, pos)
if pos < 0 {
return pos, -1
}
if src[pos] == ']' {
return pos + 1, start
}
if src[pos] != ',' {
return -int(types.ERR_INVALID_CHAR), -1
}
pos++
}
}

View File

@ -1541,13 +1541,19 @@ func (self *Node) toGenericObjectUseNode() (map[string]Node, error) {
emptyObjectNode = Node{t: types.V_OBJECT}
)
// NewRaw creates a node of raw json, and decides its type by first char.
// NewRaw creates a node of raw json.
// If the input json is invalid, NewRaw returns an error Node.
func NewRaw(json string) Node {
if json == "" {
panic("empty json string")
parser := NewParser(json)
start, err := parser.skip()
if err != 0 {
return *newError(err, err.Message())
}
it := switchRawType(json[0])
return newRawNode(json, it)
it := switchRawType(parser.s[start])
if it == _V_NONE {
return Node{}
}
return newRawNode(parser.s[start:parser.p], it)
}
// NewAny creates a node of type V_ANY if any's type isn't Node or *Node,

View File

@ -350,7 +350,7 @@ func (self *Parser) searchKey(match string) types.ParsingError {
/* skip value */
if key != match {
if _, err = self.skip(); err != 0 {
if _, err = self.skipFast(); err != 0 {
return err
}
} else {
@ -398,7 +398,7 @@ func (self *Parser) searchIndex(idx int) types.ParsingError {
for i := 0; i < idx; i++ {
/* decode the value */
if _, err = self.skip(); err != 0 {
if _, err = self.skipFast(); err != 0 {
return err
}

Binary file not shown. (Before: 85 KiB)

Binary file not shown. (Before: 86 KiB)
View File

@ -0,0 +1,66 @@
// +build amd64,go1.15,!go1.21
/*
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package decoder
import (
`github.com/bytedance/sonic/internal/decoder`
)
// Decoder is the decoder context object
type Decoder = decoder.Decoder
type MismatchTypeError = decoder.MismatchTypeError
// Options for decode.
type Options = decoder.Options
const (
OptionUseInt64 Options = decoder.OptionUseInt64
OptionUseNumber Options = decoder.OptionUseNumber
OptionUseUnicodeErrors Options = decoder.OptionUseUnicodeErrors
OptionDisableUnknown Options = decoder.OptionDisableUnknown
OptionCopyString Options = decoder.OptionCopyString
OptionValidateString Options = decoder.OptionValidateString
)
// StreamDecoder is the decoder context object for streaming input.
type StreamDecoder = decoder.StreamDecoder
type SyntaxError = decoder.SyntaxError
var (
// NewDecoder creates a new decoder instance.
NewDecoder = decoder.NewDecoder
// NewStreamDecoder adapts to encoding/json.NewDecoder API.
//
// NewStreamDecoder returns a new decoder that reads from r.
NewStreamDecoder = decoder.NewStreamDecoder
// Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in
// order to reduce the first-hit latency.
//
// Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is
// a compile option to set the depth of recursive compile for the nested struct type.
Pretouch = decoder.Pretouch
// Skip skips only one json value, and returns first non-blank character position and its ending position if it is valid.
// Otherwise, returns negative error code using start and invalid character position using end
Skip = decoder.Skip
)

View File

@ -0,0 +1,196 @@
// +build !amd64 go1.21
/*
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package decoder
import (
`encoding/json`
`bytes`
`reflect`
`github.com/bytedance/sonic/internal/native/types`
`github.com/bytedance/sonic/option`
`io`
)
const (
_F_use_int64 = iota
_F_use_number
_F_disable_urc
_F_disable_unknown
_F_copy_string
_F_validate_string
_F_allow_control = 31
)
type Options uint64
const (
OptionUseInt64 Options = 1 << _F_use_int64
OptionUseNumber Options = 1 << _F_use_number
OptionUseUnicodeErrors Options = 1 << _F_disable_urc
OptionDisableUnknown Options = 1 << _F_disable_unknown
OptionCopyString Options = 1 << _F_copy_string
OptionValidateString Options = 1 << _F_validate_string
)
func (self *Decoder) SetOptions(opts Options) {
if (opts & OptionUseNumber != 0) && (opts & OptionUseInt64 != 0) {
panic("can't set OptionUseInt64 and OptionUseNumber both!")
}
self.f = uint64(opts)
}
// Decoder is the decoder context object
type Decoder struct {
i int
f uint64
s string
}
// NewDecoder creates a new decoder instance.
func NewDecoder(s string) *Decoder {
return &Decoder{s: s}
}
// Pos returns the current decoding position.
func (self *Decoder) Pos() int {
return self.i
}
func (self *Decoder) Reset(s string) {
self.s = s
self.i = 0
// self.f = 0
}
// NOTE: api fallback do nothing
func (self *Decoder) CheckTrailings() error {
pos := self.i
buf := self.s
/* skip all the trailing spaces */
if pos != len(buf) {
for pos < len(buf) && (types.SPACE_MASK & (1 << buf[pos])) != 0 {
pos++
}
}
/* then it must be at EOF */
if pos == len(buf) {
return nil
}
/* junk after JSON value */
return nil
}
// Decode parses the JSON-encoded data from current position and stores the result
// in the value pointed to by val.
func (self *Decoder) Decode(val interface{}) error {
r := bytes.NewBufferString(self.s)
dec := json.NewDecoder(r)
if (self.f | uint64(OptionUseNumber)) != 0 {
dec.UseNumber()
}
if (self.f | uint64(OptionDisableUnknown)) != 0 {
dec.DisallowUnknownFields()
}
return dec.Decode(val)
}
// UseInt64 indicates the Decoder to unmarshal an integer into an interface{} as an
// int64 instead of as a float64.
func (self *Decoder) UseInt64() {
self.f |= 1 << _F_use_int64
self.f &^= 1 << _F_use_number
}
// UseNumber indicates the Decoder to unmarshal a number into an interface{} as a
// json.Number instead of as a float64.
func (self *Decoder) UseNumber() {
self.f &^= 1 << _F_use_int64
self.f |= 1 << _F_use_number
}
// UseUnicodeErrors indicates the Decoder to return an error when encounter invalid
// UTF-8 escape sequences.
func (self *Decoder) UseUnicodeErrors() {
self.f |= 1 << _F_disable_urc
}
// DisallowUnknownFields indicates the Decoder to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
func (self *Decoder) DisallowUnknownFields() {
self.f |= 1 << _F_disable_unknown
}
// CopyString indicates the Decoder to decode string values by copying instead of referring.
func (self *Decoder) CopyString() {
self.f |= 1 << _F_copy_string
}
// ValidateString causes the Decoder to validate string values when decoding string value
// in JSON. Validation is that, returning error when unescaped control chars(0x00-0x1f) or
// invalid UTF-8 chars in the string value of JSON.
func (self *Decoder) ValidateString() {
self.f |= 1 << _F_validate_string
}
// Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in
// order to reduce the first-hit latency.
//
// Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is
// a compile option to set the depth of recursive compile for the nested struct type.
func Pretouch(vt reflect.Type, opts ...option.CompileOption) error {
return nil
}
type StreamDecoder struct {
r io.Reader
buf []byte
scanp int
scanned int64
err error
Decoder
}
// NewStreamDecoder adapts to encoding/json.NewDecoder API.
//
// NewStreamDecoder returns a new decoder that reads from r.
func NewStreamDecoder(r io.Reader) *StreamDecoder {
return &StreamDecoder{r : r}
}
// Decode decodes input stream into val with corresponding data.
// Redundant bytes may be read and left in its buffer, and can be used at the next call.
// Either io error from underlying io.Reader (except io.EOF)
// or syntax error from data will be recorded and stop subsequently decoding.
func (self *StreamDecoder) Decode(val interface{}) (err error) {
dec := json.NewDecoder(self.r)
if (self.f | uint64(OptionUseNumber)) != 0 {
dec.UseNumber()
}
if (self.f | uint64(OptionDisableUnknown)) != 0 {
dec.DisallowUnknownFields()
}
return dec.Decode(val)
}

View File

@ -0,0 +1,108 @@
// +build amd64,go1.15,!go1.21
/*
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package encoder
import (
`github.com/bytedance/sonic/internal/encoder`
)
// Encoder represents a specific set of encoder configurations.
type Encoder = encoder.Encoder
// StreamEncoder uses io.Writer as input.
type StreamEncoder = encoder.StreamEncoder
// Options is a set of encoding options.
type Options = encoder.Options
const (
// SortMapKeys indicates that the keys of a map needs to be sorted
// before serializing into JSON.
// WARNING: This hurts performance A LOT, USE WITH CARE.
SortMapKeys Options = encoder.SortMapKeys
// EscapeHTML indicates encoder to escape all HTML characters
// after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape).
// WARNING: This hurts performance A LOT, USE WITH CARE.
EscapeHTML Options = encoder.EscapeHTML
// CompactMarshaler indicates that the output JSON from json.Marshaler
// is always compact and needs no validation
CompactMarshaler Options = encoder.CompactMarshaler
// NoQuoteTextMarshaler indicates that the output text from encoding.TextMarshaler
// is always escaped string and needs no quoting
NoQuoteTextMarshaler Options = encoder.NoQuoteTextMarshaler
// NoNullSliceOrMap indicates all empty Array or Object are encoded as '[]' or '{}',
// instead of 'null'
NoNullSliceOrMap Options = encoder.NoNullSliceOrMap
// ValidateString indicates that encoder should validate the input string
// before encoding it into JSON.
ValidateString Options = encoder.ValidateString
// CompatibleWithStd is used to be compatible with std encoder.
CompatibleWithStd Options = encoder.CompatibleWithStd
)
var (
// Encode returns the JSON encoding of val, encoded with opts.
Encode = encoder.Encode
// EncodeInto is like Encode but uses a user-supplied buffer instead of allocating a new one.
EncodeInto = encoder.EncodeInto
// EncodeIndented is like Encode but applies Indent to format the output.
// Each JSON element in the output will begin on a new line beginning with prefix
// followed by one or more copies of indent according to the indentation nesting.
EncodeIndented = encoder.EncodeIndented
// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must
// be used.
HTMLEscape = encoder.HTMLEscape
// Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in
// order to reduce the first-hit latency.
//
// Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is
// a compile option to set the depth of recursive compile for the nested struct type.
Pretouch = encoder.Pretouch
// Quote returns the JSON-quoted version of s.
Quote = encoder.Quote
// Valid validates json and returns first non-blank character position,
// if it is only one valid json value.
// Otherwise returns invalid character position using start.
//
// Note: it does not check for the invalid UTF-8 characters.
Valid = encoder.Valid
// NewStreamEncoder adapts to the encoding/json.NewEncoder API.
//
// NewStreamEncoder returns a new encoder that writes to w.
NewStreamEncoder = encoder.NewStreamEncoder
)

View File

@ -0,0 +1,234 @@
// +build !amd64 go1.21
/*
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package encoder
import (
`io`
`bytes`
`encoding/json`
`reflect`
`github.com/bytedance/sonic/option`
)
// Options is a set of encoding options.
type Options uint64
const (
bitSortMapKeys = iota
bitEscapeHTML
bitCompactMarshaler
bitNoQuoteTextMarshaler
bitNoNullSliceOrMap
bitValidateString
// used for recursive compile
bitPointerValue = 63
)
const (
// SortMapKeys indicates that the keys of a map need to be sorted
// before serializing into JSON.
// WARNING: This hurts performance A LOT, USE WITH CARE.
SortMapKeys Options = 1 << bitSortMapKeys
// EscapeHTML indicates that the encoder should escape all HTML characters
// after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape).
// WARNING: This hurts performance A LOT, USE WITH CARE.
EscapeHTML Options = 1 << bitEscapeHTML
// CompactMarshaler indicates that the output JSON from json.Marshaler
// is always compact and needs no validation
CompactMarshaler Options = 1 << bitCompactMarshaler
// NoQuoteTextMarshaler indicates that the output text from encoding.TextMarshaler
// is always an escaped string and needs no quoting
NoQuoteTextMarshaler Options = 1 << bitNoQuoteTextMarshaler
// NoNullSliceOrMap indicates that empty slices and maps are encoded as '[]' or '{}'
// instead of 'null'
NoNullSliceOrMap Options = 1 << bitNoNullSliceOrMap
// ValidateString indicates that encoder should validate the input string
// before encoding it into JSON.
ValidateString Options = 1 << bitValidateString
// CompatibleWithStd combines the options needed to stay compatible with the standard library encoder.
CompatibleWithStd Options = SortMapKeys | EscapeHTML | CompactMarshaler
)
// Encoder represents a specific set of encoder configurations.
type Encoder struct {
Opts Options
prefix string
indent string
}
// Encode returns the JSON encoding of v.
func (self *Encoder) Encode(v interface{}) ([]byte, error) {
if self.indent != "" || self.prefix != "" {
return EncodeIndented(v, self.prefix, self.indent, self.Opts)
}
return Encode(v, self.Opts)
}
// SortKeys enables the SortMapKeys option.
func (self *Encoder) SortKeys() *Encoder {
self.Opts |= SortMapKeys
return self
}
// SetEscapeHTML specifies whether the EscapeHTML option is enabled.
func (self *Encoder) SetEscapeHTML(f bool) {
if f {
self.Opts |= EscapeHTML
} else {
self.Opts &= ^EscapeHTML
}
}
// SetValidateString specifies whether the ValidateString option is enabled.
func (self *Encoder) SetValidateString(f bool) {
if f {
self.Opts |= ValidateString
} else {
self.Opts &= ^ValidateString
}
}
// SetCompactMarshaler specifies whether the CompactMarshaler option is enabled.
func (self *Encoder) SetCompactMarshaler(f bool) {
if f {
self.Opts |= CompactMarshaler
} else {
self.Opts &= ^CompactMarshaler
}
}
// SetNoQuoteTextMarshaler specifies whether the NoQuoteTextMarshaler option is enabled.
func (self *Encoder) SetNoQuoteTextMarshaler(f bool) {
if f {
self.Opts |= NoQuoteTextMarshaler
} else {
self.Opts &= ^NoQuoteTextMarshaler
}
}
// SetIndent instructs the encoder to format each subsequent encoded
// value as if indented by the package-level function EncodeIndented().
// Calling SetIndent("", "") disables indentation.
func (enc *Encoder) SetIndent(prefix, indent string) {
enc.prefix = prefix
enc.indent = indent
}
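A hedged sketch of driving this fallback Encoder through the setters defined above; the program is illustrative and not part of the package:
package main

import (
    "os"

    "github.com/bytedance/sonic/encoder"
)

func main() {
    var enc encoder.Encoder  // the zero value is ready to use
    enc.SortKeys()           // enables SortMapKeys
    enc.SetEscapeHTML(true)  // enables EscapeHTML
    enc.SetIndent("", "  ")  // routes Encode through EncodeIndented with a two-space indent

    out, err := enc.Encode(map[string]int{"b": 2, "a": 1})
    if err != nil {
        panic(err)
    }
    os.Stdout.Write(out)
}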
// Quote returns the JSON-quoted version of s.
func Quote(s string) string {
/* check for empty string */
if s == "" {
return `""`
}
out, _ := json.Marshal(s)
return string(out)
}
// Encode returns the JSON encoding of val, encoded with opts.
func Encode(val interface{}, opts Options) ([]byte, error) {
return json.Marshal(val)
}
// EncodeInto is like Encode but uses a user-supplied buffer instead of allocating
// a new one.
func EncodeInto(buf *[]byte, val interface{}, opts Options) error {
if buf == nil {
panic("user-supplied buffer buf is nil")
}
w := bytes.NewBuffer(*buf)
enc := json.NewEncoder(w)
enc.SetEscapeHTML((opts & EscapeHTML) != 0)
err := enc.Encode(val)
*buf = w.Bytes()
return err
}
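EncodeInto appends to a caller-supplied buffer (here it wraps *buf in a bytes.Buffer), which allows the same buffer to be reused across calls. A small illustrative helper under that assumption; note that this fallback path goes through encoding/json's Encoder, which appends a trailing newline to each value:
package main

import (
    "fmt"

    "github.com/bytedance/sonic/encoder"
)

// appendJSON appends the encoding of v to a caller-owned buffer instead of
// allocating a fresh slice on every call (illustrative helper, not library code).
func appendJSON(dst []byte, v interface{}) ([]byte, error) {
    err := encoder.EncodeInto(&dst, v, 0)
    return dst, err
}

func main() {
    buf := make([]byte, 0, 256)
    buf, _ = appendJSON(buf, map[string]bool{"ok": true})
    fmt.Printf("%s", buf)
}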
// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must
// be used.
func HTMLEscape(dst []byte, src []byte) []byte {
d := bytes.NewBuffer(dst)
json.HTMLEscape(d, src)
return d.Bytes()
}
// EncodeIndented is like Encode but applies Indent to format the output.
// Each JSON element in the output will begin on a new line beginning with prefix
// followed by one or more copies of indent according to the indentation nesting.
func EncodeIndented(val interface{}, prefix string, indent string, opts Options) ([]byte, error) {
w := bytes.NewBuffer([]byte{})
enc := json.NewEncoder(w)
enc.SetEscapeHTML((opts & EscapeHTML) != 0)
enc.SetIndent(prefix, indent)
err := enc.Encode(val)
out := w.Bytes()
return out, err
}
// Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in
// order to reduce the first-hit latency.
//
// Opts are the compile options, for example, "option.WithCompileRecursiveDepth" is
// a compile option to set the depth of recursive compile for the nested struct type.
func Pretouch(vt reflect.Type, opts ...option.CompileOption) error {
return nil
}
// Valid validates the JSON data and returns the position of the first non-blank
// character if the data contains exactly one valid JSON value;
// otherwise it returns the position of the invalid character via start.
//
// Note: it does not check for invalid UTF-8 characters.
func Valid(data []byte) (ok bool, start int) {
return json.Valid(data), 0
}
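Quote and Valid are straightforward to call; a tiny illustrative sketch (output comments reflect this fallback implementation, which delegates to encoding/json):
package main

import (
    "fmt"

    "github.com/bytedance/sonic/encoder"
)

func main() {
    fmt.Println(encoder.Quote(`say "hi"`)) // "say \"hi\""

    ok, start := encoder.Valid([]byte(` {"a": 1} `))
    fmt.Println(ok, start) // true 0 (start is always 0 on this fallback path)
}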
// StreamEncoder uses io.Writer as input.
type StreamEncoder struct {
w io.Writer
Encoder
}
// NewStreamEncoder mirrors the encoding/json.NewEncoder API.
//
// NewStreamEncoder returns a new encoder that writes to w.
func NewStreamEncoder(w io.Writer) *StreamEncoder {
return &StreamEncoder{w: w}
}
// Encode encodes val as JSON and writes it to the underlying io.Writer.
func (enc *StreamEncoder) Encode(val interface{}) (err error) {
jenc := json.NewEncoder(enc.w)
jenc.SetEscapeHTML((enc.Opts & EscapeHTML) != 0)
jenc.SetIndent(enc.prefix, enc.indent)
err = jenc.Encode(val)
return err
}
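A short illustrative use of the stream encoder above; SetEscapeHTML and SetIndent are promoted from the embedded Encoder, and each Encode call writes one value to w:
package main

import (
    "os"

    "github.com/bytedance/sonic/encoder"
)

func main() {
    enc := encoder.NewStreamEncoder(os.Stdout)
    enc.SetEscapeHTML(false) // keep <, >, & unescaped in the output
    enc.SetIndent("", "\t")

    for _, v := range []interface{}{map[string]string{"k": "<v>"}, []int{1, 2, 3}} {
        if err := enc.Encode(v); err != nil {
            panic(err)
        }
    }
}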

View File

@ -319,6 +319,7 @@ func (self *_Assembler) compile() {
_OP_dismatch_err : (*_Assembler)._asm_OP_dismatch_err,
_OP_go_skip : (*_Assembler)._asm_OP_go_skip,
_OP_add : (*_Assembler)._asm_OP_add,
_OP_check_empty : (*_Assembler)._asm_OP_check_empty,
}
func (self *_Assembler) instr(v *_Instr) {
@ -458,6 +459,7 @@ func (self *_Assembler) call_vf(fn obj.Addr) {
var (
_V_stackOverflow = jit.Imm(int64(uintptr(unsafe.Pointer(&stackOverflow))))
_I_json_UnsupportedValueError = jit.Itab(_T_error, reflect.TypeOf(new(json.UnsupportedValueError)))
_I_json_MismatchTypeError = jit.Itab(_T_error, reflect.TypeOf(new(MismatchTypeError)))
)
func (self *_Assembler) type_error() {
@ -471,7 +473,12 @@ func (self *_Assembler) type_error() {
func (self *_Assembler) mismatch_error() {
self.Link(_LB_mismatch_error) // _mismatch_error:
self.Emit("MOVQ", _VAR_et, _ET) // MOVQ _VAR_et, ET
self.Emit("MOVQ", _VAR_ic, _EP) // MOVQ _VAR_ic, EP
self.Emit("MOVQ", _I_json_MismatchTypeError, _AX) // MOVQ _I_json_MismatchTypeError, AX
self.Emit("CMPQ", _ET, _AX) // CMPQ ET, AX
self.Sjmp("JE" , _LB_error) // JE _LB_error
self.Emit("MOVQ", _ARG_sp, _AX)
self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
self.Emit("MOVQ", _ARG_sl, _CX)
@ -1128,9 +1135,16 @@ func (self *_Assembler) decode_dynamic(vt obj.Addr, vp obj.Addr) {
self.call_go(_F_decodeTypedPointer) // CALL_GO decodeTypedPointer
self.Emit("MOVQ" , jit.Ptr(_SP, 64), _ET) // MOVQ 64(SP), ET
self.Emit("MOVQ" , jit.Ptr(_SP, 72), _EP) // MOVQ 72(SP), EP
self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
self.Sjmp("JNZ" , _LB_error) // JNZ _error
self.Emit("MOVQ" , jit.Ptr(_SP, 56), _IC) // MOVQ 56(SP), IC
self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
self.Sjmp("JE", "_decode_dynamic_end_{n}") // JE, _decode_dynamic_end_{n}
self.Emit("MOVQ", _I_json_MismatchTypeError, _AX) // MOVQ _I_json_MismatchTypeError, AX
self.Emit("CMPQ", _ET, _AX) // CMPQ ET, AX
self.Sjmp("JNE" , _LB_error) // JNE LB_error
self.Emit("MOVQ", _EP, _VAR_ic) // MOVQ EP, VAR_ic
self.Emit("MOVQ", _ET, _VAR_et) // MOVQ ET, VAR_et
self.Link("_decode_dynamic_end_{n}")
}
/** OpCode Assembler Functions **/
@ -1161,6 +1175,8 @@ func (self *_Assembler) decode_dynamic(vt obj.Addr, vp obj.Addr) {
var (
_F_FieldMap_GetCaseInsensitive obj.Addr
_Empty_Slice = make([]byte, 0)
_Zero_Base = int64(uintptr(((*rt.GoSlice)(unsafe.Pointer(&_Empty_Slice))).Ptr))
)
const (
@ -1487,18 +1503,21 @@ func (self *_Assembler) _asm_OP_map_init(_ *_Instr) {
func (self *_Assembler) _asm_OP_map_key_i8(p *_Instr) {
self.parse_signed(int8Type, "", p.vi()) // PARSE int8
self.range_signed(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int8, mapassign, st.Iv
}
func (self *_Assembler) _asm_OP_map_key_i16(p *_Instr) {
self.parse_signed(int16Type, "", p.vi()) // PARSE int16
self.range_signed(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int16, mapassign, st.Iv
}
func (self *_Assembler) _asm_OP_map_key_i32(p *_Instr) {
self.parse_signed(int32Type, "", p.vi()) // PARSE int32
self.range_signed(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32
self.match_char('"')
if vt := p.vt(); !mapfast(vt) {
self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int32, mapassign, st.Iv
} else {
@ -1508,6 +1527,7 @@ func (self *_Assembler) _asm_OP_map_key_i32(p *_Instr) {
func (self *_Assembler) _asm_OP_map_key_i64(p *_Instr) {
self.parse_signed(int64Type, "", p.vi()) // PARSE int64
self.match_char('"')
if vt := p.vt(); !mapfast(vt) {
self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int64, mapassign, st.Iv
} else {
@ -1519,18 +1539,21 @@ func (self *_Assembler) _asm_OP_map_key_i64(p *_Instr) {
func (self *_Assembler) _asm_OP_map_key_u8(p *_Instr) {
self.parse_unsigned(uint8Type, "", p.vi()) // PARSE uint8
self.range_unsigned(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint8, vt.Iv
}
func (self *_Assembler) _asm_OP_map_key_u16(p *_Instr) {
self.parse_unsigned(uint16Type, "", p.vi()) // PARSE uint16
self.range_unsigned(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint16, vt.Iv
}
func (self *_Assembler) _asm_OP_map_key_u32(p *_Instr) {
self.parse_unsigned(uint32Type, "", p.vi()) // PARSE uint32
self.range_unsigned(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32
self.match_char('"')
if vt := p.vt(); !mapfast(vt) {
self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint32, vt.Iv
} else {
@ -1540,6 +1563,7 @@ func (self *_Assembler) _asm_OP_map_key_u32(p *_Instr) {
func (self *_Assembler) _asm_OP_map_key_u64(p *_Instr) {
self.parse_unsigned(uint64Type, "", p.vi()) // PARSE uint64
self.match_char('"')
if vt := p.vt(); !mapfast(vt) {
self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint64, vt.Iv
} else {
@ -1552,11 +1576,13 @@ func (self *_Assembler) _asm_OP_map_key_f32(p *_Instr) {
self.parse_number(float32Type, "", p.vi()) // PARSE NUMBER
self.range_single() // RANGE float32
self.Emit("MOVSS", _X0, _VAR_st_Dv) // MOVSS X0, st.Dv
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv
}
func (self *_Assembler) _asm_OP_map_key_f64(p *_Instr) {
self.parse_number(float64Type, "", p.vi()) // PARSE NUMBER
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv
}
@ -1621,6 +1647,24 @@ func (self *_Assembler) _asm_OP_slice_init(p *_Instr) {
self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
}
func (self *_Assembler) _asm_OP_check_empty(p *_Instr) {
rbracket := p.vb()
if rbracket == ']' {
self.check_eof(1)
self.Emit("LEAQ", jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX
self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(rbracket))) // CMPB (IP)(IC), ']'
self.Sjmp("JNE" , "_not_empty_array_{n}") // JNE _not_empty_array_{n}
self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
self.StorePtr(_Zero_Base, jit.Ptr(_VP, 0), _AX) // MOVQ $zerobase, (VP)
self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
self.Emit("MOVOU", _X0, jit.Ptr(_VP, 8)) // MOVOU X0, 8(VP)
self.Xjmp("JMP" , p.vi()) // JMP {p.vi()}
self.Link("_not_empty_array_{n}")
} else {
panic("only implement check empty array here!")
}
}
func (self *_Assembler) _asm_OP_slice_append(p *_Instr) {
self.Emit("MOVQ" , jit.Ptr(_VP, 8), _AX) // MOVQ 8(VP), AX
self.Emit("CMPQ" , _AX, jit.Ptr(_VP, 16)) // CMPQ AX, 16(VP)
@ -1640,12 +1684,34 @@ func (self *_Assembler) _asm_OP_slice_append(p *_Instr) {
self.WriteRecNotAX(8, _DI, jit.Ptr(_VP, 0), true, true)// MOVQ DI, (VP)
self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP)
// growslice does not zero the memory in {oldcap, newlen} when the appended element
// type has no pointer data, so we zero it here to avoid decoding it as random values.
if rt.UnpackType(p.vt()).PtrData == 0 {
self.Emit("SUBQ" , _AX, _SI) // MOVQ AX, SI
self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP)
self.Emit("MOVQ" , _DI, _VP) // MOVQ DI, VP
self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX
self.From("MULQ" , _CX) // MULQ CX
self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP
self.Emit("MOVQ" , _SI, _AX) // MOVQ SI, AX
self.From("MULQ" , _CX) // MULQ CX
self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
self.Emit("MOVQ" , _VP, jit.Ptr(_SP, 0)) // MOVQ VP, (SP)
self.mem_clear_fn(true) // CALL_GO memclr{Has,NoHeap}
self.Sjmp("JMP", "_append_slice_end_{n}") // JMP _append_slice_end_{n}
}
self.Link("_index_{n}") // _index_{n}:
self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP)
self.Emit("MOVQ" , jit.Ptr(_VP, 0), _VP) // MOVQ (VP), VP
self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX
self.From("MULQ" , _CX) // MULQ CX
self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP
self.Link("_append_slice_end_{n}")
}
func (self *_Assembler) _asm_OP_object_skip(_ *_Instr) {
@ -1787,10 +1853,14 @@ func (self *_Assembler) lspace(subfix string) {
}
func (self *_Assembler) _asm_OP_match_char(p *_Instr) {
self.match_char(p.vb())
}
func (self *_Assembler) match_char(char byte) {
self.check_eof(1)
self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error
self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(char))) // CMPB (IP)(IC), ${p.vb()}
self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error
self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
}
func (self *_Assembler) _asm_OP_check_char(p *_Instr) {

View File

@ -315,6 +315,7 @@ func (self *_Assembler) compile() {
_OP_dismatch_err : (*_Assembler)._asm_OP_dismatch_err,
_OP_go_skip : (*_Assembler)._asm_OP_go_skip,
_OP_add : (*_Assembler)._asm_OP_add,
_OP_check_empty : (*_Assembler)._asm_OP_check_empty,
_OP_debug : (*_Assembler)._asm_OP_debug,
}
@ -348,8 +349,8 @@ func (self *_Assembler) epilogue() {
self.Emit("MOVQ", _EP, _CX) // MOVQ BX, CX
self.Emit("MOVQ", _ET, _BX) // MOVQ AX, BX
self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX
self.Emit("MOVQ", jit.Imm(0), _ARG_sp) // MOVQ $0, sv.p<>+48(FP)
self.Emit("MOVQ", jit.Imm(0), _ARG_vp) // MOVQ $0, sv.p<>+48(FP)
self.Emit("MOVQ", jit.Imm(0), _ARG_sp) // MOVQ $0, sv.p<>+48(FP)
self.Emit("MOVQ", jit.Imm(0), _ARG_vp) // MOVQ $0, sv.p<>+48(FP)
self.Emit("MOVQ", jit.Imm(0), _ARG_sv_p) // MOVQ $0, sv.p<>+48(FP)
self.Emit("MOVQ", jit.Imm(0), _ARG_vk) // MOVQ $0, vk<>+64(FP)
self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP
@ -480,6 +481,7 @@ func (self *_Assembler) call_vf(fn obj.Addr) {
var (
_V_stackOverflow = jit.Imm(int64(uintptr(unsafe.Pointer(&stackOverflow))))
_I_json_UnsupportedValueError = jit.Itab(_T_error, reflect.TypeOf(new(json.UnsupportedValueError)))
_I_json_MismatchTypeError = jit.Itab(_T_error, reflect.TypeOf(new(MismatchTypeError)))
)
func (self *_Assembler) type_error() {
@ -489,7 +491,12 @@ func (self *_Assembler) type_error() {
}
func (self *_Assembler) mismatch_error() {
self.Link(_LB_mismatch_error) // _mismatch_error:
self.Emit("MOVQ", _VAR_et, _ET) // MOVQ _VAR_et, ET
self.Emit("MOVQ", _VAR_ic, _EP) // MOVQ _VAR_ic, EP
self.Emit("MOVQ", _I_json_MismatchTypeError, _CX) // MOVQ _I_json_MismatchType, CX
self.Emit("CMPQ", _ET, _CX) // CMPQ ET, CX
self.Sjmp("JE" , _LB_error) // JE _LB_error
self.Emit("MOVQ", _ARG_sp, _AX)
self.Emit("MOVQ", _ARG_sl, _BX)
self.Emit("MOVQ", _VAR_ic, _CX)
@ -1119,7 +1126,7 @@ func (self *_Assembler) decode_dynamic(vt obj.Addr, vp obj.Addr) {
self.Emit("MOVQ", _ARG_sp, _AX) // MOVQ sp, AX
self.Emit("MOVQ", _ARG_sl, _BX) // MOVQ sp, BX
self.Emit("MOVQ" , _IC, _CX) // MOVQ IC, CX
self.Emit("MOVQ" , _ST, _R8) // MOVQ ST, R8
self.Emit("MOVQ" , _ST, _R8) // MOVQ ST, R8
self.Emit("MOVQ" , _ARG_fv, _R9) // MOVQ fv, R9
self.save(_REG_rt...)
self.Emit("MOVQ", _F_decodeTypedPointer, _IL) // MOVQ ${fn}, R11
@ -1129,7 +1136,13 @@ func (self *_Assembler) decode_dynamic(vt obj.Addr, vp obj.Addr) {
self.Emit("MOVQ" , _BX, _ET) // MOVQ BX, ET
self.Emit("MOVQ" , _CX, _EP) // MOVQ CX, EP
self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
self.Sjmp("JNZ" , _LB_error) // JNZ _error
self.Sjmp("JE", "_decode_dynamic_end_{n}") // JE, _decode_dynamic_end_{n}
self.Emit("MOVQ", _I_json_MismatchTypeError, _CX) // MOVQ _I_json_MismatchTypeError, CX
self.Emit("CMPQ", _ET, _CX) // CMPQ ET, CX
self.Sjmp("JNE", _LB_error) // JNE LB_error
self.Emit("MOVQ", _EP, _VAR_ic) // MOVQ EP, VAR_ic
self.Emit("MOVQ", _ET, _VAR_et) // MOVQ ET, VAR_et
self.Link("_decode_dynamic_end_{n}")
}
/** OpCode Assembler Functions **/
@ -1155,6 +1168,8 @@ func (self *_Assembler) decode_dynamic(vt obj.Addr, vp obj.Addr) {
var (
_F_FieldMap_GetCaseInsensitive obj.Addr
_Empty_Slice = make([]byte, 0)
_Zero_Base = int64(uintptr(((*rt.GoSlice)(unsafe.Pointer(&_Empty_Slice))).Ptr))
)
const (
@ -1482,18 +1497,21 @@ func (self *_Assembler) _asm_OP_map_init(_ *_Instr) {
func (self *_Assembler) _asm_OP_map_key_i8(p *_Instr) {
self.parse_signed(int8Type, "", p.vi()) // PARSE int8
self.range_signed_CX(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int8, mapassign, st.Iv
}
func (self *_Assembler) _asm_OP_map_key_i16(p *_Instr) {
self.parse_signed(int16Type, "", p.vi()) // PARSE int16
self.range_signed_CX(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int16, mapassign, st.Iv
}
func (self *_Assembler) _asm_OP_map_key_i32(p *_Instr) {
self.parse_signed(int32Type, "", p.vi()) // PARSE int32
self.range_signed_CX(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32
self.match_char('"')
if vt := p.vt(); !mapfast(vt) {
self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int32, mapassign, st.Iv
} else {
@ -1504,6 +1522,7 @@ func (self *_Assembler) _asm_OP_map_key_i32(p *_Instr) {
func (self *_Assembler) _asm_OP_map_key_i64(p *_Instr) {
self.parse_signed(int64Type, "", p.vi()) // PARSE int64
self.match_char('"')
if vt := p.vt(); !mapfast(vt) {
self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int64, mapassign, st.Iv
} else {
@ -1515,18 +1534,21 @@ func (self *_Assembler) _asm_OP_map_key_i64(p *_Instr) {
func (self *_Assembler) _asm_OP_map_key_u8(p *_Instr) {
self.parse_unsigned(uint8Type, "", p.vi()) // PARSE uint8
self.range_unsigned_CX(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint8, vt.Iv
}
func (self *_Assembler) _asm_OP_map_key_u16(p *_Instr) {
self.parse_unsigned(uint16Type, "", p.vi()) // PARSE uint16
self.range_unsigned_CX(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint16, vt.Iv
}
func (self *_Assembler) _asm_OP_map_key_u32(p *_Instr) {
self.parse_unsigned(uint32Type, "", p.vi()) // PARSE uint32
self.range_unsigned_CX(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32
self.match_char('"')
if vt := p.vt(); !mapfast(vt) {
self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint32, vt.Iv
} else {
@ -1537,6 +1559,7 @@ func (self *_Assembler) _asm_OP_map_key_u32(p *_Instr) {
func (self *_Assembler) _asm_OP_map_key_u64(p *_Instr) {
self.parse_unsigned(uint64Type, "", p.vi()) // PARSE uint64
self.match_char('"')
if vt := p.vt(); !mapfast(vt) {
self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint64, vt.Iv
} else {
@ -1549,11 +1572,13 @@ func (self *_Assembler) _asm_OP_map_key_f32(p *_Instr) {
self.parse_number(float32Type, "", p.vi()) // PARSE NUMBER
self.range_single_X0() // RANGE float32
self.Emit("MOVSS", _X0, _VAR_st_Dv) // MOVSS X0, st.Dv
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv
}
func (self *_Assembler) _asm_OP_map_key_f64(p *_Instr) {
self.parse_number(float64Type, "", p.vi()) // PARSE NUMBER
self.match_char('"')
self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv
}
@ -1612,6 +1637,24 @@ func (self *_Assembler) _asm_OP_slice_init(p *_Instr) {
self.Link("_done_{n}") // _done_{n}
}
func (self *_Assembler) _asm_OP_check_empty(p *_Instr) {
rbracket := p.vb()
if rbracket == ']' {
self.check_eof(1)
self.Emit("LEAQ", jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX
self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(rbracket))) // CMPB (IP)(IC), ']'
self.Sjmp("JNE" , "_not_empty_array_{n}") // JNE _not_empty_array_{n}
self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
self.StorePtr(_Zero_Base, jit.Ptr(_VP, 0), _AX) // MOVQ $zerobase, (VP)
self.Emit("PXOR", _X0, _X0) // PXOR X0, X0
self.Emit("MOVOU", _X0, jit.Ptr(_VP, 8)) // MOVOU X0, 8(VP)
self.Xjmp("JMP" , p.vi()) // JMP {p.vi()}
self.Link("_not_empty_array_{n}")
} else {
panic("only implement check empty array here!")
}
}
func (self *_Assembler) _asm_OP_slice_append(p *_Instr) {
self.Emit("MOVQ" , jit.Ptr(_VP, 8), _AX) // MOVQ 8(VP), AX
self.Emit("CMPQ" , _AX, jit.Ptr(_VP, 16)) // CMPQ AX, 16(VP)
@ -1626,6 +1669,28 @@ func (self *_Assembler) _asm_OP_slice_append(p *_Instr) {
self.WritePtrAX(8, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
self.Emit("MOVQ" , _BX, jit.Ptr(_VP, 8)) // MOVQ BX, 8(VP)
self.Emit("MOVQ" , _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP)
// growslice does not zero the memory in {oldcap, newlen} when the appended element
// type has no pointer data, so we zero it here to avoid decoding it as random values.
if rt.UnpackType(p.vt()).PtrData == 0 {
self.Emit("MOVQ" , _CX, _DI) // MOVQ CX, DI
self.Emit("SUBQ" , _BX, _DI) // MOVQ BX, DI
self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP)
self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP
self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX
self.Emit("MOVQ" , _BX, _AX) // MOVQ BX, AX
self.From("MULQ" , _CX) // MULQ CX
self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP
self.Emit("MOVQ" , _DI, _AX) // MOVQ SI, AX
self.From("MULQ" , _CX) // MULQ BX
self.Emit("MOVQ" , _AX, _BX) // ADDQ AX, BX
self.Emit("MOVQ" , _VP, _AX) // MOVQ VP, AX
self.mem_clear_fn(true) // CALL_GO memclr{Has,NoHeap}
self.Sjmp("JMP", "_append_slice_end_{n}")
}
self.Emit("MOVQ" , _BX, _AX) // MOVQ BX, AX
self.Link("_index_{n}") // _index_{n}:
self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP)
@ -1633,6 +1698,7 @@ func (self *_Assembler) _asm_OP_slice_append(p *_Instr) {
self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX
self.From("MULQ" , _CX) // MULQ CX
self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP
self.Link("_append_slice_end_{n}")
}
func (self *_Assembler) _asm_OP_object_skip(_ *_Instr) {
@ -1767,8 +1833,12 @@ func (self *_Assembler) lspace(subfix string) {
}
func (self *_Assembler) _asm_OP_match_char(p *_Instr) {
self.match_char(p.vb())
}
func (self *_Assembler) match_char(char byte) {
self.check_eof(1)
self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(char))) // CMPB (IP)(IC), ${p.vb()}
self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error
self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
}

View File

@ -98,6 +98,7 @@
_OP_dismatch_err
_OP_go_skip
_OP_add
_OP_check_empty
_OP_debug
)
@ -174,6 +175,9 @@
_OP_check_char_0 : "check_char_0",
_OP_dismatch_err : "dismatch_err",
_OP_add : "add",
_OP_go_skip : "go_skip",
_OP_check_empty : "check_empty",
_OP_debug : "debug",
}
func (self _Op) String() string {
@ -639,11 +643,6 @@ func (self *_Compiler) compileMapOp(p *_Program, sp int, vt reflect.Type, op _Op
skip2 := p.pc()
p.rtt(op, vt)
/* match the closing quote if needed */
if op != _OP_map_key_str && op != _OP_map_key_utext && op != _OP_map_key_utext_p {
p.chr(_OP_match_char, '"')
}
/* match the value separator */
p.add(_OP_lspace)
p.chr(_OP_match_char, ':')
@ -660,11 +659,6 @@ func (self *_Compiler) compileMapOp(p *_Program, sp int, vt reflect.Type, op _Op
skip3 := p.pc()
p.rtt(op, vt)
/* match the closing quote if needed */
if op != _OP_map_key_str && op != _OP_map_key_utext && op != _OP_map_key_utext_p {
p.chr(_OP_match_char, '"')
}
/* match the value separator */
p.add(_OP_lspace)
p.chr(_OP_match_char, ':')
@ -689,12 +683,37 @@ func (self *_Compiler) compilePtr(p *_Program, sp int, et reflect.Type) {
/* dereference all the way down */
for et.Kind() == reflect.Ptr {
if et.Implements(jsonUnmarshalerType) {
p.rtt(_OP_unmarshal_p, et)
return
}
if et.Implements(encodingTextUnmarshalerType) {
p.add(_OP_lspace)
self.compileUnmarshalTextPtr(p, et)
return
}
et = et.Elem()
p.rtt(_OP_deref, et)
}
/* compile the element type */
self.compileOne(p, sp + 1, et)
/* check for recursive nesting */
ok := self.tab[et]
if ok {
p.rtt(_OP_recurse, et)
} else {
/* enter the recursion */
p.add(_OP_lspace)
self.tab[et] = true
/* do not inline the pointer type:
* recursing into the defined pointer type's elem would cause issue 379.
*/
self.compileOps(p, sp, et)
}
delete(self.tab, et)
j := p.pc()
p.add(_OP_goto)
p.pin(i)
@ -791,11 +810,11 @@ func (self *_Compiler) compileSliceList(p *_Program, sp int, vt reflect.Type) {
}
func (self *_Compiler) compileSliceBody(p *_Program, sp int, et reflect.Type) {
p.rtt(_OP_slice_init, et)
p.add(_OP_save)
p.add(_OP_lspace)
j := p.pc()
p.chr(_OP_check_char, ']')
p.chr(_OP_check_empty, ']')
p.rtt(_OP_slice_init, et)
p.add(_OP_save)
p.rtt(_OP_slice_append, et)
self.compileOne(p, sp + 1, et)
p.add(_OP_load)
@ -808,9 +827,9 @@ func (self *_Compiler) compileSliceBody(p *_Program, sp int, et reflect.Type) {
self.compileOne(p, sp + 1, et)
p.add(_OP_load)
p.int(_OP_goto, k0)
p.pin(j)
p.pin(k1)
p.add(_OP_drop)
p.pin(j)
}
func (self *_Compiler) compileString(p *_Program, vt reflect.Type) {

View File

@ -17,6 +17,7 @@
package decoder
import (
`unsafe`
`encoding/json`
`reflect`
`runtime`
@ -127,8 +128,17 @@ func (self *Decoder) Decode(val interface{}) error {
return &json.InvalidUnmarshalError{Type: vv.Type.Pack()}
}
etp := rt.PtrElem(vv.Type)
/* check the defined pointer type for issue 379 */
if vv.Type.IsNamed() {
newp := vp
etp = vv.Type
vp = unsafe.Pointer(&newp)
}
/* create a new stack, and call the decoder */
sb, etp := newStack(), rt.PtrElem(vv.Type)
sb := newStack()
nb, err := decodeTypedPointer(self.s, self.i, etp, vp, sb, self.f)
/* return the stack back */
self.i = nb
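The named-type branch above covers decoding into a defined pointer type (the issue 379 shape). A hedged sketch of that case, assuming the public decoder package's NewDecoder constructor; IntPtr and the expected output are illustrative, not taken from the diff:
package main

import (
    "fmt"

    "github.com/bytedance/sonic/decoder"
)

// IntPtr is a defined pointer type, the shape that used to trip up the decoder (issue 379).
type IntPtr *int

func main() {
    var p IntPtr
    dec := decoder.NewDecoder(`42`)
    if err := dec.Decode(&p); err != nil {
        panic(err)
    }
    fmt.Println(*p) // expected: 42, matching encoding/json's behaviour
}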

View File

@ -26,7 +26,7 @@
)
const (
_MinSlice = 16
_MinSlice = 2
_MaxStack = 4096 // 4k slots
_MaxStackBytes = _MaxStack * _PtrBytes
_MaxDigitNums = 800 // used in atof fallback algorithm

View File

@ -21,15 +21,15 @@
`io`
`sync`
`github.com/bytedance/sonic/option`
`github.com/bytedance/sonic/internal/native/types`
)
var (
defaultBufferSize uint = 4096
growSliceFactorShift uint = 1
minLeftBufferShift uint = 2
minLeftBufferShift uint = 1
)
// StreamDecoder is the decoder context object for streaming input.
type StreamDecoder struct {
r io.Reader
buf []byte
@ -41,7 +41,7 @@ type StreamDecoder struct {
var bufPool = sync.Pool{
New: func () interface{} {
return make([]byte, 0, defaultBufferSize)
return make([]byte, 0, option.DefaultDecoderBufferSize)
},
}
@ -206,8 +206,8 @@ func realloc(buf *[]byte) {
c := uint(cap(*buf))
if c - l <= c >> minLeftBufferShift {
e := l+(l>>minLeftBufferShift)
if e < defaultBufferSize {
e = defaultBufferSize
if e < option.DefaultDecoderBufferSize {
e = option.DefaultDecoderBufferSize
}
tmp := make([]byte, l, e)
copy(tmp, *buf)
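The realloc rule above grows the buffer only when the remaining capacity drops to at most cap>>minLeftBufferShift, and then grows it to len+len>>minLeftBufferShift, never below the configured default size. A standalone arithmetic sketch of that rule (not the library's code; minSize stands in for option.DefaultDecoderBufferSize):
package main

import "fmt"

// growTo mirrors the growth decision above: report whether to reallocate and the new capacity.
func growTo(length, capacity, shift, minSize uint) (bool, uint) {
    if capacity-length > capacity>>shift {
        return false, capacity // enough head-room left, keep the current buffer
    }
    newCap := length + (length >> shift)
    if newCap < minSize {
        newCap = minSize
    }
    return true, newCap
}

func main() {
    fmt.Println(growTo(3000, 4096, 1, 4096)) // true 4500
    fmt.Println(growTo(1000, 4096, 1, 4096)) // false 4096
}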

View File

@ -514,7 +514,8 @@ func (self *_Assembler) call_marshaler(fn obj.Addr, it *rt.GoType, vt reflect.Ty
switch vt.Kind() {
case reflect.Interface : self.call_marshaler_i(fn, it)
case reflect.Ptr, reflect.Map: self.call_marshaler_v(fn, it, vt, true)
default : self.call_marshaler_v(fn, it, vt, false)
// a struct or array wrapping a single direct-iface field can itself be passed directly
default : self.call_marshaler_v(fn, it, vt, !rt.UnpackType(vt).Indirect())
}
}

View File

@ -539,7 +539,8 @@ func (self *_Assembler) call_marshaler(fn obj.Addr, it *rt.GoType, vt reflect.Ty
switch vt.Kind() {
case reflect.Interface : self.call_marshaler_i(fn, it)
case reflect.Ptr, reflect.Map : self.call_marshaler_v(fn, it, vt, true)
default : self.call_marshaler_v(fn, it, vt, false)
// a struct or array wrapping a single direct-iface field can itself be passed directly
default : self.call_marshaler_v(fn, it, vt, !rt.UnpackType(vt).Indirect())
}
}

View File

@ -21,6 +21,7 @@
`encoding/json`
`reflect`
`runtime`
`unsafe`
`github.com/bytedance/sonic/internal/native`
`github.com/bytedance/sonic/internal/native/types`
@ -161,8 +162,10 @@ func Quote(s string) string {
// Encode returns the JSON encoding of val, encoded with opts.
func Encode(val interface{}, opts Options) ([]byte, error) {
var ret []byte
buf := newBytes()
err := EncodeInto(&buf, val, opts)
err := encodeInto(&buf, val, opts)
/* check for errors */
if err != nil {
@ -170,12 +173,20 @@ func Encode(val interface{}, opts Options) ([]byte, error) {
return nil, err
}
if opts & EscapeHTML != 0 || opts & ValidateString != 0 {
/* html-escape or correct the UTF-8 if these options are enabled */
old := buf
buf = encodeFinish(old, opts)
pbuf := ((*rt.GoSlice)(unsafe.Pointer(&buf))).Ptr
pold := ((*rt.GoSlice)(unsafe.Pointer(&old))).Ptr
/* return when allocated a new buffer */
if pbuf != pold {
freeBytes(old)
return buf, nil
}
/* make a copy of the result */
ret := make([]byte, len(buf))
ret = make([]byte, len(buf))
copy(ret, buf)
freeBytes(buf)
@ -186,6 +197,15 @@ func Encode(val interface{}, opts Options) ([]byte, error) {
// EncodeInto is like Encode but uses a user-supplied buffer instead of allocating
// a new one.
func EncodeInto(buf *[]byte, val interface{}, opts Options) error {
err := encodeInto(buf, val, opts)
if err != nil {
return err
}
*buf = encodeFinish(*buf, opts)
return err
}
func encodeInto(buf *[]byte, val interface{}, opts Options) error {
stk := newStack()
efv := rt.UnpackEface(val)
err := encodeTypedPointer(buf, efv.Type, &efv.Value, stk, uint64(opts))
@ -196,25 +216,22 @@ func EncodeInto(buf *[]byte, val interface{}, opts Options) error {
}
freeStack(stk)
/* EscapeHTML needs to allocate a new buffer*/
if opts & EscapeHTML != 0 {
dest := HTMLEscape(nil, *buf)
freeBytes(*buf) // free origin used buffer
*buf = dest
}
if opts & ValidateString != 0 && !utf8.Validate(*buf) {
dest := utf8.CorrectWith(nil, *buf, `\ufffd`)
freeBytes(*buf) // free origin used buffer
*buf = dest
}
/* avoid GC ahead */
runtime.KeepAlive(buf)
runtime.KeepAlive(efv)
return err
}
func encodeFinish(buf []byte, opts Options) []byte {
if opts & EscapeHTML != 0 {
buf = HTMLEscape(nil, buf)
}
if opts & ValidateString != 0 && !utf8.Validate(buf) {
buf = utf8.CorrectWith(nil, buf, `\ufffd`)
}
return buf
}
var typeByte = rt.UnpackType(reflect.TypeOf(byte(0)))
// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029

View File

@ -30,7 +30,6 @@
const (
_MaxStack = 4096 // 4k states
_MaxBuffer = 1048576 // 1MB buffer size
_StackSize = unsafe.Sizeof(_Stack{})
)
@ -92,7 +91,7 @@ func newBytes() []byte {
if ret := bytesPool.Get(); ret != nil {
return ret.([]byte)
} else {
return make([]byte, 0, _MaxBuffer)
return make([]byte, 0, option.DefaultEncoderBufferSize)
}
}
@ -112,7 +111,7 @@ func newBuffer() *bytes.Buffer {
if ret := bufferPool.Get(); ret != nil {
return ret.(*bytes.Buffer)
} else {
return bytes.NewBuffer(make([]byte, 0, _MaxBuffer))
return bytes.NewBuffer(make([]byte, 0, option.DefaultEncoderBufferSize))
}
}

View File

@ -21,7 +21,7 @@
`io`
)
// StreamEncoder uses io.Writer as
// StreamEncoder uses io.Writer as input.
type StreamEncoder struct {
w io.Writer
Encoder

View File

@ -72,6 +72,18 @@ func (self *BaseAssembler) NOPn(n int) {
}
}
func (self *BaseAssembler) StorePtr(ptr int64, to obj.Addr, tmp obj.Addr) {
if (to.Type != obj.TYPE_MEM) || (tmp.Type != obj.TYPE_REG) {
panic("must store imm to memory, tmp must be register")
}
if (ptr >> 32) != 0 {
self.Emit("MOVQ", Imm(ptr), tmp)
self.Emit("MOVQ", tmp, to)
} else {
self.Emit("MOVQ", Imm(ptr), to);
}
}
func (self *BaseAssembler) Byte(v ...byte) {
for ; len(v) >= 8; v = v[8:] { self.From("QUAD", Imm(rt.Get64(v))) }
for ; len(v) >= 4; v = v[4:] { self.From("LONG", Imm(int64(rt.Get32(v)))) }

View File

@ -122,7 +122,7 @@ func __validate_one(s *string, p *int, m *types.StateMachine) (ret int)
//go:nosplit
//go:noescape
//goland:noinspection GoUnusedParameter
func __get_by_path(s *string, p *int, path *[]interface{}) (ret int)
func __get_by_path(s *string, p *int, path *[]interface{}, m *types.StateMachine) (ret int)
//go:nosplit
//go:noescape

File diff suppressed because it is too large.

View File

@ -9,53 +9,53 @@
func __native_entry__() uintptr
var (
_subr__f32toa = __native_entry__() + 29744
_subr__f64toa = __native_entry__() + 496
_subr__get_by_path = __native_entry__() + 27424
_subr__html_escape = __native_entry__() + 9968
_subr__i64toa = __native_entry__() + 4112
_subr__lspace = __native_entry__() + 80
_subr__quote = __native_entry__() + 5792
_subr__skip_array = __native_entry__() + 20576
_subr__skip_number = __native_entry__() + 23920
_subr__skip_object = __native_entry__() + 22496
_subr__skip_one = __native_entry__() + 24080
_subr__skip_one_fast = __native_entry__() + 24320
_subr__u64toa = __native_entry__() + 4384
_subr__unquote = __native_entry__() + 7488
_subr__validate_one = __native_entry__() + 24144
_subr__validate_utf8 = __native_entry__() + 28464
_subr__validate_utf8_fast = __native_entry__() + 29136
_subr__value = __native_entry__() + 14672
_subr__vnumber = __native_entry__() + 18320
_subr__vsigned = __native_entry__() + 19856
_subr__vstring = __native_entry__() + 16864
_subr__vunsigned = __native_entry__() + 20208
_subr__f32toa = __native_entry__() + 31264
_subr__f64toa = __native_entry__() + 192
_subr__get_by_path = __native_entry__() + 25856
_subr__html_escape = __native_entry__() + 9040
_subr__i64toa = __native_entry__() + 3488
_subr__lspace = __native_entry__() + 16
_subr__quote = __native_entry__() + 4880
_subr__skip_array = __native_entry__() + 17952
_subr__skip_number = __native_entry__() + 21952
_subr__skip_object = __native_entry__() + 20368
_subr__skip_one = __native_entry__() + 22112
_subr__skip_one_fast = __native_entry__() + 22352
_subr__u64toa = __native_entry__() + 3600
_subr__unquote = __native_entry__() + 6672
_subr__validate_one = __native_entry__() + 22176
_subr__validate_utf8 = __native_entry__() + 30000
_subr__validate_utf8_fast = __native_entry__() + 30672
_subr__value = __native_entry__() + 12224
_subr__vnumber = __native_entry__() + 15616
_subr__vsigned = __native_entry__() + 17232
_subr__vstring = __native_entry__() + 14064
_subr__vunsigned = __native_entry__() + 17600
)
const (
_stack__f32toa = 56
_stack__f32toa = 48
_stack__f64toa = 80
_stack__get_by_path = 312
_stack__get_by_path = 304
_stack__html_escape = 64
_stack__i64toa = 16
_stack__lspace = 8
_stack__quote = 80
_stack__quote = 56
_stack__skip_array = 128
_stack__skip_number = 72
_stack__skip_object = 128
_stack__skip_one = 128
_stack__skip_one_fast = 208
_stack__skip_one_fast = 200
_stack__u64toa = 8
_stack__unquote = 128
_stack__unquote = 88
_stack__validate_one = 128
_stack__validate_utf8 = 48
_stack__validate_utf8_fast = 24
_stack__value = 368
_stack__vnumber = 280
_stack__value = 328
_stack__vnumber = 240
_stack__vsigned = 16
_stack__vstring = 128
_stack__vunsigned = 24
_stack__vstring = 136
_stack__vunsigned = 16
)
var (

View File

@ -122,7 +122,7 @@ func __validate_one(s *string, p *int, m *types.StateMachine) (ret int)
//go:nosplit
//go:noescape
//goland:noinspection GoUnusedParameter
func __get_by_path(s *string, p *int, path *[]interface{}) (ret int)
func __get_by_path(s *string, p *int, path *[]interface{}, m *types.StateMachine) (ret int)
//go:nosplit
//go:noescape

File diff suppressed because it is too large.

View File

@ -9,53 +9,53 @@
func __native_entry__() uintptr
var (
_subr__f32toa = __native_entry__() + 35216
_subr__f64toa = __native_entry__() + 752
_subr__get_by_path = __native_entry__() + 30384
_subr__html_escape = __native_entry__() + 11712
_subr__i64toa = __native_entry__() + 4368
_subr__lspace = __native_entry__() + 224
_subr__quote = __native_entry__() + 6160
_subr__skip_array = __native_entry__() + 22864
_subr__skip_number = __native_entry__() + 26928
_subr__skip_object = __native_entry__() + 24864
_subr__skip_one = __native_entry__() + 27088
_subr__skip_one_fast = __native_entry__() + 27504
_subr__u64toa = __native_entry__() + 4640
_subr__unquote = __native_entry__() + 8960
_subr__validate_one = __native_entry__() + 27152
_subr__validate_utf8 = __native_entry__() + 31552
_subr__validate_utf8_fast = __native_entry__() + 32496
_subr__value = __native_entry__() + 16816
_subr__vnumber = __native_entry__() + 20608
_subr__vsigned = __native_entry__() + 22144
_subr__vstring = __native_entry__() + 19312
_subr__vunsigned = __native_entry__() + 22496
_subr__f32toa = __native_entry__() + 33888
_subr__f64toa = __native_entry__() + 288
_subr__get_by_path = __native_entry__() + 28336
_subr__html_escape = __native_entry__() + 10496
_subr__i64toa = __native_entry__() + 3584
_subr__lspace = __native_entry__() + 64
_subr__quote = __native_entry__() + 5072
_subr__skip_array = __native_entry__() + 20688
_subr__skip_number = __native_entry__() + 24912
_subr__skip_object = __native_entry__() + 22736
_subr__skip_one = __native_entry__() + 25072
_subr__skip_one_fast = __native_entry__() + 25488
_subr__u64toa = __native_entry__() + 3696
_subr__unquote = __native_entry__() + 7888
_subr__validate_one = __native_entry__() + 25136
_subr__validate_utf8 = __native_entry__() + 30320
_subr__validate_utf8_fast = __native_entry__() + 31280
_subr__value = __native_entry__() + 15024
_subr__vnumber = __native_entry__() + 18352
_subr__vsigned = __native_entry__() + 19968
_subr__vstring = __native_entry__() + 17024
_subr__vunsigned = __native_entry__() + 20336
)
const (
_stack__f32toa = 56
_stack__f32toa = 48
_stack__f64toa = 80
_stack__get_by_path = 320
_stack__get_by_path = 296
_stack__html_escape = 72
_stack__i64toa = 16
_stack__lspace = 8
_stack__quote = 72
_stack__skip_array = 120
_stack__skip_number = 80
_stack__skip_object = 120
_stack__skip_one = 120
_stack__skip_one_fast = 216
_stack__quote = 56
_stack__skip_array = 128
_stack__skip_number = 72
_stack__skip_object = 128
_stack__skip_one = 128
_stack__skip_one_fast = 208
_stack__u64toa = 8
_stack__unquote = 128
_stack__validate_one = 120
_stack__unquote = 72
_stack__validate_one = 128
_stack__validate_utf8 = 48
_stack__validate_utf8_fast = 200
_stack__value = 368
_stack__vnumber = 280
_stack__validate_utf8_fast = 176
_stack__value = 328
_stack__vnumber = 240
_stack__vsigned = 16
_stack__vstring = 104
_stack__vunsigned = 24
_stack__vstring = 112
_stack__vunsigned = 16
)
var (

View File

@ -94,7 +94,7 @@ func SkipOneFast(s *string, p *int) int
//go:nosplit
//go:noescape
//goland:noinspection GoUnusedParameter
func GetByPath(s *string, p *int, path *[]interface{}) int
func GetByPath(s *string, p *int, path *[]interface{}, m *types.StateMachine) int
//go:nosplit
//go:noescape

View File

@ -72,7 +72,7 @@ TEXT ·SkipOneFast(SB), NOSPLIT, $0 - 24
JMP github·combytedancesonicinternalnativeavx·__skip_one_fast(SB)
JMP github·combytedancesonicinternalnativesse·__skip_one_fast(SB)
TEXT ·GetByPath(SB), NOSPLIT, $0 - 32
TEXT ·GetByPath(SB), NOSPLIT, $0 - 40
CMPB github·combytedancesonicinternalcpu·HasAVX2(SB), $0
JE 2(PC)
JMP github·combytedancesonicinternalnativeavx2·__get_by_path(SB)

View File

@ -120,7 +120,7 @@ func __validate_one(s *string, p *int, m *types.StateMachine) (ret int)
//go:nosplit
//go:noescape
//goland:noinspection GoUnusedParameter
func __get_by_path(s *string, p *int, path *[]interface{}) (ret int)
func __get_by_path(s *string, p *int, path *[]interface{}, m *types.StateMachine) (ret int)
//go:nosplit
//go:noescape

View File

@ -122,7 +122,7 @@ func __validate_one(s *string, p *int, m *types.StateMachine) (ret int)
//go:nosplit
//go:noescape
//goland:noinspection GoUnusedParameter
func __get_by_path(s *string, p *int, path *[]interface{}) (ret int)
func __get_by_path(s *string, p *int, path *[]interface{}, m *types.StateMachine) (ret int)
//go:nosplit
//go:noescape

File diff suppressed because it is too large.

View File

@ -9,53 +9,53 @@
func __native_entry__() uintptr
var (
_subr__f32toa = __native_entry__() + 28688
_subr__f64toa = __native_entry__() + 464
_subr__get_by_path = __native_entry__() + 26432
_subr__html_escape = __native_entry__() + 9584
_subr__i64toa = __native_entry__() + 3744
_subr__lspace = __native_entry__() + 80
_subr__quote = __native_entry__() + 5472
_subr__skip_array = __native_entry__() + 19184
_subr__skip_number = __native_entry__() + 22528
_subr__skip_object = __native_entry__() + 21088
_subr__skip_one = __native_entry__() + 22688
_subr__skip_one_fast = __native_entry__() + 22912
_subr__u64toa = __native_entry__() + 4016
_subr__unquote = __native_entry__() + 7184
_subr__validate_one = __native_entry__() + 22736
_subr__validate_utf8 = __native_entry__() + 27456
_subr__validate_utf8_fast = __native_entry__() + 28128
_subr__value = __native_entry__() + 13216
_subr__vnumber = __native_entry__() + 16928
_subr__vsigned = __native_entry__() + 18464
_subr__vstring = __native_entry__() + 15408
_subr__vunsigned = __native_entry__() + 18816
_subr__f32toa = __native_entry__() + 31760
_subr__f64toa = __native_entry__() + 160
_subr__get_by_path = __native_entry__() + 26384
_subr__html_escape = __native_entry__() + 9072
_subr__i64toa = __native_entry__() + 3424
_subr__lspace = __native_entry__() + 16
_subr__quote = __native_entry__() + 4864
_subr__skip_array = __native_entry__() + 18112
_subr__skip_number = __native_entry__() + 22128
_subr__skip_object = __native_entry__() + 20512
_subr__skip_one = __native_entry__() + 22288
_subr__skip_one_fast = __native_entry__() + 22512
_subr__u64toa = __native_entry__() + 3552
_subr__unquote = __native_entry__() + 6704
_subr__validate_one = __native_entry__() + 22336
_subr__validate_utf8 = __native_entry__() + 30528
_subr__validate_utf8_fast = __native_entry__() + 31200
_subr__value = __native_entry__() + 12272
_subr__vnumber = __native_entry__() + 15728
_subr__vsigned = __native_entry__() + 17376
_subr__vstring = __native_entry__() + 14112
_subr__vunsigned = __native_entry__() + 17760
)
const (
_stack__f32toa = 56
_stack__f32toa = 48
_stack__f64toa = 80
_stack__get_by_path = 264
_stack__get_by_path = 240
_stack__html_escape = 64
_stack__i64toa = 16
_stack__lspace = 8
_stack__quote = 80
_stack__quote = 64
_stack__skip_array = 128
_stack__skip_number = 72
_stack__skip_object = 128
_stack__skip_one = 128
_stack__skip_one_fast = 160
_stack__skip_one_fast = 136
_stack__u64toa = 8
_stack__unquote = 128
_stack__unquote = 88
_stack__validate_one = 128
_stack__validate_utf8 = 48
_stack__validate_utf8_fast = 24
_stack__value = 368
_stack__vnumber = 280
_stack__value = 328
_stack__vnumber = 240
_stack__vsigned = 16
_stack__vstring = 128
_stack__vunsigned = 24
_stack__vstring = 136
_stack__vunsigned = 16
)
var (

View File

@ -76,6 +76,10 @@
ERR_FLOAT_INFINITY ParsingError = 8
ERR_MISMATCH ParsingError = 9
ERR_INVALID_UTF8 ParsingError = 10
// error codes used in ast
ERR_NOT_FOUND ParsingError = 33
ERR_UNSUPPORT_TYPE ParsingError = 34
)
var _ParsingErrors = []string{

View File

@ -152,8 +152,8 @@ func resolveFields(vt reflect.Type) []FieldMeta {
}
/* get the index to the last offset */
fvt := fv.typ
idx := len(path) - 1
fvt := path[idx].Type
/* do not dereference into fields */
if path[idx].Kind == F_deref {

View File

@ -87,7 +87,6 @@ func GuardSlice(buf *[]byte, n int) {
copy(tmp, *buf)
*buf = tmp
}
return
}
//go:nosplit

View File

@ -22,14 +22,23 @@
)
var (
reflectRtypeItab = findReflectRtypeItab()
)
// GoType.KindFlags const
const (
F_direct = 1 << 5
F_kind_mask = (1 << 5) - 1
)
// GoType.Flags const
const (
tflagUncommon uint8 = 1 << 0
tflagExtraStar uint8 = 1 << 1
tflagNamed uint8 = 1 << 2
tflagRegularMemory uint8 = 1 << 3
)
type GoType struct {
Size uintptr
PtrData uintptr
@ -44,6 +53,10 @@ type GoType struct {
PtrToSelf int32
}
func (self *GoType) IsNamed() bool {
return (self.Flags & tflagNamed) != 0
}
func (self *GoType) Kind() reflect.Kind {
return reflect.Kind(self.KindFlags & F_kind_mask)
}

View File

@ -152,6 +152,7 @@ type StackMapBuilder struct {
b Bitmap
}
//go:nocheckptr
func (self *StackMapBuilder) Build() (p *StackMap) {
nb := len(self.b.B)
bm := mallocgc(_StackMapSize + uintptr(nb) - 1, byteType, false)

Binary file not shown.


Binary file not shown.


View File

@ -1,5 +1,5 @@
// go:build go1.15 && !go1.18
// +build go1.15,!go1.18
//go:build go1.15 && !go1.16
// +build go1.15,!go1.16
/*
* Copyright 2021 ByteDance Inc.
@ -28,7 +28,7 @@
)
const (
_Magic uint32 = 0xfffffff0
_Magic uint32 = 0xfffffffa
)
type pcHeader struct {
@ -38,7 +38,6 @@ type pcHeader struct {
ptrSize uint8 // size of a ptr in bytes
nfunc int // number of functions in the module
nfiles uint // number of entries in the file tab
textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text
funcnameOffset uintptr // offset to the funcnametab variable from pcHeader
cuOffset uintptr // offset to the cutab variable from pcHeader
filetabOffset uintptr // offset to the filetab variable from pcHeader
@ -64,9 +63,7 @@ funcnametab []byte
noptrbss, enoptrbss uintptr
end, gcdata, gcbss uintptr
types, etypes uintptr
rodata uintptr
gofunc uintptr // go.func.* is actual funcinfo object in image
textsectmap []textSection // see runtime/symtab.go: textAddr()
typelinks []int32 // offsets from types
itablinks []*rt.GoItab
@ -91,7 +88,7 @@ funcnametab []byte
}
type _func struct {
entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
entry uintptr // start pc (absolute function entry address)
nameOff int32 // function name, as index into moduledata.funcnametab.
args int32 // in/out args size
@ -103,8 +100,7 @@ type _func struct {
npcdata uint32
cuOffset uint32 // runtime.cutab offset of this function's CU
funcID uint8 // set for certain special runtime functions
flag uint8
_ [1]byte // pad
_ [2]byte // pad
nfuncdata uint8 //
// The end of the struct is followed immediately by two variable-length
@ -131,8 +127,8 @@ funcID uint8 // set for certain special runtime functions
}
type funcTab struct {
entry uint32
funcoff uint32
entry uintptr
funcoff uintptr
}
type bitVector struct {
@ -170,6 +166,11 @@ type findfuncbucket struct {
_SUBBUCKETS [16]byte
}
type compilationUnit struct {
fileNames []string
}
// func name table format:
// nameOff[0] -> namePartA namePartB namePartC \x00
// nameOff[1] -> namePartA namePartB namePartC \x00
@ -192,10 +193,6 @@ func makeFuncnameTab(funcs []Func) (tab []byte, offs []int32) {
return
}
type compilationUnit struct {
fileNames []string
}
// CU table format:
// cuOffsets[0] -> filetabOffset[0] filetabOffset[1] ... filetabOffset[len(CUs[0].fileNames)-1]
// cuOffsets[1] -> filetabOffset[len(CUs[0].fileNames)] ... filetabOffset[len(CUs[0].fileNames) + len(CUs[1].fileNames)-1]
@ -266,68 +263,76 @@ funcdataOffs[i] = append(funcdataOffs[i], _INVALID_FUNCDATA_OFFSET)
return
}
func makeFtab(funcs []_func, lastFuncSize uint32) (ftab []funcTab) {
func makeFtab(funcs []_func, lastFuncSize uint32) (ftab []funcTab, pclntabSize int64, startLocations []uint32) {
// Allocate space for the pc->func table. This structure consists of a pc offset
// and an offset to the func structure. After that, we have a single pc
// value that marks the end of the last function in the binary.
var size int64 = int64(len(funcs)*2*4 + 4)
var startLocations = make([]uint32, len(funcs))
pclntabSize = int64(len(funcs)*2*int(_PtrSize) + int(_PtrSize))
startLocations = make([]uint32, len(funcs))
for i, f := range funcs {
size = rnd(size, int64(_PtrSize))
pclntabSize = rnd(pclntabSize, int64(_PtrSize))
//writePCToFunc
startLocations[i] = uint32(size)
size += int64(uint8(_FUNC_SIZE)+f.nfuncdata*4+uint8(f.npcdata)*4)
startLocations[i] = uint32(pclntabSize)
pclntabSize += int64(uint8(_FUNC_SIZE) + f.nfuncdata*_PtrSize + uint8(f.npcdata)*4)
}
ftab = make([]funcTab, 0, len(funcs)+1)
// write a map of pc->func info offsets
for i, f := range funcs {
ftab = append(ftab, funcTab{uint32(f.entryOff), uint32(startLocations[i])})
ftab = append(ftab, funcTab{uintptr(f.entry), uintptr(startLocations[i])})
}
// Final entry of table is just end pc offset.
lastFunc := funcs[len(funcs)-1]
ftab = append(ftab, funcTab{uint32(lastFunc.entryOff + lastFuncSize), 0})
ftab = append(ftab, funcTab{lastFunc.entry + uintptr(lastFuncSize), 0})
return
}
// Pcln table format: [...]funcTab + [...]_Func
func makePclntable(funcs []_func, lastFuncSize uint32, pcdataOffs [][]uint32, funcdataOffs [][]uint32) (pclntab []byte) {
// Allocate space for the pc->func table. This structure consists of a pc offset
// and an offset to the func structure. After that, we have a single pc
// value that marks the end of the last function in the binary.
var size int64 = int64(len(funcs)*2*4 + 4)
var startLocations = make([]uint32, len(funcs))
for i := range funcs {
size = rnd(size, int64(_PtrSize))
//writePCToFunc
startLocations[i] = uint32(size)
size += int64(int(_FUNC_SIZE)+len(funcdataOffs[i])*4+len(pcdataOffs[i])*4)
}
func makePclntable(size int64, startLocations []uint32, funcs []_func, lastFuncSize uint32, pcdataOffs [][]uint32, funcdataAddr uintptr, funcdataOffs [][]uint32) (pclntab []byte) {
pclntab = make([]byte, size, size)
// write a map of pc->func info offsets
offs := 0
for i, f := range funcs {
byteOrder.PutUint32(pclntab[offs:offs+4], uint32(f.entryOff))
byteOrder.PutUint32(pclntab[offs+4:offs+8], uint32(startLocations[i]))
offs += 8
byteOrder.PutUint64(pclntab[offs:offs+8], uint64(f.entry))
byteOrder.PutUint64(pclntab[offs+8:offs+16], uint64(startLocations[i]))
offs += 16
}
// Final entry of table is just end pc offset.
lastFunc := funcs[len(funcs)-1]
byteOrder.PutUint32(pclntab[offs:offs+4], uint32(lastFunc.entryOff+lastFuncSize))
byteOrder.PutUint64(pclntab[offs:offs+8], uint64(lastFunc.entry)+uint64(lastFuncSize))
offs += 8
// write func info table
for i, f := range funcs {
off := startLocations[i]
// write _func structure to pclntab
fb := rt.BytesFrom(unsafe.Pointer(&f), int(_FUNC_SIZE), int(_FUNC_SIZE))
copy(pclntab[off:off+uint32(_FUNC_SIZE)], fb)
off += uint32(_FUNC_SIZE)
byteOrder.PutUint64(pclntab[off:off+8], uint64(f.entry))
off += 8
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.nameOff))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.args))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.deferreturn))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcsp))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcfile))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcln))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.npcdata))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.cuOffset))
off += 4
pclntab[off] = f.funcID
// NOTICE: _[2]byte alignment
off += 3
pclntab[off] = f.nfuncdata
off += 1
// NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3
for j := 3; j < len(pcdataOffs[i]); j++ {
@ -335,12 +340,17 @@ func makePclntable(funcs []_func, lastFuncSize uint32, pcdataOffs [][]uint32, fu
off += 4
}
off = uint32(rnd(int64(off), int64(_PtrSize)))
// funcdata refs as offsets from gofunc
for _, funcdata := range funcdataOffs[i] {
byteOrder.PutUint32(pclntab[off:off+4], uint32(funcdata))
off += 4
if funcdata == _INVALID_FUNCDATA_OFFSET {
byteOrder.PutUint64(pclntab[off:off+8], 0)
} else {
byteOrder.PutUint64(pclntab[off:off+8], uint64(funcdataAddr)+uint64(funcdata))
}
off += 8
}
}
return
@ -364,14 +374,14 @@ func writeFindfunctab(out *[]byte, ftab []funcTab) (start int) {
tab := make([]findfuncbucket, 0, nbuckets)
var s, e = 0, 0
for i := 0; i<int(nbuckets); i++ {
var pc = min + uint32((i+1)*_BUCKETSIZE)
var pc = min + uintptr((i+1)*_BUCKETSIZE)
// find the end func of the bucket
for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ {}
// store the start func of the bucket
var fb = findfuncbucket{idx: uint32(s)}
for j := 0; j<_SUBBUCKETS && (i*_SUBBUCKETS+j)<int(n); j++ {
pc = min + uint32(i*_BUCKETSIZE) + uint32((j+1)*_SUB_BUCKETSIZE)
pc = min + uintptr(i*_BUCKETSIZE) + uintptr((j+1)*_SUB_BUCKETSIZE)
var ss = s
// find the end func of the subbucket
for ; ss < len(ftab)-1 && ftab[ss+1].entry <= pc; ss++ {}
@ -408,29 +418,6 @@ func makeModuledata(name string, filenames []string, funcs []Func, text []byte)
funcnametab, nameOffs := makeFuncnameTab(funcs)
mod.funcnametab = funcnametab
// make pcdata table
// NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata
pctab, pcdataOffs, _funcs := makePctab(funcs, cuOffs, nameOffs)
mod.pctab = pctab
// write func data
// NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata
// TODO: estimate accurate capacity
cache := make([]byte, 0, len(funcs)*int(_PtrSize))
fstart, funcdataOffs := writeFuncdata(&cache, funcs)
// make pc->func (binary search) func table
lastFuncsize := funcs[len(funcs)-1].TextSize
ftab := makeFtab(_funcs, lastFuncsize)
mod.ftab = ftab
// write pc->func (modmap) findfunc table
ffstart := writeFindfunctab(&cache, ftab)
// make pclnt table
pclntab := makePclntable(_funcs, lastFuncsize, pcdataOffs, funcdataOffs)
mod.pclntable = pclntab
// mmap() text and funcdata segements
p := os.Getpagesize()
size := int(rnd(int64(len(text)), int64(p)))
@ -441,18 +428,41 @@ funcnametab, nameOffs := makeFuncnameTab(funcs)
// make it executable
mprotect(addr, size)
// assign addresses
mod.text = addr
mod.etext = addr + uintptr(size)
mod.minpc = addr
mod.maxpc = addr + uintptr(len(text))
// make pcdata table
// NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata
pctab, pcdataOffs, _funcs := makePctab(funcs, addr, cuOffs, nameOffs)
mod.pctab = pctab
// write func data
// NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata
// TODO: estimate accurate capacity
cache := make([]byte, 0, len(funcs)*int(_PtrSize))
fstart, funcdataOffs := writeFuncdata(&cache, funcs)
// make pc->func (binary search) func table
lastFuncsize := funcs[len(funcs)-1].TextSize
ftab, pclntSize, startLocations := makeFtab(_funcs, lastFuncsize)
mod.ftab = ftab
// write pc->func (modmap) findfunc table
ffstart := writeFindfunctab(&cache, ftab)
// cache funcdata and findfuncbucket
moduleCache.Lock()
moduleCache.m[mod] = cache
moduleCache.Unlock()
mod.gofunc = uintptr(unsafe.Pointer(&cache[fstart]))
mod.findfunctab = uintptr(unsafe.Pointer(&cache[ffstart]))
mod.findfunctab = uintptr(rt.IndexByte(cache, ffstart))
funcdataAddr := uintptr(rt.IndexByte(cache, fstart))
// make pclnt table
pclntab := makePclntable(pclntSize, startLocations, _funcs, lastFuncsize, pcdataOffs, funcdataAddr, funcdataOffs)
mod.pclntable = pclntab
// assign addresses
mod.text = addr
mod.etext = addr + uintptr(size)
mod.minpc = addr
mod.maxpc = addr + uintptr(len(text))
// make pc header
mod.pcHeader = &pcHeader {
@ -461,7 +471,6 @@ funcnametab, nameOffs := makeFuncnameTab(funcs)
ptrSize : _PtrSize,
nfunc : len(funcs),
nfiles: uint(len(cu)),
textStart: mod.text,
funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"),
cuOffset: getOffsetOf(moduledata{}, "cutab"),
filetabOffset: getOffsetOf(moduledata{}, "filetab"),
@ -478,7 +487,7 @@ funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"),
// makePctab generates pcdelta->valuedelta tables for functions,
// and returns the table and the entry offset of every kind of pcdata in the table.
func makePctab(funcs []Func, cuOffset []uint32, nameOffset []int32) (pctab []byte, pcdataOffs [][]uint32, _funcs []_func) {
func makePctab(funcs []Func, addr uintptr, cuOffset []uint32, nameOffset []int32) (pctab []byte, pcdataOffs [][]uint32, _funcs []_func) {
_funcs = make([]_func, len(funcs))
// Pctab offsets of 0 are considered invalid in the runtime. We respect
@ -523,7 +532,7 @@ func makePctab(funcs []Func, cuOffset []uint32, nameOffset []int32) (pctab []byt
writer(f.PcInlTreeIndex)
writer(f.PcArgLiveIndex)
_f.entryOff = f.EntryOff
_f.entry = addr + uintptr(f.EntryOff)
_f.nameOff = nameOffset[i]
_f.args = f.ArgsSize
_f.deferreturn = f.DeferReturn
@ -531,7 +540,6 @@ func makePctab(funcs []Func, cuOffset []uint32, nameOffset []int32) (pctab []byt
_f.npcdata = uint32(_N_PCDATA)
_f.cuOffset = cuOffset[i]
_f.funcID = f.ID
_f.flag = f.Flag
_f.nfuncdata = uint8(_N_FUNCDATA)
}

View File

@ -0,0 +1,549 @@
//go:build go1.16 && !go1.18
// +build go1.16,!go1.18
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`encoding`
`os`
`unsafe`
`github.com/bytedance/sonic/internal/rt`
)
const (
_Magic uint32 = 0xfffffffa
)
type pcHeader struct {
magic uint32 // 0xFFFFFFFA
pad1, pad2 uint8 // 0,0
minLC uint8 // min instruction size
ptrSize uint8 // size of a ptr in bytes
nfunc int // number of functions in the module
nfiles uint // number of entries in the file tab
funcnameOffset uintptr // offset to the funcnametab variable from pcHeader
cuOffset uintptr // offset to the cutab variable from pcHeader
filetabOffset uintptr // offset to the filetab variable from pcHeader
pctabOffset uintptr // offset to the pctab variable from pcHeader
pclnOffset uintptr // offset to the pclntab variable from pcHeader
}
type moduledata struct {
pcHeader *pcHeader
funcnametab []byte
cutab []uint32
filetab []byte
pctab []byte
pclntable []byte
ftab []funcTab
findfunctab uintptr
minpc, maxpc uintptr // first func address, last func address + last func size
text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC
noptrdata, enoptrdata uintptr
data, edata uintptr
bss, ebss uintptr
noptrbss, enoptrbss uintptr
end, gcdata, gcbss uintptr
types, etypes uintptr
textsectmap []textSection // see runtime/symtab.go: textAddr()
typelinks []int32 // offsets from types
itablinks []*rt.GoItab
ptab []ptabEntry
pluginpath string
pkghashes []modulehash
modulename string
modulehashes []modulehash
hasmain uint8 // 1 if module contains the main function, 0 otherwise
gcdatamask, gcbssmask bitVector
typemap map[int32]*rt.GoType // offset to *_rtype in previous module
bad bool // module failed to load and should be ignored
next *moduledata
}
type _func struct {
entry uintptr // start pc of the function (an absolute address here: moduledata.text + the function's entry offset)
nameOff int32 // function name, as index into moduledata.funcnametab.
args int32 // in/out args size
deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
pcsp uint32
pcfile uint32
pcln uint32
npcdata uint32
cuOffset uint32 // runtime.cutab offset of this function's CU
funcID uint8 // set for certain special runtime functions
_ [2]byte // pad
nfuncdata uint8 //
// The end of the struct is followed immediately by two variable-length
// arrays that reference the pcdata and funcdata locations for this
// function.
// pcdata contains the offset into moduledata.pctab for the start of
// that index's table. e.g.,
// &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
// the unsafe point table.
//
// An offset of 0 indicates that there is no table.
//
// pcdata [npcdata]uint32
// funcdata contains the offset past moduledata.gofunc which contains a
// pointer to that index's funcdata. e.g.,
// *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
// the argument pointer map.
//
// An offset of ^uint32(0) indicates that there is no entry.
//
// funcdata [nfuncdata]uint32
}
type funcTab struct {
entry uintptr
funcoff uintptr
}
type bitVector struct {
n int32 // # of bits
bytedata *uint8
}
type ptabEntry struct {
name int32
typ int32
}
type textSection struct {
vaddr uintptr // prelinked section vaddr
end uintptr // vaddr + section length
baseaddr uintptr // relocated section address
}
type modulehash struct {
modulename string
linktimehash string
runtimehash *string
}
// The findfunc table is an array of these findfuncbucket structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index. Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
idx uint32
_SUBBUCKETS [16]byte
}
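For illustration, a minimal, self-contained sketch of the lookup the comment above describes; the constant values and names (bucketSize, subbuckets, funcIndex) are hypothetical stand-ins for the _BUCKETSIZE/_SUBBUCKETS/_SUB_BUCKETSIZE constants used in this file.

// Hypothetical sketch: map a pc to a starting func index via bucket/subbucket,
// after which a caller would scan the functab forward for the exact function.
package main

import "fmt"

const (
	bucketSize    = 4096 // assumed to mirror _BUCKETSIZE
	subBuckets    = 16   // assumed to mirror _SUBBUCKETS
	subBucketSize = bucketSize / subBuckets
)

type findfuncbucket struct {
	idx        uint32
	subbuckets [subBuckets]byte
}

func funcIndex(tab []findfuncbucket, minpc, pc uintptr) uint32 {
	off := pc - minpc
	b := off / bucketSize                   // which 4K bucket
	s := (off % bucketSize) / subBucketSize // which subbucket inside it
	fb := tab[b]
	return fb.idx + uint32(fb.subbuckets[s]) // base idx + subbucket delta
}

func main() {
	tab := []findfuncbucket{{idx: 0, subbuckets: [subBuckets]byte{0, 0, 1, 1, 2}}}
	fmt.Println(funcIndex(tab, 0x1000, 0x1000+2*subBucketSize+8)) // 1
}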
type compilationUnit struct {
fileNames []string
}
// func name table format:
// nameOff[0] -> namePartA namePartB namePartC \x00
// nameOff[1] -> namePartA namePartB namePartC \x00
// ...
func makeFuncnameTab(funcs []Func) (tab []byte, offs []int32) {
offs = make([]int32, len(funcs))
offset := 0
for i, f := range funcs {
offs[i] = int32(offset)
a, b, c := funcNameParts(f.Name)
tab = append(tab, a...)
tab = append(tab, b...)
tab = append(tab, c...)
tab = append(tab, 0)
offset += len(a) + len(b) + len(c) + 1
}
return
}
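For illustration, a minimal sketch of reading a name back out of a table laid out as documented above (concatenated entries, each NUL-terminated, indexed by nameOff); funcName is a hypothetical helper, not part of this package.

// Hypothetical sketch: resolve a nameOff against a funcnametab built by
// makeFuncnameTab.
package main

import (
	"bytes"
	"fmt"
)

func funcName(funcnametab []byte, nameOff int32) string {
	rest := funcnametab[nameOff:]
	if i := bytes.IndexByte(rest, 0); i >= 0 {
		return string(rest[:i])
	}
	return string(rest)
}

func main() {
	tab := append([]byte("main.main\x00"), "fmt.Println\x00"...)
	fmt.Println(funcName(tab, 10)) // fmt.Println
}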
// CU table format:
// cuOffsets[0] -> filetabOffset[0] filetabOffset[1] ... filetabOffset[len(CUs[0].fileNames)-1]
// cuOffsets[1] -> filetabOffset[len(CUs[0].fileNames)] ... filetabOffset[len(CUs[0].fileNames) + len(CUs[1].fileNames)-1]
// ...
//
// file name table format:
// filetabOffset[0] -> CUs[0].fileNames[0] \x00
// ...
// filetabOffset[len(CUs[0].fileNames)-1] -> CUs[0].fileNames[len(CUs[0].fileNames)-1] \x00
// ...
// filetabOffset[SUM(CUs,fileNames)-1] -> CUs[len(CU)-1].fileNames[len(CUs[len(CU)-1].fileNames)-1] \x00
func makeFilenametab(cus []compilationUnit) (cutab []uint32, filetab []byte, cuOffsets []uint32) {
cuOffsets = make([]uint32, len(cus))
cuOffset := 0
fileOffset := 0
for i, cu := range cus {
cuOffsets[i] = uint32(cuOffset)
for _, name := range cu.fileNames {
cutab = append(cutab, uint32(fileOffset))
fileOffset += len(name) + 1
filetab = append(filetab, name...)
filetab = append(filetab, 0)
}
cuOffset += len(cu.fileNames)
}
return
}
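Likewise, a minimal sketch of the two-level lookup the CU/file-name format above implies: cutab maps (cuOffset + file index) to an offset into filetab, which holds NUL-terminated names. fileName is a hypothetical helper.

// Hypothetical sketch: resolve a file name from cutab/filetab as laid out by
// makeFilenametab, given a function's cuOffset and a file index within its CU.
package main

import (
	"bytes"
	"fmt"
)

func fileName(cutab []uint32, filetab []byte, cuOffset, fileIndex uint32) string {
	off := cutab[cuOffset+fileIndex]
	rest := filetab[off:]
	if i := bytes.IndexByte(rest, 0); i >= 0 {
		return string(rest[:i])
	}
	return string(rest)
}

func main() {
	filetab := []byte("a.go\x00b.go\x00") // one CU with two files
	cutab := []uint32{0, 5}
	fmt.Println(fileName(cutab, filetab, 0, 1)) // b.go
}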
func writeFuncdata(out *[]byte, funcs []Func) (fstart int, funcdataOffs [][]uint32) {
fstart = len(*out)
*out = append(*out, byte(0))
offs := uint32(1)
funcdataOffs = make([][]uint32, len(funcs))
for i, f := range funcs {
var writer = func(fd encoding.BinaryMarshaler) {
var ab []byte
var err error
if fd != nil {
ab, err = fd.MarshalBinary()
if err != nil {
panic(err)
}
funcdataOffs[i] = append(funcdataOffs[i], offs)
} else {
ab = []byte{0}
funcdataOffs[i] = append(funcdataOffs[i], _INVALID_FUNCDATA_OFFSET)
}
*out = append(*out, ab...)
offs += uint32(len(ab))
}
writer(f.ArgsPointerMaps)
writer(f.LocalsPointerMaps)
writer(f.StackObjects)
writer(f.InlTree)
writer(f.OpenCodedDeferInfo)
writer(f.ArgInfo)
writer(f.ArgLiveInfo)
writer(f.WrapInfo)
}
return
}
func makeFtab(funcs []_func, lastFuncSize uint32) (ftab []funcTab, pclntabSize int64, startLocations []uint32) {
// Allocate space for the pc->func table. This structure consists of a pc offset
// and an offset to the func structure. After that, we have a single pc
// value that marks the end of the last function in the binary.
pclntabSize = int64(len(funcs)*2*int(_PtrSize) + int(_PtrSize))
startLocations = make([]uint32, len(funcs))
for i, f := range funcs {
pclntabSize = rnd(pclntabSize, int64(_PtrSize))
//writePCToFunc
startLocations[i] = uint32(pclntabSize)
pclntabSize += int64(uint8(_FUNC_SIZE) + f.nfuncdata*_PtrSize + uint8(f.npcdata)*4)
}
ftab = make([]funcTab, 0, len(funcs)+1)
// write a map of pc->func info offsets
for i, f := range funcs {
ftab = append(ftab, funcTab{uintptr(f.entry), uintptr(startLocations[i])})
}
// Final entry of table is just end pc offset.
lastFunc := funcs[len(funcs)-1]
ftab = append(ftab, funcTab{lastFunc.entry + uintptr(lastFuncSize), 0})
return
}
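For illustration, a minimal sketch of the binary search this table exists for: entries are sorted by entry pc and the final sentinel holds the end pc, so a pc can be mapped to a func info offset with sort.Search. findFuncOff is a hypothetical helper, not the runtime's findfunc.

// Hypothetical sketch: look up the func info offset for a pc in a funcTab
// shaped like the one makeFtab produces.
package main

import (
	"fmt"
	"sort"
)

type funcTab struct {
	entry   uintptr
	funcoff uintptr
}

func findFuncOff(ftab []funcTab, pc uintptr) (uintptr, bool) {
	n := len(ftab) - 1 // last entry is the end-pc sentinel
	if n < 1 || pc < ftab[0].entry || pc >= ftab[n].entry {
		return 0, false
	}
	// first entry whose successor starts beyond pc
	i := sort.Search(n, func(i int) bool { return ftab[i+1].entry > pc })
	return ftab[i].funcoff, true
}

func main() {
	ftab := []funcTab{{0x1000, 8}, {0x1040, 96}, {0x1080, 0}} // last entry: end pc
	fmt.Println(findFuncOff(ftab, 0x1044)) // 96 true
}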
// Pcln table format: [...]funcTab + [...]_Func
func makePclntable(size int64, startLocations []uint32, funcs []_func, lastFuncSize uint32, pcdataOffs [][]uint32, funcdataAddr uintptr, funcdataOffs [][]uint32) (pclntab []byte) {
pclntab = make([]byte, size, size)
// write a map of pc->func info offsets
offs := 0
for i, f := range funcs {
byteOrder.PutUint64(pclntab[offs:offs+8], uint64(f.entry))
byteOrder.PutUint64(pclntab[offs+8:offs+16], uint64(startLocations[i]))
offs += 16
}
// Final entry of table is just end pc offset.
lastFunc := funcs[len(funcs)-1]
byteOrder.PutUint64(pclntab[offs:offs+8], uint64(lastFunc.entry)+uint64(lastFuncSize))
offs += 8
// write func info table
for i, f := range funcs {
off := startLocations[i]
// write _func structure to pclntab
byteOrder.PutUint64(pclntab[off:off+8], uint64(f.entry))
off += 8
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.nameOff))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.args))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.deferreturn))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcsp))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcfile))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcln))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.npcdata))
off += 4
byteOrder.PutUint32(pclntab[off:off+4], uint32(f.cuOffset))
off += 4
pclntab[off] = f.funcID
// NOTICE: _[2]byte alignment
off += 3
pclntab[off] = f.nfuncdata
off += 1
// NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3
for j := 3; j < len(pcdataOffs[i]); j++ {
byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j]))
off += 4
}
off = uint32(rnd(int64(off), int64(_PtrSize)))
// funcdata refs as offsets from gofunc
for _, funcdata := range funcdataOffs[i] {
if funcdata == _INVALID_FUNCDATA_OFFSET {
byteOrder.PutUint64(pclntab[off:off+8], 0)
} else {
byteOrder.PutUint64(pclntab[off:off+8], uint64(funcdataAddr)+uint64(funcdata))
}
off += 8
}
}
return
}
// The findfunc table maps a pc to the func it belongs to,
// yielding an index into the func table.
//
// All text sections are divided into buckets of size _BUCKETSIZE (4K);
// every bucket is divided into _SUBBUCKETS subbuckets of size _SUB_BUCKETSIZE (64),
// and each bucket stores a base idx to which the offset in the jth subbucket is added.
// See findfunc() in runtime/symtab.go.
func writeFindfunctab(out *[]byte, ftab []funcTab) (start int) {
start = len(*out)
max := ftab[len(ftab)-1].entry
min := ftab[0].entry
nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE
n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE
tab := make([]findfuncbucket, 0, nbuckets)
var s, e = 0, 0
for i := 0; i<int(nbuckets); i++ {
var pc = min + uintptr((i+1)*_BUCKETSIZE)
// find the end func of the bucket
for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ {}
// store the start func of the bucket
var fb = findfuncbucket{idx: uint32(s)}
for j := 0; j<_SUBBUCKETS && (i*_SUBBUCKETS+j)<int(n); j++ {
pc = min + uintptr(i*_BUCKETSIZE) + uintptr((j+1)*_SUB_BUCKETSIZE)
var ss = s
// find the end func of the subbucket
for ; ss < len(ftab)-1 && ftab[ss+1].entry <= pc; ss++ {}
// store the start func of the subbucket
fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx)
s = ss
}
s = e
tab = append(tab, fb)
}
// write findfuncbucket
if len(tab) > 0 {
size := int(unsafe.Sizeof(findfuncbucket{}))*len(tab)
*out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...)
}
return
}
func makeModuledata(name string, filenames []string, funcs []Func, text []byte) (mod *moduledata) {
mod = new(moduledata)
mod.modulename = name
// make filename table
cu := make([]string, 0, len(filenames))
for _, f := range filenames {
cu = append(cu, f)
}
cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}})
mod.cutab = cutab
mod.filetab = filetab
// make funcname table
funcnametab, nameOffs := makeFuncnameTab(funcs)
mod.funcnametab = funcnametab
// mmap() text and funcdata segments
p := os.Getpagesize()
size := int(rnd(int64(len(text)), int64(p)))
addr := mmap(size)
// copy the machine code
s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size)
copy(s, text)
// make it executable
mprotect(addr, size)
// make pcdata table
// NOTICE: _func only uses offsets to index pcdata, so there is no need to mmap() pcdata
pctab, pcdataOffs, _funcs := makePctab(funcs, addr, cuOffs, nameOffs)
mod.pctab = pctab
// write func data
// NOTICE: _func uses mod.gofunc+offset to point directly at funcdata, so funcdata must be cached
// TODO: estimate accurate capacity
cache := make([]byte, 0, len(funcs)*int(_PtrSize))
fstart, funcdataOffs := writeFuncdata(&cache, funcs)
// make pc->func (binary search) func table
lastFuncsize := funcs[len(funcs)-1].TextSize
ftab, pclntSize, startLocations := makeFtab(_funcs, lastFuncsize)
mod.ftab = ftab
// write pc->func (modmap) findfunc table
ffstart := writeFindfunctab(&cache, ftab)
// cache funcdata and findfuncbucket
moduleCache.Lock()
moduleCache.m[mod] = cache
moduleCache.Unlock()
mod.findfunctab = uintptr(rt.IndexByte(cache, ffstart))
funcdataAddr := uintptr(rt.IndexByte(cache, fstart))
// make pclnt table
pclntab := makePclntable(pclntSize, startLocations, _funcs, lastFuncsize, pcdataOffs, funcdataAddr, funcdataOffs)
mod.pclntable = pclntab
// assign addresses
mod.text = addr
mod.etext = addr + uintptr(size)
mod.minpc = addr
mod.maxpc = addr + uintptr(len(text))
// make pc header
mod.pcHeader = &pcHeader {
magic : _Magic,
minLC : _MinLC,
ptrSize : _PtrSize,
nfunc : len(funcs),
nfiles: uint(len(cu)),
funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"),
cuOffset: getOffsetOf(moduledata{}, "cutab"),
filetabOffset: getOffsetOf(moduledata{}, "filetab"),
pctabOffset: getOffsetOf(moduledata{}, "pctab"),
pclnOffset: getOffsetOf(moduledata{}, "pclntable"),
}
// special case: gcdata and gcbss must be non-empty
mod.gcdata = uintptr(unsafe.Pointer(&emptyByte))
mod.gcbss = uintptr(unsafe.Pointer(&emptyByte))
return
}
// makePctab generates pcdelta->valuedelta tables for functions,
// and returns the table and the entry offset of every kind of pcdata in the table.
func makePctab(funcs []Func, addr uintptr, cuOffset []uint32, nameOffset []int32) (pctab []byte, pcdataOffs [][]uint32, _funcs []_func) {
_funcs = make([]_func, len(funcs))
// Pctab offsets of 0 are considered invalid in the runtime. We respect
// that by just padding a single byte at the beginning of runtime.pctab,
// that way no real offsets can be zero.
pctab = make([]byte, 1, 12*len(funcs)+1)
pcdataOffs = make([][]uint32, len(funcs))
for i, f := range funcs {
_f := &_funcs[i]
var writer = func(pc *Pcdata) {
var ab []byte
var err error
if pc != nil {
ab, err = pc.MarshalBinary()
if err != nil {
panic(err)
}
pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab)))
} else {
ab = []byte{0}
pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET)
}
pctab = append(pctab, ab...)
}
if f.Pcsp != nil {
_f.pcsp = uint32(len(pctab))
}
writer(f.Pcsp)
if f.Pcfile != nil {
_f.pcfile = uint32(len(pctab))
}
writer(f.Pcfile)
if f.Pcline != nil {
_f.pcln = uint32(len(pctab))
}
writer(f.Pcline)
writer(f.PcUnsafePoint)
writer(f.PcStackMapIndex)
writer(f.PcInlTreeIndex)
writer(f.PcArgLiveIndex)
_f.entry = addr + uintptr(f.EntryOff)
_f.nameOff = nameOffset[i]
_f.args = f.ArgsSize
_f.deferreturn = f.DeferReturn
// NOTICE: _func.pcdata always covers [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)]
_f.npcdata = uint32(_N_PCDATA)
_f.cuOffset = cuOffset[i]
_f.funcID = f.ID
_f.nfuncdata = uint8(_N_FUNCDATA)
}
return
}
func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) {}

View File

@ -1,5 +1,5 @@
//go:build go1.15 && !go1.18
// +build go1.15,!go1.18
//go:build go1.15 && !go1.16
// +build go1.15,!go1.16
/*
* Copyright 2021 ByteDance Inc.
@ -20,14 +20,9 @@
package loader
import (
`github.com/bytedance/sonic/internal/loader`
)
func (self Loader) LoadOne(text []byte, funcName string, frameSize int, argSize int, argStackmap []bool, localStackmap []bool) Function {
return Function(loader.Loader(text).Load(funcName, frameSize, argSize, argStackmap, localStackmap))
}
func Load(modulename string, filenames []string, funcs []Func, text []byte) (out []Function) {
panic("not implemented")
}

View File

@ -1,5 +1,5 @@
//go:build go1.18 && !go1.21
// +build go1.18,!go1.21
//go:build go1.16 && !go1.21
// +build go1.16,!go1.21
/*
* Copyright 2021 ByteDance Inc.

View File

@ -1,12 +1,12 @@
/**
* Copyright 2023 ByteDance Inc.
*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@ -17,16 +17,21 @@
package loader
import (
_ `unsafe`
`sync`
_ `unsafe`
)
//go:linkname lastmoduledatap runtime.lastmoduledatap
//goland:noinspection GoUnusedGlobalVariable
var lastmoduledatap *moduledata
var moduledataMux sync.Mutex
func registerModule(mod *moduledata) {
moduledataMux.Lock()
lastmoduledatap.next = mod
lastmoduledatap = mod
moduledataMux.Unlock()
}
//go:linkname moduledataverify1 runtime.moduledataverify1

View File

@ -16,6 +16,14 @@
package option
var (
// DefaultDecoderBufferSize is the initial buffer size of StreamDecoder
DefaultDecoderBufferSize uint = 128 * 1024
// DefaultEncoderBufferSize is the initial buffer size of Encoder
DefaultEncoderBufferSize uint = 128 * 1024
)
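Since these are plain exported package variables, callers can tune them once at start-up; a minimal usage sketch, assuming github.com/bytedance/sonic is already a module dependency and that larger buffers suit the workload.

// Hypothetical sketch: raise sonic's initial buffer sizes for large payloads.
package main

import "github.com/bytedance/sonic/option"

func init() {
	// trade a little memory for fewer buffer growths on big documents
	option.DefaultDecoderBufferSize = 256 * 1024
	option.DefaultEncoderBufferSize = 256 * 1024
}

func main() {}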
// CompileOptions includes all options for encoder or decoder compiler.
type CompileOptions struct {
// the maximum depth for compilation inline

Binary file not shown.


View File

@ -18,6 +18,7 @@
import (
`unsafe`
`runtime`
`github.com/bytedance/sonic/internal/native`
`github.com/bytedance/sonic/internal/native/types`
@ -43,7 +44,8 @@ func intoBytesUnsafe(s string, m *[]byte) types.ParsingError {
pos := -1
slv := (*rt.GoSlice)(unsafe.Pointer(m))
str := (*rt.GoString)(unsafe.Pointer(&s))
ret := native.Unquote(str.Ptr, str.Len, slv.Ptr, &pos, 0)
/* unquote as the default configuration, replace invalid unicode with \ufffd */
ret := native.Unquote(str.Ptr, str.Len, slv.Ptr, &pos, types.F_UNICODE_REPLACE)
/* check for errors */
if ret < 0 {
@ -52,5 +54,6 @@ func intoBytesUnsafe(s string, m *[]byte) types.ParsingError {
/* update the length */
slv.Len = ret
runtime.KeepAlive(s)
return 0
}
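A minimal sketch of the behaviour the new flag is meant to give at the public API level: an invalid \u escape is expected to decode to U+FFFD instead of failing. This is an assumption about end-to-end behaviour based on the comment above; exact results depend on the sonic version and configuration.

// Hypothetical sketch: a lone surrogate escape decodes to the replacement rune.
package main

import (
	"fmt"

	"github.com/bytedance/sonic"
)

func main() {
	var s string
	if err := sonic.Unmarshal([]byte(`"\ud800"`), &s); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("%q\n", s) // expected: "\ufffd"
}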

View File

@ -19,6 +19,22 @@ linters:
- nolintlint
- revive
- wastedassign
linters-settings:
gosec:
# To select a subset of rules to run.
# Available rules: https://github.com/securego/gosec#available-rules
# Default: [] - means include all rules
includes:
- G102
- G106
- G108
- G109
- G111
- G112
- G201
- G203
issues:
exclude-rules:
- linters:
@ -37,3 +53,6 @@ issues:
- path: _test\.go
linters:
- gosec # security checks do not make sense in tests
- linters:
- revive
path: _test\.go

View File

@ -1,5 +1,26 @@
# Gin ChangeLog
## Gin v1.9.1
### BUG FIXES
* fix Request.Context() checks [#3512](https://github.com/gin-gonic/gin/pull/3512)
### SECURITY
* fix lack of escaping of filename in Content-Disposition [#3556](https://github.com/gin-gonic/gin/pull/3556)
### ENHANCEMENTS
* refactor: use bytes.ReplaceAll directly [#3455](https://github.com/gin-gonic/gin/pull/3455)
* convert strings and slices using the officially recommended way [#3344](https://github.com/gin-gonic/gin/pull/3344)
* improve render code coverage [#3525](https://github.com/gin-gonic/gin/pull/3525)
### DOCS
* docs: changed documentation link for trusted proxies [#3575](https://github.com/gin-gonic/gin/pull/3575)
* chore: improve linting, testing, and GitHub Actions setup [#3583](https://github.com/gin-gonic/gin/pull/3583)
## Gin v1.9.0
### BREAK CHANGES

View File

@ -6,7 +6,6 @@
[![codecov](https://codecov.io/gh/gin-gonic/gin/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-gonic/gin)
[![Go Report Card](https://goreportcard.com/badge/github.com/gin-gonic/gin)](https://goreportcard.com/report/github.com/gin-gonic/gin)
[![GoDoc](https://pkg.go.dev/badge/github.com/gin-gonic/gin?status.svg)](https://pkg.go.dev/github.com/gin-gonic/gin?tab=doc)
[![Join the chat at https://gitter.im/gin-gonic/gin](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gin-gonic/gin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Sourcegraph](https://sourcegraph.com/github.com/gin-gonic/gin/-/badge.svg)](https://sourcegraph.com/github.com/gin-gonic/gin?badge)
[![Open Source Helpers](https://www.codetriage.com/gin-gonic/gin/badges/users.svg)](https://www.codetriage.com/gin-gonic/gin)
[![Release](https://img.shields.io/github/release/gin-gonic/gin.svg?style=flat-square)](https://github.com/gin-gonic/gin/releases)
@ -31,7 +30,7 @@ Gin is a web framework written in [Go](https://go.dev/). It features a martini-l
### Prerequisites
- **[Go](https://go.dev/)**: ~~any one of the **three latest major** [releases](https://go.dev/doc/devel/release)~~ (now version **1.16+** is required).
- **[Go](https://go.dev/)**: any one of the **three latest major** [releases](https://go.dev/doc/devel/release) (we test it with these).
### Getting Gin
@ -176,4 +175,4 @@ Awesome project lists using [Gin](https://github.com/gin-gonic/gin) web framewor
Gin is the work of hundreds of contributors. We appreciate your help!
Please see [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the contribution workflow.
Please see [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the contribution workflow.

View File

@ -1,10 +0,0 @@
// Copyright 2022 Gin Core Team. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
//go:build !go1.18
// +build !go1.18
package gin
type any = interface{}

View File

@ -1,10 +0,0 @@
// Copyright 2022 Gin Core Team. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
//go:build !go1.18
// +build !go1.18
package binding
type any = interface{}

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !nomsgpack
// +build !nomsgpack
package binding

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build nomsgpack
// +build nomsgpack
package binding

View File

@ -15,7 +15,7 @@
// EnableDecoderUseNumber is used to call the UseNumber method on the JSON
// Decoder instance. UseNumber causes the Decoder to unmarshal a number into an
// interface{} as a Number instead of as a float64.
// any as a Number instead of as a float64.
var EnableDecoderUseNumber = false
// EnableDecoderDisallowUnknownFields is used to call the DisallowUnknownFields method

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !nomsgpack
// +build !nomsgpack
package binding

View File

@ -1,5 +0,0 @@
coverage:
notify:
gitter:
default:
url: https://webhooks.gitter.im/e/d90dcdeeab2f1e357165

View File

@ -652,7 +652,7 @@ func (c *Context) BindYAML(obj any) error {
}
// BindTOML is a shortcut for c.MustBindWith(obj, binding.TOML).
func (c *Context) BindTOML(obj interface{}) error {
func (c *Context) BindTOML(obj any) error {
return c.MustBindWith(obj, binding.TOML)
}
@ -717,7 +717,7 @@ func (c *Context) ShouldBindYAML(obj any) error {
}
// ShouldBindTOML is a shortcut for c.ShouldBindWith(obj, binding.TOML).
func (c *Context) ShouldBindTOML(obj interface{}) error {
func (c *Context) ShouldBindTOML(obj any) error {
return c.ShouldBindWith(obj, binding.TOML)
}
@ -995,7 +995,7 @@ func (c *Context) YAML(code int, obj any) {
}
// TOML serializes the given struct as TOML into the response body.
func (c *Context) TOML(code int, obj interface{}) {
func (c *Context) TOML(code int, obj any) {
c.Render(code, render.TOML{Data: obj})
}
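A minimal usage sketch for the render above (only the signature changed to any in this release); the route and payload are illustrative.

// Hypothetical sketch: serve a TOML response from a handler.
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()
	r.GET("/config", func(c *gin.Context) {
		c.TOML(http.StatusOK, map[string]any{"debug": true, "port": 8080})
	})
	_ = r.Run(":8080")
}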
@ -1052,11 +1052,17 @@ func (c *Context) FileFromFS(filepath string, fs http.FileSystem) {
http.FileServer(fs).ServeHTTP(c.Writer, c.Request)
}
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
func escapeQuotes(s string) string {
return quoteEscaper.Replace(s)
}
// FileAttachment writes the specified file into the body stream in an efficient way
// On the client side, the file will typically be downloaded with the given filename
func (c *Context) FileAttachment(filepath, filename string) {
if isASCII(filename) {
c.Writer.Header().Set("Content-Disposition", `attachment; filename="`+filename+`"`)
c.Writer.Header().Set("Content-Disposition", `attachment; filename="`+escapeQuotes(filename)+`"`)
} else {
c.Writer.Header().Set("Content-Disposition", `attachment; filename*=UTF-8''`+url.QueryEscape(filename))
}
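A minimal sketch of what the added escaping buys: a double quote in a user-supplied filename can no longer break out of the quoted Content-Disposition parameter. The route and file path are illustrative.

// Hypothetical sketch: the quote in the filename is escaped, so the header
// becomes attachment; filename="evil\" dangerous.txt" instead of a truncated,
// attacker-controllable value.
package main

import "github.com/gin-gonic/gin"

func main() {
	r := gin.Default()
	r.GET("/report", func(c *gin.Context) {
		c.FileAttachment("./report.pdf", `evil" dangerous.txt`)
	})
	_ = r.Run(":8080")
}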
@ -1174,9 +1180,16 @@ func (c *Context) SetAccepted(formats ...string) {
/***** GOLANG.ORG/X/NET/CONTEXT *****/
/************************************/
// hasRequestContext returns whether c.Request has Context and fallback.
func (c *Context) hasRequestContext() bool {
hasFallback := c.engine != nil && c.engine.ContextWithFallback
hasRequestContext := c.Request != nil && c.Request.Context() != nil
return hasFallback && hasRequestContext
}
// Deadline returns that there is no deadline (ok==false) when c.Request has no Context.
func (c *Context) Deadline() (deadline time.Time, ok bool) {
if !c.engine.ContextWithFallback || c.Request == nil || c.Request.Context() == nil {
if !c.hasRequestContext() {
return
}
return c.Request.Context().Deadline()
@ -1184,7 +1197,7 @@ func (c *Context) Deadline() (deadline time.Time, ok bool) {
// Done returns nil (chan which will wait forever) when c.Request has no Context.
func (c *Context) Done() <-chan struct{} {
if !c.engine.ContextWithFallback || c.Request == nil || c.Request.Context() == nil {
if !c.hasRequestContext() {
return nil
}
return c.Request.Context().Done()
@ -1192,7 +1205,7 @@ func (c *Context) Done() <-chan struct{} {
// Err returns nil when c.Request has no Context.
func (c *Context) Err() error {
if !c.engine.ContextWithFallback || c.Request == nil || c.Request.Context() == nil {
if !c.hasRequestContext() {
return nil
}
return c.Request.Context().Err()
@ -1213,7 +1226,7 @@ func (c *Context) Value(key any) any {
return val
}
}
if !c.engine.ContextWithFallback || c.Request == nil || c.Request.Context() == nil {
if !c.hasRequestContext() {
return nil
}
return c.Request.Context().Value(key)
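A minimal sketch of the fallback these checks guard, assuming an application that opts in via ContextWithFallback; once both the engine flag and a request context are present, Deadline/Done/Err/Value defer to c.Request.Context().

// Hypothetical sketch: with ContextWithFallback enabled, a *gin.Context can be
// used as a context.Context that follows the request's cancellation.
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.New()
	r.ContextWithFallback = true // fall back to c.Request.Context()
	r.GET("/wait", func(c *gin.Context) {
		select {
		case <-c.Done(): // request context cancelled (e.g. client went away)
			c.String(http.StatusServiceUnavailable, "cancelled")
		default:
			c.String(http.StatusOK, "ok")
		}
	})
	_ = r.Run(":8080")
}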

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build appengine
// +build appengine
package gin

View File

@ -12,7 +12,7 @@
"strings"
)
const ginSupportMinGoVer = 16
const ginSupportMinGoVer = 18
// IsDebugging returns true if the framework is running in debug mode.
// Use SetMode(gin.ReleaseMode) to disable debug mode.
@ -67,7 +67,7 @@ func getMinVer(v string) (uint64, error) {
func debugPrintWARNINGDefault() {
if v, e := getMinVer(runtime.Version()); e == nil && v < ginSupportMinGoVer {
debugPrint(`[WARNING] Now Gin requires Go 1.16+.
debugPrint(`[WARNING] Now Gin requires Go 1.18+.
`)
}

View File

@ -13,7 +13,7 @@
// BindWith binds the passed struct pointer using the specified binding engine.
// See the binding package.
func (c *Context) BindWith(obj any, b binding.Binding) error {
log.Println(`BindWith(\"interface{}, binding.Binding\") error is going to
log.Println(`BindWith(\"any, binding.Binding\") error is going to
be deprecated, please check issue #662 and either use MustBindWith() if you
want HTTP 400 to be automatically returned if any error occur, or use
ShouldBindWith() if you need to manage the error.`)

View File

@ -39,7 +39,7 @@ func (fs onlyFilesFS) Open(name string) (http.File, error) {
}
// Readdir overrides the http.File default implementation.
func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) {
func (f neuteredReaddirFile) Readdir(_ int) ([]os.FileInfo, error) {
// this disables directory listing
return nil, nil
}

View File

@ -515,7 +515,7 @@ func (engine *Engine) RunUnix(file string) (err error) {
if engine.isUnsafeTrustedProxies() {
debugPrint("[WARNING] You trusted all proxies, this is NOT safe. We recommend you to set a value.\n" +
"Please check https://pkg.go.dev/github.com/gin-gonic/gin#readme-don-t-trust-all-proxies for details.")
"Please check https://github.com/gin-gonic/gin/blob/master/docs/doc.md#dont-trust-all-proxies for details.")
}
listener, err := net.Listen("unix", file)
@ -538,7 +538,7 @@ func (engine *Engine) RunFd(fd int) (err error) {
if engine.isUnsafeTrustedProxies() {
debugPrint("[WARNING] You trusted all proxies, this is NOT safe. We recommend you to set a value.\n" +
"Please check https://pkg.go.dev/github.com/gin-gonic/gin#readme-don-t-trust-all-proxies for details.")
"Please check https://github.com/gin-gonic/gin/blob/master/docs/doc.md#dont-trust-all-proxies for details.")
}
f := os.NewFile(uintptr(fd), fmt.Sprintf("fd@%d", fd))
@ -559,7 +559,7 @@ func (engine *Engine) RunListener(listener net.Listener) (err error) {
if engine.isUnsafeTrustedProxies() {
debugPrint("[WARNING] You trusted all proxies, this is NOT safe. We recommend you to set a value.\n" +
"Please check https://pkg.go.dev/github.com/gin-gonic/gin#readme-don-t-trust-all-proxies for details.")
"Please check https://github.com/gin-gonic/gin/blob/master/docs/doc.md#dont-trust-all-proxies for details.")
}
err = http.Serve(listener, engine.Handler())

View File

@ -2,6 +2,8 @@
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
//go:build !go1.20
package bytesconv
import (

View File

@ -0,0 +1,23 @@
// Copyright 2023 Gin Core Team. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
//go:build go1.20
package bytesconv
import (
"unsafe"
)
// StringToBytes converts string to byte slice without a memory allocation.
// For more details, see https://github.com/golang/go/issues/53003#issuecomment-1140276077.
func StringToBytes(s string) []byte {
return unsafe.Slice(unsafe.StringData(s), len(s))
}
// BytesToString converts byte slice to string without a memory allocation.
// For more details, see https://github.com/golang/go/issues/53003#issuecomment-1140276077.
func BytesToString(b []byte) string {
return unsafe.String(unsafe.SliceData(b), len(b))
}
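A minimal sketch of the caveat behind this go1.20 implementation: both helpers alias the original memory, so the converted value must be treated as read-only and must not outlive its source. The snippet uses the unsafe builtins directly since the package above is internal.

// Hypothetical sketch: the zero-copy conversion shares memory with its source,
// which is why mutating the source is visible through the "string".
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	b := []byte("gin")
	s := unsafe.String(unsafe.SliceData(b), len(b)) // zero-copy view over b
	b[0] = 'G'                                      // mutation leaks into s
	fmt.Println(s) // prints "Gin" — hence: never mutate the source afterwards
}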

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go_json
// +build go_json
package json

Some files were not shown because too many files have changed in this diff.