diff --git a/go.mod b/go.mod index 28284561..075a9ea4 100644 --- a/go.mod +++ b/go.mod @@ -10,11 +10,11 @@ require ( github.com/didip/tollbooth v4.0.2+incompatible github.com/dustin/go-humanize v1.0.1 github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 - github.com/evanw/esbuild v0.23.1 + github.com/evanw/esbuild v0.24.0 github.com/felixge/fgtrace v0.2.0 github.com/flosch/pongo2/v6 v6.0.0 github.com/go-gcfg/gcfg v1.2.3 - github.com/gomarkdown/markdown v0.0.0-20240730141124-034f12af3bf6 + github.com/gomarkdown/markdown v0.0.0-20240930133441-72d49d9543d8 github.com/lib/pq v1.10.9 github.com/microcosm-cc/bluemonday v1.0.27 github.com/mitchellh/go-homedir v1.1.0 @@ -25,12 +25,12 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/terminar/permissionsqlite/v2 v2.0.0-20240814235136-eff14bb4cea9 github.com/tylerb/graceful v1.2.15 - github.com/valyala/fasthttp v1.55.0 + github.com/valyala/fasthttp v1.56.0 github.com/wellington/sass v0.0.0-20160911051022-cab90b3986d6 github.com/xyproto/ask v1.1.0 github.com/xyproto/datablock v1.2.0 github.com/xyproto/env/v2 v2.5.0 - github.com/xyproto/files v1.6.0 + github.com/xyproto/files v1.7.0 github.com/xyproto/gluamapper v1.2.1 github.com/xyproto/gopher-lua v1.0.2 github.com/xyproto/jpath v0.6.1 @@ -67,15 +67,15 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dlclark/regexp2 v1.11.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-mysql-org/go-mysql v1.9.0 // indirect + github.com/go-mysql-org/go-mysql v1.9.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/gomodule/redigo v1.9.2 // indirect - github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 // indirect + github.com/google/pprof v0.0.0-20240929191954-255acd752d31 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/css v1.0.1 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.10 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/libdns/libdns v0.2.2 // indirect github.com/mattetti/filebuffer v1.0.1 // indirect @@ -108,7 +108,7 @@ require ( github.com/xyproto/simplehstore v1.8.2 // indirect github.com/xyproto/simplemaria v1.3.4 // indirect github.com/xyproto/symwalk v1.1.1 // indirect - github.com/xyproto/vt100 v1.14.5 // indirect + github.com/xyproto/vt100 v1.14.7 // indirect github.com/zeebo/blake3 v0.2.4 // indirect go.etcd.io/bbolt v1.3.11 // indirect go.uber.org/atomic v1.11.0 // indirect diff --git a/go.sum b/go.sum index c85d0237..11835988 100644 --- a/go.sum +++ b/go.sum @@ -55,8 +55,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= -github.com/evanw/esbuild v0.23.1 h1:ociewhY6arjTarKLdrXfDTgy25oxhTZmzP8pfuBTfTA= -github.com/evanw/esbuild v0.23.1/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48= +github.com/evanw/esbuild v0.24.0 h1:GZ78naTLp7FKr+K7eNuM/SLs5maeiHYRPsTg6kmdsSE= +github.com/evanw/esbuild v0.24.0/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48= github.com/felixge/fgtrace v0.2.0 
h1:lq7RO6ELjR+S74+eD+ai/vhYvsjno7Vb84yzU6RPSeU= github.com/felixge/fgtrace v0.2.0/go.mod h1:q9vMuItthu3CRfNhirTCTwzBcJ8atUFkrJUhgQbjg8c= github.com/flosch/pongo2/v6 v6.0.0 h1:lsGru8IAzHgIAw6H2m4PCyleO58I40ow6apih0WprMU= @@ -69,8 +69,8 @@ github.com/go-gcfg/gcfg v1.2.3/go.mod h1:D+YdKk714qkU4V0pntcxhDsrHgQDmI91IEbXXqw github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/go-mysql-org/go-mysql v1.9.0 h1:2YniuBkyD+Ll8HWfZcaJ3JtibUohZTjwbb27ZWhYdOA= -github.com/go-mysql-org/go-mysql v1.9.0/go.mod h1:+SgFgTlqjqOQoMc98n9oyUWEgn2KkOL1VmXDoq2ONOs= +github.com/go-mysql-org/go-mysql v1.9.1 h1:W2ZKkHkoM4mmkasJCoSYfaE4RQNxXTb6VqiaMpKFrJc= +github.com/go-mysql-org/go-mysql v1.9.1/go.mod h1:+SgFgTlqjqOQoMc98n9oyUWEgn2KkOL1VmXDoq2ONOs= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= @@ -81,14 +81,14 @@ github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2V github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/gomarkdown/markdown v0.0.0-20240730141124-034f12af3bf6 h1:ZPy+2XJ8u0bB3sNFi+I72gMEMS7MTg7aZCCXPOjV8iw= -github.com/gomarkdown/markdown v0.0.0-20240730141124-034f12af3bf6/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA= +github.com/gomarkdown/markdown v0.0.0-20240930133441-72d49d9543d8 h1:4txT5G2kqVAKMjzidIabL/8KqjIK71yj30YOeuxLn10= +github.com/gomarkdown/markdown v0.0.0-20240930133441-72d49d9543d8/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA= github.com/gomodule/redigo v1.9.2 h1:HrutZBLhSIU8abiSfW8pj8mPhOyMYjZT/wcA4/L9L9s= github.com/gomodule/redigo v1.9.2/go.mod h1:KsU3hiK/Ay8U42qpaJk+kuNa3C+spxapWpM+ywhcgtw= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 h1:c5FlPPgxOn7kJz3VoPLkQYQXGBS3EklQ4Zfi57uOuqQ= -github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20240929191954-255acd752d31 h1:LcRdQWywSgfi5jPsYZ1r2avbbs5IQ5wtyhMBCcokyo4= +github.com/google/pprof v0.0.0-20240929191954-255acd752d31/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= @@ -97,8 +97,8 @@ github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUq github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/jmoiron/sqlx v1.3.3 h1:j82X0bf7oQ27XeqxicSZsTU5suPwKElg3oyxNn43iTk= github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= 
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -197,8 +197,8 @@ github.com/tylerb/graceful v1.2.15/go.mod h1:LPYTbOYmUTdabwRt0TGhLllQ0MUNbs0Y5q1 github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8= -github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM= +github.com/valyala/fasthttp v1.56.0 h1:bEZdJev/6LCBlpdORfrLu/WOZXXxvrUQSiyniuaoW8U= +github.com/valyala/fasthttp v1.56.0/go.mod h1:sReBt3XZVnudxuLOx4J/fMrJVorWRiWY2koQKgABiVI= github.com/wellington/sass v0.0.0-20160911051022-cab90b3986d6 h1:qPS12y9iMXyKr2flmOG7RgiyUGkQxQibp1hx7uug9IQ= github.com/wellington/sass v0.0.0-20160911051022-cab90b3986d6/go.mod h1:ncYBwTYUjmb7N+sZbf8WJYynLivoqFL+U2f8uOX2Yzk= github.com/xyproto/ask v1.0.0/go.mod h1:GTF2UyZg4J/JFVrY8jrfbk00yljvpDWra9gHiSuwI90= @@ -218,8 +218,8 @@ github.com/xyproto/datablock v1.2.0/go.mod h1:hQGlZYTpt2QOXcjPKri4bjwU1mebDvrY9N github.com/xyproto/env/v2 v2.0.0/go.mod h1:n0AhHu2mZjNMK2auKEF6eUAU6LJ/1PQfss8UuT7Jhzc= github.com/xyproto/env/v2 v2.5.0 h1:1KXF3UCaELjnXzfZ6iHDCdJoBeQPz4LXG4oHlBN+jvs= github.com/xyproto/env/v2 v2.5.0/go.mod h1:F81ZEzu15s3TWUZJ1uzBl9iNeq9zcfHvxMkQJaLZUl0= -github.com/xyproto/files v1.6.0 h1:/JNu6ylqS3wUPozQ+5AOwz6ant2Hxfkan+uiLqWuP/Y= -github.com/xyproto/files v1.6.0/go.mod h1:johy1UtusLfkGuQ98zGJIHkQ1hWPUre5GHQ0GrUzKnk= +github.com/xyproto/files v1.7.0 h1:8LHK6j6HN5gb4zmvElf6SWqPWLWUTbIoBbFh+bmlPHA= +github.com/xyproto/files v1.7.0/go.mod h1:johy1UtusLfkGuQ98zGJIHkQ1hWPUre5GHQ0GrUzKnk= github.com/xyproto/gluamapper v1.2.1 h1:7pDNxzX4P1QndBFkZEN7JmixrH0HSc7jKw2xyTZ0OjY= github.com/xyproto/gluamapper v1.2.1/go.mod h1:uN8tJzpgFmctChbuKGSlLGea/8p5q2v2+5WCnqcUS+8= github.com/xyproto/gopher-lua v1.0.0/go.mod h1:VCAgqVjLOz4AzuaxCORQNg4/0C3piilmVLcbMrJ9AJw= @@ -275,8 +275,8 @@ github.com/xyproto/tinysvg v1.1.1 h1:sO0F71oODDHoZ9KLC4ucGsdVH9xxgaWIonW/fLF7piM github.com/xyproto/tinysvg v1.1.1/go.mod h1:DKgmaYuFIvJab9ug4nH4ZG356VtUaKXG2mUU07GIurs= github.com/xyproto/unzip v1.0.0 h1:Gwk1y7hbvCL8lvCMUgxOyrcofker5UfoyQPcIaY8uqc= github.com/xyproto/unzip v1.0.0/go.mod h1:6/S2dvAL3XV9Y3HiaFXV7Utl9+IOObDrD6OaFoC63vg= -github.com/xyproto/vt100 v1.14.5 h1:jbEBTTcF3HgB+JmCpOGV+LLJG3mkgMhtxkZcvfSUr4M= -github.com/xyproto/vt100 v1.14.5/go.mod h1:AyPr0fGViddCve+QM2QGArqt/E19TTPBaOwTyKVWeOI= +github.com/xyproto/vt100 v1.14.7 h1:JxwBrDns/qmjSU3h0HDhra8ZNsmm+o6N9XSmA/iCFTU= +github.com/xyproto/vt100 v1.14.7/go.mod h1:cuR2XszXVf224IxIObMhhjJAxwtMpLwayF4GRbMF/oA= github.com/yosssi/gcss v0.1.0 h1:jRuino7qq7kqntBIhT+0xSUI5/sBgCA/zCQ1Tuzd6Gg= github.com/yosssi/gcss v0.1.0/go.mod h1:M3mTPOWZWjVROkXKZ2AiDzOBOXu2MqQeDXF/nKO44sI= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= diff --git 
a/vendor/github.com/evanw/esbuild/internal/compat/js_table.go b/vendor/github.com/evanw/esbuild/internal/compat/js_table.go index f3461ab1..89180c3f 100644 --- a/vendor/github.com/evanw/esbuild/internal/compat/js_table.go +++ b/vendor/github.com/evanw/esbuild/internal/compat/js_table.go @@ -813,15 +813,17 @@ var jsTable = map[JSFeature]map[Engine][]versionRange{ Safari: {{start: v{12, 0, 0}}}, }, RegexpUnicodePropertyEscapes: { - // Note: The latest version of "Chrome" failed this test: RegExp Unicode Property Escapes: Unicode 15.1 - // Note: The latest version of "Firefox" failed this test: RegExp Unicode Property Escapes: Unicode 15.1 + // Note: The latest version of "Chrome" failed this test: RegExp Unicode Property Escapes: Unicode 16.0 + // Note: The latest version of "Edge" failed this test: RegExp Unicode Property Escapes: Unicode 16.0 + // Note: The latest version of "Firefox" failed 2 tests including: RegExp Unicode Property Escapes: Unicode 15.1 // Note: The latest version of "Hermes" failed 8 tests including: RegExp Unicode Property Escapes: Unicode 11 // Note: The latest version of "IE" failed 8 tests including: RegExp Unicode Property Escapes: Unicode 11 - // Note: The latest version of "IOS" failed this test: RegExp Unicode Property Escapes: Unicode 15.1 + // Note: The latest version of "IOS" failed this test: RegExp Unicode Property Escapes: Unicode 16.0 + // Note: The latest version of "Node" failed this test: RegExp Unicode Property Escapes: Unicode 16.0 // Note: The latest version of "Rhino" failed 8 tests including: RegExp Unicode Property Escapes: Unicode 11 - // Note: The latest version of "Safari" failed this test: RegExp Unicode Property Escapes: Unicode 15.1 - ES: {{start: v{2018, 0, 0}}}, - Node: {{start: v{18, 20, 0}, end: v{19, 0, 0}}, {start: v{20, 12, 0}, end: v{21, 0, 0}}, {start: v{21, 3, 0}}}, + // Note: The latest version of "Safari" failed this test: RegExp Unicode Property Escapes: Unicode 16.0 + ES: {{start: v{2018, 0, 0}}}, + Opera: {{start: v{111, 0, 0}}}, }, RestArgument: { // Note: The latest version of "Hermes" failed this test: rest parameters: function 'length' property diff --git a/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast.go b/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast.go index f44f1f83..f8d3fe32 100644 --- a/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast.go +++ b/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast.go @@ -374,6 +374,15 @@ type Class struct { BodyLoc logger.Loc CloseBraceLoc logger.Loc + // If true, JavaScript decorators (i.e. not TypeScript experimental + // decorators) should be lowered. This is the case either if JavaScript + // decorators are not supported in the configured target environment, or + // if "useDefineForClassFields" is set to false and this class has + // decorators on it. Note that this flag is not necessarily set to true if + // "useDefineForClassFields" is false and a class has an "accessor" even + // though the accessor feature comes from the decorator specification. + ShouldLowerStandardDecorators bool + // If true, property field initializers cannot be assumed to have no side // effects. 
For example: // diff --git a/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser.go b/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser.go index 8da1a703..45daca94 100644 --- a/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser.go +++ b/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser.go @@ -6287,6 +6287,7 @@ func (p *parser) parseClass(classKeyword logger.Range, name *ast.LocRef, classOp bodyLoc := p.lexer.Loc() p.lexer.Expect(js_lexer.TOpenBrace) properties := []js_ast.Property{} + hasPropertyDecorator := false // Allow "in" and private fields inside class bodies oldAllowIn := p.allowIn @@ -6316,6 +6317,9 @@ func (p *parser) parseClass(classKeyword logger.Range, name *ast.LocRef, classOp firstDecoratorLoc := p.lexer.Loc() scopeIndex := len(p.scopesInOrder) opts.decorators = p.parseDecorators(p.currentScope, classKeyword, opts.decoratorContext) + if len(opts.decorators) > 0 { + hasPropertyDecorator = true + } // This property may turn out to be a type in TypeScript, which should be ignored if property, ok := p.parseProperty(p.saveExprCommentsHere(), js_ast.PropertyField, opts, nil); ok { @@ -6353,6 +6357,33 @@ closeBraceLoc := p.saveExprCommentsHere() p.lexer.Expect(js_lexer.TCloseBrace) + + // TypeScript has legacy behavior that uses assignment semantics instead of + // define semantics for class fields when "useDefineForClassFields" is enabled + // (in which case TypeScript behaves differently than JavaScript, which is + // arguably "wrong"). + // + // This legacy behavior exists because TypeScript added class fields to + // TypeScript before they were added to JavaScript. They decided to go with + // assignment semantics for whatever reason. Later on TC39 decided to go with + // define semantics for class fields instead. This behaves differently if the + // base class has a setter with the same name. + // + // The value of "useDefineForClassFields" defaults to false when it's not + // specified and the target is earlier than "ES2022" since the class field + // language feature was added in ES2022. However, TypeScript's "target" + // setting currently defaults to "ES3" which unfortunately means that the + // "useDefineForClassFields" setting defaults to false (i.e. to "wrong"). + // + // We default "useDefineForClassFields" to true (i.e. to "correct") instead. + // This is partially because our target defaults to "esnext", and partially + // because this is a legacy behavior that no one should be using anymore. + // Users that want the wrong behavior can either set "useDefineForClassFields" + // to false in "tsconfig.json" explicitly, or set TypeScript's "target" to + // "ES2021" or earlier in their "tsconfig.json" file. + useDefineForClassFields := !p.options.ts.Parse || p.options.ts.Config.UseDefineForClassFields == config.True || + (p.options.ts.Config.UseDefineForClassFields == config.Unspecified && p.options.ts.Config.Target != config.TSTargetBelowES2022) + return js_ast.Class{ ClassKeyword: classKeyword, Decorators: classOpts.decorators, @@ -6362,31 +6393,16 @@ func (p *parser) parseClass(classKeyword logger.Range, name *ast.LocRef, classOp Properties: properties, CloseBraceLoc: closeBraceLoc, - // TypeScript has legacy behavior that uses assignment semantics instead of - // define semantics for class fields when "useDefineForClassFields" is enabled - // (in which case TypeScript behaves differently than JavaScript, which is - // arguably "wrong").
- // - // This legacy behavior exists because TypeScript added class fields to - // TypeScript before they were added to JavaScript. They decided to go with - // assignment semantics for whatever reason. Later on TC39 decided to go with - // define semantics for class fields instead. This behaves differently if the - // base class has a setter with the same name. - // - // The value of "useDefineForClassFields" defaults to false when it's not - // specified and the target is earlier than "ES2022" since the class field - // language feature was added in ES2022. However, TypeScript's "target" - // setting currently defaults to "ES3" which unfortunately means that the - // "useDefineForClassFields" setting defaults to false (i.e. to "wrong"). - // - // We default "useDefineForClassFields" to true (i.e. to "correct") instead. - // This is partially because our target defaults to "esnext", and partially - // because this is a legacy behavior that no one should be using anymore. - // Users that want the wrong behavior can either set "useDefineForClassFields" - // to false in "tsconfig.json" explicitly, or set TypeScript's "target" to - // "ES2021" or earlier in their in "tsconfig.json" file. - UseDefineForClassFields: !p.options.ts.Parse || p.options.ts.Config.UseDefineForClassFields == config.True || - (p.options.ts.Config.UseDefineForClassFields == config.Unspecified && p.options.ts.Config.Target != config.TSTargetBelowES2022), + // Always lower standard decorators if they are present and TypeScript's + // "useDefineForClassFields" setting is false even if the configured target + // environment supports decorators. This setting changes the behavior of + // class fields, and so we must lower decorators so they behave correctly. + ShouldLowerStandardDecorators: (len(classOpts.decorators) > 0 || hasPropertyDecorator) && + ((!p.options.ts.Parse && p.options.unsupportedJSFeatures.Has(compat.Decorators)) || + (p.options.ts.Parse && p.options.ts.Config.ExperimentalDecorators != config.True && + (p.options.unsupportedJSFeatures.Has(compat.Decorators) || !useDefineForClassFields))), + + UseDefineForClassFields: useDefineForClassFields, } } diff --git a/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower_class.go b/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower_class.go index 944b0e42..01458368 100644 --- a/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower_class.go +++ b/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower_class.go @@ -416,8 +416,7 @@ func (p *parser) computeClassLoweringInfo(class *js_ast.Class) (result classLowe // due to the complexity of the decorator specification. The specification is // also still evolving so trying to optimize it now is also potentially // premature. 
- if p.options.unsupportedJSFeatures.Has(compat.Decorators) && - (!p.options.ts.Parse || p.options.ts.Config.ExperimentalDecorators != config.True) { + if class.ShouldLowerStandardDecorators { for _, prop := range class.Properties { if len(prop.Decorators) > 0 { for _, prop := range class.Properties { @@ -1108,7 +1107,7 @@ func (ctx *lowerClassContext) analyzeProperty(p *parser, prop js_ast.Property, c analysis.private, _ = prop.Key.Data.(*js_ast.EPrivateIdentifier) mustLowerPrivate := analysis.private != nil && p.privateSymbolNeedsToBeLowered(analysis.private) analysis.shouldOmitFieldInitializer = p.options.ts.Parse && !prop.Kind.IsMethodDefinition() && prop.InitializerOrNil.Data == nil && - !ctx.class.UseDefineForClassFields && !mustLowerPrivate + !ctx.class.UseDefineForClassFields && !mustLowerPrivate && !ctx.class.ShouldLowerStandardDecorators // Class fields must be lowered if the environment doesn't support them if !prop.Kind.IsMethodDefinition() { @@ -1140,7 +1139,7 @@ func (ctx *lowerClassContext) analyzeProperty(p *parser, prop js_ast.Property, c // they will end up being lowered (if they are even being lowered at all) if p.options.ts.Parse && p.options.ts.Config.ExperimentalDecorators == config.True { analysis.propExperimentalDecorators = prop.Decorators - } else if p.options.unsupportedJSFeatures.Has(compat.Decorators) { + } else if ctx.class.ShouldLowerStandardDecorators { analysis.propDecorators = prop.Decorators } @@ -1451,7 +1450,7 @@ func (ctx *lowerClassContext) processProperties(p *parser, classLoweringInfo cla propertyKeyTempRefs, decoratorTempRefs := ctx.hoistComputedProperties(p, classLoweringInfo) // Save the initializer index for each field and accessor element - if p.options.unsupportedJSFeatures.Has(compat.Decorators) && (!p.options.ts.Parse || p.options.ts.Config.ExperimentalDecorators != config.True) { + if ctx.class.ShouldLowerStandardDecorators { var counts [4]int // Count how many initializers there are in each section @@ -1484,8 +1483,7 @@ func (ctx *lowerClassContext) processProperties(p *parser, classLoweringInfo cla } // Evaluate the decorator expressions inline - if p.options.unsupportedJSFeatures.Has(compat.Decorators) && len(ctx.class.Decorators) > 0 && - (!p.options.ts.Parse || p.options.ts.Config.ExperimentalDecorators != config.True) { + if ctx.class.ShouldLowerStandardDecorators && len(ctx.class.Decorators) > 0 { name := ctx.nameToKeep if name == "" { name = "class" @@ -2079,7 +2077,7 @@ func (ctx *lowerClassContext) finishAndGenerateCode(p *parser, result visitClass if p.options.ts.Parse && p.options.ts.Config.ExperimentalDecorators == config.True { classExperimentalDecorators = ctx.class.Decorators ctx.class.Decorators = nil - } else if p.options.unsupportedJSFeatures.Has(compat.Decorators) { + } else if ctx.class.ShouldLowerStandardDecorators { classDecorators = ctx.decoratorClassDecorators } diff --git a/vendor/github.com/evanw/esbuild/internal/resolver/package_json.go b/vendor/github.com/evanw/esbuild/internal/resolver/package_json.go index acf5a220..068acdea 100644 --- a/vendor/github.com/evanw/esbuild/internal/resolver/package_json.go +++ b/vendor/github.com/evanw/esbuild/internal/resolver/package_json.go @@ -710,7 +710,8 @@ func parseImportsExportsMap(source logger.Source, log logger.Log, json js_ast.Ex // Track "dead" conditional branches that can never be reached if foundDefault.Len != 0 || (foundImport.Len != 0 && foundRequire.Len != 0) { deadCondition.ranges = append(deadCondition.ranges, keyRange) - if deadCondition.reason == "" { + // 
Note: Don't warn about the "default" condition as it's supposed to be a catch-all condition + if deadCondition.reason == "" && key != "default" { if foundDefault.Len != 0 { deadCondition.reason = "\"default\"" deadCondition.notes = []logger.MsgData{ diff --git a/vendor/github.com/evanw/esbuild/internal/resolver/resolver.go b/vendor/github.com/evanw/esbuild/internal/resolver/resolver.go index ccc21309..b3f6c8b5 100644 --- a/vendor/github.com/evanw/esbuild/internal/resolver/resolver.go +++ b/vendor/github.com/evanw/esbuild/internal/resolver/resolver.go @@ -1132,12 +1132,19 @@ func (r resolverQuery) dirInfoCached(path string) *dirInfo { // Cache hit: stop now if !ok { + // Update the cache to indicate failure. Even if the read failed, we don't + // want to retry again later. The directory is inaccessible so trying again + // is wasted. Doing this before calling "dirInfoUncached" prevents stack + // overflow in case this directory is recursively encountered again. + r.dirCache[path] = nil + // Cache miss: read the info cached = r.dirInfoUncached(path) - // Update the cache unconditionally. Even if the read failed, we don't want to - // retry again later. The directory is inaccessible so trying again is wasted. - r.dirCache[path] = cached + // Only update the cache again on success + if cached != nil { + r.dirCache[path] = cached + } } if r.debugLogs != nil {
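The dirInfoCached hunk above is a memoization cycle guard: the cache is seeded with a nil sentinel before the expensive call, so a recursive lookup of the same directory terminates instead of overflowing the stack, and failed reads stay cached. A minimal Go sketch of the pattern (the names cache, lookup, and compute are illustrative only, not the resolver's real identifiers):

package sketch

type dirInfo struct{ absPath string }

// cache memoizes directory lookups; m must be initialized by the caller.
type cache struct{ m map[string]*dirInfo }

// lookup memoizes compute(path). Seeding the map with nil before the
// computation means a recursive call for the same path hits the sentinel
// and returns immediately; the sentinel is only overwritten on success,
// so inaccessible directories are never retried either.
func (c *cache) lookup(path string, compute func(string) *dirInfo) *dirInfo {
	if d, ok := c.m[path]; ok {
		return d // hit: a real entry, or the nil "failed / in progress" sentinel
	}
	c.m[path] = nil
	if d := compute(path); d != nil {
		c.m[path] = d
	}
	return c.m[path]
}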
@@ -1176,11 +1183,6 @@ func (r resolverQuery) parseTSConfig(file string, visited map[string]bool, confi if visited[file] { return nil, errParseErrorImportCycle } - if visited != nil { - // This is only non-nil for "build" API calls. This is nil for "transform" - // API calls, which tells us to not process "extends" fields. - visited[file] = true - } contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, file) if r.debugLogs != nil && originalError != nil { @@ -1199,7 +1201,20 @@ func (r resolverQuery) parseTSConfig(file string, visited map[string]bool, confi PrettyPath: PrettyPath(r.fs, keyPath), Contents: contents, } - return r.parseTSConfigFromSource(source, visited, configDir) + if visited != nil { + // This is only non-nil for "build" API calls. This is nil for "transform" + // API calls, which tells us to not process "extends" fields. + visited[file] = true + } + result, err := r.parseTSConfigFromSource(source, visited, configDir) + if visited != nil { + // Reset this back to false in case something uses TypeScript 5.0's multiple + // inheritance feature for "tsconfig.json" files. It should be valid to visit + // the same base "tsconfig.json" file multiple times from different multiple + // inheritance subtrees. + visited[file] = false + } + return result, err } func (r resolverQuery) parseTSConfigFromSource(source logger.Source, visited map[string]bool, configDir string) (*TSConfigJSON, error) { @@ -1294,7 +1309,8 @@ func (r resolverQuery) parseTSConfigFromSource(source logger.Source, visited map if entry, _ := entries.Get("package.json"); entry != nil && entry.Kind(r.fs) == fs.FileEntry { // Check the "exports" map if packageJSON := r.parsePackageJSON(result.pkgDirPath); packageJSON != nil && packageJSON.exportsMap != nil { - if absolute, ok, _ := r.esmResolveAlgorithm(result.pkgIdent, "."+result.pkgSubpath, packageJSON, result.pkgDirPath, source.KeyPath.Text); ok { + if absolute, ok, _ := r.esmResolveAlgorithm(finalizeImportsExportsYarnPnPTSConfigExtends, + result.pkgIdent, "."+result.pkgSubpath, packageJSON, result.pkgDirPath, source.KeyPath.Text); ok { base, err := r.parseTSConfig(absolute.Primary.Text, visited, configDir) if result, shouldReturn := maybeFinishOurSearch(base, err, absolute.Primary.Text); shouldReturn { return result @@ -2236,6 +2252,7 @@ func (r resolverQuery) loadPackageImports(importPath string, dirInfoPackageJSON } absolute, ok, diffCase := r.finalizeImportsExportsResult( + finalizeImportsExportsNormal, dirInfoPackageJSON.absPath, conditions, *packageJSON.importsMap, packageJSON, resolvedPath, status, debug, "", "", "", @@ -2243,7 +2260,14 @@ return absolute, ok, diffCase, nil } -func (r resolverQuery) esmResolveAlgorithm(esmPackageName string, esmPackageSubpath string, packageJSON *packageJSON, absPkgPath string, absPath string) (PathPair, bool, *fs.DifferentCase) { +func (r resolverQuery) esmResolveAlgorithm( + kind finalizeImportsExportsKind, + esmPackageName string, + esmPackageSubpath string, + packageJSON *packageJSON, + absPkgPath string, + absPath string, +) (PathPair, bool, *fs.DifferentCase) { if r.debugLogs != nil { r.debugLogs.addNote(fmt.Sprintf("Looking for %q in \"exports\" map in %q", esmPackageSubpath, packageJSON.source.KeyPath.Text)) r.debugLogs.increaseIndent() @@ -2278,6 +2302,7 @@ func (r resolverQuery) esmResolveAlgorithm(esmPackageName string, esmPackageSubp resolvedPath, status, debug = r.esmHandlePostConditions(resolvedPath, status, debug) return r.finalizeImportsExportsResult( + kind, absPkgPath, conditions, *packageJSON.exportsMap, packageJSON, resolvedPath, status, debug, esmPackageName, esmPackageSubpath, absPath, @@ -2358,7 +2383,7 @@ func (r resolverQuery) loadNodeModules(importPath string, dirInfo *dirInfo, forb if pkgDirInfo := r.dirInfoCached(result.pkgDirPath); pkgDirInfo != nil { // Check the "exports" map if packageJSON := pkgDirInfo.packageJSON; packageJSON != nil && packageJSON.exportsMap != nil { - absolute, ok, diffCase := r.esmResolveAlgorithm(result.pkgIdent, "."+result.pkgSubpath, packageJSON, pkgDirInfo.absPath, absPath) + absolute, ok, diffCase := r.esmResolveAlgorithm(finalizeImportsExportsNormal, result.pkgIdent, "."+result.pkgSubpath, packageJSON, pkgDirInfo.absPath, absPath) return absolute, ok, diffCase, nil } @@ -2393,7 +2418,7 @@ func (r resolverQuery) loadNodeModules(importPath string, dirInfo *dirInfo, forb // Check for self-references if dirInfoPackageJSON != nil { if packageJSON := dirInfoPackageJSON.packageJSON; packageJSON.name == esmPackageName && packageJSON.exportsMap != nil { - absolute, ok, diffCase := r.esmResolveAlgorithm(esmPackageName, esmPackageSubpath, packageJSON, + absolute, ok, diffCase :=
r.esmResolveAlgorithm(finalizeImportsExportsNormal, esmPackageName, esmPackageSubpath, packageJSON, dirInfoPackageJSON.absPath, r.fs.Join(dirInfoPackageJSON.absPath, esmPackageSubpath)) return absolute, ok, diffCase, nil } @@ -2412,7 +2437,7 @@ func (r resolverQuery) loadNodeModules(importPath string, dirInfo *dirInfo, forb if pkgDirInfo := r.dirInfoCached(absPkgPath); pkgDirInfo != nil { // Check the "exports" map if packageJSON := pkgDirInfo.packageJSON; packageJSON != nil && packageJSON.exportsMap != nil { - absolute, ok, diffCase := r.esmResolveAlgorithm(esmPackageName, esmPackageSubpath, packageJSON, absPkgPath, absPath) + absolute, ok, diffCase := r.esmResolveAlgorithm(finalizeImportsExportsNormal, esmPackageName, esmPackageSubpath, packageJSON, absPkgPath, absPath) return absolute, ok, diffCase, nil, true } @@ -2524,7 +2549,15 @@ func (r resolverQuery) checkForBuiltInNodeModules(importPath string) (PathPair, return PathPair{}, false, nil } +type finalizeImportsExportsKind uint8 + +const ( + finalizeImportsExportsNormal finalizeImportsExportsKind = iota + finalizeImportsExportsYarnPnPTSConfigExtends +) + func (r resolverQuery) finalizeImportsExportsResult( + kind finalizeImportsExportsKind, absDirPath string, conditions map[string]bool, importExportMap pjMap, @@ -2551,6 +2584,14 @@ func (r resolverQuery) finalizeImportsExportsResult( r.debugLogs.addNote(fmt.Sprintf("The resolved path %q is exact", absResolvedPath)) } + // Avoid calling "dirInfoCached" recursively for "tsconfig.json" extends with Yarn PnP + if kind == finalizeImportsExportsYarnPnPTSConfigExtends { + if r.debugLogs != nil { + r.debugLogs.addNote(fmt.Sprintf("Resolved to %q", absResolvedPath)) + } + return PathPair{Primary: logger.Path{Text: absResolvedPath, Namespace: "file"}}, true, nil + } + resolvedDirInfo := r.dirInfoCached(r.fs.Dir(absResolvedPath)) base := r.fs.Base(absResolvedPath) extensionOrder := r.options.ExtensionOrder diff --git a/vendor/github.com/go-mysql-org/go-mysql/client/auth.go b/vendor/github.com/go-mysql-org/go-mysql/client/auth.go index cb479f3b..006f71e1 100644 --- a/vendor/github.com/go-mysql-org/go-mysql/client/auth.go +++ b/vendor/github.com/go-mysql-org/go-mysql/client/auth.go @@ -115,11 +115,11 @@ func (c *Conn) readInitialHandshake() error { // the first packet *must* have at least 20 bytes of a scramble. // if a plugin provided less, we pad it to 20 with zeros rest := int(authPluginDataLen) - 8 - if max := 12 + 1; rest < max { - rest = max + if rest < 13 { + rest = 13 } - authPluginDataPart2 := data[pos : pos+rest] + authPluginDataPart2 := data[pos : pos+rest-1] pos += rest c.salt = append(c.salt, authPluginDataPart2...) 
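For context on the auth.go hunk above: in the MySQL initial handshake, part 1 of the scramble is 8 bytes, and part 2 occupies max(13, authPluginDataLen-8) bytes on the wire, whose final byte is a NUL terminator. The old code sliced all of part 2, so the salt kept the trailing zero byte; slicing rest-1 bytes yields the 20-byte salt that mysql_native_password expects. A standalone sketch of the corrected arithmetic (scramblePart2 is a hypothetical helper, not part of the go-mysql API):

package sketch

// scramblePart2 returns the usable bytes of auth-plugin-data-part-2:
// max(13, authPluginDataLen-8) bytes arrive on the wire, but the last
// byte is a NUL terminator and must not become part of the salt.
func scramblePart2(data []byte, pos int, authPluginDataLen byte) []byte {
	rest := int(authPluginDataLen) - 8
	if rest < 13 {
		rest = 13
	}
	return data[pos : pos+rest-1]
}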
diff --git a/vendor/github.com/go-mysql-org/go-mysql/driver/driver.go b/vendor/github.com/go-mysql-org/go-mysql/driver/driver.go index 94ebabbf..4054db2a 100644 --- a/vendor/github.com/go-mysql-org/go-mysql/driver/driver.go +++ b/vendor/github.com/go-mysql-org/go-mysql/driver/driver.go @@ -203,7 +203,7 @@ func (c *conn) CheckNamedValue(nv *sqldriver.NamedValue) error { } else { // we've found an error, if the error is driver.ErrSkip then // keep looking otherwise return the unknown error - if !goErrors.Is(sqldriver.ErrSkip, err) { + if !goErrors.Is(err, sqldriver.ErrSkip) { return err } } diff --git a/vendor/github.com/go-mysql-org/go-mysql/mysql/util.go b/vendor/github.com/go-mysql-org/go-mysql/mysql/util.go index a6679d0f..738ca9a9 100644 --- a/vendor/github.com/go-mysql-org/go-mysql/mysql/util.go +++ b/vendor/github.com/go-mysql-org/go-mysql/mysql/util.go @@ -130,10 +130,13 @@ func AppendLengthEncodedInteger(b []byte, n uint64) []byte { func RandomBuf(size int) []byte { buf := make([]byte, size) - mrand.Seed(time.Now().UTC().UnixNano()) + // When this project supports golang 1.20 as a minimum, then this mrand.New(...) + // line can be eliminated and the random number can be generated by simply + // calling mrand.Intn() + random := mrand.New(mrand.NewSource(time.Now().UTC().UnixNano())) min, max := 30, 127 for i := 0; i < size; i++ { - buf[i] = byte(min + mrand.Intn(max-min)) + buf[i] = byte(min + random.Intn(max-min)) } return buf } @@ -197,11 +200,13 @@ func PutLengthEncodedInt(n uint64) []byte { case n <= 0xffffff: return []byte{0xfd, byte(n), byte(n >> 8), byte(n >> 16)} - case n <= 0xffffffffffffffff: + default: + // handles case n <= 0xffffffffffffffff + // using 'default' instead of 'case' to avoid static analysis error + // SA4003: every value of type uint64 is <= math.MaxUint64 return []byte{0xfe, byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24), byte(n >> 32), byte(n >> 40), byte(n >> 48), byte(n >> 56)} } - return nil } // LengthEncodedString returns the string read as a bytes slice, whether the value is NULL, diff --git a/vendor/github.com/gomarkdown/markdown/parser/inline.go b/vendor/github.com/gomarkdown/markdown/parser/inline.go index 5417de7f..42297bf6 100644 --- a/vendor/github.com/gomarkdown/markdown/parser/inline.go +++ b/vendor/github.com/gomarkdown/markdown/parser/inline.go @@ -736,7 +736,7 @@ func leftAngle(p *Parser, data []byte, offset int) (int, ast.Node) { } // '\\' backslash escape -var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~^$") +var EscapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~^$") func escape(p *Parser, data []byte, offset int) (int, ast.Node) { data = data[offset:] @@ -753,7 +753,7 @@ func escape(p *Parser, data []byte, offset int) (int, ast.Node) { return 2, &ast.Hardbreak{} } - if bytes.IndexByte(escapeChars, data[1]) < 0 { + if bytes.IndexByte(EscapeChars, data[1]) < 0 { return 0, nil } diff --git a/vendor/github.com/gomarkdown/markdown/parser/parser.go b/vendor/github.com/gomarkdown/markdown/parser/parser.go index 784e0c6e..305a25d1 100644 --- a/vendor/github.com/gomarkdown/markdown/parser/parser.go +++ b/vendor/github.com/gomarkdown/markdown/parser/parser.go @@ -140,7 +140,7 @@ func NewWithExtensions(extension Extensions) *Parser { p := Parser{ refs: make(map[string]*reference), refsRecord: make(map[string]struct{}), - maxNesting: 16, + maxNesting: 64, insideLink: false, Doc: &ast.Document{}, extensions: extension, diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 
05c7359e..684a3085 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,20 @@ This package provides various compression algorithms. # changelog +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 @@ -81,7 +95,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -136,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -339,7 +353,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. 
* Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -518,7 +532,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 66d1657d..af53fb86 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) { } switch d.compressionLevel.chain { case 0: - // level was NoCompression or ConstantCompresssion. + // level was NoCompression or ConstantCompression. d.windowEnd = 0 default: s := d.state diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 2f410d64..0d7b437f 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -298,6 +298,14 @@ const ( huffmanGenericReader ) +// flushMode tells decompressor when to return data +type flushMode uint8 + +const ( + syncFlush flushMode = iota // return data after sync flush block + partialFlush // return data after each block +) + // Decompress state. type decompressor struct { // Input source. @@ -332,6 +340,8 @@ type decompressor struct { nb uint final bool + + flushMode flushMode } func (f *decompressor) nextBlock() { @@ -618,7 +628,10 @@ func (f *decompressor) dataBlock() { } if n == 0 { - f.toRead = f.dict.readFlush() + if f.flushMode == syncFlush { + f.toRead = f.dict.readFlush() + } + f.finishBlock() return } @@ -657,8 +670,12 @@ func (f *decompressor) finishBlock() { if f.dict.availRead() > 0 { f.toRead = f.dict.readFlush() } + f.err = io.EOF + } else if f.flushMode == partialFlush && f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() } + f.step = nextBlock } @@ -789,15 +806,25 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error { return nil } -// NewReader returns a new ReadCloser that can be used -// to read the uncompressed version of r. -// If r does not also implement io.ByteReader, -// the decompressor may read more data than necessary from r. -// It is the caller's responsibility to call Close on the ReadCloser -// when finished reading. -// -// The ReadCloser returned by NewReader also implements Resetter. 
-func NewReader(r io.Reader) io.ReadCloser { +type ReaderOpt func(*decompressor) + +// WithPartialBlock tells decompressor to return after each block, +// so it can read data written with partial flush +func WithPartialBlock() ReaderOpt { + return func(f *decompressor) { + f.flushMode = partialFlush + } +} + +// WithDict initializes the reader with a preset dictionary +func WithDict(dict []byte) ReaderOpt { + return func(f *decompressor) { + f.dict.init(maxMatchOffset, dict) + } +} + +// NewReaderOpts returns new reader with provided options +func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser { fixedHuffmanDecoderInit() var f decompressor @@ -806,9 +833,26 @@ func NewReader(r io.Reader) io.ReadCloser { f.codebits = new([numCodes]int) f.step = nextBlock f.dict.init(maxMatchOffset, nil) + + for _, opt := range opts { + opt(&f) + } + return &f } +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + return NewReaderOpts(r) +} + // NewReaderDict is like NewReader but initializes the reader // with a preset dictionary. The returned Reader behaves as if // the uncompressed data stream started with the given dictionary, @@ -817,13 +861,5 @@ func NewReader(r io.Reader) io.ReadCloser { // // The ReadCloser returned by NewReader also implements Resetter. func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = nextBlock - f.dict.init(maxMatchOffset, dict) - return &f + return NewReaderOpts(r, WithDict(dict)) }
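The inflate.go change above turns reader construction into a small functional-options API while keeping NewReader and NewReaderDict as thin wrappers, so existing callers are untouched. A usage sketch for the new entry point, assuming a deflate stream that was written with partial flushes (os.Stdin stands in for any io.Reader):

package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/flate"
)

func main() {
	// WithPartialBlock makes the reader return data after every block,
	// so payloads written with a partial flush become readable immediately
	// instead of only after a sync flush.
	zr := flate.NewReaderOpts(os.Stdin, flate.WithPartialBlock())
	defer zr.Close()
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		log.Fatal(err)
	}
}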
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f7..0c7dd4ff 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. // It is up to the caller to verify integrity of the returned data. -// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b2..0f56b02d 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. @@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 03744fbc..9c28840c 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index a4f5bf91..84a79fde 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -179,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -210,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -241,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well.
var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -801,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f..d36be7bd 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0..a79c4a52 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -202,7 +202,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -469,6 +469,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +500,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. 
- // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() + // Use single segments when above minimum window and below window size. single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f7..e47af66e 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error { } return err } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd82..c59f17e0 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) } if ctx.litRemain < 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 5b06174b..f5591fa1 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition diff --git a/vendor/github.com/valyala/fasthttp/README.md b/vendor/github.com/valyala/fasthttp/README.md index cbdb9de2..f43ea6dc 100644 --- a/vendor/github.com/valyala/fasthttp/README.md +++ b/vendor/github.com/valyala/fasthttp/README.md @@ -600,7 +600,7 @@ This is an **unsafe** way, the result string and `[]byte` 
buffer share the same * *Which GO versions are supported by fasthttp?* - Go 1.18.x. Older versions won't be supported. + Go 1.21.x and newer. Older versions might work, but won't officially be supported. * *Please provide real benchmark data and server information* diff --git a/vendor/github.com/valyala/fasthttp/brotli.go b/vendor/github.com/valyala/fasthttp/brotli.go index 30b7d66d..1e8e0f08 100644 --- a/vendor/github.com/valyala/fasthttp/brotli.go +++ b/vendor/github.com/valyala/fasthttp/brotli.go @@ -97,7 +97,7 @@ var ( // - CompressBrotliBestCompression // - CompressBrotliDefaultCompression func AppendBrotliBytesLevel(dst, src []byte, level int) []byte { - w := &byteSliceWriter{dst} + w := &byteSliceWriter{b: dst} WriteBrotliLevel(w, src, level) //nolint:errcheck return w.b } @@ -167,7 +167,7 @@ func AppendBrotliBytes(dst, src []byte) []byte { // WriteUnbrotli writes unbrotlied p to w and returns the number of uncompressed // bytes written to w. func WriteUnbrotli(w io.Writer, p []byte) (int, error) { - r := &byteSliceReader{p} + r := &byteSliceReader{b: p} zr, err := acquireBrotliReader(r) if err != nil { return 0, err @@ -183,7 +183,7 @@ func WriteUnbrotli(w io.Writer, p []byte) (int, error) { // AppendUnbrotliBytes appends unbrotlied src to dst and returns the resulting dst. func AppendUnbrotliBytes(dst, src []byte) ([]byte, error) { - w := &byteSliceWriter{dst} + w := &byteSliceWriter{b: dst} _, err := WriteUnbrotli(w, src) return w.b, err } diff --git a/vendor/github.com/valyala/fasthttp/bytesconv.go b/vendor/github.com/valyala/fasthttp/bytesconv.go index dddf24fd..053da6a7 100644 --- a/vendor/github.com/valyala/fasthttp/bytesconv.go +++ b/vendor/github.com/valyala/fasthttp/bytesconv.go @@ -8,8 +8,8 @@ import ( "errors" "fmt" "io" - "math" "net" + "strconv" "sync" "time" ) @@ -127,21 +127,7 @@ func AppendUint(dst []byte, n int) []byte { panic("BUG: int must be positive") } - var b [20]byte - buf := b[:] - i := len(buf) - var q int - for n >= 10 { - i-- - q = n / 10 - buf[i] = '0' + byte(n-q*10) - n = q - } - i-- - buf[i] = '0' + byte(n) - - dst = append(dst, buf[i:]...) - return dst + return strconv.AppendUint(dst, uint64(n), 10) } // ParseUint parses uint from buf. @@ -185,61 +171,19 @@ func parseUintBuf(b []byte) (int, int, error) { return v, n, nil } -var ( - errEmptyFloat = errors.New("empty float number") - errDuplicateFloatPoint = errors.New("duplicate point found in float number") - errUnexpectedFloatEnd = errors.New("unexpected end of float number") - errInvalidFloatExponent = errors.New("invalid float number exponent") - errUnexpectedFloatChar = errors.New("unexpected char found in float number") -) - // ParseUfloat parses unsigned float from buf. func ParseUfloat(buf []byte) (float64, error) { - if len(buf) == 0 { - return -1, errEmptyFloat + // The implementation of parsing a float string is not easy. + // We believe that the conservative approach is to call strconv.ParseFloat. + // https://github.com/valyala/fasthttp/pull/1865 + res, err := strconv.ParseFloat(b2s(buf), 64) + if res < 0 { + return -1, errors.New("negative input is invalid") } - b := buf - var v uint64 - offset := 1.0 - var pointFound bool - for i, c := range b { - if c < '0' || c > '9' { - if c == '.' 
{ - if pointFound { - return -1, errDuplicateFloatPoint - } - pointFound = true - continue - } - if c == 'e' || c == 'E' { - if i+1 >= len(b) { - return -1, errUnexpectedFloatEnd - } - b = b[i+1:] - minus := -1 - switch b[0] { - case '+': - b = b[1:] - minus = 1 - case '-': - b = b[1:] - default: - minus = 1 - } - vv, err := ParseUint(b) - if err != nil { - return -1, errInvalidFloatExponent - } - return float64(v) * offset * math.Pow10(minus*vv), nil - } - return -1, errUnexpectedFloatChar - } - v = 10*v + uint64(c-'0') - if pointFound { - offset /= 10 - } + if err != nil { + return -1, err } - return float64(v) * offset, nil + return res, err } var ( diff --git a/vendor/github.com/valyala/fasthttp/bytesconv_table.go b/vendor/github.com/valyala/fasthttp/bytesconv_table.go index 5b230f1a..591470fc 100644 --- a/vendor/github.com/valyala/fasthttp/bytesconv_table.go +++ b/vendor/github.com/valyala/fasthttp/bytesconv_table.go @@ -9,3 +9,5 @@ const toUpperTable = "\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11 const quotedArgShouldEscapeTable = "\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01" const quotedPathShouldEscapeTable = "\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x01\x00\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01" const validHeaderFieldByteTable = 
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x01\x01\x01\x01\x00\x00\x01\x01\x00\x01\x01\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x01\x00\x01\x00" +const validHeaderValueByteTable = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01" +const validMethodValueByteTable = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x01\x01\x01\x01\x00\x00\x01\x01\x00\x01\x01\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" diff --git a/vendor/github.com/valyala/fasthttp/client.go b/vendor/github.com/valyala/fasthttp/client.go index 5cae78dd..84a7f93e 100644 --- a/vendor/github.com/valyala/fasthttp/client.go +++ b/vendor/github.com/valyala/fasthttp/client.go @@ -175,14 +175,8 @@ var defaultClient Client type Client struct { noCopy noCopy - // Client name. Used in User-Agent request header. - // - // Default client name is used if not set. - Name string - - // NoDefaultUserAgentHeader when set to true, causes the default - // User-Agent header to be excluded from the Request. - NoDefaultUserAgentHeader bool + readerPool sync.Pool + writerPool sync.Pool // Callback for establishing new connections to hosts. // @@ -197,20 +191,36 @@ type Client struct { // If not set, DialTimeout is used. Dial DialFunc - // Attempt to connect to both ipv4 and ipv6 addresses if set to true. 
- // - // This option is used only if default TCP dialer is used, - // i.e. if Dial is blank. - // - // By default client connects only to ipv4 addresses, - // since unfortunately ipv6 remains broken in many networks worldwide :) - DialDualStack bool - // TLS config for https connections. // // Default TLS config is used if not set. TLSConfig *tls.Config + // RetryIf controls whether a retry should be attempted after an error. + // + // By default will use isIdempotent function. + // + // Deprecated: Use RetryIfErr instead. + // This field is only effective when the `RetryIfErr` field is not set. + RetryIf RetryIfFunc + + // When the client encounters an error during a request, the behavior—whether to retry + // and whether to reset the request timeout—should be determined + // based on the return value of this field. + // This field is only effective within the range of MaxIdemponentCallAttempts. + RetryIfErr RetryIfErrFunc + + // ConfigureClient configures the fasthttp.HostClient. + ConfigureClient func(hc *HostClient) error + + m map[string]*HostClient + ms map[string]*HostClient + + // Client name. Used in User-Agent request header. + // + // Default client name is used if not set. + Name string + // Maximum number of connections per each host which may be established. // // DefaultMaxConnsPerHost is used if not set. @@ -261,6 +271,30 @@ type Client struct { // By default response body size is unlimited. MaxResponseBodySize int + // Maximum duration for waiting for a free connection. + // + // By default will not waiting, return ErrNoFreeConns immediately. + MaxConnWaitTimeout time.Duration + + // Connection pool strategy. Can be either LIFO or FIFO (default). + ConnPoolStrategy ConnPoolStrategyType + + mLock sync.RWMutex + mOnce sync.Once + + // NoDefaultUserAgentHeader when set to true, causes the default + // User-Agent header to be excluded from the Request. + NoDefaultUserAgentHeader bool + + // Attempt to connect to both ipv4 and ipv6 addresses if set to true. + // + // This option is used only if default TCP dialer is used, + // i.e. if Dial is blank. + // + // By default client connects only to ipv4 addresses, + // since unfortunately ipv6 remains broken in many networks worldwide :) + DialDualStack bool + // Header names are passed as-is without normalization // if this option is set. // @@ -288,31 +322,8 @@ type Client struct { // extra slashes are removed, special characters are encoded. DisablePathNormalizing bool - // Maximum duration for waiting for a free connection. - // - // By default will not waiting, return ErrNoFreeConns immediately. - MaxConnWaitTimeout time.Duration - - // RetryIf controls whether a retry should be attempted after an error. - // - // By default will use isIdempotent function. - RetryIf RetryIfFunc - - // Connection pool strategy. Can be either LIFO or FIFO (default). - ConnPoolStrategy ConnPoolStrategyType - // StreamResponseBody enables response body streaming. StreamResponseBody bool - - // ConfigureClient configures the fasthttp.HostClient. - ConfigureClient func(hc *HostClient) error - - mLock sync.RWMutex - mOnce sync.Once - m map[string]*HostClient - ms map[string]*HostClient - readerPool sync.Pool - writerPool sync.Pool } // Get returns the status code and body of url. 
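Reviewer note: the large `Client` reshuffle above (and the matching `HostClient` one further down) moves fields around without changing behavior; the new order is consistent with a padding-reduction pass, grouping pools, maps, and other pointer-sized fields together and pushing the `bool` flags to the end so the struct packs with less alignment padding. A minimal sketch, not fasthttp code, of why field order matters on 64-bit platforms:

```go
package main

import (
	"fmt"
	"unsafe"
)

// loose interleaves bool and int64 fields; each bool is followed by
// seven bytes of padding so the next int64 stays 8-byte aligned.
type loose struct {
	a bool
	x int64
	b bool
	y int64
	c bool
}

// packed holds the same fields grouped by size, so padding appears
// only once, at the tail.
type packed struct {
	x int64
	y int64
	a bool
	b bool
	c bool
}

func main() {
	fmt.Println(unsafe.Sizeof(loose{}))  // 40 on 64-bit platforms
	fmt.Println(unsafe.Sizeof(packed{})) // 24 on 64-bit platforms
}
```

The same grouping-by-size idea would explain the later reorderings of `clientURLResponse`, `wantConn`, `Cookie`, and the `fs.go` types in this diff.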
@@ -535,6 +546,7 @@ func (c *Client) Do(req *Request, resp *Response) error { DisablePathNormalizing: c.DisablePathNormalizing, MaxConnWaitTimeout: c.MaxConnWaitTimeout, RetryIf: c.RetryIf, + RetryIfErr: c.RetryIfErr, ConnPoolStrategy: c.ConnPoolStrategy, StreamResponseBody: c.StreamResponseBody, clientReaderPool: &c.readerPool, @@ -595,7 +607,6 @@ func (c *Client) mCleaner(m map[string]*HostClient) { c.mLock.Lock() for k, v := range m { v.connsLock.Lock() - /* #nosec G601 */ if v.connsCount == 0 && atomic.LoadInt32(&v.pendingClientRequests) == 0 { delete(m, k) } @@ -653,11 +664,28 @@ type DialFunc func(addr string) (net.Conn, error) // - foobar.com:8080 type DialFuncWithTimeout func(addr string, timeout time.Duration) (net.Conn, error) -// RetryIfFunc signature of retry if function. -// +// RetryIfFunc defines the signature of the retry if function. // Request argument passed to RetryIfFunc, if there are any request errors. type RetryIfFunc func(request *Request) bool +// RetryIfErrFunc defines an interface used for implementing the following functionality: +// When the client encounters an error during a request, the behavior—whether to retry +// and whether to reset the request timeout—should be determined +// based on the return value of this interface. +// +// attempt indicates which attempt the current retry is due to a failure of. +// The first request counts as the first attempt. +// +// err represents the error encountered while attempting the `attempts`-th request. +// +// resetTimeout indicates whether to reuse the `Request`'s timeout as the timeout interval, +// rather than using the timeout after subtracting the time spent on previous failed requests. +// This return value is meaningful only when you use `Request.SetTimeout`, `DoTimeout`, or `DoDeadline`. +// +// retry indicates whether to retry the current request. If it is false, +// the request function will immediately return with the `err`. +type RetryIfErrFunc func(request *Request, attempts int, err error) (resetTimeout bool, retry bool) + // RoundTripper wraps every request/response. type RoundTripper interface { RoundTrip(hc *HostClient, req *Request, resp *Response) (retry bool, err error) @@ -684,23 +712,11 @@ const ( type HostClient struct { noCopy noCopy - // Comma-separated list of upstream HTTP server host addresses, - // which are passed to Dial or DialTimeout in a round-robin manner. - // - // Each address may contain port if default dialer is used. - // For example, - // - // - foobar.com:80 - // - foobar.com:443 - // - foobar.com:8080 - Addr string - - // Client name. Used in User-Agent request header. - Name string + readerPool sync.Pool + writerPool sync.Pool - // NoDefaultUserAgentHeader when set to true, causes the default - // User-Agent header to be excluded from the Request. - NoDefaultUserAgentHeader bool + // Transport defines a transport-like mechanism that wraps every request/response. + Transport RoundTripper // Callback for establishing new connections to hosts. // @@ -715,21 +731,45 @@ type HostClient struct { // If not set, DialTimeout is used. Dial DialFunc - // Attempt to connect to both ipv4 and ipv6 host addresses - // if set to true. + // Optional TLS config. + TLSConfig *tls.Config + + // RetryIf controls whether a retry should be attempted after an error. + // By default, it uses the isIdempotent function. // - // This option is used only if default TCP dialer is used, - // i.e. if Dial and DialTimeout are blank. + // Deprecated: Use RetryIfErr instead. 
+ // This field is only effective when the `RetryIfErr` field is not set. + RetryIf RetryIfFunc + + // When the client encounters an error during a request, the behavior—whether to retry + // and whether to reset the request timeout—should be determined + // based on the return value of this field. + // This field is only effective within the range of MaxIdemponentCallAttempts. + RetryIfErr RetryIfErrFunc + + connsWait *wantConnQueue + + tlsConfigMap map[string]*tls.Config + + clientReaderPool *sync.Pool + clientWriterPool *sync.Pool + + // Comma-separated list of upstream HTTP server host addresses, + // which are passed to Dial or DialTimeout in a round-robin manner. // - // By default client connects only to ipv4 addresses, - // since unfortunately ipv6 remains broken in many networks worldwide :) - DialDualStack bool + // Each address may contain port if default dialer is used. + // For example, + // + // - foobar.com:80 + // - foobar.com:443 + // - foobar.com:8080 + Addr string - // Whether to use TLS (aka SSL or HTTPS) for host connections. - IsTLS bool + // Client name. Used in User-Agent request header. + Name string - // Optional TLS config. - TLSConfig *tls.Config + conns []*clientConn + addrs []string // Maximum number of connections which may be established to all hosts // listed in Addr. @@ -753,7 +793,10 @@ type HostClient struct { // Maximum number of attempts for idempotent calls. // - // DefaultMaxIdemponentCallAttempts is used if not set. + // A value of 0 or a negative value represents using DefaultMaxIdemponentCallAttempts. + // For example, a value of 1 means the request will be executed only once, + // while 2 means the request will be executed at most twice. + // The RetryIfErr and RetryIf fields can invalidate remaining attempts. MaxIdemponentCallAttempts int // Per-connection buffer size for responses' reading. @@ -785,6 +828,47 @@ type HostClient struct { // By default response body size is unlimited. MaxResponseBodySize int + // Maximum duration for waiting for a free connection. + // + // By default will not waiting, return ErrNoFreeConns immediately + MaxConnWaitTimeout time.Duration + + // Connection pool strategy. Can be either LIFO or FIFO (default). + ConnPoolStrategy ConnPoolStrategyType + + connsCount int + + connsLock sync.Mutex + + addrsLock sync.Mutex + tlsConfigMapLock sync.Mutex + + addrIdx uint32 + lastUseTime uint32 + + pendingRequests int32 + + // pendingClientRequests counts the number of requests that a Client is currently running using this HostClient. + // It will be incremented earlier than pendingRequests and will be used by Client to see if the HostClient is still in use. + pendingClientRequests int32 + + // NoDefaultUserAgentHeader when set to true, causes the default + // User-Agent header to be excluded from the Request. + NoDefaultUserAgentHeader bool + + // Attempt to connect to both ipv4 and ipv6 host addresses + // if set to true. + // + // This option is used only if default TCP dialer is used, + // i.e. if Dial and DialTimeout are blank. + // + // By default client connects only to ipv4 addresses, + // since unfortunately ipv6 remains broken in many networks worldwide :) + DialDualStack bool + + // Whether to use TLS (aka SSL or HTTPS) for host connections. + IsTLS bool + // Header names are passed as-is without normalization // if this option is set. // @@ -820,51 +904,9 @@ type HostClient struct { // Client logs full errors by default. SecureErrorLogMessage bool - // Maximum duration for waiting for a free connection. 
- // - // By default will not waiting, return ErrNoFreeConns immediately - MaxConnWaitTimeout time.Duration - - // RetryIf controls whether a retry should be attempted after an error. - // - // By default will use isIdempotent function - RetryIf RetryIfFunc - - // Transport defines a transport-like mechanism that wraps every request/response. - Transport RoundTripper - - // Connection pool strategy. Can be either LIFO or FIFO (default). - ConnPoolStrategy ConnPoolStrategyType - // StreamResponseBody enables response body streaming. StreamResponseBody bool - lastUseTime uint32 - - connsLock sync.Mutex - connsCount int - conns []*clientConn - connsWait *wantConnQueue - - addrsLock sync.Mutex - addrs []string - addrIdx uint32 - - tlsConfigMap map[string]*tls.Config - tlsConfigMapLock sync.Mutex - - readerPool sync.Pool - writerPool sync.Pool - - clientReaderPool *sync.Pool - clientWriterPool *sync.Pool - - pendingRequests int32 - - // pendingClientRequests counts the number of requests that a Client is currently running using this HostClient. - // It will be incremented earlier than pendingRequests and will be used by Client to see if the HostClient is still in use. - pendingClientRequests int32 - connsCleanerRun bool } @@ -950,9 +992,9 @@ func clientGetURLTimeout(dst []byte, url string, timeout time.Duration, c client } type clientURLResponse struct { - statusCode int - body []byte err error + body []byte + statusCode int } func clientGetURLDeadline(dst []byte, url string, deadline time.Time, c clientDoer) (statusCode int, body []byte, err error) { @@ -1271,16 +1313,15 @@ func (c *HostClient) DoRedirects(req *Request, resp *Response, maxRedirectsCount // It is recommended obtaining req and resp via AcquireRequest // and AcquireResponse in performance-critical code. func (c *HostClient) Do(req *Request, resp *Response) error { - var err error - var retry bool + var ( + err error + retry bool + resetTimeout bool + ) maxAttempts := c.MaxIdemponentCallAttempts if maxAttempts <= 0 { maxAttempts = DefaultMaxIdemponentCallAttempts } - isRequestRetryable := isIdempotent - if c.RetryIf != nil { - isRequestRetryable = c.RetryIf - } attempts := 0 hasBodyStream := req.IsBodyStream() @@ -1292,6 +1333,10 @@ func (c *HostClient) Do(req *Request, resp *Response) error { if timeout > 0 { deadline = time.Now().Add(timeout) } + retryFunc := c.RetryIf + if retryFunc == nil { + retryFunc = isIdempotent + } atomic.AddInt32(&c.pendingRequests, 1) for { @@ -1313,22 +1358,23 @@ func (c *HostClient) Do(req *Request, resp *Response) error { if hasBodyStream { break } - if !isRequestRetryable(req) { - // Retry non-idempotent requests if the server closes - // the connection before sending the response. - // - // This case is possible if the server closes the idle - // keep-alive connection on timeout. - // - // Apache and nginx usually do this. 
- if err != io.EOF { - break - } - } + // Path prioritization based on ease of computation attempts++ + if attempts >= maxAttempts { break } + if c.RetryIfErr != nil { + resetTimeout, retry = c.RetryIfErr(req, attempts, err) + } else { + retry = retryFunc(req) + } + if !retry { + break + } + if timeout > 0 && resetTimeout { + deadline = time.Now().Add(timeout) + } } atomic.AddInt32(&c.pendingRequests, -1) @@ -1360,9 +1406,7 @@ func (c *HostClient) do(req *Request, resp *Response) (bool, error) { defer ReleaseResponse(resp) } - ok, err := c.doNonNilReqResp(req, resp) - - return ok, err + return c.doNonNilReqResp(req, resp) } func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error) { @@ -1385,7 +1429,7 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error) return false, ErrHostClientRedirectToDifferentScheme } - atomic.StoreUint32(&c.lastUseTime, uint32(time.Now().Unix()-startTimeUnix)) + atomic.StoreUint32(&c.lastUseTime, uint32(time.Now().Unix()-startTimeUnix)) // #nosec G115 // Free up resources occupied by response before sending the request, // so the GC may reclaim these resources (e.g. response body). @@ -1547,6 +1591,7 @@ func (c *HostClient) acquireConn(reqTimeout time.Duration, connectionClose bool) case <-w.ready: return w.conn, w.err case <-tc.C: + c.connsWait.failedWaiters.Add(1) if timeoutOverridden { return nil, ErrTimeout } @@ -1692,6 +1737,7 @@ func (c *HostClient) decConnsCount() { dialed = true break } + c.connsWait.failedWaiters.Add(-1) } } if !dialed { @@ -1744,8 +1790,19 @@ func (c *HostClient) releaseConn(cc *clientConn) { w := q.popFront() if w.waiting() { delivered = w.tryDeliver(cc, nil) - break + // This is the last resort to hand over conCount sema. + // We must ensure that there are no valid waiters in connsWait + // when we exit this loop. + // + // We did not apply the same looping pattern in the decConnsCount + // method because it needs to create a new time-spent connection, + // and the decConnsCount call chain will inevitably reach this point. + // When MaxConnWaitTimeout>0. + if delivered { + break + } } + c.connsWait.failedWaiters.Add(-1) } } if !delivered { @@ -1859,7 +1916,7 @@ func (c *HostClient) nextAddr() string { } addr := c.addrs[0] if len(c.addrs) > 1 { - addr = c.addrs[c.addrIdx%uint32(len(c.addrs))] + addr = c.addrs[c.addrIdx%uint32(len(c.addrs))] // #nosec G115 c.addrIdx++ } c.addrsLock.Unlock() @@ -2033,10 +2090,10 @@ func AddMissingPort(addr string, isTLS bool) string { // // Inspired by net/http/transport.go. type wantConn struct { + err error ready chan struct{} - mu sync.Mutex // protects conn, err, close(ready) conn *clientConn - err error + mu sync.Mutex // protects conn, err, close(ready) } // waiting reports whether w is still waiting for an answer (connection or error). @@ -2099,13 +2156,19 @@ type wantConnQueue struct { // in Okasaki's purely functional queue but without the // overhead of reversing the list when swapping stages. head []*wantConn - headPos int tail []*wantConn + headPos int + // failedWaiters is the number of waiters in the head or tail queue, + // but is invalid. + // These state waiters cannot truly be considered as waiters; the current + // implementation does not immediately remove them when they become + // invalid but instead only marks them. + failedWaiters atomic.Int64 } // len returns the number of items in the queue. 
func (q *wantConnQueue) len() int { - return len(q.head) - q.headPos + len(q.tail) + return len(q.head) - q.headPos + len(q.tail) - int(q.failedWaiters.Load()) } // pushBack adds w to the back of the queue. @@ -2149,6 +2212,7 @@ func (q *wantConnQueue) clearFront() (cleaned bool) { return cleaned } q.popFront() + q.failedWaiters.Add(-1) cleaned = true } } @@ -2168,15 +2232,26 @@ func (q *wantConnQueue) clearFront() (cleaned bool) { type PipelineClient struct { noCopy noCopy + // Logger for logging client errors. + // + // By default standard logger from log package is used. + Logger Logger + + // Callback for connection establishing to the host. + // + // Default Dial is used if not set. + Dial DialFunc + + // Optional TLS config. + TLSConfig *tls.Config + // Address of the host to connect to. Addr string // PipelineClient name. Used in User-Agent request header. Name string - // NoDefaultUserAgentHeader when set to true, causes the default - // User-Agent header to be excluded from the Request. - NoDefaultUserAgentHeader bool + connClients []*pipelineConnClient // The maximum number of concurrent connections to the Addr. // @@ -2195,10 +2270,38 @@ type PipelineClient struct { // By default requests are sent immediately to the server. MaxBatchDelay time.Duration - // Callback for connection establishing to the host. + // Idle connection to the host is closed after this duration. // - // Default Dial is used if not set. - Dial DialFunc + // By default idle connection is closed after + // DefaultMaxIdleConnDuration. + MaxIdleConnDuration time.Duration + + // Buffer size for responses' reading. + // This also limits the maximum header size. + // + // Default buffer size is used if 0. + ReadBufferSize int + + // Buffer size for requests' writing. + // + // Default buffer size is used if 0. + WriteBufferSize int + + // Maximum duration for full response reading (including body). + // + // By default response read timeout is unlimited. + ReadTimeout time.Duration + + // Maximum duration for full request writing (including body). + // + // By default request write timeout is unlimited. + WriteTimeout time.Duration + + connClientsLock sync.Mutex + + // NoDefaultUserAgentHeader when set to true, causes the default + // User-Agent header to be excluded from the Request. + NoDefaultUserAgentHeader bool // Attempt to connect to both ipv4 and ipv6 host addresses // if set to true. @@ -2239,86 +2342,51 @@ type PipelineClient struct { // Whether to use TLS (aka SSL or HTTPS) for host connections. IsTLS bool +} - // Optional TLS config. - TLSConfig *tls.Config - - // Idle connection to the host is closed after this duration. - // - // By default idle connection is closed after - // DefaultMaxIdleConnDuration. - MaxIdleConnDuration time.Duration - - // Buffer size for responses' reading. - // This also limits the maximum header size. - // - // Default buffer size is used if 0. - ReadBufferSize int +type pipelineConnClient struct { + noCopy noCopy - // Buffer size for requests' writing. - // - // Default buffer size is used if 0. - WriteBufferSize int + workPool sync.Pool - // Maximum duration for full response reading (including body). - // - // By default response read timeout is unlimited. - ReadTimeout time.Duration + Logger Logger - // Maximum duration for full request writing (including body). - // - // By default request write timeout is unlimited. 
- WriteTimeout time.Duration + Dial DialFunc + TLSConfig *tls.Config + chW chan *pipelineWork + chR chan *pipelineWork - // Logger for logging client errors. - // - // By default standard logger from log package is used. - Logger Logger + tlsConfig *tls.Config - connClients []*pipelineConnClient - connClientsLock sync.Mutex -} + Addr string + Name string + MaxPendingRequests int + MaxBatchDelay time.Duration + MaxIdleConnDuration time.Duration + ReadBufferSize int + WriteBufferSize int + ReadTimeout time.Duration + WriteTimeout time.Duration -type pipelineConnClient struct { - noCopy noCopy + chLock sync.Mutex - Addr string - Name string + tlsConfigLock sync.Mutex NoDefaultUserAgentHeader bool - MaxPendingRequests int - MaxBatchDelay time.Duration - Dial DialFunc DialDualStack bool DisableHeaderNamesNormalizing bool DisablePathNormalizing bool IsTLS bool - TLSConfig *tls.Config - MaxIdleConnDuration time.Duration - ReadBufferSize int - WriteBufferSize int - ReadTimeout time.Duration - WriteTimeout time.Duration - Logger Logger - - workPool sync.Pool - - chLock sync.Mutex - chW chan *pipelineWork - chR chan *pipelineWork - - tlsConfigLock sync.Mutex - tlsConfig *tls.Config } type pipelineWork struct { - reqCopy Request respCopy Response + deadline time.Time + err error req *Request resp *Response t *time.Timer - deadline time.Time - err error done chan struct{} + reqCopy Request } // DoTimeout performs the given request and waits for response during @@ -2941,8 +3009,7 @@ func (t *transport) RoundTrip(hc *HostClient, req *Request, resp *Response) (ret err = ErrTimeout } - isConnRST := isConnectionReset(err) - if err != nil && !isConnRST { + if err != nil { hc.closeConn(cc) return true, err } @@ -2977,7 +3044,7 @@ func (t *transport) RoundTrip(hc *HostClient, req *Request, resp *Response) (ret return needRetry, err } - closeConn := resetConnection || req.ConnectionClose() || resp.ConnectionClose() || isConnRST + closeConn := resetConnection || req.ConnectionClose() || resp.ConnectionClose() if customStreamBody && resp.bodyStream != nil { rbs := resp.bodyStream resp.bodyStream = newCloseReaderWithError(rbs, func(wErr error) error { diff --git a/vendor/github.com/valyala/fasthttp/compress.go b/vendor/github.com/valyala/fasthttp/compress.go index 895dc51c..19bb4352 100644 --- a/vendor/github.com/valyala/fasthttp/compress.go +++ b/vendor/github.com/valyala/fasthttp/compress.go @@ -141,7 +141,7 @@ var ( // - CompressDefaultCompression // - CompressHuffmanOnly func AppendGzipBytesLevel(dst, src []byte, level int) []byte { - w := &byteSliceWriter{dst} + w := &byteSliceWriter{b: dst} WriteGzipLevel(w, src, level) //nolint:errcheck return w.b } @@ -212,7 +212,7 @@ func AppendGzipBytes(dst, src []byte) []byte { // WriteGunzip writes ungzipped p to w and returns the number of uncompressed // bytes written to w. func WriteGunzip(w io.Writer, p []byte) (int, error) { - r := &byteSliceReader{p} + r := &byteSliceReader{b: p} zr, err := acquireGzipReader(r) if err != nil { return 0, err @@ -228,7 +228,7 @@ func WriteGunzip(w io.Writer, p []byte) (int, error) { // AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst. 
func AppendGunzipBytes(dst, src []byte) ([]byte, error) { - w := &byteSliceWriter{dst} + w := &byteSliceWriter{b: dst} _, err := WriteGunzip(w, src) return w.b, err } @@ -244,7 +244,7 @@ func AppendGunzipBytes(dst, src []byte) ([]byte, error) { // - CompressDefaultCompression // - CompressHuffmanOnly func AppendDeflateBytesLevel(dst, src []byte, level int) []byte { - w := &byteSliceWriter{dst} + w := &byteSliceWriter{b: dst} WriteDeflateLevel(w, src, level) //nolint:errcheck return w.b } @@ -321,7 +321,7 @@ func AppendDeflateBytes(dst, src []byte) []byte { // WriteInflate writes inflated p to w and returns the number of uncompressed // bytes written to w. func WriteInflate(w io.Writer, p []byte) (int, error) { - r := &byteSliceReader{p} + r := &byteSliceReader{b: p} zr, err := acquireFlateReader(r) if err != nil { return 0, err @@ -337,7 +337,7 @@ func WriteInflate(w io.Writer, p []byte) (int, error) { // AppendInflateBytes appends inflated src to dst and returns the resulting dst. func AppendInflateBytes(dst, src []byte) ([]byte, error) { - w := &byteSliceWriter{dst} + w := &byteSliceWriter{b: dst} _, err := WriteInflate(w, src) return w.b, err } diff --git a/vendor/github.com/valyala/fasthttp/cookie.go b/vendor/github.com/valyala/fasthttp/cookie.go index e99ea675..3b1fe6b9 100644 --- a/vendor/github.com/valyala/fasthttp/cookie.go +++ b/vendor/github.com/valyala/fasthttp/cookie.go @@ -67,20 +67,22 @@ var cookiePool = &sync.Pool{ type Cookie struct { noCopy noCopy + expire time.Time + key []byte value []byte - expire time.Time - maxAge int domain []byte path []byte + bufK []byte + bufV []byte + + maxAge int + + sameSite CookieSameSite httpOnly bool secure bool - sameSite CookieSameSite partitioned bool - - bufKV argsKV - buf []byte } // CopyTo copies src cookie to c. @@ -154,14 +156,14 @@ func (c *Cookie) Path() []byte { // SetPath sets cookie path. func (c *Cookie) SetPath(path string) { - c.buf = append(c.buf[:0], path...) - c.path = normalizePath(c.path, c.buf) + c.bufK = append(c.bufK[:0], path...) + c.path = normalizePath(c.path, c.bufK) } // SetPathBytes sets cookie path. func (c *Cookie) SetPathBytes(path []byte) { - c.buf = append(c.buf[:0], path...) - c.path = normalizePath(c.path, c.buf) + c.bufK = append(c.bufK[:0], path...) + c.path = normalizePath(c.path, c.bufK) } // Domain returns cookie domain. @@ -282,11 +284,11 @@ func (c *Cookie) AppendBytes(dst []byte) []byte { dst = append(dst, '=') dst = AppendUint(dst, c.maxAge) } else if !c.expire.IsZero() { - c.bufKV.value = AppendHTTPDate(c.bufKV.value[:0], c.expire) + c.bufV = AppendHTTPDate(c.bufV[:0], c.expire) dst = append(dst, ';', ' ') dst = append(dst, strCookieExpires...) dst = append(dst, '=') - dst = append(dst, c.bufKV.value...) + dst = append(dst, c.bufV...) } if len(c.domain) > 0 { dst = appendCookiePart(dst, strCookieDomain, c.domain) @@ -334,8 +336,8 @@ func (c *Cookie) AppendBytes(dst []byte) []byte { // The returned value is valid until the Cookie reused or released (ReleaseCookie). // Do not store references to the returned value. Make copies instead. func (c *Cookie) Cookie() []byte { - c.buf = c.AppendBytes(c.buf[:0]) - return c.buf + c.bufK = c.AppendBytes(c.bufK[:0]) + return c.bufK } // String returns cookie representation. @@ -355,8 +357,8 @@ var errNoCookies = errors.New("no cookies found") // Parse parses Set-Cookie header. func (c *Cookie) Parse(src string) error { - c.buf = append(c.buf[:0], src...) - return c.ParseBytes(c.buf) + c.bufK = append(c.bufK[:0], src...) 
+ return c.ParseBytes(c.bufK) } // ParseBytes parses Set-Cookie header. @@ -366,21 +368,20 @@ func (c *Cookie) ParseBytes(src []byte) error { var s cookieScanner s.b = src - kv := &c.bufKV - if !s.next(kv) { + if !s.next(&c.bufK, &c.bufV) { return errNoCookies } - c.key = append(c.key, kv.key...) - c.value = append(c.value, kv.value...) + c.key = append(c.key, c.bufK...) + c.value = append(c.value, c.bufV...) - for s.next(kv) { - if len(kv.key) != 0 { + for s.next(&c.bufK, &c.bufV) { + if len(c.bufK) != 0 { // Case insensitive switch on first char - switch kv.key[0] | 0x20 { + switch c.bufK[0] | 0x20 { case 'm': - if caseInsensitiveCompare(strCookieMaxAge, kv.key) { - maxAge, err := ParseUint(kv.value) + if caseInsensitiveCompare(strCookieMaxAge, c.bufK) { + maxAge, err := ParseUint(c.bufV) if err != nil { return err } @@ -388,8 +389,8 @@ func (c *Cookie) ParseBytes(src []byte) error { } case 'e': // "expires" - if caseInsensitiveCompare(strCookieExpires, kv.key) { - v := b2s(kv.value) + if caseInsensitiveCompare(strCookieExpires, c.bufK) { + v := b2s(c.bufV) // Try the same two formats as net/http // See: https://github.com/golang/go/blob/00379be17e63a5b75b3237819392d2dc3b313a27/src/net/http/cookie.go#L133-L135 exptime, err := time.ParseInLocation(time.RFC1123, v, time.UTC) @@ -403,52 +404,52 @@ func (c *Cookie) ParseBytes(src []byte) error { } case 'd': // "domain" - if caseInsensitiveCompare(strCookieDomain, kv.key) { - c.domain = append(c.domain, kv.value...) + if caseInsensitiveCompare(strCookieDomain, c.bufK) { + c.domain = append(c.domain, c.bufV...) } case 'p': // "path" - if caseInsensitiveCompare(strCookiePath, kv.key) { - c.path = append(c.path, kv.value...) + if caseInsensitiveCompare(strCookiePath, c.bufK) { + c.path = append(c.path, c.bufV...) 
} case 's': // "samesite" - if caseInsensitiveCompare(strCookieSameSite, kv.key) { - if len(kv.value) > 0 { + if caseInsensitiveCompare(strCookieSameSite, c.bufK) { + if len(c.bufV) > 0 { // Case insensitive switch on first char - switch kv.value[0] | 0x20 { + switch c.bufV[0] | 0x20 { case 'l': // "lax" - if caseInsensitiveCompare(strCookieSameSiteLax, kv.value) { + if caseInsensitiveCompare(strCookieSameSiteLax, c.bufV) { c.sameSite = CookieSameSiteLaxMode } case 's': // "strict" - if caseInsensitiveCompare(strCookieSameSiteStrict, kv.value) { + if caseInsensitiveCompare(strCookieSameSiteStrict, c.bufV) { c.sameSite = CookieSameSiteStrictMode } case 'n': // "none" - if caseInsensitiveCompare(strCookieSameSiteNone, kv.value) { + if caseInsensitiveCompare(strCookieSameSiteNone, c.bufV) { c.sameSite = CookieSameSiteNoneMode } } } } } - } else if len(kv.value) != 0 { + } else if len(c.bufV) != 0 { // Case insensitive switch on first char - switch kv.value[0] | 0x20 { + switch c.bufV[0] | 0x20 { case 'h': // "httponly" - if caseInsensitiveCompare(strCookieHTTPOnly, kv.value) { + if caseInsensitiveCompare(strCookieHTTPOnly, c.bufV) { c.httpOnly = true } case 's': // "secure" - if caseInsensitiveCompare(strCookieSecure, kv.value) { + if caseInsensitiveCompare(strCookieSecure, c.bufV) { c.secure = true - } else if caseInsensitiveCompare(strCookieSameSite, kv.value) { + } else if caseInsensitiveCompare(strCookieSameSite, c.bufV) { c.sameSite = CookieSameSiteDefaultMode } case 'p': // "partitioned" - if caseInsensitiveCompare(strCookiePartitioned, kv.value) { + if caseInsensitiveCompare(strCookiePartitioned, c.bufV) { c.partitioned = true } } @@ -505,7 +506,7 @@ func parseRequestCookies(cookies []argsKV, src []byte) []argsKV { s.b = src var kv *argsKV cookies, kv = allocArg(cookies) - for s.next(kv) { + for s.next(&kv.key, &kv.value) { if len(kv.key) > 0 || len(kv.value) > 0 { cookies, kv = allocArg(cookies) } @@ -517,7 +518,7 @@ type cookieScanner struct { b []byte } -func (s *cookieScanner) next(kv *argsKV) bool { +func (s *cookieScanner) next(key, val *[]byte) bool { b := s.b if len(b) == 0 { return false @@ -530,23 +531,23 @@ func (s *cookieScanner) next(kv *argsKV) bool { case '=': if isKey { isKey = false - kv.key = decodeCookieArg(kv.key, b[:i], false) + *key = decodeCookieArg(*key, b[:i], false) k = i + 1 } case ';': if isKey { - kv.key = kv.key[:0] + *key = (*key)[:0] } - kv.value = decodeCookieArg(kv.value, b[k:i], true) + *val = decodeCookieArg(*val, b[k:i], true) s.b = b[i+1:] return true } } if isKey { - kv.key = kv.key[:0] + *key = (*key)[:0] } - kv.value = decodeCookieArg(kv.value, b[k:], true) + *val = decodeCookieArg(*val, b[k:], true) s.b = b[len(b):] return true } diff --git a/vendor/github.com/valyala/fasthttp/fasthttpadaptor/adaptor.go b/vendor/github.com/valyala/fasthttp/fasthttpadaptor/adaptor.go index 5e856fb5..7153c6da 100644 --- a/vendor/github.com/valyala/fasthttp/fasthttpadaptor/adaptor.go +++ b/vendor/github.com/valyala/fasthttp/fasthttpadaptor/adaptor.go @@ -88,10 +88,10 @@ func NewFastHTTPHandler(h http.Handler) fasthttp.RequestHandler { } type netHTTPResponseWriter struct { - statusCode int - h http.Header w io.Writer + h http.Header ctx *fasthttp.RequestCtx + statusCode int } func (w *netHTTPResponseWriter) StatusCode() int { diff --git a/vendor/github.com/valyala/fasthttp/fasthttpadaptor/request.go b/vendor/github.com/valyala/fasthttp/fasthttpadaptor/request.go index 62a85234..35266664 100644 --- a/vendor/github.com/valyala/fasthttp/fasthttpadaptor/request.go +++ 
b/vendor/github.com/valyala/fasthttp/fasthttpadaptor/request.go @@ -5,6 +5,7 @@ import ( "io" "net/http" "net/url" + "strings" "github.com/valyala/fasthttp" ) @@ -58,6 +59,9 @@ func ConvertRequest(ctx *fasthttp.RequestCtx, r *http.Request, forServer bool) e case "Transfer-Encoding": r.TransferEncoding = append(r.TransferEncoding, sv) default: + if sk == fasthttp.HeaderCookie { + sv = strings.Clone(sv) + } r.Header.Set(sk, sv) } }) diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go b/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go index 1aaa8e1b..2df46640 100644 --- a/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go @@ -14,11 +14,11 @@ var ErrInmemoryListenerClosed = errors.New("InmemoryListener is already closed: // It may be used either for fast in-process client<->server communications // without network stack overhead or for client<->server tests. type InmemoryListener struct { - lock sync.Mutex - closed bool - conns chan acceptConn listenerAddr net.Addr + conns chan acceptConn addrLock sync.RWMutex + lock sync.Mutex + closed bool } type acceptConn struct { @@ -117,7 +117,7 @@ func (ln *InmemoryListener) DialWithLocalAddr(local net.Addr) (net.Conn, error) ln.lock.Lock() accepted := make(chan struct{}) if !ln.closed { - ln.conns <- acceptConn{sConn, accepted} + ln.conns <- acceptConn{conn: sConn, accepted: accepted} // Wait until the connection has been accepted. <-accepted } else { diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go b/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go index d401fe92..f4466f55 100644 --- a/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go @@ -42,9 +42,9 @@ func NewPipeConns() *PipeConns { // // PipeConns is NOT safe for concurrent use by multiple goroutines! type PipeConns struct { + stopCh chan struct{} c1 pipeConn c2 pipeConn - stopCh chan struct{} stopChLock sync.Mutex } @@ -93,8 +93,9 @@ func (pc *PipeConns) Close() error { } type pipeConn struct { - b *byteBuffer - bb []byte + localAddr net.Addr + remoteAddr net.Addr + b *byteBuffer rCh chan *byteBuffer wCh chan *byteBuffer @@ -106,11 +107,11 @@ type pipeConn struct { readDeadlineCh <-chan time.Time writeDeadlineCh <-chan time.Time - readDeadlineChLock sync.Mutex + bb []byte - localAddr net.Addr - remoteAddr net.Addr - addrLock sync.RWMutex + addrLock sync.RWMutex + + readDeadlineChLock sync.Mutex } func (c *pipeConn) Write(p []byte) (int, error) { diff --git a/vendor/github.com/valyala/fasthttp/fs.go b/vendor/github.com/valyala/fasthttp/fs.go index 7738ca2a..22f82178 100644 --- a/vendor/github.com/valyala/fasthttp/fs.go +++ b/vendor/github.com/valyala/fasthttp/fs.go @@ -256,14 +256,46 @@ type FS struct { // FS is filesystem to serve files from. eg: embed.FS os.DirFS FS fs.FS + // Path rewriting function. + // + // By default request path is not modified. + PathRewrite PathRewriteFunc + + // PathNotFound fires when file is not found in filesystem + // this functions tries to replace "Cannot open requested path" + // server response giving to the programmer the control of server flow. + // + // By default PathNotFound returns + // "Cannot open requested path" + PathNotFound RequestHandler + + // Suffixes list to add to compressedFileSuffix depending on encoding + // + // This value has sense only if Compress is set. 
+ // + // FSCompressedFileSuffixes is used by default. + CompressedFileSuffixes map[string]string + + // If CleanStop is set, the channel can be closed to stop the cleanup handlers + // for the FS RequestHandlers created with NewRequestHandler. + // NEVER close this channel while the handler is still being used! + CleanStop chan struct{} + + h RequestHandler + // Path to the root directory to serve files from. Root string - // AllowEmptyRoot controls what happens when Root is empty. When false (default) it will default to the - // current working directory. An empty root is mostly useful when you want to use absolute paths - // on windows that are on different filesystems. On linux setting your Root to "/" already allows you to use - // absolute paths on any filesystem. - AllowEmptyRoot bool + // Path to the compressed root directory to serve files from. If this value + // is empty, Root is used. + CompressRoot string + + // Suffix to add to the name of cached compressed file. + // + // This value has sense only if Compress is set. + // + // FSCompressedFileSuffix is used by default. + CompressedFileSuffix string // List of index file names to try opening during directory access. // @@ -276,6 +308,26 @@ type FS struct { // By default the list is empty. IndexNames []string + // Expiration duration for inactive file handlers. + // + // FSHandlerCacheDuration is used by default. + CacheDuration time.Duration + + once sync.Once + + // AllowEmptyRoot controls what happens when Root is empty. When false (default) it will default to the + // current working directory. An empty root is mostly useful when you want to use absolute paths + // on windows that are on different filesystems. On linux setting your Root to "/" already allows you to use + // absolute paths on any filesystem. + AllowEmptyRoot bool + + // Uses brotli encoding and fallbacks to gzip in responses if set to true, uses gzip if set to false. + // + // This value has sense only if Compress is set. + // + // Brotli encoding is disabled by default. + CompressBrotli bool + // Index pages for directories without files matching IndexNames // are automatically generated if set. // @@ -298,66 +350,15 @@ type FS struct { // Transparent compression is disabled by default. Compress bool - // Uses brotli encoding and fallbacks to gzip in responses if set to true, uses gzip if set to false. - // - // This value has sense only if Compress is set. - // - // Brotli encoding is disabled by default. - CompressBrotli bool - - // Path to the compressed root directory to serve files from. If this value - // is empty, Root is used. - CompressRoot string - // Enables byte range requests if set to true. // // Byte range requests are disabled by default. AcceptByteRange bool - // Path rewriting function. - // - // By default request path is not modified. - PathRewrite PathRewriteFunc - - // PathNotFound fires when file is not found in filesystem - // this functions tries to replace "Cannot open requested path" - // server response giving to the programmer the control of server flow. - // - // By default PathNotFound returns - // "Cannot open requested path" - PathNotFound RequestHandler - // SkipCache if true, will cache no file handler. // // By default is false. SkipCache bool - - // Expiration duration for inactive file handlers. - // - // FSHandlerCacheDuration is used by default. - CacheDuration time.Duration - - // Suffix to add to the name of cached compressed file. - // - // This value has sense only if Compress is set. 
- // - // FSCompressedFileSuffix is used by default. - CompressedFileSuffix string - - // Suffixes list to add to compressedFileSuffix depending on encoding - // - // This value has sense only if Compress is set. - // - // FSCompressedFileSuffixes is used by default. - CompressedFileSuffixes map[string]string - - // If CleanStop is set, the channel can be closed to stop the cleanup handlers - // for the FS RequestHandlers created with NewRequestHandler. - // NEVER close this channel while the handler is still being used! - CleanStop chan struct{} - - once sync.Once - h RequestHandler } // FSCompressedFileSuffix is the suffix FS adds to the original file names @@ -502,40 +503,41 @@ func (fs *FS) initRequestHandler() { } type fsHandler struct { - filesystem fs.FS - root string - indexNames []string + smallFileReaderPool sync.Pool + filesystem fs.FS + + cacheManager cacheManager + pathRewrite PathRewriteFunc pathNotFound RequestHandler - generateIndexPages bool - compress bool - compressBrotli bool - compressRoot string - acceptByteRange bool compressedFileSuffixes map[string]string - cacheManager cacheManager - - smallFileReaderPool sync.Pool + root string + compressRoot string + indexNames []string + generateIndexPages bool + compress bool + compressBrotli bool + acceptByteRange bool } type fsFile struct { - h *fsHandler - f fs.File - filename string // fs.FileInfo.Name() return filename, isn't filepath. - dirIndex []byte - contentType string - contentLength int - compressed bool - - lastModified time.Time + lastModified time.Time + + t time.Time + f fs.File + h *fsHandler + filename string // fs.FileInfo.Name() return filename, isn't filepath. + contentType string + dirIndex []byte lastModifiedStr []byte - t time.Time - readersCount int + bigFiles []*bigFileReader + contentLength int + readersCount int - bigFiles []*bigFileReader bigFilesLock sync.Mutex + compressed bool } func (ff *fsFile) NewReader() (io.Reader, error) { @@ -817,6 +819,7 @@ func newCacheManager(fs *FS) cacheManager { cache: make(map[string]*fsFile), cacheBrotli: make(map[string]*fsFile), cacheGzip: make(map[string]*fsFile), + cacheZstd: make(map[string]*fsFile), } go instance.handleCleanCache(fs.CleanStop) @@ -845,10 +848,11 @@ func (*noopCacheManager) SetFileToCache(cacheKind CacheKind, path string, ff *fs } type inMemoryCacheManager struct { - cacheDuration time.Duration cache map[string]*fsFile cacheBrotli map[string]*fsFile cacheGzip map[string]*fsFile + cacheZstd map[string]*fsFile + cacheDuration time.Duration cacheLock sync.Mutex } @@ -867,6 +871,8 @@ func (cm *inMemoryCacheManager) getFsCache(cacheKind CacheKind) map[string]*fsFi fileCache = cm.cacheBrotli case gzipCacheKind: fileCache = cm.cacheGzip + case zstdCacheKind: + fileCache = cm.cacheZstd } return fileCache @@ -957,6 +963,7 @@ func (cm *inMemoryCacheManager) cleanCache(pendingFiles []*fsFile) []*fsFile { pendingFiles, filesToRelease = cleanCacheNolock(cm.cache, pendingFiles, filesToRelease, cm.cacheDuration) pendingFiles, filesToRelease = cleanCacheNolock(cm.cacheBrotli, pendingFiles, filesToRelease, cm.cacheDuration) pendingFiles, filesToRelease = cleanCacheNolock(cm.cacheGzip, pendingFiles, filesToRelease, cm.cacheDuration) + pendingFiles, filesToRelease = cleanCacheNolock(cm.cacheZstd, pendingFiles, filesToRelease, cm.cacheDuration) cm.cacheLock.Unlock() @@ -1015,7 +1022,6 @@ func (h *fsHandler) handleRequest(ctx *RequestCtx) { path = ctx.Path() } hasTrailingSlash := len(path) > 0 && path[len(path)-1] == '/' - path = stripTrailingSlashes(path) if n 
:= bytes.IndexByte(path, 0); n >= 0 { ctx.Logger().Printf("cannot serve path with nil byte at position %d: %q", n, path) @@ -1054,9 +1060,13 @@ func (h *fsHandler) handleRequest(ctx *RequestCtx) { } } - pathStr := string(path) + originalPathStr := string(path) + pathStr := originalPathStr + if hasTrailingSlash { + pathStr = originalPathStr[:len(originalPathStr)-1] + } - ff, ok := h.cacheManager.GetFileFromCache(fileCacheKind, pathStr) + ff, ok := h.cacheManager.GetFileFromCache(fileCacheKind, originalPathStr) if !ok { filePath := h.pathToFilePath(pathStr) @@ -1091,7 +1101,7 @@ func (h *fsHandler) handleRequest(ctx *RequestCtx) { return } - ff = h.cacheManager.SetFileToCache(fileCacheKind, pathStr, ff) + ff = h.cacheManager.SetFileToCache(fileCacheKind, originalPathStr, ff) } if !ctx.IfModifiedSince(ff.lastModified) { @@ -1396,7 +1406,7 @@ func (h *fsHandler) compressAndOpenFSFile(filePath, fileEncoding string) (*fsFil } if compressedFilePath != filePath { - if err := os.MkdirAll(filepath.Dir(compressedFilePath), os.ModePerm); err != nil { + if err := os.MkdirAll(filepath.Dir(compressedFilePath), 0o750); err != nil { return nil, err } } diff --git a/vendor/github.com/valyala/fasthttp/header.go b/vendor/github.com/valyala/fasthttp/header.go index b0262944..b6d8ffec 100644 --- a/vendor/github.com/valyala/fasthttp/header.go +++ b/vendor/github.com/valyala/fasthttp/header.go @@ -26,18 +26,9 @@ const ( type ResponseHeader struct { noCopy noCopy - disableNormalizing bool - noHTTP11 bool - connectionClose bool - noDefaultContentType bool - noDefaultDate bool - - statusCode int - statusMessage []byte - protocol []byte - contentLength int - contentLengthBytes []byte - secureErrorLogMessage bool + statusMessage []byte + protocol []byte + contentLengthBytes []byte contentType []byte contentEncoding []byte @@ -46,9 +37,20 @@ type ResponseHeader struct { h []argsKV trailer []argsKV - bufKV argsKV cookies []argsKV + bufK []byte + bufV []byte + + statusCode int + contentLength int + + disableNormalizing bool + noHTTP11 bool + connectionClose bool + noDefaultContentType bool + noDefaultDate bool + secureErrorLogMessage bool } // RequestHeader represents HTTP request header. @@ -61,19 +63,7 @@ type ResponseHeader struct { type RequestHeader struct { noCopy noCopy - disableNormalizing bool - noHTTP11 bool - connectionClose bool - noDefaultContentType bool - disableSpecialHeader bool - - // These two fields have been moved close to other bool fields - // for reducing RequestHeader object size. - cookiesCollected bool - - contentLength int - contentLengthBytes []byte - secureErrorLogMessage bool + contentLengthBytes []byte method []byte requestURI []byte @@ -85,19 +75,34 @@ type RequestHeader struct { h []argsKV trailer []argsKV - bufKV argsKV cookies []argsKV // stores an immutable copy of headers as they were received from the // wire. rawHeaders []byte + bufK []byte + bufV []byte + + contentLength int + + disableNormalizing bool + noHTTP11 bool + connectionClose bool + noDefaultContentType bool + disableSpecialHeader bool + + // These two fields have been moved close to other bool fields + // for reducing RequestHeader object size. + cookiesCollected bool + + secureErrorLogMessage bool } // SetContentRange sets 'Content-Range: bytes startPos-endPos/contentLength' // header. func (h *ResponseHeader) SetContentRange(startPos, endPos, contentLength int) { - b := h.bufKV.value[:0] + b := h.bufV[:0] b = append(b, strBytes...) 
b = append(b, ' ') b = AppendUint(b, startPos) @@ -105,9 +110,9 @@ func (h *ResponseHeader) SetContentRange(startPos, endPos, contentLength int) { b = AppendUint(b, endPos) b = append(b, '/') b = AppendUint(b, contentLength) - h.bufKV.value = b + h.bufV = b - h.setNonSpecial(strContentRange, h.bufKV.value) + h.setNonSpecial(strContentRange, h.bufV) } // SetByteRange sets 'Range: bytes=startPos-endPos' header. @@ -115,7 +120,7 @@ func (h *ResponseHeader) SetContentRange(startPos, endPos, contentLength int) { // - If startPos is negative, then 'bytes=-startPos' value is set. // - If endPos is negative, then 'bytes=startPos-' value is set. func (h *RequestHeader) SetByteRange(startPos, endPos int) { - b := h.bufKV.value[:0] + b := h.bufV[:0] b = append(b, strBytes...) b = append(b, '=') if startPos >= 0 { @@ -127,9 +132,9 @@ func (h *RequestHeader) SetByteRange(startPos, endPos int) { if endPos >= 0 { b = AppendUint(b, endPos) } - h.bufKV.value = b + h.bufV = b - h.setNonSpecial(strRange, h.bufKV.value) + h.setNonSpecial(strRange, h.bufV) } // StatusCode returns response status code. @@ -170,8 +175,8 @@ func (h *ResponseHeader) SetProtocol(protocol []byte) { // SetLastModified sets 'Last-Modified' header to the given value. func (h *ResponseHeader) SetLastModified(t time.Time) { - h.bufKV.value = AppendHTTPDate(h.bufKV.value[:0], t) - h.setNonSpecial(strLastModified, h.bufKV.value) + h.bufV = AppendHTTPDate(h.bufV[:0], t) + h.setNonSpecial(strLastModified, h.bufV) } // ConnectionClose returns true if 'Connection: close' header is set. @@ -408,30 +413,30 @@ func (h *RequestHeader) SetContentEncodingBytes(contentEncoding []byte) { // 'multipart/form-data; boundary=...' // where ... is substituted by the given boundary. func (h *RequestHeader) SetMultipartFormBoundary(boundary string) { - b := h.bufKV.value[:0] + b := h.bufV[:0] b = append(b, strMultipartFormData...) b = append(b, ';', ' ') b = append(b, strBoundary...) b = append(b, '=') b = append(b, boundary...) - h.bufKV.value = b + h.bufV = b - h.SetContentTypeBytes(h.bufKV.value) + h.SetContentTypeBytes(h.bufV) } // SetMultipartFormBoundaryBytes sets the following Content-Type: // 'multipart/form-data; boundary=...' // where ... is substituted by the given boundary. func (h *RequestHeader) SetMultipartFormBoundaryBytes(boundary []byte) { - b := h.bufKV.value[:0] + b := h.bufV[:0] b = append(b, strMultipartFormData...) b = append(b, ';', ' ') b = append(b, strBoundary...) b = append(b, '=') b = append(b, boundary...) - h.bufKV.value = b + h.bufV = b - h.SetContentTypeBytes(h.bufKV.value) + h.SetContentTypeBytes(h.bufV) } // SetTrailer sets header Trailer value for chunked response @@ -537,20 +542,26 @@ func (h *ResponseHeader) AddTrailerBytes(trailer []byte) error { err = ErrBadTrailer continue } - h.bufKV.key = append(h.bufKV.key[:0], key...) - normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) - h.trailer = appendArgBytes(h.trailer, h.bufKV.key, nil, argsNoValue) + h.bufK = append(h.bufK[:0], key...) + normalizeHeaderKey(h.bufK, h.disableNormalizing) + h.trailer = appendArgBytes(h.trailer, h.bufK, nil, argsNoValue) } return err } -// validHeaderFieldByte returns true if c is a valid tchar as defined -// by section 5.6.2 of [RFC9110]. +// validHeaderFieldByte returns true if c valid header field byte +// as defined by RFC 7230. func validHeaderFieldByte(c byte) bool { return c < 128 && validHeaderFieldByteTable[c] == 1 } +// validHeaderValueByte returns true if c valid header value byte +// as defined by RFC 7230. 
+func validHeaderValueByte(c byte) bool { + return validHeaderValueByteTable[c] == 1 +} + // VisitHeaderParams calls f for each parameter in the given header bytes. // It stops processing when f returns false or an invalid parameter is found. // Parameter values may be quoted, in which case \ is treated as an escape @@ -879,9 +890,9 @@ func (h *RequestHeader) AddTrailerBytes(trailer []byte) error { err = ErrBadTrailer continue } - h.bufKV.key = append(h.bufKV.key[:0], key...) - normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) - h.trailer = appendArgBytes(h.trailer, h.bufKV.key, nil, argsNoValue) + h.bufK = append(h.bufK[:0], key...) + normalizeHeaderKey(h.bufK, h.disableNormalizing) + h.trailer = appendArgBytes(h.trailer, h.bufK, nil, argsNoValue) } return err @@ -945,8 +956,8 @@ func (h *ResponseHeader) IsHTTP11() bool { // HasAcceptEncoding returns true if the header contains // the given Accept-Encoding value. func (h *RequestHeader) HasAcceptEncoding(acceptEncoding string) bool { - h.bufKV.value = append(h.bufKV.value[:0], acceptEncoding...) - return h.HasAcceptEncodingBytes(h.bufKV.value) + h.bufV = append(h.bufV[:0], acceptEncoding...) + return h.HasAcceptEncodingBytes(h.bufV) } // HasAcceptEncodingBytes returns true if the header contains @@ -1275,8 +1286,8 @@ func (h *RequestHeader) VisitAll(f func(key, value []byte)) { h.collectCookies() if len(h.cookies) > 0 { - h.bufKV.value = appendRequestCookieBytes(h.bufKV.value[:0], h.cookies) - f(strCookie, h.bufKV.value) + h.bufV = appendRequestCookieBytes(h.bufV[:0], h.cookies) + f(strCookie, h.bufV) } visitArgs(h.h, f) if h.ConnectionClose() { @@ -1304,15 +1315,15 @@ func (h *RequestHeader) VisitAllInOrder(f func(key, value []byte)) { // Del deletes header with the given key. func (h *ResponseHeader) Del(key string) { - k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) - h.del(k) + h.bufK = getHeaderKeyBytes(h.bufK, key, h.disableNormalizing) + h.del(h.bufK) } // DelBytes deletes header with the given key. func (h *ResponseHeader) DelBytes(key []byte) { - h.bufKV.key = append(h.bufKV.key[:0], key...) - normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) - h.del(h.bufKV.key) + h.bufK = append(h.bufK[:0], key...) + normalizeHeaderKey(h.bufK, h.disableNormalizing) + h.del(h.bufK) } func (h *ResponseHeader) del(key []byte) { @@ -1338,15 +1349,15 @@ func (h *ResponseHeader) del(key []byte) { // Del deletes header with the given key. func (h *RequestHeader) Del(key string) { - k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) - h.del(k) + h.bufK = getHeaderKeyBytes(h.bufK, key, h.disableNormalizing) + h.del(h.bufK) } // DelBytes deletes header with the given key. func (h *RequestHeader) DelBytes(key []byte) { - h.bufKV.key = append(h.bufKV.key[:0], key...) - normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) - h.del(h.bufKV.key) + h.bufK = append(h.bufK[:0], key...) + normalizeHeaderKey(h.bufK, h.disableNormalizing) + h.del(h.bufK) } func (h *RequestHeader) del(key []byte) { @@ -1554,8 +1565,8 @@ func (h *ResponseHeader) AddBytesKV(key, value []byte) { return } - k := getHeaderKeyBytes(&h.bufKV, b2s(key), h.disableNormalizing) - h.h = appendArgBytes(h.h, k, value, argsHasValue) + h.bufK = getHeaderKeyBytes(h.bufK, b2s(key), h.disableNormalizing) + h.h = appendArgBytes(h.h, h.bufK, value, argsHasValue) } // Set sets the given 'key: value' header. @@ -1565,8 +1576,8 @@ func (h *ResponseHeader) AddBytesKV(key, value []byte) { // // Use Add for setting multiple header values under the same key. 
func (h *ResponseHeader) Set(key, value string) { - initHeaderKV(&h.bufKV, key, value, h.disableNormalizing) - h.SetCanonical(h.bufKV.key, h.bufKV.value) + h.bufK, h.bufV = initHeaderKV(h.bufK, h.bufV, key, value, h.disableNormalizing) + h.SetCanonical(h.bufK, h.bufV) } // SetBytesK sets the given 'key: value' header. @@ -1576,8 +1587,8 @@ func (h *ResponseHeader) Set(key, value string) { // // Use AddBytesK for setting multiple header values under the same key. func (h *ResponseHeader) SetBytesK(key []byte, value string) { - h.bufKV.value = append(h.bufKV.value[:0], value...) - h.SetBytesKV(key, h.bufKV.value) + h.bufV = append(h.bufV[:0], value...) + h.SetBytesKV(key, h.bufV) } // SetBytesV sets the given 'key: value' header. @@ -1587,8 +1598,8 @@ func (h *ResponseHeader) SetBytesK(key []byte, value string) { // // Use AddBytesV for setting multiple header values under the same key. func (h *ResponseHeader) SetBytesV(key string, value []byte) { - k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) - h.SetCanonical(k, value) + h.bufK = getHeaderKeyBytes(h.bufK, key, h.disableNormalizing) + h.SetCanonical(h.bufK, value) } // SetBytesKV sets the given 'key: value' header. @@ -1598,9 +1609,9 @@ func (h *ResponseHeader) SetBytesV(key string, value []byte) { // // Use AddBytesKV for setting multiple header values under the same key. func (h *ResponseHeader) SetBytesKV(key, value []byte) { - h.bufKV.key = append(h.bufKV.key[:0], key...) - normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) - h.SetCanonical(h.bufKV.key, value) + h.bufK = append(h.bufK[:0], key...) + normalizeHeaderKey(h.bufK, h.disableNormalizing) + h.SetCanonical(h.bufK, value) } // SetCanonical sets the given 'key: value' header assuming that @@ -1765,8 +1776,8 @@ func (h *RequestHeader) AddBytesKV(key, value []byte) { return } - k := getHeaderKeyBytes(&h.bufKV, b2s(key), h.disableNormalizing) - h.h = appendArgBytes(h.h, k, value, argsHasValue) + h.bufK = getHeaderKeyBytes(h.bufK, b2s(key), h.disableNormalizing) + h.h = appendArgBytes(h.h, h.bufK, value, argsHasValue) } // Set sets the given 'key: value' header. @@ -1776,8 +1787,8 @@ func (h *RequestHeader) AddBytesKV(key, value []byte) { // // Use Add for setting multiple header values under the same key. func (h *RequestHeader) Set(key, value string) { - initHeaderKV(&h.bufKV, key, value, h.disableNormalizing) - h.SetCanonical(h.bufKV.key, h.bufKV.value) + h.bufK, h.bufV = initHeaderKV(h.bufK, h.bufV, key, value, h.disableNormalizing) + h.SetCanonical(h.bufK, h.bufV) } // SetBytesK sets the given 'key: value' header. @@ -1787,8 +1798,8 @@ func (h *RequestHeader) Set(key, value string) { // // Use AddBytesK for setting multiple header values under the same key. func (h *RequestHeader) SetBytesK(key []byte, value string) { - h.bufKV.value = append(h.bufKV.value[:0], value...) - h.SetBytesKV(key, h.bufKV.value) + h.bufV = append(h.bufV[:0], value...) + h.SetBytesKV(key, h.bufV) } // SetBytesV sets the given 'key: value' header. @@ -1798,8 +1809,8 @@ func (h *RequestHeader) SetBytesK(key []byte, value string) { // // Use AddBytesV for setting multiple header values under the same key. func (h *RequestHeader) SetBytesV(key string, value []byte) { - k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) - h.SetCanonical(k, value) + h.bufK = getHeaderKeyBytes(h.bufK, key, h.disableNormalizing) + h.SetCanonical(h.bufK, value) } // SetBytesKV sets the given 'key: value' header. 
@@ -1809,9 +1820,9 @@ func (h *RequestHeader) SetBytesV(key string, value []byte) { // // Use AddBytesKV for setting multiple header values under the same key. func (h *RequestHeader) SetBytesKV(key, value []byte) { - h.bufKV.key = append(h.bufKV.key[:0], key...) - normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) - h.SetCanonical(h.bufKV.key, value) + h.bufK = append(h.bufK[:0], key...) + normalizeHeaderKey(h.bufK, h.disableNormalizing) + h.SetCanonical(h.bufK, value) } // SetCanonical sets the given 'key: value' header assuming that @@ -1832,8 +1843,8 @@ func (h *RequestHeader) SetCanonical(key, value []byte) { // either though ReleaseResponse or your request handler returning. // Do not store references to the returned value. Make copies instead. func (h *ResponseHeader) Peek(key string) []byte { - k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) - return h.peek(k) + h.bufK = getHeaderKeyBytes(h.bufK, key, h.disableNormalizing) + return h.peek(h.bufK) } // PeekBytes returns header value for the given key. @@ -1842,9 +1853,9 @@ func (h *ResponseHeader) Peek(key string) []byte { // either though ReleaseResponse or your request handler returning. // Do not store references to returned value. Make copies instead. func (h *ResponseHeader) PeekBytes(key []byte) []byte { - h.bufKV.key = append(h.bufKV.key[:0], key...) - normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) - return h.peek(h.bufKV.key) + h.bufK = append(h.bufK[:0], key...) + normalizeHeaderKey(h.bufK, h.disableNormalizing) + return h.peek(h.bufK) } // Peek returns header value for the given key. @@ -1853,8 +1864,8 @@ func (h *ResponseHeader) PeekBytes(key []byte) []byte { // either though ReleaseRequest or your request handler returning. // Do not store references to returned value. Make copies instead. func (h *RequestHeader) Peek(key string) []byte { - k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) - return h.peek(k) + h.bufK = getHeaderKeyBytes(h.bufK, key, h.disableNormalizing) + return h.peek(h.bufK) } // PeekBytes returns header value for the given key. @@ -1863,9 +1874,9 @@ func (h *RequestHeader) Peek(key string) []byte { // either though ReleaseRequest or your request handler returning. // Do not store references to returned value. Make copies instead. func (h *RequestHeader) PeekBytes(key []byte) []byte { - h.bufKV.key = append(h.bufKV.key[:0], key...) - normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) - return h.peek(h.bufKV.key) + h.bufK = append(h.bufK[:0], key...) + normalizeHeaderKey(h.bufK, h.disableNormalizing) + return h.peek(h.bufK) } func (h *ResponseHeader) peek(key []byte) []byte { @@ -1926,8 +1937,8 @@ func (h *RequestHeader) peek(key []byte) []byte { // Any future calls to the Peek* will modify the returned value. // Do not store references to returned value. Make copies instead. func (h *RequestHeader) PeekAll(key string) [][]byte { - k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) - return h.peekAll(k) + h.bufK = getHeaderKeyBytes(h.bufK, key, h.disableNormalizing) + return h.peekAll(h.bufK) } func (h *RequestHeader) peekAll(key []byte) [][]byte { @@ -1974,8 +1985,8 @@ func (h *RequestHeader) peekAll(key []byte) [][]byte { // Any future calls to the Peek* will modify the returned value. // Do not store references to returned value. Make copies instead. 
func (h *ResponseHeader) PeekAll(key string) [][]byte { - k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) - return h.peekAll(k) + h.bufK = getHeaderKeyBytes(h.bufK, key, h.disableNormalizing) + return h.peekAll(h.bufK) } func (h *ResponseHeader) peekAll(key []byte) [][]byte { @@ -2323,7 +2334,7 @@ func (h *RequestHeader) tryRead(r *bufio.Reader, n int) error { // n == 1 on the first read for the request. if n == 1 { // We didn't read a single byte. - return ErrNothingRead{err} + return ErrNothingRead{error: err} } return fmt.Errorf("error when reading request headers: %w", err) @@ -2403,8 +2414,8 @@ func (h *ResponseHeader) WriteTo(w io.Writer) (int64, error) { // either though ReleaseRequest or your request handler returning. // Do not store references to returned value. Make copies instead. func (h *ResponseHeader) Header() []byte { - h.bufKV.value = h.AppendBytes(h.bufKV.value[:0]) - return h.bufKV.value + h.bufV = h.AppendBytes(h.bufV[:0]) + return h.bufV } // writeTrailer writes response trailer to w. @@ -2421,13 +2432,13 @@ func (h *ResponseHeader) writeTrailer(w *bufio.Writer) error { // either though ReleaseRequest or your request handler returning. // Do not store references to returned value. Make copies instead. func (h *ResponseHeader) TrailerHeader() []byte { - h.bufKV.value = h.bufKV.value[:0] + h.bufV = h.bufV[:0] for _, t := range h.trailer { value := h.peek(t.key) - h.bufKV.value = appendHeaderLine(h.bufKV.value, t.key, value) + h.bufV = appendHeaderLine(h.bufV, t.key, value) } - h.bufKV.value = append(h.bufKV.value, strCRLF...) - return h.bufKV.value + h.bufV = append(h.bufV, strCRLF...) + return h.bufV } // String returns response header representation. @@ -2535,8 +2546,8 @@ func (h *RequestHeader) WriteTo(w io.Writer) (int64, error) { // either though ReleaseRequest or your request handler returning. // Do not store references to returned value. Make copies instead. func (h *RequestHeader) Header() []byte { - h.bufKV.value = h.AppendBytes(h.bufKV.value[:0]) - return h.bufKV.value + h.bufV = h.AppendBytes(h.bufV[:0]) + return h.bufV } // writeTrailer writes request trailer to w. @@ -2553,13 +2564,13 @@ func (h *RequestHeader) writeTrailer(w *bufio.Writer) error { // either though ReleaseRequest or your request handler returning. // Do not store references to returned value. Make copies instead. func (h *RequestHeader) TrailerHeader() []byte { - h.bufKV.value = h.bufKV.value[:0] + h.bufV = h.bufV[:0] for _, t := range h.trailer { value := h.peek(t.key) - h.bufKV.value = appendHeaderLine(h.bufKV.value, t.key, value) + h.bufV = appendHeaderLine(h.bufV, t.key, value) } - h.bufKV.value = append(h.bufKV.value, strCRLF...) - return h.bufKV.value + h.bufV = append(h.bufV, strCRLF...) + return h.bufV } // RawHeaders returns raw header key/value bytes. @@ -2849,6 +2860,15 @@ func (h *ResponseHeader) parseFirstLine(buf []byte) (int, error) { return len(buf) - len(bNext), nil } +func isValidMethod(method []byte) bool { + for _, ch := range method { + if validMethodValueByteTable[ch] == 0 { + return false + } + } + return true +} + func (h *RequestHeader) parseFirstLine(buf []byte) (int, error) { bNext := buf var b []byte @@ -2868,6 +2888,14 @@ func (h *RequestHeader) parseFirstLine(buf []byte) (int, error) { return 0, fmt.Errorf("cannot find http request method in %q", buf) } h.method = append(h.method[:0], b[:n]...) 
+ + if !isValidMethod(h.method) { + if h.secureErrorLogMessage { + return 0, errors.New("unsupported http request method") + } + return 0, fmt.Errorf("unsupported http request method %q in %q", h.method, buf) + } + b = b[n+1:] // parse requestURI @@ -2945,75 +2973,90 @@ func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) { var s headerScanner s.b = buf s.disableNormalizing = h.disableNormalizing - var err error var kv *argsKV -outer: for s.next() { - if len(s.key) > 0 { - for _, ch := range s.key { - if !validHeaderFieldByte(ch) { - err = fmt.Errorf("invalid header key %q", s.key) - continue outer - } + if len(s.key) == 0 { + h.connectionClose = true + return 0, fmt.Errorf("invalid header key %q", s.key) + } + + for _, ch := range s.key { + if !validHeaderFieldByte(ch) { + h.connectionClose = true + return 0, fmt.Errorf("invalid header key %q", s.key) + } + } + for _, ch := range s.value { + if !validHeaderValueByte(ch) { + h.connectionClose = true + return 0, fmt.Errorf("invalid header value %q", s.value) } + } - switch s.key[0] | 0x20 { - case 'c': - if caseInsensitiveCompare(s.key, strContentType) { - h.contentType = append(h.contentType[:0], s.value...) - continue - } - if caseInsensitiveCompare(s.key, strContentEncoding) { - h.contentEncoding = append(h.contentEncoding[:0], s.value...) - continue - } - if caseInsensitiveCompare(s.key, strContentLength) { - if h.contentLength != -1 { - if h.contentLength, err = parseContentLength(s.value); err != nil { - h.contentLength = -2 - } else { - h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...) - } - } - continue - } - if caseInsensitiveCompare(s.key, strConnection) { - if bytes.Equal(s.value, strClose) { + switch s.key[0] | 0x20 { + case 'c': + if caseInsensitiveCompare(s.key, strContentType) { + h.contentType = append(h.contentType[:0], s.value...) + continue + } + if caseInsensitiveCompare(s.key, strContentEncoding) { + h.contentEncoding = append(h.contentEncoding[:0], s.value...) + continue + } + if caseInsensitiveCompare(s.key, strContentLength) { + if h.contentLength != -1 { + var err error + h.contentLength, err = parseContentLength(s.value) + if err != nil { + h.contentLength = -2 h.connectionClose = true - } else { - h.connectionClose = false - h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) + return 0, err } - continue - } - case 's': - if caseInsensitiveCompare(s.key, strServer) { - h.server = append(h.server[:0], s.value...) - continue + h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...) } - if caseInsensitiveCompare(s.key, strSetCookie) { - h.cookies, kv = allocArg(h.cookies) - kv.key = getCookieKey(kv.key, s.value) - kv.value = append(kv.value[:0], s.value...) - continue + continue + } + if caseInsensitiveCompare(s.key, strConnection) { + if bytes.Equal(s.value, strClose) { + h.connectionClose = true + } else { + h.connectionClose = false + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) } - case 't': - if caseInsensitiveCompare(s.key, strTransferEncoding) { - if len(s.value) > 0 && !bytes.Equal(s.value, strIdentity) { - h.contentLength = -1 - h.h = setArgBytes(h.h, strTransferEncoding, strChunked, argsHasValue) - } - continue + continue + } + case 's': + if caseInsensitiveCompare(s.key, strServer) { + h.server = append(h.server[:0], s.value...) + continue + } + if caseInsensitiveCompare(s.key, strSetCookie) { + h.cookies, kv = allocArg(h.cookies) + kv.key = getCookieKey(kv.key, s.value) + kv.value = append(kv.value[:0], s.value...) 
+ continue + } + case 't': + if caseInsensitiveCompare(s.key, strTransferEncoding) { + if len(s.value) > 0 && !bytes.Equal(s.value, strIdentity) { + h.contentLength = -1 + h.h = setArgBytes(h.h, strTransferEncoding, strChunked, argsHasValue) } - if caseInsensitiveCompare(s.key, strTrailer) { - err = h.SetTrailerBytes(s.value) - continue + continue + } + if caseInsensitiveCompare(s.key, strTrailer) { + err := h.SetTrailerBytes(s.value) + if err != nil { + h.connectionClose = true + return 0, err } + continue } - h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) } + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) } + if s.err != nil { h.connectionClose = true return 0, s.err @@ -3032,7 +3075,7 @@ outer: h.connectionClose = !hasHeaderValue(v, strKeepAlive) } - return len(buf) - len(s.b), err + return len(buf) - len(s.b), nil } func (h *RequestHeader) parseHeaders(buf []byte) (int, error) { @@ -3043,103 +3086,109 @@ func (h *RequestHeader) parseHeaders(buf []byte) (int, error) { var s headerScanner s.b = buf s.disableNormalizing = h.disableNormalizing - var err error -outer: for s.next() { - if len(s.key) > 0 { - for _, ch := range s.key { - if !validHeaderFieldByte(ch) { - err = fmt.Errorf("invalid header key %q", s.key) - continue outer - } + if len(s.key) == 0 { + h.connectionClose = true + return 0, fmt.Errorf("invalid header key %q", s.key) + } + + for _, ch := range s.key { + if !validHeaderFieldByte(ch) { + h.connectionClose = true + return 0, fmt.Errorf("invalid header key %q", s.key) } + } + for _, ch := range s.value { + if !validHeaderValueByte(ch) { + h.connectionClose = true + return 0, fmt.Errorf("invalid header value %q", s.value) + } + } + + if h.disableSpecialHeader { + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) + continue + } - if h.disableSpecialHeader { - h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) + switch s.key[0] | 0x20 { + case 'h': + if caseInsensitiveCompare(s.key, strHost) { + h.host = append(h.host[:0], s.value...) continue } - - switch s.key[0] | 0x20 { - case 'h': - if caseInsensitiveCompare(s.key, strHost) { - h.host = append(h.host[:0], s.value...) - continue - } - case 'u': - if caseInsensitiveCompare(s.key, strUserAgent) { - h.userAgent = append(h.userAgent[:0], s.value...) - continue - } - case 'c': - if caseInsensitiveCompare(s.key, strContentType) { - h.contentType = append(h.contentType[:0], s.value...) - continue - } - if caseInsensitiveCompare(s.key, strContentLength) { - if contentLengthSeen { - return 0, errors.New("duplicate Content-Length header") - } - contentLengthSeen = true - - if h.contentLength != -1 { - var nerr error - if h.contentLength, nerr = parseContentLength(s.value); nerr != nil { - if err == nil { - err = nerr - } - h.contentLength = -2 - } else { - h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...) - } - } - continue + case 'u': + if caseInsensitiveCompare(s.key, strUserAgent) { + h.userAgent = append(h.userAgent[:0], s.value...) + continue + } + case 'c': + if caseInsensitiveCompare(s.key, strContentType) { + h.contentType = append(h.contentType[:0], s.value...) 
+ continue + } + if caseInsensitiveCompare(s.key, strContentLength) { + if contentLengthSeen { + h.connectionClose = true + return 0, errors.New("duplicate Content-Length header") } - if caseInsensitiveCompare(s.key, strConnection) { - if bytes.Equal(s.value, strClose) { + contentLengthSeen = true + + if h.contentLength != -1 { + var err error + h.contentLength, err = parseContentLength(s.value) + if err != nil { + h.contentLength = -2 h.connectionClose = true - } else { - h.connectionClose = false - h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) + return 0, err } - continue + h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...) + } + continue + } + if caseInsensitiveCompare(s.key, strConnection) { + if bytes.Equal(s.value, strClose) { + h.connectionClose = true + } else { + h.connectionClose = false + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) } - case 't': - if caseInsensitiveCompare(s.key, strTransferEncoding) { - isIdentity := caseInsensitiveCompare(s.value, strIdentity) - isChunked := caseInsensitiveCompare(s.value, strChunked) - - if !isIdentity && !isChunked { - if h.secureErrorLogMessage { - return 0, errors.New("unsupported Transfer-Encoding") - } - return 0, fmt.Errorf("unsupported Transfer-Encoding: %q", s.value) + continue + } + case 't': + if caseInsensitiveCompare(s.key, strTransferEncoding) { + isIdentity := caseInsensitiveCompare(s.value, strIdentity) + isChunked := caseInsensitiveCompare(s.value, strChunked) + + if !isIdentity && !isChunked { + h.connectionClose = true + if h.secureErrorLogMessage { + return 0, errors.New("unsupported Transfer-Encoding") } + return 0, fmt.Errorf("unsupported Transfer-Encoding: %q", s.value) + } - if isChunked { - h.contentLength = -1 - h.h = setArgBytes(h.h, strTransferEncoding, strChunked, argsHasValue) - } - continue + if isChunked { + h.contentLength = -1 + h.h = setArgBytes(h.h, strTransferEncoding, strChunked, argsHasValue) } - if caseInsensitiveCompare(s.key, strTrailer) { - if nerr := h.SetTrailerBytes(s.value); nerr != nil { - if err == nil { - err = nerr - } - } - continue + continue + } + if caseInsensitiveCompare(s.key, strTrailer) { + err := h.SetTrailerBytes(s.value) + if err != nil { + h.connectionClose = true + return 0, err } + continue } } h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) } - if s.err != nil && err == nil { - err = s.err - } - if err != nil { + + if s.err != nil { h.connectionClose = true - return 0, err + return 0, s.err } if h.contentLength < 0 { @@ -3187,16 +3236,15 @@ func parseContentLength(b []byte) (int, error) { } type headerScanner struct { + err error + b []byte key []byte value []byte - err error // hLen stores header subslice len hLen int - disableNormalizing bool - // by checking whether the next line contains a colon or not to tell // it's a header entry or a multi line value of current header entry. // the side effect of this operation is that we know the index of the @@ -3205,7 +3253,8 @@ type headerScanner struct { nextColon int nextNewLine int - initialized bool + disableNormalizing bool + initialized bool } func (s *headerScanner) next() bool { @@ -3253,7 +3302,7 @@ func (s *headerScanner) next() bool { s.key = s.b[:n] normalizeHeaderKey(s.key, s.disableNormalizing) n++ - for len(s.b) > n && s.b[n] == ' ' { + for len(s.b) > n && (s.b[n] == ' ' || s.b[n] == '\t') { n++ // the newline index is a relative index, and lines below trimmed `s.b` by `n`, // so the relative newline index also shifted forward. 
it's safe to decrease @@ -3307,13 +3356,14 @@ func (s *headerScanner) next() bool { if n > 0 && s.value[n-1] == rChar { n-- } - for n > 0 && s.value[n-1] == ' ' { + for n > 0 && (s.value[n-1] == ' ' || s.value[n-1] == '\t') { n-- } s.value = s.value[:n] if isMultiLineValue { s.value, s.b, s.hLen = normalizeHeaderValue(s.value, oldB, s.hLen) } + return true } @@ -3371,17 +3421,18 @@ func nextLine(b []byte) ([]byte, []byte, error) { return b[:n], b[nNext+1:], nil } -func initHeaderKV(kv *argsKV, key, value string, disableNormalizing bool) { - kv.key = getHeaderKeyBytes(kv, key, disableNormalizing) +func initHeaderKV(bufK, bufV []byte, key, value string, disableNormalizing bool) ([]byte, []byte) { + bufK = getHeaderKeyBytes(bufK, key, disableNormalizing) // https://tools.ietf.org/html/rfc7230#section-3.2.4 - kv.value = append(kv.value[:0], value...) - kv.value = removeNewLines(kv.value) + bufV = append(bufV[:0], value...) + bufV = removeNewLines(bufV) + return bufK, bufV } -func getHeaderKeyBytes(kv *argsKV, key string, disableNormalizing bool) []byte { - kv.key = append(kv.key[:0], key...) - normalizeHeaderKey(kv.key, disableNormalizing) - return kv.key +func getHeaderKeyBytes(bufK []byte, key string, disableNormalizing bool) []byte { + bufK = append(bufK[:0], key...) + normalizeHeaderKey(bufK, disableNormalizing) + return bufK } func normalizeHeaderValue(ov, ob []byte, headerLength int) (nv, nb []byte, nhl int) { @@ -3392,6 +3443,7 @@ func normalizeHeaderValue(ov, ob []byte, headerLength int) (nv, nb []byte, nhl i } write := 0 shrunk := 0 + once := false lineStart := false for read := 0; read < length; read++ { c := ov[read] @@ -3400,10 +3452,17 @@ func normalizeHeaderValue(ov, ob []byte, headerLength int) (nv, nb []byte, nhl i shrunk++ if c == nChar { lineStart = true + once = false } continue - case lineStart && c == '\t': - c = ' ' + case lineStart && (c == '\t' || c == ' '): + if !once { + c = ' ' + once = true + } else { + shrunk++ + continue + } default: lineStart = false } diff --git a/vendor/github.com/valyala/fasthttp/headers.go b/vendor/github.com/valyala/fasthttp/headers.go index 4f916290..e06b7349 100644 --- a/vendor/github.com/valyala/fasthttp/headers.go +++ b/vendor/github.com/valyala/fasthttp/headers.go @@ -136,7 +136,7 @@ const ( // WebSockets. HeaderSecWebSocketAccept = "Sec-WebSocket-Accept" - HeaderSecWebSocketExtensions = "Sec-WebSocket-Extensions" /* #nosec G101 */ + HeaderSecWebSocketExtensions = "Sec-WebSocket-Extensions" // #nosec G101 HeaderSecWebSocketKey = "Sec-WebSocket-Key" HeaderSecWebSocketProtocol = "Sec-WebSocket-Protocol" HeaderSecWebSocketVersion = "Sec-WebSocket-Version" diff --git a/vendor/github.com/valyala/fasthttp/http.go b/vendor/github.com/valyala/fasthttp/http.go index 75d8b6ca..f8217014 100644 --- a/vendor/github.com/valyala/fasthttp/http.go +++ b/vendor/github.com/valyala/fasthttp/http.go @@ -38,21 +38,28 @@ func SetBodySizePoolLimit(reqBodyLimit, respBodyLimit int) { type Request struct { noCopy noCopy - // Request header. - // - // Copying Header by value is forbidden. Use pointer to Header instead. - Header RequestHeader - - uri URI - postArgs Args - bodyStream io.Reader w requestBodyWriter body *bytebufferpool.ByteBuffer - bodyRaw []byte multipartForm *multipart.Form multipartFormBoundary string + + postArgs Args + + bodyRaw []byte + + uri URI + + // Request header. + // + // Copying Header by value is forbidden. Use pointer to Header instead. + Header RequestHeader + + // Request timeout. 
Usually set by DoDeadline or DoTimeout + // if <= 0, the timeout is not set + timeout time.Duration + secureErrorLogMessage bool // Group bool members in order to reduce Request object size. @@ -65,10 +72,6 @@ type Request struct { // Client/HostClient shouldn't use this field but should depend on the uri.scheme instead. isTLS bool - // Request timeout. Usually set by DoDeadline or DoTimeout - // if <= 0, means not set - timeout time.Duration - // Use Host header (request.Header.SetHost) instead of the host from SetRequestURI, SetHost, or URI().SetHost UseHostHeader bool @@ -88,6 +91,17 @@ type Request struct { type Response struct { noCopy noCopy + bodyStream io.Reader + + // Remote TCPAddr from the underlying net.Conn. + raddr net.Addr + // Local TCPAddr from the underlying net.Conn. + laddr net.Addr + w responseBodyWriter + body *bytebufferpool.ByteBuffer + + bodyRaw []byte + // Response header. // // Copying Header by value is forbidden. Use pointer to Header instead. @@ -101,11 +115,6 @@ type Response struct { // Use SetBodyStream to set the body stream. StreamBody bool - bodyStream io.Reader - w responseBodyWriter - body *bytebufferpool.ByteBuffer - bodyRaw []byte - // Response.Read() skips reading body if set to true. // Use it for reading HEAD responses. // @@ -115,11 +124,6 @@ type Response struct { keepBodyBuffer bool secureErrorLogMessage bool - - // Remote TCPAddr from concurrently net.Conn. - raddr net.Addr - // Local TCPAddr from concurrently net.Conn. - laddr net.Addr } // SetHost sets host for the request. @@ -1435,19 +1439,14 @@ func (resp *Response) ReadLimitBody(r *bufio.Reader, maxBodySize int) error { if !resp.mustSkipBody() { err = resp.ReadBody(r, maxBodySize) if err != nil { - if isConnectionReset(err) { - return nil - } return err } } - if resp.Header.ContentLength() == -1 && !resp.StreamBody { + // A response without a body can't have trailers. + if resp.Header.ContentLength() == -1 && !resp.StreamBody && !resp.mustSkipBody() { err = resp.Header.ReadTrailer(r) if err != nil && err != io.EOF { - if isConnectionReset(err) { - return nil - } return err } } @@ -1596,10 +1595,10 @@ func (req *Request) Write(w *bufio.Writer) error { nl := len(uri.username) + len(uri.password) + 1 nb := nl + len(strBasicSpace) tl := nb + base64.StdEncoding.EncodedLen(nl) - if tl > cap(req.Header.bufKV.value) { - req.Header.bufKV.value = make([]byte, 0, tl) + if tl > cap(req.Header.bufV) { + req.Header.bufV = make([]byte, 0, tl) } - buf := req.Header.bufKV.value[:0] + buf := req.Header.bufV[:0] buf = append(buf, uri.username...) buf = append(buf, strColon...) buf = append(buf, uri.password...) @@ -2279,12 +2278,13 @@ func readBodyWithStreaming(r *bufio.Reader, contentLength, maxBodySize int, dst readN = 8 * 1024 } - if contentLength >= 0 && maxBodySize >= contentLength { - b, err = appendBodyFixedSize(r, dst, readN) - } else { - b, err = readBodyIdentity(r, readN, dst) - } - + // A fixed-length pre-read function should be used here; otherwise, + // it may read content beyond the request body into areas outside + // the br buffer. This could affect the handling of the next request + // in the br buffer, if there is one. The original two branches can + // be handled with this single branch.
by the way, + // fix issue: https://github.com/valyala/fasthttp/issues/1816 + b, err = appendBodyFixedSize(r, dst, readN) if err != nil { return b, err } diff --git a/vendor/github.com/valyala/fasthttp/lbclient.go b/vendor/github.com/valyala/fasthttp/lbclient.go index a9a40a2b..690f4d0c 100644 --- a/vendor/github.com/valyala/fasthttp/lbclient.go +++ b/vendor/github.com/valyala/fasthttp/lbclient.go @@ -27,10 +27,6 @@ type BalancingClient interface { type LBClient struct { noCopy noCopy - // Clients must contain non-zero clients list. - // Incoming requests are balanced among these clients. - Clients []BalancingClient - // HealthCheck is a callback called after each request. // // The request, response and the error returned by the client @@ -42,15 +38,20 @@ type LBClient struct { // By default HealthCheck returns false if err != nil. HealthCheck func(req *Request, resp *Response, err error) bool + // Clients must contain non-zero clients list. + // Incoming requests are balanced among these clients. + Clients []BalancingClient + + cs []*lbClient + // Timeout is the request timeout used when calling LBClient.Do. // // DefaultLBClientTimeout is used by default. Timeout time.Duration - cs []*lbClient + mu sync.RWMutex once sync.Once - mu sync.RWMutex } // DefaultLBClientTimeout is the default request timeout used by LBClient @@ -138,7 +139,7 @@ func (cc *LBClient) get() *lbClient { minT := atomic.LoadUint64(&minC.total) for _, c := range cs[1:] { n := c.PendingRequests() - t := atomic.LoadUint64(&c.total) /* #nosec G601 */ + t := atomic.LoadUint64(&c.total) if n < minN || (n == minN && t < minT) { minC = c minN = n diff --git a/vendor/github.com/valyala/fasthttp/peripconn.go b/vendor/github.com/valyala/fasthttp/peripconn.go index 46bddbf8..d09aaf83 100644 --- a/vendor/github.com/valyala/fasthttp/peripconn.go +++ b/vendor/github.com/valyala/fasthttp/peripconn.go @@ -9,8 +9,8 @@ import ( type perIPConnCounter struct { perIPConnPool sync.Pool perIPTLSConnPool sync.Pool - lock sync.Mutex m map[uint32]int + lock sync.Mutex } func (cc *perIPConnCounter) Register(ip uint32) int { @@ -41,29 +41,31 @@ func (cc *perIPConnCounter) Unregister(ip uint32) { type perIPConn struct { net.Conn - ip uint32 perIPConnCounter *perIPConnCounter + + ip uint32 } type perIPTLSConn struct { *tls.Conn - ip uint32 perIPConnCounter *perIPConnCounter + + ip uint32 } func acquirePerIPConn(conn net.Conn, ip uint32, counter *perIPConnCounter) net.Conn { - if tlcConn, ok := conn.(*tls.Conn); ok { + if tlsConn, ok := conn.(*tls.Conn); ok { v := counter.perIPTLSConnPool.Get() if v == nil { return &perIPTLSConn{ perIPConnCounter: counter, - Conn: tlcConn, + Conn: tlsConn, ip: ip, } } - c := v.(*perIPConn) - c.Conn = conn + c := v.(*perIPTLSConn) + c.Conn = tlsConn c.ip = ip return c } diff --git a/vendor/github.com/valyala/fasthttp/round2_64.go b/vendor/github.com/valyala/fasthttp/round2_64.go index a05df232..05e88df2 100644 --- a/vendor/github.com/valyala/fasthttp/round2_64.go +++ b/vendor/github.com/valyala/fasthttp/round2_64.go @@ -12,12 +12,12 @@ func roundUpForSliceCap(n int) int { return n } - x := uint64(n - 1) + x := uint64(n - 1) // #nosec G115 x |= x >> 1 x |= x >> 2 x |= x >> 4 x |= x >> 8 x |= x >> 16 - return int(x + 1) + return int(x + 1) // #nosec G115 } diff --git a/vendor/github.com/valyala/fasthttp/server.go b/vendor/github.com/valyala/fasthttp/server.go index fe194a55..8cd6fef5 100644 --- a/vendor/github.com/valyala/fasthttp/server.go +++ b/vendor/github.com/valyala/fasthttp/server.go @@ -148,6 +148,18 @@ type 
ServeHandler func(c net.Conn) error type Server struct { noCopy noCopy + perIPConnCounter perIPConnCounter + + ctxPool sync.Pool + readerPool sync.Pool + writerPool sync.Pool + hijackConnPool sync.Pool + + // Logger, which is used by RequestCtx.Logger(). + // + // By default the standard logger from the log package is used. + Logger Logger + // Handler for processing incoming requests. // // Take into account that no `panic` recovery is done by `fasthttp` (thus any `panic` will take down the entire server). @@ -181,11 +193,43 @@ type Server struct { // like they are normal requests. ContinueHandler func(header *RequestHeader) bool + // ConnState specifies an optional callback function that is + // called when a client connection changes state. See the + // ConnState type and associated constants for details. + ConnState func(net.Conn, ConnState) + + // TLSConfig optionally provides a TLS configuration for use + // by ServeTLS, ServeTLSEmbed, ListenAndServeTLS, ListenAndServeTLSEmbed, + // AppendCert, AppendCertEmbed and NextProto. + // + // Note that this value is cloned by ServeTLS, ServeTLSEmbed, ListenAndServeTLS + // and ListenAndServeTLSEmbed, so it's not possible to modify the configuration + // with methods like tls.Config.SetSessionTicketKeys. + // To use SetSessionTicketKeys, use Server.Serve with a TLS Listener + // instead. + TLSConfig *tls.Config + + // FormValueFunc, which is used by RequestCtx.FormValue and supports customizing + // the behaviour of the RequestCtx.FormValue function. + // + // NetHttpFormValueFunc gives a FormValueFunc func implementation that is consistent with net/http. + FormValueFunc FormValueFunc + + nextProtos map[string]ServeHandler + + concurrencyCh chan struct{} + + idleConns map[net.Conn]time.Time + done chan struct{} + // Server name for sending in response headers. // // Default server name is used if left blank. Name string + // We need to know our listeners and idle connections so we can close them in Shutdown(). + ln []net.Listener + // The maximum number of concurrent connections the server may serve. // // DefaultConcurrency is used if not set. @@ -262,6 +306,21 @@ type Server struct { // Request body size is limited by DefaultMaxRequestBodySize by default. MaxRequestBodySize int + // SleepWhenConcurrencyLimitsExceeded is the duration to sleep if + // the concurrency limit is exceeded (default, when 0: don't sleep + // and accept new connections immediately). + SleepWhenConcurrencyLimitsExceeded time.Duration + + idleConnsMu sync.Mutex + + mu sync.Mutex + + concurrency uint32 + open int32 + stop int32 + + rejectedRequestsCount uint32 + // Whether to disable keep-alive connections. // // The server will close all the incoming connections after sending @@ -340,11 +399,6 @@ type Server struct { // * cONTENT-lenGTH -> Content-Length DisableHeaderNamesNormalizing bool - // SleepWhenConcurrencyLimitsExceeded is a duration to be slept of if - // the concurrency limit in exceeded (default [when is 0]: don't sleep - // and accept new connections immediately). - SleepWhenConcurrencyLimitsExceeded time.Duration - // NoDefaultServerHeader, when set to true, causes the default Server header // to be excluded from the Response. // @@ -382,57 +436,6 @@ type Server struct { // and calls the handler sooner when given body is // larger than the current limit. StreamRequestBody bool - - // ConnState specifies an optional callback function that is - called when a client connection changes state.
See the - // ConnState type and associated constants for details. - ConnState func(net.Conn, ConnState) - - // Logger, which is used by RequestCtx.Logger(). - // - // By default standard logger from log package is used. - Logger Logger - - // TLSConfig optionally provides a TLS configuration for use - // by ServeTLS, ServeTLSEmbed, ListenAndServeTLS, ListenAndServeTLSEmbed, - // AppendCert, AppendCertEmbed and NextProto. - // - // Note that this value is cloned by ServeTLS, ServeTLSEmbed, ListenAndServeTLS - // and ListenAndServeTLSEmbed, so it's not possible to modify the configuration - // with methods like tls.Config.SetSessionTicketKeys. - // To use SetSessionTicketKeys, use Server.Serve with a TLS Listener - // instead. - TLSConfig *tls.Config - - // FormValueFunc, which is used by RequestCtx.FormValue and support for customizing - // the behaviour of the RequestCtx.FormValue function. - // - // NetHttpFormValueFunc gives a FormValueFunc func implementation that is consistent with net/http. - FormValueFunc FormValueFunc - - nextProtos map[string]ServeHandler - - concurrency uint32 - concurrencyCh chan struct{} - perIPConnCounter perIPConnCounter - - ctxPool sync.Pool - readerPool sync.Pool - writerPool sync.Pool - hijackConnPool sync.Pool - - // We need to know our listeners and idle connections so we can close them in Shutdown(). - ln []net.Listener - - idleConns map[net.Conn]time.Time - idleConnsMu sync.Mutex - - mu sync.Mutex - open int32 - stop int32 - done chan struct{} - - rejectedRequestsCount uint32 } // TimeoutHandler creates RequestHandler, which returns StatusRequestTimeout @@ -585,37 +588,39 @@ func CompressHandlerBrotliLevel(h RequestHandler, brotliLevel, otherLevel int) R type RequestCtx struct { noCopy noCopy - // Incoming request. - // - // Copying Request by value is forbidden. Use pointer to Request instead. - Request Request - // Outgoing response. // // Copying Response by value is forbidden. Use pointer to Response instead. Response Response - userValues userData - - connID uint64 - connRequestNum uint64 - connTime time.Time - remoteAddr net.Addr + connTime time.Time time time.Time - logger ctxLogger - s *Server - c net.Conn - fbr firstByteReader + logger ctxLogger + remoteAddr net.Addr + + c net.Conn + s *Server timeoutResponse *Response timeoutCh chan struct{} timeoutTimer *time.Timer - hijackHandler HijackHandler + hijackHandler HijackHandler + formValueFunc FormValueFunc + fbr firstByteReader + + userValues userData + + // Incoming request. + // + // Copying Request by value is forbidden. Use pointer to Request instead. + Request Request + + connID uint64 + connRequestNum uint64 hijackNoResponse bool - formValueFunc FormValueFunc } // HijackHandler must process the hijacked connection c. 
@@ -1592,14 +1597,14 @@ func (s *Server) NextProto(key string, nph ServeHandler) { func (s *Server) getNextProto(c net.Conn) (proto string, err error) { if tlsConn, ok := c.(connTLSer); ok { if s.ReadTimeout > 0 { - if err := c.SetReadDeadline(time.Now().Add(s.ReadTimeout)); err != nil { - panic(fmt.Sprintf("BUG: error in SetReadDeadline(%v): %v", s.ReadTimeout, err)) + if err = c.SetReadDeadline(time.Now().Add(s.ReadTimeout)); err != nil { + return } } if s.WriteTimeout > 0 { - if err := c.SetWriteDeadline(time.Now().Add(s.WriteTimeout)); err != nil { - panic(fmt.Sprintf("BUG: error in SetWriteDeadline(%v): %v", s.WriteTimeout, err)) + if err = c.SetWriteDeadline(time.Now().Add(s.WriteTimeout)); err != nil { + return } } @@ -2029,8 +2034,8 @@ func (s *Server) ServeConn(c net.Conn) error { c = pic } - n := atomic.AddUint32(&s.concurrency, 1) - if n > uint32(s.getConcurrency()) { + n := int(atomic.AddUint32(&s.concurrency, 1)) // #nosec G115 + if n > s.getConcurrency() { atomic.AddUint32(&s.concurrency, ^uint32(0)) s.writeFastError(c, StatusServiceUnavailable, "The connection cannot be served because Server.Concurrency limit exceeded") c.Close() @@ -2132,8 +2137,8 @@ func (s *Server) serveConn(c net.Conn) (err error) { // Remove read or write deadlines that might have previously been set. // The next handler is responsible for setting its own deadlines. if s.ReadTimeout > 0 || s.WriteTimeout > 0 { - if err := c.SetDeadline(zeroTime); err != nil { - panic(fmt.Sprintf("BUG: error in SetDeadline(zeroTime): %v", err)) + if err = c.SetDeadline(zeroTime); err != nil { + return } } @@ -2172,7 +2177,7 @@ func (s *Server) serveConn(c net.Conn) (err error) { // If this is a keep-alive connection set the idle timeout. if connRequestNum > 1 { if d := s.idleTimeout(); d > 0 { - if err := c.SetReadDeadline(time.Now().Add(d)); err != nil { + if err = c.SetReadDeadline(time.Now().Add(d)); err != nil { break } } @@ -2192,7 +2197,7 @@ func (s *Server) serveConn(c net.Conn) (err error) { // If reading from a keep-alive connection returns nothing it means // the connection was closed (either timeout or from the other side). if err != io.EOF { - err = ErrNothingRead{err} + err = ErrNothingRead{error: err} } } } @@ -2216,13 +2221,13 @@ func (s *Server) serveConn(c net.Conn) (err error) { s.setState(c, StateActive) if s.ReadTimeout > 0 { - if err := c.SetReadDeadline(time.Now().Add(s.ReadTimeout)); err != nil { + if err = c.SetReadDeadline(time.Now().Add(s.ReadTimeout)); err != nil { break } } else if s.IdleTimeout > 0 && connRequestNum > 1 { // If this was an idle connection and the server has an IdleTimeout but // no ReadTimeout then we should remove the ReadTimeout. - if err := c.SetReadDeadline(zeroTime); err != nil { + if err = c.SetReadDeadline(zeroTime); err != nil { break } } @@ -2256,8 +2261,8 @@ func (s *Server) serveConn(c net.Conn) (err error) { reqConf := onHdrRecv(&ctx.Request.Header) if reqConf.ReadTimeout > 0 { deadline := time.Now().Add(reqConf.ReadTimeout) - if err := c.SetReadDeadline(deadline); err != nil { - panic(fmt.Sprintf("BUG: error in SetReadDeadline(%v): %v", deadline, err)) + if err = c.SetReadDeadline(deadline); err != nil { + break } } switch { @@ -2281,8 +2286,9 @@ func (s *Server) serveConn(c net.Conn) (err error) { err = ctx.Request.readLimitBody(br, maxRequestBodySize, s.GetOnly, !s.DisablePreParseMultipartForm) } } - - if (s.ReduceMemoryUsage && br.Buffered() == 0) || err != nil { + // When StreamRequestBody is set to true, we cannot safely release br. 
+ // For example, when using chunked encoding, it's possible that br has only read the request headers. + if (!s.StreamRequestBody && s.ReduceMemoryUsage && br.Buffered() == 0) || err != nil { releaseReader(s, br) br = nil } @@ -2353,7 +2359,7 @@ func (s *Server) serveConn(c net.Conn) (err error) { } else { err = ctx.Request.ContinueReadBody(br, maxRequestBodySize, !s.DisablePreParseMultipartForm) } - if (s.ReduceMemoryUsage && br.Buffered() == 0) || err != nil { + if (!s.StreamRequestBody && s.ReduceMemoryUsage && br.Buffered() == 0) || err != nil { releaseReader(s, br) br = nil } @@ -2396,20 +2402,20 @@ func (s *Server) serveConn(c net.Conn) (err error) { ctx.hijackNoResponse = false if writeTimeout > 0 { - if err := c.SetWriteDeadline(time.Now().Add(writeTimeout)); err != nil { - panic(fmt.Sprintf("BUG: error in SetWriteDeadline(%v): %v", writeTimeout, err)) + if err = c.SetWriteDeadline(time.Now().Add(writeTimeout)); err != nil { + break } previousWriteTimeout = writeTimeout } else if previousWriteTimeout > 0 { // We don't want a write timeout but we previously set one, remove it. - if err := c.SetWriteDeadline(zeroTime); err != nil { - panic(fmt.Sprintf("BUG: error in SetWriteDeadline(zeroTime): %v", err)) + if err = c.SetWriteDeadline(zeroTime); err != nil { + break } previousWriteTimeout = 0 } connectionClose = connectionClose || - (s.MaxRequestsPerConn > 0 && connRequestNum >= uint64(s.MaxRequestsPerConn)) || + (s.MaxRequestsPerConn > 0 && connRequestNum >= uint64(s.MaxRequestsPerConn)) || // #nosec G115 ctx.Response.Header.ConnectionClose() || (s.CloseOnShutdown && atomic.LoadInt32(&s.stop) == 1) if connectionClose { @@ -2743,7 +2749,15 @@ func (ctx *RequestCtx) Deadline() (deadline time.Time, ok bool) { // Note: Because creating a new channel for every request is just too expensive, so // RequestCtx.s.done is only closed when the server is shutting down. func (ctx *RequestCtx) Done() <-chan struct{} { - return ctx.s.done + // Use a new variable to prevent a panic caused by the original done chan being modified to nil. + done := ctx.s.done + + if done == nil { + done = make(chan struct{}, 1) + done <- struct{}{} + return done + } + return done } // Err returns a non-nil error value after Done is closed, @@ -2757,7 +2771,7 @@ func (ctx *RequestCtx) Done() <-chan struct{} { // RequestCtx.s.done is only closed when the server is shutting down.
func (ctx *RequestCtx) Err() error { select { - case <-ctx.s.done: + case <-ctx.Done(): return context.Canceled default: return nil @@ -2959,7 +2973,7 @@ const ( StateClosed ) -var stateName = map[ConnState]string{ +var stateName = []string{ StateNew: "new", StateActive: "active", StateIdle: "idle", diff --git a/vendor/github.com/valyala/fasthttp/stackless/writer.go b/vendor/github.com/valyala/fasthttp/stackless/writer.go index 28dbedf5..2a6841ac 100644 --- a/vendor/github.com/valyala/fasthttp/stackless/writer.go +++ b/vendor/github.com/valyala/fasthttp/stackless/writer.go @@ -41,12 +41,13 @@ func NewWriter(dstW io.Writer, newWriter NewWriterFunc) Writer { type writer struct { dstW io.Writer zw Writer - xw xWriter err error - n int + xw xWriter + + p []byte + n int - p []byte op op } diff --git a/vendor/github.com/valyala/fasthttp/tcp.go b/vendor/github.com/valyala/fasthttp/tcp.go deleted file mode 100644 index 7e804374..00000000 --- a/vendor/github.com/valyala/fasthttp/tcp.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !windows - -package fasthttp - -import ( - "errors" - "syscall" -) - -func isConnectionReset(err error) bool { - return errors.Is(err, syscall.ECONNRESET) -} diff --git a/vendor/github.com/valyala/fasthttp/tcp_windows.go b/vendor/github.com/valyala/fasthttp/tcp_windows.go deleted file mode 100644 index d71950b9..00000000 --- a/vendor/github.com/valyala/fasthttp/tcp_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package fasthttp - -import ( - "errors" - "syscall" -) - -func isConnectionReset(err error) bool { - return errors.Is(err, syscall.WSAECONNRESET) -} diff --git a/vendor/github.com/valyala/fasthttp/tcpdialer.go b/vendor/github.com/valyala/fasthttp/tcpdialer.go index e5f06bd0..9b648daf 100644 --- a/vendor/github.com/valyala/fasthttp/tcpdialer.go +++ b/vendor/github.com/valyala/fasthttp/tcpdialer.go @@ -126,19 +126,6 @@ type Resolver interface { // TCPDialer contains options to control a group of Dial calls. type TCPDialer struct { - // Concurrency controls the maximum number of concurrent Dials - // that can be performed using this object. - // Setting this to 0 means unlimited. - // - // WARNING: This can only be changed before the first Dial. - // Changes made after the first Dial will not affect anything. - Concurrency int - - // LocalAddr is the local address to use when dialing an - // address. - // If nil, a local address is automatically chosen. - LocalAddr *net.TCPAddr - // This may be used to override DNS resolving policy, like this: // var dialer = &fasthttp.TCPDialer{ // Resolver: &net.Resolver{ @@ -152,16 +139,30 @@ type TCPDialer struct { // } Resolver Resolver - // DisableDNSResolution may be used to disable DNS resolution - DisableDNSResolution bool - // DNSCacheDuration may be used to override the default DNS cache duration (DefaultDNSCacheDuration) - DNSCacheDuration time.Duration + // LocalAddr is the local address to use when dialing an + // address. + // If nil, a local address is automatically chosen. + LocalAddr *net.TCPAddr + + concurrencyCh chan struct{} tcpAddrsMap sync.Map - concurrencyCh chan struct{} + // Concurrency controls the maximum number of concurrent Dials + // that can be performed using this object. + // Setting this to 0 means unlimited. + // + // WARNING: This can only be changed before the first Dial. + // Changes made after the first Dial will not affect anything. 
+ Concurrency int + + // DNSCacheDuration may be used to override the default DNS cache duration (DefaultDNSCacheDuration) + DNSCacheDuration time.Duration once sync.Once + + // DisableDNSResolution may be used to disable DNS resolution + DisableDNSResolution bool } // Dial dials the given TCP addr using tcp4. @@ -297,7 +298,7 @@ func (d *TCPDialer) dial(addr string, dualStack bool, timeout time.Duration) (ne return nil, err } var conn net.Conn - n := uint32(len(addrs)) + n := uint32(len(addrs)) // #nosec G115 for n > 0 { conn, err = d.tryDial(network, addrs[idx%n].String(), deadline, d.concurrencyCh) if err == nil { @@ -371,8 +372,8 @@ var ErrDialTimeout = errors.New("dialing to the given TCP address timed out") // upstream = dialErr.Upstream // 34.206.39.153:80 // } type ErrDialWithUpstream struct { - Upstream string wrapErr error + Upstream string } func (e *ErrDialWithUpstream) Error() string { @@ -395,11 +396,11 @@ func wrapDialWithUpstream(err error, upstream string) error { const DefaultDialTimeout = 3 * time.Second type tcpAddrEntry struct { - addrs []net.TCPAddr - addrsIdx uint32 - - pending int32 resolveTime time.Time + addrs []net.TCPAddr + addrsIdx uint32 + + pending int32 } // DefaultDNSCacheDuration is the duration for caching resolved TCP addresses diff --git a/vendor/github.com/valyala/fasthttp/uri.go b/vendor/github.com/valyala/fasthttp/uri.go index 19ceb694..7ddadfcb 100644 --- a/vendor/github.com/valyala/fasthttp/uri.go +++ b/vendor/github.com/valyala/fasthttp/uri.go @@ -42,6 +42,8 @@ var uriPool = &sync.Pool{ type URI struct { noCopy noCopy + queryArgs Args + pathOriginal []byte scheme []byte path []byte @@ -49,7 +51,11 @@ type URI struct { hash []byte host []byte - queryArgs Args + fullURI []byte + requestURI []byte + + username []byte + password []byte parsedQueryArgs bool // Path values are sent as-is without normalization. @@ -60,12 +66,6 @@ type URI struct { // By default path values are normalized, i.e. // extra slashes are removed, special characters are encoded. DisablePathNormalizing bool - - fullURI []byte - requestURI []byte - - username []byte - password []byte } // CopyTo copies uri contents to dst. diff --git a/vendor/github.com/valyala/fasthttp/userdata.go b/vendor/github.com/valyala/fasthttp/userdata.go index 38cca864..20366b63 100644 --- a/vendor/github.com/valyala/fasthttp/userdata.go +++ b/vendor/github.com/valyala/fasthttp/userdata.go @@ -77,6 +77,8 @@ func (d *userData) Reset() { if vc, ok := v.(io.Closer); ok { vc.Close() } + (*d)[i].value = nil + (*d)[i].key = nil } *d = (*d)[:0] } @@ -92,6 +94,7 @@ func (d *userData) Remove(key any) { if kv.key == key { n-- args[i], args[n] = args[n], args[i] + args[n].key = nil args[n].value = nil args = args[:n] *d = args diff --git a/vendor/github.com/valyala/fasthttp/workerpool.go b/vendor/github.com/valyala/fasthttp/workerpool.go index 235eec10..9ecd9481 100644 --- a/vendor/github.com/valyala/fasthttp/workerpool.go +++ b/vendor/github.com/valyala/fasthttp/workerpool.go @@ -15,29 +15,30 @@ import ( // // Such a scheme keeps CPU caches hot (in theory). type workerPool struct { + workerChanPool sync.Pool + + Logger Logger + // Function for serving server connections. // It must leave c unclosed. 
WorkerFunc ServeHandler - MaxWorkersCount int - - LogAllErrors bool + stopCh chan struct{} - MaxIdleWorkerDuration time.Duration + connState func(net.Conn, ConnState) - Logger Logger + ready []*workerChan - lock sync.Mutex - workersCount int - mustStop bool + MaxWorkersCount int - ready []*workerChan + MaxIdleWorkerDuration time.Duration - stopCh chan struct{} + workersCount int - workerChanPool sync.Pool + lock sync.Mutex - connState func(net.Conn, ConnState) + LogAllErrors bool + mustStop bool } type workerChan struct { diff --git a/vendor/github.com/valyala/fasthttp/zstd.go b/vendor/github.com/valyala/fasthttp/zstd.go index 226a1263..0ee2cca1 100644 --- a/vendor/github.com/valyala/fasthttp/zstd.go +++ b/vendor/github.com/valyala/fasthttp/zstd.go @@ -102,7 +102,7 @@ func releaseRealZstdWrter(zw *zstd.Encoder, level int) { } func AppendZstdBytesLevel(dst, src []byte, level int) []byte { - w := &byteSliceWriter{dst} + w := &byteSliceWriter{b: dst} WriteZstdLevel(w, src, level) //nolint:errcheck return w.b } @@ -155,7 +155,7 @@ func AppendZstdBytes(dst, src []byte) []byte { // WriteUnzstd writes unzstd p to w and returns the number of uncompressed // bytes written to w. func WriteUnzstd(w io.Writer, p []byte) (int, error) { - r := &byteSliceReader{p} + r := &byteSliceReader{b: p} zr, err := acquireZstdReader(r) if err != nil { return 0, err @@ -171,7 +171,7 @@ func WriteUnzstd(w io.Writer, p []byte) (int, error) { // AppendUnzstdBytes appends unzstd src to dst and returns the resulting dst. func AppendUnzstdBytes(dst, src []byte) ([]byte, error) { - w := &byteSliceWriter{dst} + w := &byteSliceWriter{b: dst} _, err := WriteUnzstd(w, src) return w.b, err } diff --git a/vendor/github.com/xyproto/files/README.md b/vendor/github.com/xyproto/files/README.md index f4df7703..a3a73f86 100644 --- a/vendor/github.com/xyproto/files/README.md +++ b/vendor/github.com/xyproto/files/README.md @@ -1,7 +1,37 @@ # files -Functions that has to do with files and/or directories. +Functions for querying files and paths. -* Version: 1.6.0 +## Function signatures + +``` +func Exists(path string) bool +func IsFile(path string) bool +func IsSymlink(path string) bool +func IsFileOrSymlink(path string) bool +func IsDir(path string) bool +func Which(executable string) string +func WhichCached(executable string) string +func PathHas(executable string) bool +func PathHasCached(executable string) bool +func BinDirectory(filename string) bool +func DataReadyOnStdin() bool +func IsBinary(filename string) bool +func FilterOutBinaryFiles(filenames []string) []string +func TimestampedFilename(filename string) string +func ShortPath(path string) string +func FileHas(path, what string) bool +func ReadString(filename string) string +func CanRead(filename string) bool +func Relative(path string) string +func Touch(filename string) error +func ExistsCached(path string) bool +func ClearCache() +func RemoveFile(path string) error +``` + +## General info + +* Version: 1.7.0 * License: BSD-3 -* Author: Alexander F. Rødseth +* Author: Alexander F. 
Rødseth <xyproto@archlinux.org> diff --git a/vendor/github.com/xyproto/files/files.go b/vendor/github.com/xyproto/files/files.go index 8b601a48..83f7cb35 100644 --- a/vendor/github.com/xyproto/files/files.go +++ b/vendor/github.com/xyproto/files/files.go @@ -80,6 +80,17 @@ func WhichCached(executable string) string { return path } +// PathHas checks if the given executable is in $PATH +func PathHas(executable string) bool { + _, err := exec.LookPath(executable) + return err == nil +} + +// PathHasCached checks if the given executable is in $PATH (looks in the cache first and then caches the result) +func PathHasCached(executable string) bool { + return WhichCached(executable) != "" +} + // BinDirectory will check if the given filename is in one of these directories: // /bin, /sbin, /usr/bin, /usr/sbin, /usr/local/bin, /usr/local/sbin, ~/.bin, ~/bin, ~/.local/bin func BinDirectory(filename string) bool { diff --git a/vendor/github.com/xyproto/vt100/README.md b/vendor/github.com/xyproto/vt100/README.md index 8df167e1..2246465c 100644 --- a/vendor/github.com/xyproto/vt100/README.md +++ b/vendor/github.com/xyproto/vt100/README.md @@ -78,6 +78,6 @@ Quick installation: ### General info -* Version: 1.14.5 +* Version: 1.14.7 * Licence: BSD-3 * Author: Alexander F. Rødseth <xyproto@archlinux.org> diff --git a/vendor/github.com/xyproto/vt100/size.go b/vendor/github.com/xyproto/vt100/size.go index 0e1c8e8b..5d0a90b4 100644 --- a/vendor/github.com/xyproto/vt100/size.go +++ b/vendor/github.com/xyproto/vt100/size.go @@ -7,6 +7,8 @@ import ( "errors" "syscall" "unsafe" + + "github.com/xyproto/env/v2" ) type winsize struct { @@ -25,6 +27,9 @@ func TermSize() (uint, uint, error) { uintptr(unsafe.Pointer(ws))); int(retCode) != -1 { return uint(ws.Col), uint(ws.Row), nil } + if w, h := env.Int("COLUMNS", 0), env.Int("LINES", 0); w > 0 && h > 0 { + return uint(w), uint(h), nil + } return 0, 0, errors.New("could not get terminal size") } diff --git a/vendor/modules.txt b/vendor/modules.txt index ce11748a..e2270d14 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -60,7 +60,7 @@ github.com/dustin/go-humanize ## explicit github.com/eknkc/amber github.com/eknkc/amber/parser -# github.com/evanw/esbuild v0.23.1 +# github.com/evanw/esbuild v0.24.0 ## explicit; go 1.13 github.com/evanw/esbuild/internal/api_helpers github.com/evanw/esbuild/internal/ast @@ -100,7 +100,7 @@ github.com/fsnotify/fsnotify # github.com/go-gcfg/gcfg v1.2.3 ## explicit github.com/go-gcfg/gcfg -# github.com/go-mysql-org/go-mysql v1.9.0 +# github.com/go-mysql-org/go-mysql v1.9.1 ## explicit; go 1.18 github.com/go-mysql-org/go-mysql/client github.com/go-mysql-org/go-mysql/compress @@ -117,7 +117,7 @@ github.com/golang-sql/civil # github.com/golang-sql/sqlexp v0.1.0 ## explicit; go 1.16 github.com/golang-sql/sqlexp -# github.com/gomarkdown/markdown v0.0.0-20240730141124-034f12af3bf6 +# github.com/gomarkdown/markdown v0.0.0-20240930133441-72d49d9543d8 ## explicit; go 1.12 github.com/gomarkdown/markdown github.com/gomarkdown/markdown/ast @@ -126,7 +126,7 @@ github.com/gomarkdown/markdown/parser # github.com/gomodule/redigo v1.9.2 ## explicit; go 1.17 github.com/gomodule/redigo/redis -# github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 +# github.com/google/pprof v0.0.0-20240929191954-255acd752d31 ## explicit; go 1.22 github.com/google/pprof/profile # github.com/google/uuid v1.6.0 @@ -135,8 +135,8 @@ github.com/google/uuid # github.com/gorilla/css v1.0.1 ## explicit; go 1.20 github.com/gorilla/css/scanner -#
github.com/klauspost/compress v1.17.9 -## explicit; go 1.20 +# github.com/klauspost/compress v1.17.10 +## explicit; go 1.21 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse @@ -288,7 +288,7 @@ github.com/tylerb/graceful # github.com/valyala/bytebufferpool v1.0.0 ## explicit github.com/valyala/bytebufferpool -# github.com/valyala/fasthttp v1.55.0 +# github.com/valyala/fasthttp v1.56.0 ## explicit; go 1.20 github.com/valyala/fasthttp github.com/valyala/fasthttp/fasthttpadaptor @@ -328,7 +328,7 @@ github.com/xyproto/datablock # github.com/xyproto/env/v2 v2.5.0 ## explicit; go 1.19 github.com/xyproto/env/v2 -# github.com/xyproto/files v1.6.0 +# github.com/xyproto/files v1.7.0 ## explicit; go 1.19 github.com/xyproto/files # github.com/xyproto/gluamapper v1.2.1 @@ -409,7 +409,7 @@ github.com/xyproto/tinysvg # github.com/xyproto/unzip v1.0.0 ## explicit; go 1.23.1 github.com/xyproto/unzip -# github.com/xyproto/vt100 v1.14.5 +# github.com/xyproto/vt100 v1.14.7 ## explicit; go 1.22.0 github.com/xyproto/vt100 # github.com/yosssi/gcss v0.1.0
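Editor's note: many of the fasthttp hunks in this bump only reorder struct fields (Request, Response, Server, RequestCtx, LBClient, TCPDialer, workerPool, URI), which is consistent with a field-alignment pass that sorts fields by size to reduce padding. A minimal sketch of the effect; the struct names `loose` and `packed` are hypothetical, not from fasthttp:

```go
package main

import (
	"fmt"
	"unsafe"
)

// loose interleaves small and large fields, so the compiler inserts
// padding after each bool to align the following int64.
type loose struct {
	a bool  // 1 byte + 7 bytes padding
	b int64 // 8 bytes
	c bool  // 1 byte + 7 bytes padding
	d int64 // 8 bytes
}

// packed holds the same fields sorted large-to-small, leaving padding
// only at the tail.
type packed struct {
	b int64
	d int64
	a bool
	c bool
}

func main() {
	fmt.Println(unsafe.Sizeof(loose{}))  // 32 on 64-bit platforms
	fmt.Println(unsafe.Sizeof(packed{})) // 24 on 64-bit platforms
}
```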
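Editor's note: the parseHeaders hunks above now reject any header key or value containing a byte outside the allowed set, checked against precomputed tables (validHeaderFieldByteTable, validHeaderValueByteTable). A sketch of the table-driven idea for the tchar grammar of RFC 7230 / RFC 9110; the names `tcharTable` and `validFieldByte` below are illustrative, not fasthttp's own:

```go
package main

import "fmt"

// tcharTable marks the bytes allowed in a header field name:
// ALPHA / DIGIT / "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" /
// "-" / "." / "^" / "_" / "`" / "|" / "~" (the tchar set).
var tcharTable [256]bool

func init() {
	for c := 0; c < 256; c++ {
		isAlpha := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
		isDigit := c >= '0' && c <= '9'
		tcharTable[c] = isAlpha || isDigit
	}
	for _, c := range "!#$%&'*+-.^_`|~" {
		tcharTable[c] = true
	}
}

// validFieldByte validates one byte with a single table lookup,
// keeping the hot parsing loop branch-light.
func validFieldByte(c byte) bool { return tcharTable[c] }

func main() {
	fmt.Println(validFieldByte('A'), validFieldByte(':')) // true false
}
```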