# HG changeset patch # User Mikael Berthe # Date 1581875641 -3600 # Node ID 1c52a0eeb952df432cd7005ac1d203eae20c1404 # Parent c040f992052f76f54a2423a0443483c9b7bf896f Update dependencies This should fix #22. diff -r c040f992052f -r 1c52a0eeb952 go.mod --- a/go.mod Wed Sep 18 19:17:42 2019 +0200 +++ b/go.mod Sun Feb 16 18:54:01 2020 +0100 @@ -1,19 +1,24 @@ module github.com/McKael/madonctl require ( - github.com/McKael/madon/v2 v2.3.1-0.20190729195747-12ec66101bde + github.com/McKael/madon/v2 v2.3.1-0.20200216174857-9f66daa5f91b github.com/ghodss/yaml v1.0.0 - github.com/kr/text v0.1.0 - github.com/magiconair/properties v1.8.1 // indirect - github.com/mattn/go-isatty v0.0.8 - github.com/pelletier/go-toml v1.4.0 // indirect - github.com/pkg/errors v0.8.1 + github.com/kr/text v0.2.0 + github.com/mattn/go-isatty v0.0.12 + github.com/pelletier/go-toml v1.6.0 // indirect + github.com/pkg/errors v0.9.1 github.com/spf13/afero v1.2.2 // indirect + github.com/spf13/cast v1.3.1 // indirect github.com/spf13/cobra v0.0.5 github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/pflag v1.0.3 - github.com/spf13/viper v1.4.0 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.6.2 github.com/stretchr/testify v1.3.0 - golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 - golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e // indirect + golang.org/x/net v0.0.0-20200202094626-16171245cfb2 + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect + golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 // indirect + gopkg.in/ini.v1 v1.52.0 // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect ) + +go 1.13 diff -r c040f992052f -r 1c52a0eeb952 go.sum --- a/go.sum Wed Sep 18 19:17:42 2019 +0200 +++ b/go.sum Sun Feb 16 18:54:01 2020 +0100 @@ -2,8 +2,8 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/McKael/madon/v2 v2.3.1-0.20190729195747-12ec66101bde h1:OQJLvUv5mhpov3Cn+r3bRWtRmHH8bjP1rAB4Kxp2LI8= -github.com/McKael/madon/v2 v2.3.1-0.20190729195747-12ec66101bde/go.mod h1:UF5UnjiPvfTI6aGjeW3yIRtlCoEjh0TiZf2L7ofLK/c= +github.com/McKael/madon/v2 v2.3.1-0.20200216174857-9f66daa5f91b h1:gRqdgYG9pGWaTbBwwPfCz6j5CWk2+pLGoU18BKykJFI= +github.com/McKael/madon/v2 v2.3.1-0.20200216174857-9f66daa5f91b/go.mod h1:snHyjBm7+iRyVy1MQK6tKx1aOwQwRMp9e0oqLlYAuAI= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -18,7 +18,9 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew 
v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -40,12 +42,16 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -54,6 +60,8 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -64,12 +72,14 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= -github.com/mattn/go-isatty v0.0.8/go.mod 
h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= @@ -78,12 +88,14 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg= -github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.6.0 h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4= +github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -96,10 +108,15 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/sendgrid/rest v2.4.1+incompatible h1:HDib/5xzQREPq34lN3YMhQtMkdXxS/qLp5G3k9a5++4= github.com/sendgrid/rest v2.4.1+incompatible/go.mod h1:kXX7q3jZtJXK5c5qK83bSGMdV6tsOE70KbHoqJls4lE= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= @@ -108,6 +125,8 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= @@ -116,15 +135,19 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -137,7 +160,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -146,32 +168,31 @@ golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c h1:+EXw7AwNOKzPFXMZ1yNjO40aWCh3PIquJB2fYlv9wcs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e h1:D5TXcfTk7xF7hvieo4QErS3qqCB4teTffacDWr7CI+0= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= @@ -181,11 +202,11 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -193,10 +214,17 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.52.0 h1:j+Lt/M1oPPejkniCg1TkWE2J3Eh1oZTsHSXzMTzUXn4= +gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/McKael/madon/v2/api.go --- a/vendor/github.com/McKael/madon/v2/api.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/McKael/madon/v2/api.go Sun Feb 16 18:54:01 2020 +0100 @@ -130,11 +130,22 @@ return nil, err } if res.StatusCode < 200 || res.StatusCode >= 300 { + var errorText string + // Try to unmarshal the returned error object for a description + mastodonError := Error{} + decodeErr := 
json.NewDecoder(res.Body).Decode(&mastodonError) + if decodeErr != nil { + // Decode unsuccessful, fallback to generic error based on response code + errorText = http.StatusText(res.StatusCode) + } else { + errorText = mastodonError.Text + } + // Please note that the error string code is used by Search() // to check the error cause. const errFormatString = "bad server status code (%d)" return nil, errors.Errorf(errFormatString+": %s", - res.StatusCode, http.StatusText(res.StatusCode)) + res.StatusCode, errorText) } // Build Response object. diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/McKael/madon/v2/go.mod --- a/vendor/github.com/McKael/madon/v2/go.mod Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/McKael/madon/v2/go.mod Sun Feb 16 18:54:01 2020 +0100 @@ -1,14 +1,15 @@ module github.com/McKael/madon/v2 require ( - github.com/davecgh/go-spew v1.1.1 - github.com/gorilla/websocket v1.4.0 - github.com/pkg/errors v0.8.0 - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/golang/protobuf v1.3.3 // indirect + github.com/gorilla/websocket v1.4.1 + github.com/pkg/errors v0.8.1 github.com/sendgrid/rest v2.4.1+incompatible - github.com/stretchr/testify v1.2.2 - golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3 - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect - google.golang.org/appengine v1.2.0 // indirect + github.com/stretchr/testify v1.3.0 + golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + google.golang.org/appengine v1.6.5 // indirect ) + +go 1.13 diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/McKael/madon/v2/go.sum --- a/vendor/github.com/McKael/madon/v2/go.sum Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/McKael/madon/v2/go.sum Sun Feb 16 18:54:01 2020 +0100 @@ -1,24 +1,36 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sendgrid/rest v2.4.1+incompatible h1:HDib/5xzQREPq34lN3YMhQtMkdXxS/qLp5G3k9a5++4= github.com/sendgrid/rest v2.4.1+incompatible/go.mod h1:kXX7q3jZtJXK5c5qK83bSGMdV6tsOE70KbHoqJls4lE= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3 h1:dgd4x4kJt7G4k4m93AYLzM8Ni6h2qLTfh9n9vXJT3/0= -golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.2.0 h1:S0iUepdCWODXRvtE+gcRDd15L+k+k1AiHlMiMjefH24= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/McKael/madon/v2/media.go --- a/vendor/github.com/McKael/madon/v2/media.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/McKael/madon/v2/media.go Sun Feb 16 18:54:01 2020 +0100 @@ -19,12 +19,12 @@ "github.com/sendgrid/rest" ) +const mediaUploadFieldName = "file" + // UploadMedia uploads the given file and returns an attachment // The description and focus arguments can be empty strings. 
// 'focus' is the "focal point", written as two comma-delimited floating points. func (mc *Client) UploadMedia(filePath, description, focus string) (*Attachment, error) { - var b bytes.Buffer - if filePath == "" { return nil, ErrInvalidParameter } @@ -35,11 +35,27 @@ } defer f.Close() - w := multipart.NewWriter(&b) - formWriter, err := w.CreateFormFile("file", filepath.Base(filePath)) + return mc.UploadMediaReader(f, filepath.Base(f.Name()), description, focus) +} + +// UploadMediaReader uploads data from the given reader and returns an attachment +// name, description and focus arguments can be empty strings. +// 'focus' is the "focal point", written as two comma-delimited floating points. +func (mc *Client) UploadMediaReader(f io.Reader, name, description, focus string) (*Attachment, error) { + buf := bytes.Buffer{} + + w := multipart.NewWriter(&buf) + var formWriter io.Writer + var err error + if len(name) > 0 { + formWriter, err = w.CreateFormFile(mediaUploadFieldName, name) + } else { + formWriter, err = w.CreateFormField(mediaUploadFieldName) + } if err != nil { return nil, errors.Wrap(err, "media upload") } + if _, err = io.Copy(formWriter, f); err != nil { return nil, errors.Wrap(err, "media upload") } @@ -62,7 +78,7 @@ return nil, errors.Wrap(err, "media prepareRequest failed") } req.Headers["Content-Type"] = w.FormDataContentType() - req.Body = b.Bytes() + req.Body = buf.Bytes() // Make API call r, err := restAPI(req) diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/cpuguy83/go-md2man/LICENSE.md --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,20 @@ +package md2man + +import ( + "github.com/russross/blackfriday" +) + +// Render converts a markdown document into a roff formatted document. 
+func Render(doc []byte) []byte { + renderer := RoffRenderer(0) + extensions := 0 + extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS + extensions |= blackfriday.EXTENSION_TABLES + extensions |= blackfriday.EXTENSION_FENCED_CODE + extensions |= blackfriday.EXTENSION_AUTOLINK + extensions |= blackfriday.EXTENSION_SPACE_HEADERS + extensions |= blackfriday.EXTENSION_FOOTNOTES + extensions |= blackfriday.EXTENSION_TITLEBLOCK + + return blackfriday.Markdown(doc, renderer, extensions) +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/cpuguy83/go-md2man/md2man/roff.go --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,285 @@ +package md2man + +import ( + "bytes" + "fmt" + "html" + "strings" + + "github.com/russross/blackfriday" +) + +type roffRenderer struct { + ListCounters []int +} + +// RoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func RoffRenderer(flags int) blackfriday.Renderer { + return &roffRenderer{} +} + +func (r *roffRenderer) GetFlags() int { + return 0 +} + +func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) { + out.WriteString(".TH ") + + splitText := bytes.Split(text, []byte("\n")) + for i, line := range splitText { + line = bytes.TrimPrefix(line, []byte("% ")) + if i == 0 { + line = bytes.Replace(line, []byte("("), []byte("\" \""), 1) + line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1) + } + line = append([]byte("\""), line...) + line = append(line, []byte("\" ")...) + out.Write(line) + } + out.WriteString("\n") + + // disable hyphenation + out.WriteString(".nh\n") + // disable justification (adjust text to left margin only) + out.WriteString(".ad l\n") +} + +func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) { + out.WriteString("\n.PP\n.RS\n\n.nf\n") + escapeSpecialChars(out, text) + out.WriteString("\n.fi\n.RE\n") +} + +func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) { + out.WriteString("\n.PP\n.RS\n") + out.Write(text) + out.WriteString("\n.RE\n") +} + +func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { // nolint: golint + out.Write(text) +} + +func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) { + marker := out.Len() + + switch { + case marker == 0: + // This is the doc header + out.WriteString(".TH ") + case level == 1: + out.WriteString("\n\n.SH ") + case level == 2: + out.WriteString("\n.SH ") + default: + out.WriteString("\n.SS ") + } + + if !text() { + out.Truncate(marker) + return + } +} + +func (r *roffRenderer) HRule(out *bytes.Buffer) { + out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n") +} + +func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) { + marker := out.Len() + r.ListCounters = append(r.ListCounters, 1) + out.WriteString("\n.RS\n") + if !text() { + out.Truncate(marker) + return + } + r.ListCounters = r.ListCounters[:len(r.ListCounters)-1] + out.WriteString("\n.RE\n") +} + +func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) { + if flags&blackfriday.LIST_TYPE_ORDERED != 0 { + out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", r.ListCounters[len(r.ListCounters)-1])) + r.ListCounters[len(r.ListCounters)-1]++ + } else { + out.WriteString(".IP \\(bu 2\n") + } + out.Write(text) + out.WriteString("\n") +} + +func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) { + marker := out.Len() + out.WriteString("\n.PP\n") + if !text() { + 
out.Truncate(marker) + return + } + if marker != 0 { + out.WriteString("\n") + } +} + +func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { + out.WriteString("\n.TS\nallbox;\n") + + maxDelims := 0 + lines := strings.Split(strings.TrimRight(string(header), "\n")+"\n"+strings.TrimRight(string(body), "\n"), "\n") + for _, w := range lines { + curDelims := strings.Count(w, "\t") + if curDelims > maxDelims { + maxDelims = curDelims + } + } + out.Write([]byte(strings.Repeat("l ", maxDelims+1) + "\n")) + out.Write([]byte(strings.Repeat("l ", maxDelims+1) + ".\n")) + out.Write(header) + if len(header) > 0 { + out.Write([]byte("\n")) + } + + out.Write(body) + out.WriteString("\n.TE\n") +} + +func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) { + if out.Len() > 0 { + out.WriteString("\n") + } + out.Write(text) +} + +func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { + if out.Len() > 0 { + out.WriteString("\t") + } + if len(text) == 0 { + text = []byte{' '} + } + out.Write([]byte("\\fB\\fC" + string(text) + "\\fR")) +} + +func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) { + if out.Len() > 0 { + out.WriteString("\t") + } + if len(text) > 30 { + text = append([]byte("T{\n"), text...) + text = append(text, []byte("\nT}")...) + } + if len(text) == 0 { + text = []byte{' '} + } + out.Write(text) +} + +func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) { + +} + +func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { + +} + +func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) { + out.WriteString("\n\\[la]") + out.Write(link) + out.WriteString("\\[ra]") +} + +func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) { + out.WriteString("\\fB\\fC") + escapeSpecialChars(out, text) + out.WriteString("\\fR") +} + +func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\fB") + out.Write(text) + out.WriteString("\\fP") +} + +func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\fI") + out.Write(text) + out.WriteString("\\fP") +} + +func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { +} + +func (r *roffRenderer) LineBreak(out *bytes.Buffer) { + out.WriteString("\n.br\n") +} + +func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { + out.Write(content) + r.AutoLink(out, link, 0) +} + +func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { // nolint: golint + out.Write(tag) +} + +func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\s+2") + out.Write(text) + out.WriteString("\\s-2") +} + +func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) { +} + +func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { + +} + +func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) { + out.WriteString(html.UnescapeString(string(entity))) +} + +func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) { + escapeSpecialChars(out, text) +} + +func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) { +} + +func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) { +} + +func needsBackslash(c byte) bool { + for _, r := range []byte("-_&\\~") { + if c == r { + return true + } + } + return false +} + +func escapeSpecialChars(out *bytes.Buffer, text []byte) { + for i := 0; i < len(text); i++ { + // escape 
initial apostrophe or period + if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { + out.WriteString("\\&") + } + + // directly copy normal characters + org := i + + for i < len(text) && !needsBackslash(text[i]) { + i++ + } + if i > org { + out.Write(text[org:i]) + } + + // escape a character + if i >= len(text) { + break + } + out.WriteByte('\\') + out.WriteByte(text[i]) + } +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/decode.go --- a/vendor/github.com/golang/protobuf/proto/decode.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/golang/protobuf/proto/decode.go Sun Feb 16 18:54:01 2020 +0100 @@ -186,7 +186,6 @@ if b&0x80 == 0 { goto done } - // x -= 0x80 << 63 // Always zero. return 0, errOverflow diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/deprecated.go --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. 
+func RegisterMessageSetType(Message, int32, string) {} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/equal.go --- a/vendor/github.com/golang/protobuf/proto/equal.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/golang/protobuf/proto/equal.go Sun Feb 16 18:54:01 2020 +0100 @@ -246,7 +246,8 @@ return false } - m1, m2 := e1.value, e2.value + m1 := extensionAsLegacyType(e1.value) + m2 := extensionAsLegacyType(e2.value) if m1 == nil && m2 == nil { // Both have only encoded form. diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/extensions.go --- a/vendor/github.com/golang/protobuf/proto/extensions.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/golang/protobuf/proto/extensions.go Sun Feb 16 18:54:01 2020 +0100 @@ -185,9 +185,25 @@ // extension will have only enc set. When such an extension is // accessed using GetExtension (or GetExtensions) desc and value // will be set. - desc *ExtensionDesc + desc *ExtensionDesc + + // value is a concrete value for the extension field. Let the type of + // desc.ExtensionType be the "API type" and the type of Extension.value + // be the "storage type". The API type and storage type are the same except: + // * For scalars (except []byte), the API type uses *T, + // while the storage type uses T. + // * For repeated fields, the API type uses []T, while the storage type + // uses *[]T. + // + // The reason for the divergence is so that the storage type more naturally + // matches what is expected of when retrieving the values through the + // protobuf reflection APIs. + // + // The value may only be populated if desc is also populated. value interface{} - enc []byte + + // enc is the raw bytes for the extension field. + enc []byte } // SetRawExtension is for testing only. @@ -334,7 +350,7 @@ // descriptors with the same field number. return nil, errors.New("proto: descriptor conflict") } - return e.value, nil + return extensionAsLegacyType(e.value), nil } if extension.ExtensionType == nil { @@ -349,11 +365,11 @@ // Remember the decoded version and drop the encoded version. // That way it is safe to mutate what we return. - e.value = v + e.value = extensionAsStorageType(v) e.desc = extension e.enc = nil emap[extension.Field] = e - return e.value, nil + return extensionAsLegacyType(e.value), nil } // defaultExtensionValue returns the default value for extension. @@ -488,7 +504,7 @@ } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension @@ -500,7 +516,7 @@ } extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} + extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} return nil } @@ -541,3 +557,51 @@ func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } + +// extensionAsLegacyType converts an value in the storage type as the API type. +// See Extension.value. 
+func extensionAsLegacyType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + // Represent primitive types as a pointer to the value. + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Slice: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + } + return v +} + +// extensionAsStorageType converts an value in the API type as the storage type. +// See Extension.value. +func extensionAsStorageType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + case reflect.Slice: + // Represent slice types as a pointer to the value. + if rv.Type().Elem().Kind() != reflect.Uint8 { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + } + return v +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/lib.go --- a/vendor/github.com/golang/protobuf/proto/lib.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/golang/protobuf/proto/lib.go Sun Feb 16 18:54:01 2020 +0100 @@ -341,26 +341,6 @@ ProtoMessage() } -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; @@ -413,7 +393,7 @@ // than relying on this API. // // If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and +// by keys in lexicographical order. This is an implementation detail and // subject to change. func (p *Buffer) SetDeterministic(deterministic bool) { p.deterministic = deterministic @@ -960,13 +940,19 @@ return false } -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion3 = true -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. 
-const ProtoPackageIsVersion1 = true + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion2 = true + + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion1 = true +) // InternalMessageInfo is a type used internally by generated .pb.go files. // This type is not intended to be used by non-generated code. diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/message_set.go --- a/vendor/github.com/golang/protobuf/proto/message_set.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/golang/protobuf/proto/message_set.go Sun Feb 16 18:54:01 2020 +0100 @@ -36,13 +36,7 @@ */ import ( - "bytes" - "encoding/json" "errors" - "fmt" - "reflect" - "sort" - "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -145,46 +139,9 @@ return buf[i+1:] } -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - return marshalMessageSet(exts, false) -} - -// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. -func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var u marshalInfo - siz := u.sizeMessageSet(exts) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, exts, deterministic) - - case map[int32]Extension: - // This is an old-style extension map. - // Wrap it in a new-style XXX_InternalExtensions. - ie := XXX_InternalExtensions{ - p: &struct { - mu sync.Mutex - extensionMap map[int32]Extension - }{ - extensionMap: exts, - }, - } - - var u marshalInfo - siz := u.sizeMessageSet(&ie) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, &ie, deterministic) - - default: - return nil, errors.New("proto: not an extension map") - } -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { +func unmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -222,93 +179,3 @@ } return nil } - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var mu sync.Locker - m, mu = exts.extensionsRead() - if m != nil { - // Keep the extensions map locked until we're done marshaling to prevent - // races between marshaling and unmarshaling the lazily-{en,de}coded - // values. 
- mu.Lock() - defer mu.Unlock() - } - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - - if i > 0 && b.Len() > 1 { - b.WriteByte(',') - } - - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/pointer_reflect.go --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go Sun Feb 16 18:54:01 2020 +0100 @@ -79,10 +79,13 @@ // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { +func toAddrPointer(i *interface{}, isptr, deref bool) pointer { v := reflect.ValueOf(*i) u := reflect.New(v.Type()) u.Elem().Set(v) + if deref { + u = u.Elem() + } return pointer{v: u} } diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go Sun Feb 16 18:54:01 2020 +0100 @@ -85,16 +85,21 @@ // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { +func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { // Super-tricky - read or get the address of data word of interface value. if isptr { // The interface is of pointer type, thus it is a direct interface. // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } else { + // The interface is not of pointer type. The data word is the pointer + // to the data. 
+ p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + if deref { + p.p = *(*unsafe.Pointer)(p.p) + } + return p } // valToPointer converts v to a pointer. v must be of pointer type. diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/properties.go --- a/vendor/github.com/golang/protobuf/proto/properties.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/golang/protobuf/proto/properties.go Sun Feb 16 18:54:01 2020 +0100 @@ -38,7 +38,6 @@ import ( "fmt" "log" - "os" "reflect" "sort" "strconv" @@ -194,7 +193,7 @@ // "bytes,49,opt,name=foo,def=hello!" fields := strings.Split(s, ",") // breaks def=, but handled below. if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + log.Printf("proto: tag has too few fields: %q", s) return } @@ -214,7 +213,7 @@ p.WireType = WireBytes // no numeric converter for non-numeric types default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + log.Printf("proto: tag has unknown wire type: %q", s) return } @@ -334,9 +333,6 @@ sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { - if collectStats { - stats.Chit++ - } return sprop } @@ -346,17 +342,20 @@ return sprop } +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } return prop } - if collectStats { - stats.Cmiss++ - } prop := new(StructProperties) // in case of recursive protos, fill this in now. @@ -391,13 +390,14 @@ // Re-order prop.order. sort.Sort(prop) - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - + if len(oots) > 0 { // Interpret oneof metadata. 
prop.OneofTypes = make(map[string]*OneofProperties) for _, oot := range oots { diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/table_marshal.go --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go Sun Feb 16 18:54:01 2020 +0100 @@ -87,6 +87,7 @@ sizer sizer marshaler marshaler isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) + deref bool // dereference the pointer before operating on it; implies isptr } var ( @@ -320,8 +321,11 @@ // get oneof implementers var oneofImplementers []interface{} - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() } n := t.NumField() @@ -407,13 +411,22 @@ panic("tag is not an integer") } wt := wiretype(tags[0]) + if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { + t = t.Elem() + } sizer, marshaler := typeMarshaler(t, tags, false, false) + var deref bool + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + t = reflect.PtrTo(t) + deref = true + } e = &marshalElemInfo{ wiretag: uint64(tag)<<3 | wt, tagsize: SizeVarint(uint64(tag) << 3), sizer: sizer, marshaler: marshaler, isptr: t.Kind() == reflect.Ptr, + deref: deref, } // update cache @@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { fi.field = toField(f) - fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. fi.isPointer = true fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) @@ -476,10 +489,6 @@ } } -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - // wiretype returns the wire encoding of the type. func wiretype(encoding string) uint64 { switch encoding { @@ -2310,8 +2319,8 @@ for _, k := range m.MapKeys() { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) n += siz + SizeVarint(uint64(siz)) + tagsize } @@ -2329,8 +2338,8 @@ for _, k := range keys { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value b = appendVarint(b, tag) siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) b = appendVarint(b, uint64(siz)) @@ -2399,7 +2408,7 @@ // the last time this function was called. 
ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, ei.tagsize) } mu.Unlock() @@ -2434,7 +2443,7 @@ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2465,7 +2474,7 @@ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2510,7 +2519,7 @@ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, 1) // message, tag = 3 (size=1) } mu.Unlock() @@ -2553,7 +2562,7 @@ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) if !nerr.Merge(err) { return b, err @@ -2591,7 +2600,7 @@ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) b = append(b, 1<<3|WireEndGroup) if !nerr.Merge(err) { @@ -2621,7 +2630,7 @@ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, ei.tagsize) } return n @@ -2656,7 +2665,7 @@ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/table_unmarshal.go --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go Sun Feb 16 18:54:01 2020 +0100 @@ -136,7 +136,7 @@ u.computeUnmarshalInfo() } if u.isMessageSet { - return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } var reqMask uint64 // bitmask of required fields we've seen. var errLater error @@ -362,46 +362,48 @@ } // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") - if fn.IsValid() { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break } } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + } // Get extension ranges, if any. - fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") if fn.IsValid() { if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { panic("a message with extensions, but no extensions field in " + t.Name()) @@ -1948,7 +1950,7 @@ // If there is an error, it returns 0,0. func decodeVarint(b []byte) (uint64, int) { var x, y uint64 - if len(b) <= 0 { + if len(b) == 0 { goto bad } x = uint64(b[0]) diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/golang/protobuf/proto/text.go --- a/vendor/github.com/golang/protobuf/proto/text.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/golang/protobuf/proto/text.go Sun Feb 16 18:54:01 2020 +0100 @@ -456,6 +456,8 @@ return nil } +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + // writeAny writes an arbitrary field. 
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -519,8 +521,8 @@ // mutating this value. v = v.Addr() } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() + if v.Type().Implements(textMarshalerType) { + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return err } diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/gorilla/websocket/.travis.yml --- a/vendor/github.com/gorilla/websocket/.travis.yml Wed Sep 18 19:17:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,19 +0,0 @@ -language: go -sudo: false - -matrix: - include: - - go: 1.7.x - - go: 1.8.x - - go: 1.9.x - - go: 1.10.x - - go: 1.11.x - - go: tip - allow_failures: - - go: tip - -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go vet $(go list ./... | grep -v /vendor/) - - go test -v -race ./... diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/gorilla/websocket/README.md --- a/vendor/github.com/gorilla/websocket/README.md Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/gorilla/websocket/README.md Sun Feb 16 18:54:01 2020 +0100 @@ -1,11 +1,11 @@ # Gorilla WebSocket +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) + Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. -[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) -[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) - ### Documentation * [API Reference](http://godoc.org/github.com/gorilla/websocket) @@ -27,7 +27,7 @@ ### Protocol Compliance The Gorilla WebSocket package passes the server tests in the [Autobahn Test -Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn +Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). ### Gorilla WebSocket compared with other packages @@ -40,7 +40,7 @@ RFC 6455 Features -Passes Autobahn Test SuiteYesNo +Passes Autobahn Test SuiteYesNo Receive fragmented messageYesNo, see note 1 Send close messageYesNo Send pings and receive pongsYesNo diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/gorilla/websocket/client.go --- a/vendor/github.com/gorilla/websocket/client.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/gorilla/websocket/client.go Sun Feb 16 18:54:01 2020 +0100 @@ -70,7 +70,7 @@ // HandshakeTimeout specifies the duration for the handshake to complete. HandshakeTimeout time.Duration - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer // size is zero, then a useful default size is used. The I/O buffer sizes // do not limit the size of the messages that can be sent or received. ReadBufferSize, WriteBufferSize int @@ -140,7 +140,7 @@ // Use the response.Header to get the selected subprotocol // (Sec-WebSocket-Protocol) and cookies (Set-Cookie). 
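For orientation, a minimal client sketch of how these Dialer settings are typically used — the endpoint URL, timeout, and buffer sizes below are illustrative assumptions, not values taken from this changeset:

```go
package main

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

func main() {
	// Buffer sizes are in bytes and do not cap message size; zero values
	// fall back to the package defaults.
	dialer := websocket.Dialer{
		HandshakeTimeout: 10 * time.Second,
		ReadBufferSize:   1024,
		WriteBufferSize:  1024,
	}

	// Placeholder URL, for illustration only.
	conn, resp, err := dialer.Dial("wss://example.com/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The selected subprotocol (if any) is available on the handshake response.
	log.Println("subprotocol:", resp.Header.Get("Sec-WebSocket-Protocol"))
}
```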
// -// The context will be used in the request and in the Dialer +// The context will be used in the request and in the Dialer. // // If the WebSocket handshake fails, ErrBadHandshake is returned along with a // non-nil *http.Response so that callers can handle redirects, authentication, diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/gorilla/websocket/conn.go --- a/vendor/github.com/gorilla/websocket/conn.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/gorilla/websocket/conn.go Sun Feb 16 18:54:01 2020 +0100 @@ -260,10 +260,12 @@ newCompressionWriter func(io.WriteCloser, int) io.WriteCloser // Read fields - reader io.ReadCloser // the current reader returned to the application - readErr error - br *bufio.Reader - readRemaining int64 // bytes remaining in current frame. + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + // bytes remaining in current frame. + // set setReadRemaining to safely update this value and prevent overflow + readRemaining int64 readFinal bool // true the current message has more frames. readLength int64 // Message size. readLimit int64 // Maximum message size. @@ -320,6 +322,17 @@ return c } +// setReadRemaining tracks the number of bytes remaining on the connection. If n +// overflows, an ErrReadLimit is returned. +func (c *Conn) setReadRemaining(n int64) error { + if n < 0 { + return ErrReadLimit + } + + c.readRemaining = n + return nil +} + // Subprotocol returns the negotiated protocol for the connection. func (c *Conn) Subprotocol() string { return c.subprotocol @@ -451,7 +464,8 @@ return err } -func (c *Conn) prepWrite(messageType int) error { +// beginMessage prepares a connection and message writer for a new message. +func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { // Close previous writer if not already closed by the application. It's // probably better to return an error in this situation, but we cannot // change this without breaking existing applications. @@ -471,6 +485,10 @@ return err } + mw.c = c + mw.frameType = messageType + mw.pos = maxFrameHeaderSize + if c.writeBuf == nil { wpd, ok := c.writePool.Get().(writePoolData) if ok { @@ -491,16 +509,11 @@ // All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and // PongMessage) are supported. func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - if err := c.prepWrite(messageType); err != nil { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { return nil, err } - - mw := &messageWriter{ - c: c, - frameType: messageType, - pos: maxFrameHeaderSize, - } - c.writer = mw + c.writer = &mw if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { w := c.newCompressionWriter(c.writer, c.compressionLevel) mw.compress = true @@ -517,10 +530,16 @@ err error } -func (w *messageWriter) fatal(err error) error { +func (w *messageWriter) endMessage(err error) error { if w.err != nil { - w.err = err - w.c.writer = nil + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil } return err } @@ -534,7 +553,7 @@ // Check for invalid control frames. 
if isControl(w.frameType) && (!final || length > maxControlFramePayloadSize) { - return w.fatal(errInvalidControlFrame) + return w.endMessage(errInvalidControlFrame) } b0 := byte(w.frameType) @@ -579,7 +598,7 @@ copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) if len(extra) > 0 { - return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) } } @@ -600,15 +619,11 @@ c.isWriting = false if err != nil { - return w.fatal(err) + return w.endMessage(err) } if final { - c.writer = nil - if c.writePool != nil { - c.writePool.Put(writePoolData{buf: c.writeBuf}) - c.writeBuf = nil - } + w.endMessage(errWriteClosed) return nil } @@ -706,11 +721,7 @@ if w.err != nil { return w.err } - if err := w.flushFrame(true, nil); err != nil { - return err - } - w.err = errWriteClosed - return nil + return w.flushFrame(true, nil) } // WritePreparedMessage writes prepared message into connection. @@ -742,10 +753,10 @@ if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { // Fast path with no allocations and single frame. - if err := c.prepWrite(messageType); err != nil { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { return err } - mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} n := copy(c.writeBuf[mw.pos:], data) mw.pos += n data = data[n:] @@ -792,7 +803,7 @@ final := p[0]&finalBit != 0 frameType := int(p[0] & 0xf) mask := p[1]&maskBit != 0 - c.readRemaining = int64(p[1] & 0x7f) + c.setReadRemaining(int64(p[1] & 0x7f)) c.readDecompress = false if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { @@ -826,7 +837,17 @@ return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) } - // 3. Read and parse frame length. + // 3. Read and parse frame length as per + // https://tools.ietf.org/html/rfc6455#section-5.2 + // + // The length of the "Payload data", in bytes: if 0-125, that is the payload + // length. + // - If 126, the following 2 bytes interpreted as a 16-bit unsigned + // integer are the payload length. + // - If 127, the following 8 bytes interpreted as + // a 64-bit unsigned integer (the most significant bit MUST be 0) are the + // payload length. Multibyte length quantities are expressed in network byte + // order. switch c.readRemaining { case 126: @@ -834,13 +855,19 @@ if err != nil { return noFrame, err } - c.readRemaining = int64(binary.BigEndian.Uint16(p)) + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { + return noFrame, err + } case 127: p, err := c.read(8) if err != nil { return noFrame, err } - c.readRemaining = int64(binary.BigEndian.Uint64(p)) + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { + return noFrame, err + } } // 4. Handle frame masking. @@ -863,6 +890,12 @@ if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { c.readLength += c.readRemaining + // Don't allow readLength to overflow in the presence of a large readRemaining + // counter. 
+ if c.readLength < 0 { + return noFrame, ErrReadLimit + } + if c.readLimit > 0 && c.readLength > c.readLimit { c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) return noFrame, ErrReadLimit @@ -876,7 +909,7 @@ var payload []byte if c.readRemaining > 0 { payload, err = c.read(int(c.readRemaining)) - c.readRemaining = 0 + c.setReadRemaining(0) if err != nil { return noFrame, err } @@ -949,6 +982,7 @@ c.readErr = hideTempErr(err) break } + if frameType == TextMessage || frameType == BinaryMessage { c.messageReader = &messageReader{c} c.reader = c.messageReader @@ -989,7 +1023,9 @@ if c.isServer { c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) } - c.readRemaining -= int64(n) + rem := c.readRemaining + rem -= int64(n) + c.setReadRemaining(rem) if c.readRemaining > 0 && c.readErr == io.EOF { c.readErr = errUnexpectedEOF } @@ -1041,7 +1077,7 @@ return c.conn.SetReadDeadline(t) } -// SetReadLimit sets the maximum size for a message read from the peer. If a +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a // message exceeds the limit, the connection sends a close message to the peer // and returns ErrReadLimit to the application. func (c *Conn) SetReadLimit(limit int64) { diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/gorilla/websocket/doc.go --- a/vendor/github.com/gorilla/websocket/doc.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/gorilla/websocket/doc.go Sun Feb 16 18:54:01 2020 +0100 @@ -151,6 +151,53 @@ // checking. The application is responsible for checking the Origin header // before calling the Upgrade function. // +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. +// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default +// size of 4096 when a buffer size field is set to zero. The Upgrader reuses +// buffers created by the HTTP server when a buffer size field is set to zero. +// The HTTP server buffers have a size of 4096 at the time of this writing. +// +// The buffer sizes do not limit the size of a message that can be read or +// written by a connection. +// +// Buffers are held for the lifetime of the connection by default. If the +// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the +// write buffer only when writing a message. +// +// Applications should tune the buffer sizes to balance memory use and +// performance. Increasing the buffer size uses more memory, but can reduce the +// number of system calls to read or write the network. In the case of writing, +// increasing the buffer size can reduce the number of frame headers written to +// the network. +// +// Some guidelines for setting buffer parameters are: +// +// Limit the buffer sizes to the maximum expected message size. Buffers larger +// than the largest message do not provide any benefit. 
+// +// Depending on the distribution of message sizes, setting the buffer size to +// to a value less than the maximum expected message size can greatly reduce +// memory use with a small impact on performance. Here's an example: If 99% of +// the messages are smaller than 256 bytes and the maximum message size is 512 +// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls +// than a buffer size of 512 bytes. The memory savings is 50%. +// +// A write buffer pool is useful when the application has a modest number +// writes over a large number of connections. when buffers are pooled, a larger +// buffer size has a reduced impact on total memory use and has the benefit of +// reducing system calls and frame overhead. +// // Compression EXPERIMENTAL // // Per message compression extensions (RFC 7692) are experimentally supported diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/gorilla/websocket/go.mod --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/gorilla/websocket/go.mod Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,3 @@ +module github.com/gorilla/websocket + +go 1.12 diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/gorilla/websocket/go.sum --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/gorilla/websocket/go.sum Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,2 @@ +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/gorilla/websocket/join.go --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/gorilla/websocket/join.go Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,42 @@ +// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "io" + "strings" +) + +// JoinMessages concatenates received messages to create a single io.Reader. +// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. 
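A small usage sketch for the JoinMessages helper introduced in this file, assuming an already-established *websocket.Conn; the newline terminator and logging are illustrative choices:

```go
package wsutil

import (
	"bufio"
	"log"

	"github.com/gorilla/websocket"
)

// logLines treats each received message as one newline-terminated line by
// joining all messages into a single stream with JoinMessages.
func logLines(conn *websocket.Conn) {
	scanner := bufio.NewScanner(websocket.JoinMessages(conn, "\n"))
	for scanner.Scan() {
		log.Println("message:", scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		log.Println("read error:", err)
	}
}
```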
+func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/gorilla/websocket/proxy.go --- a/vendor/github.com/gorilla/websocket/proxy.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/gorilla/websocket/proxy.go Sun Feb 16 18:54:01 2020 +0100 @@ -22,18 +22,18 @@ func init() { proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { - return &httpProxyDialer{proxyURL: proxyURL, fowardDial: forwardDialer.Dial}, nil + return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil }) } type httpProxyDialer struct { - proxyURL *url.URL - fowardDial func(network, addr string) (net.Conn, error) + proxyURL *url.URL + forwardDial func(network, addr string) (net.Conn, error) } func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { hostPort, _ := hostPortNoPort(hpd.proxyURL) - conn, err := hpd.fowardDial(network, hostPort) + conn, err := hpd.forwardDial(network, hostPort) if err != nil { return nil, err } diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/gorilla/websocket/server.go --- a/vendor/github.com/gorilla/websocket/server.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/gorilla/websocket/server.go Sun Feb 16 18:54:01 2020 +0100 @@ -27,7 +27,7 @@ // HandshakeTimeout specifies the duration for the handshake to complete. HandshakeTimeout time.Duration - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer // size is zero, then buffers allocated by the HTTP server are used. The // I/O buffer sizes do not limit the size of the messages that can be sent // or received. @@ -153,7 +153,7 @@ challengeKey := r.Header.Get("Sec-Websocket-Key") if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank") + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") } subprotocol := u.selectSubprotocol(r, responseHeader) diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/gorilla/websocket/util.go --- a/vendor/github.com/gorilla/websocket/util.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/gorilla/websocket/util.go Sun Feb 16 18:54:01 2020 +0100 @@ -31,68 +31,113 @@ return base64.StdEncoding.EncodeToString(p), nil } -// Octet types from RFC 2616. -var octetTypes [256]byte - -const ( - isTokenOctet = 1 << iota - isSpaceOctet -) - -func init() { - // From RFC 2616 - // - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t byte - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpaceOctet - } - if isChar && !isCtl && !isSeparator { - t |= isTokenOctet - } - octetTypes[c] = t - } +// Token octets per RFC 2616. +var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, } +// skipSpace returns a slice of the string s with all leading RFC 2616 linear +// whitespace removed. func skipSpace(s string) (rest string) { i := 0 for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpaceOctet == 0 { + if b := s[i]; b != ' ' && b != '\t' { break } } return s[i:] } +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. func nextToken(s string) (token, rest string) { i := 0 for ; i < len(s); i++ { - if octetTypes[s[i]]&isTokenOctet == 0 { + if !isTokenOctet[s[i]] { break } } return s[:i], s[i:] } +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. func nextTokenOrQuoted(s string) (value string, rest string) { if !strings.HasPrefix(s, "\"") { return nextToken(s) @@ -128,7 +173,8 @@ return "", "" } -// equalASCIIFold returns true if s is equal to t with ASCII case folding. +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. 
func equalASCIIFold(s, t string) bool { for s != "" && t != "" { sr, size := utf8.DecodeRuneInString(s) diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/kr/text/go.mod --- a/vendor/github.com/kr/text/go.mod Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/kr/text/go.mod Sun Feb 16 18:54:01 2020 +0100 @@ -1,3 +1,3 @@ module "github.com/kr/text" -require "github.com/kr/pty" v1.1.1 +require "github.com/creack/pty" v1.1.9 diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/magiconair/properties/.travis.yml --- a/vendor/github.com/magiconair/properties/.travis.yml Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/magiconair/properties/.travis.yml Sun Feb 16 18:54:01 2020 +0100 @@ -7,4 +7,6 @@ - 1.8.x - 1.9.x - "1.10.x" + - "1.11.x" + - "1.12.x" - tip diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/magiconair/properties/CHANGELOG.md --- a/vendor/github.com/magiconair/properties/CHANGELOG.md Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/magiconair/properties/CHANGELOG.md Sun Feb 16 18:54:01 2020 +0100 @@ -1,5 +1,13 @@ ## Changelog +### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 + + * [PR #26](https://github.com/magiconair/properties/pull/35): Close body always after request + + This patch ensures that in `LoadURL` the response body is always closed. + + Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. + ### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/magiconair/properties/README.md --- a/vendor/github.com/magiconair/properties/README.md Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/magiconair/properties/README.md Sun Feb 16 18:54:01 2020 +0100 @@ -1,6 +1,6 @@ [![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) [![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) -[![Codeship CI Status](https://img.shields.io/codeship/16aaf660-f615-0135-b8f0-7e33b70920c0/master.svg?label=codeship&style=flat-square)](https://app.codeship.com/projects/274177") +[![CircleCI Status](https://img.shields.io/circleci/project/github/magiconair/properties.svg?label=circle+ci&style=flat-square)](https://circleci.com/gh/magiconair/properties) [![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) @@ -30,7 +30,7 @@ error handling functions can be provided. See the package documentation for details. 
-Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties) [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties) +Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) ## Getting Started diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/magiconair/properties/go.mod --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/magiconair/properties/go.mod Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,1 @@ +module github.com/magiconair/properties diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/magiconair/properties/load.go --- a/vendor/github.com/magiconair/properties/load.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/magiconair/properties/load.go Sun Feb 16 18:54:01 2020 +0100 @@ -115,6 +115,7 @@ if err != nil { return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) } + defer resp.Body.Close() if resp.StatusCode == 404 && l.IgnoreMissing { LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) @@ -129,7 +130,6 @@ if err != nil { return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) } - defer resp.Body.Close() ct := resp.Header.Get("Content-Type") var enc Encoding diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/.travis.yml --- a/vendor/github.com/mattn/go-isatty/.travis.yml Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/mattn/go-isatty/.travis.yml Sun Feb 16 18:54:01 2020 +0100 @@ -1,13 +1,14 @@ language: go +sudo: false go: + - 1.13.x - tip -os: - - linux - - osx +before_install: + - go get -t -v ./... -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover script: - - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/README.md --- a/vendor/github.com/mattn/go-isatty/README.md Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/mattn/go-isatty/README.md Sun Feb 16 18:54:01 2020 +0100 @@ -1,7 +1,7 @@ # go-isatty [![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) -[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) [![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) [![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/go.mod --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/mattn/go-isatty/go.mod Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,5 @@ +module github.com/mattn/go-isatty + +go 1.12 + +require golang.org/x/sys v0.0.0-20200116001909-b77594299b42 diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/go.sum --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/mattn/go-isatty/go.sum Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 
h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/go.test.sh --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/mattn/go-isatty/go.test.sh Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/isatty_appengine.go --- a/vendor/github.com/mattn/go-isatty/isatty_appengine.go Wed Sep 18 19:17:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,15 +0,0 @@ -// +build appengine - -package isatty - -// IsTerminal returns true if the file descriptor is terminal which -// is always false on on appengine classic which is a sandboxed PaaS. -func IsTerminal(fd uintptr) bool { - return false -} - -// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/isatty_bsd.go --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go Sun Feb 16 18:54:01 2020 +0100 @@ -3,16 +3,16 @@ package isatty -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TIOCGETA +import "golang.org/x/sys/unix" // IsTerminal return true if the file descriptor is terminal. func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil } + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/isatty_linux.go --- a/vendor/github.com/mattn/go-isatty/isatty_linux.go Wed Sep 18 19:17:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,18 +0,0 @@ -// +build linux -// +build !appengine,!ppc64,!ppc64le - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go --- a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go Wed Sep 18 19:17:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,19 +0,0 @@ -// +build linux -// +build ppc64 ppc64le - -package isatty - -import ( - "unsafe" - - syscall "golang.org/x/sys/unix" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. 
-func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/isatty_others.go --- a/vendor/github.com/mattn/go-isatty/isatty_others.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go Sun Feb 16 18:54:01 2020 +0100 @@ -1,9 +1,14 @@ -// +build !windows -// +build !appengine +// +build appengine js nacl package isatty -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 // terminal. This is also always false on this environment. func IsCygwinTerminal(fd uintptr) bool { return false diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/isatty_plan9.go --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,22 @@ +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(int(fd)) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/isatty_solaris.go --- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go Sun Feb 16 18:54:01 2020 +0100 @@ -14,3 +14,9 @@ err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) return err == nil } + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/isatty_tcgets.go --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,18 @@ +// +build linux aix +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
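The isatty rework above keeps the same two predicates; a minimal sketch of how a caller might combine them (checking stdout is an arbitrary example):

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	fd := os.Stdout.Fd()
	switch {
	case isatty.IsTerminal(fd):
		fmt.Println("stdout is a terminal")
	case isatty.IsCygwinTerminal(fd):
		fmt.Println("stdout is a Cygwin/MSYS2 pty")
	default:
		fmt.Println("stdout is not a terminal")
	}
}
```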
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/isatty_windows.go --- a/vendor/github.com/mattn/go-isatty/isatty_windows.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go Sun Feb 16 18:54:01 2020 +0100 @@ -4,6 +4,7 @@ package isatty import ( + "errors" "strings" "syscall" "unicode/utf16" @@ -11,15 +12,18 @@ ) const ( - fileNameInfo uintptr = 2 - fileTypePipe = 3 + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 ) var ( kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") procGetConsoleMode = kernel32.NewProc("GetConsoleMode") procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") ) func init() { @@ -45,7 +49,10 @@ return false } - if token[0] != `\msys` && token[0] != `\cygwin` { + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { return false } @@ -68,11 +75,35 @@ return true } +// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler +// since GetFileInformationByHandleEx is not avilable under windows Vista and still some old fashion +// guys are using Windows XP, this is a workaround for those guys, it will also work on system from +// Windows vista to 10 +// see https://stackoverflow.com/a/18792477 for details +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + // IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 // terminal. func IsCygwinTerminal(fd uintptr) bool { if procGetFileInformationByHandleEx == nil { - return false + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) } // Cygwin/msys's pty is a pipe. diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mattn/go-isatty/renovate.json --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/mattn/go-isatty/renovate.json Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,8 @@ +{ + "extends": [ + "config:base" + ], + "postUpdateOptions": [ + "gomodTidy" + ] +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mitchellh/mapstructure/.travis.yml --- a/vendor/github.com/mitchellh/mapstructure/.travis.yml Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml Sun Feb 16 18:54:01 2020 +0100 @@ -1,7 +1,7 @@ language: go go: - - 1.9.x + - "1.11.x" - tip script: diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,21 @@ +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. 
[GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mitchellh/mapstructure/decode_hooks.go --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go Sun Feb 16 18:54:01 2020 +0100 @@ -2,6 +2,8 @@ import ( "errors" + "fmt" + "net" "reflect" "strconv" "strings" @@ -115,6 +117,50 @@ } } +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + // StringToTimeHookFunc returns a DecodeHookFunc that converts // strings to time.Time. func StringToTimeHookFunc(layout string) DecodeHookFunc { diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/mitchellh/mapstructure/mapstructure.go --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go Sun Feb 16 18:54:01 2020 +0100 @@ -224,6 +224,17 @@ // Decodes an unknown data type into a specific reflection value. func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + if input == nil { // If the data is nil, then we don't set anything, unless ZeroFields is set // to true. @@ -237,7 +248,6 @@ return nil } - inputVal := reflect.ValueOf(input) if !inputVal.IsValid() { // If the input value is invalid, then we just set the value // to be the zero value. 
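A short sketch of wiring the StringToIPHookFunc hook added above into a mapstructure decoder; the config struct and input map are invented for illustration:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/mitchellh/mapstructure"
)

type listenerConfig struct {
	Addr net.IP `mapstructure:"addr"`
}

func main() {
	input := map[string]interface{}{"addr": "192.0.2.10"}

	var out listenerConfig
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.StringToIPHookFunc(), // converts the string into net.IP
		Result:     &out,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(input); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Addr) // 192.0.2.10
}
```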
@@ -260,8 +270,8 @@ } var err error - inputKind := getKind(outVal) - switch inputKind { + outputKind := getKind(outVal) + switch outputKind { case reflect.Bool: err = d.decodeBool(name, input, outVal) case reflect.Interface: @@ -288,7 +298,7 @@ err = d.decodeFunc(name, input, outVal) default: // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, inputKind) + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) } // If we reached here, then we successfully decoded SOMETHING, so @@ -306,7 +316,16 @@ if val.IsValid() && val.Elem().IsValid() { return d.decode(name, data, val.Elem()) } + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. + // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + if !dataVal.IsValid() { dataVal = reflect.Zero(val.Type()) } @@ -323,7 +342,7 @@ } func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) + dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) converted := true @@ -375,7 +394,7 @@ } func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) + dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) dataType := dataVal.Type() @@ -417,7 +436,7 @@ } func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) + dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) switch { @@ -460,7 +479,7 @@ } func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) + dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) switch { @@ -491,7 +510,7 @@ } func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) + dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) dataType := dataVal.Type() @@ -595,6 +614,20 @@ // Accumulate errors errors := make([]string, 0) + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + for _, k := range dataVal.MapKeys() { fieldName := fmt.Sprintf("%s[%s]", name, k) @@ -706,11 +739,33 @@ } func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return nil + } + // Create an element of the concrete (non pointer) type and decode // into that. Then set the value of the pointer to this type. 
valType := val.Type() valElemType := valType.Elem() - if val.CanSet() { realVal := val if realVal.IsNil() || d.config.ZeroFields { @@ -752,33 +807,44 @@ valSlice := val if valSlice.IsNil() || d.config.ZeroFields { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + // Check input type if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - // Create slice of maps of other sizes - return d.decodeSlice(name, []interface{}{data}, val) - - case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: - return d.decodeSlice(name, []byte(dataVal.String()), val) - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. - default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } return fmt.Errorf( "'%s': source data must be an array or slice, got %s", name, dataValKind) } + // If the input value is empty, then don't allocate since non-nil != nil + if dataVal.Len() == 0 { + return nil + } + // Make a new slice to hold our result, same size as the original data. valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) } @@ -888,10 +954,29 @@ } dataValKind := dataVal.Kind() - if dataValKind != reflect.Map { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. 
+ m := make(map[string]interface{}) + mval := reflect.Indirect(reflect.ValueOf(&m)) + if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, mval, val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) } +} +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { dataValType := dataVal.Type() if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { return fmt.Errorf( diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/.dockerignore --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pelletier/go-toml/.dockerignore Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,2 @@ +cmd/tomll/tomll +cmd/tomljson/tomljson diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/.gitignore --- a/vendor/github.com/pelletier/go-toml/.gitignore Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/.gitignore Sun Feb 16 18:54:01 2020 +0100 @@ -1,2 +1,5 @@ test_program/test_program_bin fuzz/ +cmd/tomll/tomll +cmd/tomljson/tomljson +cmd/tomltestgen/tomltestgen diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/.travis.yml --- a/vendor/github.com/pelletier/go-toml/.travis.yml Wed Sep 18 19:17:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,23 +0,0 @@ -sudo: false -language: go -go: - - 1.8.x - - 1.9.x - - 1.10.x - - tip -matrix: - allow_failures: - - go: tip - fast_finish: true -script: - - if [ -n "$(go fmt ./...)" ]; then exit 1; fi - - ./test.sh - - ./benchmark.sh $TRAVIS_BRANCH https://github.com/$TRAVIS_REPO_SLUG.git -before_install: - - go get github.com/axw/gocov/gocov - - go get github.com/mattn/goveralls - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi -branches: - only: [master] -after_success: - - $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=coverage.out -repotoken $COVERALLS_TOKEN diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/CONTRIBUTING.md --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,132 @@ +## Contributing + +Thank you for your interest in go-toml! We appreciate you considering +contributing to go-toml! + +The main goal is the project is to provide an easy-to-use TOML +implementation for Go that gets the job done and gets out of your way – +dealing with TOML is probably not the central piece of your project. + +As the single maintainer of go-toml, time is scarce. All help, big or +small, is more than welcomed! + +### Ask questions + +Any question you may have, somebody else might have it too. Always feel +free to ask them on the [issues tracker][issues-tracker]. We will try to +answer them as clearly and quickly as possible, time permitting. + +Asking questions also helps us identify areas where the documentation needs +improvement, or new features that weren't envisioned before. Sometimes, a +seemingly innocent question leads to the fix of a bug. Don't hesitate and +ask away! + +### Improve the documentation + +The best way to share your knowledge and experience with go-toml is to +improve the documentation. Fix a typo, clarify an interface, add an +example, anything goes! + +The documentation is present in the [README][readme] and thorough the +source code. On release, it gets updated on [GoDoc][godoc]. 
To make a +change to the documentation, create a pull request with your proposed +changes. For simple changes like that, the easiest way to go is probably +the "Fork this project and edit the file" button on Github, displayed at +the top right of the file. Unless it's a trivial change (for example a +typo), provide a little bit of context in your pull request description or +commit message. + +### Report a bug + +Found a bug! Sorry to hear that :(. Help us and other track them down and +fix by reporting it. [File a new bug report][bug-report] on the [issues +tracker][issues-tracker]. The template should provide enough guidance on +what to include. When in doubt: add more details! By reducing ambiguity and +providing more information, it decreases back and forth and saves everyone +time. + +### Code changes + +Want to contribute a patch? Very happy to hear that! + +First, some high-level rules: + +* A short proposal with some POC code is better than a lengthy piece of + text with no code. Code speaks louder than words. +* No backward-incompatible patch will be accepted unless discussed. + Sometimes it's hard, and Go's lack of versioning by default does not + help, but we try not to break people's programs unless we absolutely have + to. +* If you are writing a new feature or extending an existing one, make sure + to write some documentation. +* Bug fixes need to be accompanied with regression tests. +* New code needs to be tested. +* Your commit messages need to explain why the change is needed, even if + already included in the PR description. + +It does sound like a lot, but those best practices are here to save time +overall and continuously improve the quality of the project, which is +something everyone benefits from. + +#### Get started + +The fairly standard code contribution process looks like that: + +1. [Fork the project][fork]. +2. Make your changes, commit on any branch you like. +3. [Open up a pull request][pull-request] +4. Review, potential ask for changes. +5. Merge. You're in! + +Feel free to ask for help! You can create draft pull requests to gather +some early feedback! + +#### Run the tests + +You can run tests for go-toml using Go's test tool: `go test ./...`. +When creating a pull requests, all tests will be ran on Linux on a few Go +versions (Travis CI), and on Windows using the latest Go version +(AppVeyor). + +#### Style + +Try to look around and follow the same format and structure as the rest of +the code. We enforce using `go fmt` on the whole code base. + +--- + +### Maintainers-only + +#### Merge pull request + +Checklist: + +* Passing CI. +* Does not introduce backward-incompatible changes (unless discussed). +* Has relevant doc changes. +* Has relevant unit tests. + +1. Merge using "squash and merge". +2. Make sure to edit the commit message to keep all the useful information + nice and clean. +3. Make sure the commit title is clear and contains the PR number (#123). + +#### New release + +1. Go to [releases][releases]. Click on "X commits to master since this + release". +2. Make note of all the changes. Look for backward incompatible changes, + new features, and bug fixes. +3. Pick the new version using the above and semver. +4. Create a [new release][new-release]. +5. Follow the same format as [1.1.0][release-110]. 
+ +[issues-tracker]: https://github.com/pelletier/go-toml/issues +[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md +[godoc]: https://godoc.org/github.com/pelletier/go-toml +[readme]: ./README.md +[fork]: https://help.github.com/articles/fork-a-repo +[pull-request]: https://help.github.com/en/articles/creating-a-pull-request +[releases]: https://github.com/pelletier/go-toml/releases +[new-release]: https://github.com/pelletier/go-toml/releases/new +[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/Dockerfile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pelletier/go-toml/Dockerfile Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,11 @@ +FROM golang:1.12-alpine3.9 as builder +WORKDIR /go/src/github.com/pelletier/go-toml +COPY . . +ENV CGO_ENABLED=0 +ENV GOOS=linux +RUN go install ./... + +FROM scratch +COPY --from=builder /go/bin/tomll /usr/bin/tomll +COPY --from=builder /go/bin/tomljson /usr/bin/tomljson +COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,5 @@ +**Issue:** add link to pelletier/go-toml issue here + +Explanation of what this pull request does. + +More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/README.md --- a/vendor/github.com/pelletier/go-toml/README.md Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/README.md Sun Feb 16 18:54:01 2020 +0100 @@ -3,13 +3,14 @@ Go library for the [TOML](https://github.com/mojombo/toml) format. This library supports TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) +[v0.5.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md) [![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml) [![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) -[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml) -[![Coverage Status](https://coveralls.io/repos/github/pelletier/go-toml/badge.svg?branch=master)](https://coveralls.io/github/pelletier/go-toml?branch=master) +[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) +[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) [![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) ## Features @@ -98,6 +99,30 @@ go install github.com/pelletier/go-toml/cmd/tomljson tomljson --help ``` + + * `jsontoml`: Reads a JSON file and outputs a TOML representation. 
+ + ``` + go install github.com/pelletier/go-toml/cmd/jsontoml + jsontoml --help + ``` + +### Docker image + +Those tools are also availble as a Docker image from +[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to +use `tomljson`: + +``` +docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml +``` + +Only master (`latest`) and tagged versions are published to dockerhub. You +can build your own image as usual: + +``` +docker build -t go-toml . +``` ## Contribute @@ -107,12 +132,7 @@ ### Run tests -You have to make sure two kind of tests run: - -1. The Go unit tests -2. The TOML examples base - -You can run both of them using `./test.sh`. +`go test ./...` ### Fuzzing diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/azure-pipelines.yml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,167 @@ +trigger: +- master + +stages: +- stage: fuzzit + displayName: "Run Fuzzit" + dependsOn: [] + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: submit + displayName: "Submit" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.13" + inputs: + version: "1.13" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml + - script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml + - task: Bash@3 + inputs: + filePath: './fuzzit.sh' + env: + TYPE: fuzzing + FUZZIT_API_KEY: $(FUZZIT_API_KEY) + +- stage: run_checks + displayName: "Check" + dependsOn: [] + jobs: + - job: fmt + displayName: "fmt" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.13" + inputs: + version: "1.13" + - task: Go@0 + displayName: "go fmt ./..." + inputs: + command: 'custom' + customCommand: 'fmt' + arguments: './...' + - job: coverage + displayName: "coverage" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.13" + inputs: + version: "1.13" + - task: Go@0 + displayName: "Generate coverage" + inputs: + command: 'test' + arguments: "-race -coverprofile=coverage.txt -covermode=atomic" + - task: Bash@3 + inputs: + targetType: 'inline' + script: 'bash <(curl -s https://codecov.io/bash) -t $(CODECOV_TOKEN)' + - job: benchmark + displayName: "benchmark" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.13" + inputs: + version: "1.13" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - task: Bash@3 + inputs: + filePath: './benchmark.sh' + arguments: "master $(Build.Repository.Uri)" + + - job: fuzzing + displayName: "fuzzing" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.13" + inputs: + version: "1.13" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml + - script: cp -R . 
${HOME}/go/src/github.com/pelletier/go-toml + - task: Bash@3 + inputs: + filePath: './fuzzit.sh' + env: + TYPE: local-regression + + - job: go_unit_tests + displayName: "unit tests" + strategy: + matrix: + linux 1.13: + goVersion: '1.13' + imageName: 'ubuntu-latest' + mac 1.13: + goVersion: '1.13' + imageName: 'macos-10.13' + windows 1.13: + goVersion: '1.13' + imageName: 'vs2017-win2016' + linux 1.12: + goVersion: '1.12' + imageName: 'ubuntu-latest' + mac 1.12: + goVersion: '1.12' + imageName: 'macos-10.13' + windows 1.12: + goVersion: '1.12' + imageName: 'vs2017-win2016' + pool: + vmImage: $(imageName) + steps: + - task: GoTool@0 + displayName: "Install Go $(goVersion)" + inputs: + version: $(goVersion) + - task: Go@0 + displayName: "go test ./..." + inputs: + command: 'test' + arguments: './...' + +- stage: build_docker_image + displayName: "Build Docker image" + dependsOn: run_checks + jobs: + - job: build + displayName: "Build" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + command: 'build' + Dockerfile: 'Dockerfile' + buildContext: '.' + addPipelineData: false + +- stage: publish_docker_image + displayName: "Publish Docker image" + dependsOn: build_docker_image + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: publish + displayName: "Publish" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + containerRegistry: 'DockerHub' + repository: 'pelletier/go-toml' + command: 'buildAndPush' + Dockerfile: 'Dockerfile' + buildContext: '.' + tags: 'latest' \ No newline at end of file diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/benchmark.sh --- a/vendor/github.com/pelletier/go-toml/benchmark.sh Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/benchmark.sh Sun Feb 16 18:54:01 2020 +0100 @@ -1,6 +1,6 @@ #!/bin/bash -set -e +set -ex reference_ref=${1:-master} reference_git=${2:-.} @@ -8,7 +8,6 @@ if ! `hash benchstat 2>/dev/null`; then echo "Installing benchstat" go get golang.org/x/perf/cmd/benchstat - go install golang.org/x/perf/cmd/benchstat fi tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` @@ -29,4 +28,4 @@ echo "" echo "=== diff" -benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} \ No newline at end of file +benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/doc.go --- a/vendor/github.com/pelletier/go-toml/doc.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/doc.go Sun Feb 16 18:54:01 2020 +0100 @@ -1,7 +1,7 @@ // Package toml is a TOML parser and manipulation library. 
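The go-toml package documentation in doc.go is updated along with the rest of the vendored library (the supported TOML version moves to v0.5.0). For orientation, here is a minimal, hedged sketch of the library's basic read API, toml.Load plus a dotted-key Get, as described in the project README; it is not part of the patch and the document contents are invented:

```go
// Minimal go-toml usage sketch (assumption: the v1 API from the README).
package main

import (
    "fmt"
    "log"

    "github.com/pelletier/go-toml"
)

func main() {
    tree, err := toml.Load(`
[postgres]
user = "pelletier"
password = "mypassword"`)
    if err != nil {
        log.Fatal(err)
    }
    // Get splits the key on dots and walks the tree.
    user := tree.Get("postgres.user").(string)
    fmt.Println(user) // pelletier
}
```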
// // This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md // // Marshaling // diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/fuzzit.sh --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pelletier/go-toml/fuzzit.sh Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,26 @@ +#!/bin/bash +set -xe + +# go-fuzz doesn't support modules yet, so ensure we do everything +# in the old style GOPATH way +export GO111MODULE="off" + +# install go-fuzz +go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build + +# target name can only contain lower-case letters (a-z), digits (0-9) and a dash (-) +# to add another target, make sure to create it with `fuzzit create target` +# before using `fuzzit create job` +TARGET=toml-fuzzer + +go-fuzz-build -libfuzzer -o ${TARGET}.a github.com/pelletier/go-toml +clang -fsanitize=fuzzer ${TARGET}.a -o ${TARGET} + +# install fuzzit for talking to fuzzit.dev service +# or latest version: +# https://github.com/fuzzitdev/fuzzit/releases/latest/download/fuzzit_Linux_x86_64 +wget -q -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.52/fuzzit_Linux_x86_64 +chmod a+x fuzzit + +# TODO: change kkowalczyk to go-toml and create toml-fuzzer target there +./fuzzit create job --type $TYPE go-toml/${TARGET} ${TARGET} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/go.mod --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pelletier/go-toml/go.mod Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,9 @@ +module github.com/pelletier/go-toml + +go 1.12 + +require ( + github.com/BurntSushi/toml v0.3.1 + github.com/davecgh/go-spew v1.1.1 + gopkg.in/yaml.v2 v2.2.4 +) diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/go.sum --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pelletier/go-toml/go.sum Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,11 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/keysparsing.go --- a/vendor/github.com/pelletier/go-toml/keysparsing.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go Sun Feb 16 18:54:01 2020 +0100 @@ -3,79 +3,107 @@ package toml import ( - "bytes" "errors" "fmt" "unicode" ) // Convert the bare key group string to an array. -// The input supports double quotation to allow "." 
inside the key name, +// The input supports double quotation and single quotation, // but escape sequences are not supported. Lexers must unescape them beforehand. func parseKey(key string) ([]string, error) { - groups := []string{} - var buffer bytes.Buffer - inQuotes := false - wasInQuotes := false - ignoreSpace := true - expectDot := false + runes := []rune(key) + var groups []string + + if len(key) == 0 { + return nil, errors.New("empty key") + } - for _, char := range key { - if ignoreSpace { - if char == ' ' { - continue - } - ignoreSpace = false + idx := 0 + for idx < len(runes) { + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip leading whitespace + } + if idx >= len(runes) { + break } - switch char { - case '"': - if inQuotes { - groups = append(groups, buffer.String()) - buffer.Reset() - wasInQuotes = true + r := runes[idx] + if isValidBareChar(r) { + // parse bare key + startIdx := idx + endIdx := -1 + idx++ + for idx < len(runes) { + r = runes[idx] + if isValidBareChar(r) { + idx++ + } else if r == '.' { + endIdx = idx + break + } else if isSpace(r) { + endIdx = idx + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip trailing whitespace + } + if idx < len(runes) && runes[idx] != '.' { + return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) + } + break + } else { + return nil, fmt.Errorf("invalid bare key character: %c", r) + } + } + if endIdx == -1 { + endIdx = idx } - inQuotes = !inQuotes - expectDot = false - case '.': - if inQuotes { - buffer.WriteRune(char) - } else { - if !wasInQuotes { - if buffer.Len() == 0 { - return nil, errors.New("empty table key") - } - groups = append(groups, buffer.String()) - buffer.Reset() + groups = append(groups, string(runes[startIdx:endIdx])) + } else if r == '\'' { + // parse single quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed single-quoted key") + } + r = runes[idx] + if r == '\'' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break } - ignoreSpace = true - expectDot = false - wasInQuotes = false - } - case ' ': - if inQuotes { - buffer.WriteRune(char) - } else { - expectDot = true + idx++ } - default: - if !inQuotes && !isValidBareChar(char) { - return nil, fmt.Errorf("invalid bare character: %c", char) + } else if r == '"' { + // parse double quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed double-quoted key") + } + r = runes[idx] + if r == '"' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ } - if !inQuotes && expectDot { - return nil, errors.New("what?") + } else if r == '.' 
{ + idx++ + if idx >= len(runes) { + return nil, fmt.Errorf("unexpected end of key") } - buffer.WriteRune(char) - expectDot = false + r = runes[idx] + if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { + return nil, fmt.Errorf("expecting key part after dot") + } + } else { + return nil, fmt.Errorf("invalid key character: %c", r) } } - if inQuotes { - return nil, errors.New("mismatched quotes") - } - if buffer.Len() > 0 { - groups = append(groups, buffer.String()) - } if len(groups) == 0 { - return nil, errors.New("empty key") + return nil, fmt.Errorf("empty key") } return groups, nil } diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/lexer.go --- a/vendor/github.com/pelletier/go-toml/lexer.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/lexer.go Sun Feb 16 18:54:01 2020 +0100 @@ -223,9 +223,12 @@ } possibleDate := l.peekString(35) - dateMatch := dateRegexp.FindString(possibleDate) - if dateMatch != "" { - l.fastForward(len(dateMatch)) + dateSubmatches := dateRegexp.FindStringSubmatch(possibleDate) + if dateSubmatches != nil && dateSubmatches[0] != "" { + l.fastForward(len(dateSubmatches[0])) + if dateSubmatches[2] == "" { // no timezone information => local date + return l.lexLocalDate + } return l.lexDate } @@ -247,13 +250,13 @@ func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { l.next() l.emit(tokenLeftCurlyBrace) - return l.lexRvalue + return l.lexVoid } func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { l.next() l.emit(tokenRightCurlyBrace) - return l.lexRvalue + return l.lexVoid } func (l *tomlLexer) lexDate() tomlLexStateFn { @@ -261,6 +264,11 @@ return l.lexRvalue } +func (l *tomlLexer) lexLocalDate() tomlLexStateFn { + l.emit(tokenLocalDate) + return l.lexRvalue +} + func (l *tomlLexer) lexTrue() tomlLexStateFn { l.fastForward(4) l.emit(tokenTrue) @@ -309,7 +317,7 @@ if err != nil { return l.errorf(err.Error()) } - growingString += str + growingString += "\"" + str + "\"" l.next() continue } else if r == '\'' { @@ -318,13 +326,15 @@ if err != nil { return l.errorf(err.Error()) } - growingString += str + growingString += "'" + str + "'" l.next() continue } else if r == '\n' { return l.errorf("keys cannot contain new lines") } else if isSpace(r) { break + } else if r == '.' { + // skip } else if !isValidBareChar(r) { return l.errorf("keys cannot contain %c character", r) } @@ -731,7 +741,27 @@ } func init() { - dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`) + // Regexp for all date/time formats supported by TOML. + // Group 1: nano precision + // Group 2: timezone + // + // /!\ also matches the empty string + // + // Example matches: + //1979-05-27T07:32:00Z + //1979-05-27T00:32:00-07:00 + //1979-05-27T00:32:00.999999-07:00 + //1979-05-27 07:32:00Z + //1979-05-27 00:32:00-07:00 + //1979-05-27 00:32:00.999999-07:00 + //1979-05-27T07:32:00 + //1979-05-27T00:32:00.999999 + //1979-05-27 07:32:00 + //1979-05-27 00:32:00.999999 + //1979-05-27 + //07:32:00 + //00:32:00.999999 + dateRegexp = regexp.MustCompile(`^(?:\d{1,4}-\d{2}-\d{2})?(?:[T ]?\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})?)?`) } // Entry point diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/localtime.go --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pelletier/go-toml/localtime.go Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,281 @@ +// Implementation of TOML's local date/time. 
+// Copied over from https://github.com/googleapis/google-cloud-go/blob/master/civil/civil.go +// to avoid pulling all the Google dependencies. +// +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package toml + +import ( + "fmt" + "time" +) + +// A LocalDate represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type LocalDate struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. +} + +// LocalDateOf returns the LocalDate in which a time occurs in that time's location. +func LocalDateOf(t time.Time) LocalDate { + var d LocalDate + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseLocalDate(s string) (LocalDate, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return LocalDate{}, err + } + return LocalDateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d LocalDate) IsValid() bool { + return LocalDateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. +// +// In is always consistent with time.LocalDate, even when time.LocalDate returns a time +// on a different day. For example, if loc is America/Indiana/Vincennes, then both +// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) +// and +// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) +// return 23:00:00 on April 30, 1955. +// +// In panics if loc is nil. +func (d LocalDate) In(loc *time.Location) time.Time { + return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) +} + +// AddDays returns the date that is n days in the future. +// n can also be negative to go into the past. +func (d LocalDate) AddDays(n int) LocalDate { + return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) +} + +// DaysSince returns the signed number of days between the date and s, not including the end day. +// This is the inverse operation to AddDays. +func (d LocalDate) DaysSince(s LocalDate) (days int) { + // We convert to Unix time so we do not have to worry about leap seconds: + // Unix time increases by exactly 86400 seconds per day. 
+ deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() + return int(deltaUnix / 86400) +} + +// Before reports whether d1 occurs before d2. +func (d1 LocalDate) Before(d2 LocalDate) bool { + if d1.Year != d2.Year { + return d1.Year < d2.Year + } + if d1.Month != d2.Month { + return d1.Month < d2.Month + } + return d1.Day < d2.Day +} + +// After reports whether d1 occurs after d2. +func (d1 LocalDate) After(d2 LocalDate) bool { + return d2.Before(d1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of d.String(). +func (d LocalDate) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The date is expected to be a string in a format accepted by ParseLocalDate. +func (d *LocalDate) UnmarshalText(data []byte) error { + var err error + *d, err = ParseLocalDate(string(data)) + return err +} + +// A LocalTime represents a time with nanosecond precision. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +// +// This type exists to represent the TIME type in storage-based APIs like BigQuery. +// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. +type LocalTime struct { + Hour int // The hour of the day in 24-hour format; range [0-23] + Minute int // The minute of the hour; range [0-59] + Second int // The second of the minute; range [0-59] + Nanosecond int // The nanosecond of the second; range [0-999999999] +} + +// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs +// in that time's location. It ignores the date. +func LocalTimeOf(t time.Time) LocalTime { + var tm LocalTime + tm.Hour, tm.Minute, tm.Second = t.Clock() + tm.Nanosecond = t.Nanosecond() + return tm +} + +// ParseLocalTime parses a string and returns the time value it represents. +// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After +// the HH:MM:SS part of the string, an optional fractional part may appear, +// consisting of a decimal point followed by one to nine decimal digits. +// (RFC3339 admits only one digit after the decimal point). +func ParseLocalTime(s string) (LocalTime, error) { + t, err := time.Parse("15:04:05.999999999", s) + if err != nil { + return LocalTime{}, err + } + return LocalTimeOf(t), nil +} + +// String returns the date in the format described in ParseLocalTime. If Nanoseconds +// is zero, no fractional part will be generated. Otherwise, the result will +// end with a fractional part consisting of a decimal point and nine digits. +func (t LocalTime) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) + if t.Nanosecond == 0 { + return s + } + return s + fmt.Sprintf(".%09d", t.Nanosecond) +} + +// IsValid reports whether the time is valid. +func (t LocalTime) IsValid() bool { + // Construct a non-zero time. + tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) + return LocalTimeOf(tm) == t +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of t.String(). +func (t LocalTime) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The time is expected to be a string in a format accepted by ParseLocalTime. 
+func (t *LocalTime) UnmarshalText(data []byte) error { + var err error + *t, err = ParseLocalTime(string(data)) + return err +} + +// A LocalDateTime represents a date and time. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +type LocalDateTime struct { + Date LocalDate + Time LocalTime +} + +// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. + +// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. +func LocalDateTimeOf(t time.Time) LocalDateTime { + return LocalDateTime{ + Date: LocalDateOf(t), + Time: LocalTimeOf(t), + } +} + +// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. +// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits +// the time offset but includes an optional fractional time, as described in +// ParseLocalTime. Informally, the accepted format is +// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] +// where the 'T' may be a lower-case 't'. +func ParseLocalDateTime(s string) (LocalDateTime, error) { + t, err := time.Parse("2006-01-02T15:04:05.999999999", s) + if err != nil { + t, err = time.Parse("2006-01-02t15:04:05.999999999", s) + if err != nil { + return LocalDateTime{}, err + } + } + return LocalDateTimeOf(t), nil +} + +// String returns the date in the format described in ParseLocalDate. +func (dt LocalDateTime) String() string { + return dt.Date.String() + "T" + dt.Time.String() +} + +// IsValid reports whether the datetime is valid. +func (dt LocalDateTime) IsValid() bool { + return dt.Date.IsValid() && dt.Time.IsValid() +} + +// In returns the time corresponding to the LocalDateTime in the given location. +// +// If the time is missing or ambigous at the location, In returns the same +// result as time.LocalDate. For example, if loc is America/Indiana/Vincennes, then +// both +// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc) +// and +// civil.LocalDateTime{ +// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}}, +// civil.LocalTime{Minute: 30}}.In(loc) +// return 23:30:00 on April 30, 1955. +// +// In panics if loc is nil. +func (dt LocalDateTime) In(loc *time.Location) time.Time { + return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) +} + +// Before reports whether dt1 occurs before dt2. +func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { + return dt1.In(time.UTC).Before(dt2.In(time.UTC)) +} + +// After reports whether dt1 occurs after dt2. +func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { + return dt2.Before(dt1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of dt.String(). +func (dt LocalDateTime) MarshalText() ([]byte, error) { + return []byte(dt.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
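localtime.go ports Google's civil-time types into the toml package as LocalDate, LocalTime and LocalDateTime, so TOML 0.5 local dates and times can be represented without a time zone. A short, hedged sketch of how those exported types behave, based on the constructors and String methods defined in this file (the literal values are illustrative):

```go
// Sketch of the new local date/time types; output comments reflect the
// String formats documented above.
package main

import (
    "fmt"
    "log"
    "time"

    "github.com/pelletier/go-toml"
)

func main() {
    d, err := toml.ParseLocalDate("1979-05-27")
    if err != nil {
        log.Fatal(err)
    }
    t, err := toml.ParseLocalTime("07:32:00")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(d, t)         // 1979-05-27 07:32:00
    fmt.Println(d.AddDays(7)) // 1979-06-03

    dt := toml.LocalDateTime{Date: d, Time: t}
    fmt.Println(dt)              // 1979-05-27T07:32:00
    fmt.Println(dt.In(time.UTC)) // 1979-05-27 07:32:00 +0000 UTC
}
```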
+// The datetime is expected to be a string in a format accepted by ParseLocalDateTime +func (dt *LocalDateTime) UnmarshalText(data []byte) error { + var err error + *dt, err = ParseLocalDateTime(string(data)) + return err +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/marshal.go --- a/vendor/github.com/pelletier/go-toml/marshal.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/marshal.go Sun Feb 16 18:54:01 2020 +0100 @@ -6,20 +6,28 @@ "fmt" "io" "reflect" + "sort" "strconv" "strings" "time" ) -const tagKeyMultiline = "multiline" +const ( + tagFieldName = "toml" + tagFieldComment = "comment" + tagCommented = "commented" + tagMultiline = "multiline" + tagDefault = "default" +) type tomlOpts struct { - name string - comment string - commented bool - multiline bool - include bool - omitempty bool + name string + comment string + commented bool + multiline bool + include bool + omitempty bool + defaultValue string } type encOpts struct { @@ -31,10 +39,40 @@ quoteMapKeys: false, } +type annotation struct { + tag string + comment string + commented string + multiline string + defaultValue string +} + +var annotationDefault = annotation{ + tag: tagFieldName, + comment: tagFieldComment, + commented: tagCommented, + multiline: tagMultiline, + defaultValue: tagDefault, +} + +type marshalOrder int + +// Orders the Encoder can write the fields to the output stream. +const ( + // Sort fields alphabetically. + OrderAlphabetical marshalOrder = iota + 1 + // Preserve the order the fields are encountered. For example, the order of fields in + // a struct. + OrderPreserve +) + var timeType = reflect.TypeOf(time.Time{}) var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() +var localDateType = reflect.TypeOf(LocalDate{}) +var localTimeType = reflect.TypeOf(LocalTime{}) +var localDateTimeType = reflect.TypeOf(LocalDateTime{}) -// Check if the given marshall type maps to a Tree primitive +// Check if the given marshal type maps to a Tree primitive func isPrimitive(mtype reflect.Type) bool { switch mtype.Kind() { case reflect.Ptr: @@ -50,37 +88,41 @@ case reflect.String: return true case reflect.Struct: - return mtype == timeType || isCustomMarshaler(mtype) + return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType || isCustomMarshaler(mtype) default: return false } } -// Check if the given marshall type maps to a Tree slice -func isTreeSlice(mtype reflect.Type) bool { +// Check if the given marshal type maps to a Tree slice or array +func isTreeSequence(mtype reflect.Type) bool { switch mtype.Kind() { - case reflect.Slice: - return !isOtherSlice(mtype) + case reflect.Ptr: + return isTreeSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTree(mtype.Elem()) default: return false } } -// Check if the given marshall type maps to a non-Tree slice -func isOtherSlice(mtype reflect.Type) bool { +// Check if the given marshal type maps to a non-Tree slice or array +func isOtherSequence(mtype reflect.Type) bool { switch mtype.Kind() { case reflect.Ptr: - return isOtherSlice(mtype.Elem()) - case reflect.Slice: - return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem()) + return isOtherSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return !isTreeSequence(mtype) default: return false } } -// Check if the given marshall type maps to a Tree +// Check if the given marshal type maps to a Tree func isTree(mtype reflect.Type) bool { switch mtype.Kind() { + case reflect.Ptr: + 
return isTree(mtype.Elem()) case reflect.Map: return true case reflect.Struct: @@ -135,7 +177,9 @@ float64 float32, float64, pointers to same string string, pointers to same bool bool, pointers to same - time.Time time.Time{}, pointers to same + time.LocalTime time.LocalTime{}, pointers to same + +For additional flexibility, use the Encoder API. */ func Marshal(v interface{}) ([]byte, error) { return NewEncoder(nil).marshal(v) @@ -145,13 +189,21 @@ type Encoder struct { w io.Writer encOpts + annotation + line int + col int + order marshalOrder } // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { return &Encoder{ - w: w, - encOpts: encOptsDefaults, + w: w, + encOpts: encOptsDefaults, + annotation: annotationDefault, + line: 0, + col: 1, + order: OrderAlphabetical, } } @@ -197,11 +249,49 @@ return e } +// Order allows to change in which order fields will be written to the output stream. +func (e *Encoder) Order(ord marshalOrder) *Encoder { + e.order = ord + return e +} + +// SetTagName allows changing default tag "toml" +func (e *Encoder) SetTagName(v string) *Encoder { + e.tag = v + return e +} + +// SetTagComment allows changing default tag "comment" +func (e *Encoder) SetTagComment(v string) *Encoder { + e.comment = v + return e +} + +// SetTagCommented allows changing default tag "commented" +func (e *Encoder) SetTagCommented(v string) *Encoder { + e.commented = v + return e +} + +// SetTagMultiline allows changing default tag "multiline" +func (e *Encoder) SetTagMultiline(v string) *Encoder { + e.multiline = v + return e +} + func (e *Encoder) marshal(v interface{}) ([]byte, error) { mtype := reflect.TypeOf(v) - if mtype.Kind() != reflect.Struct { - return []byte{}, errors.New("Only a struct can be marshaled to TOML") + + switch mtype.Kind() { + case reflect.Struct, reflect.Map: + case reflect.Ptr: + if mtype.Elem().Kind() != reflect.Struct { + return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") + } + default: + return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") } + sval := reflect.ValueOf(v) if isCustomMarshaler(mtype) { return callCustomMarshaler(sval) @@ -212,22 +302,27 @@ } var buf bytes.Buffer - _, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine) + _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order) return buf.Bytes(), err } +// Create next tree with a position based on Encoder.line +func (e *Encoder) nextTree() *Tree { + return newTreeWithPosition(Position{Line: e.line, Col: 1}) +} + // Convert given marshal struct or map value to toml tree func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { if mtype.Kind() == reflect.Ptr { return e.valueToTree(mtype.Elem(), mval.Elem()) } - tval := newTree() + tval := e.nextTree() switch mtype.Kind() { case reflect.Struct: for i := 0; i < mtype.NumField(); i++ { mtypef, mvalf := mtype.Field(i), mval.Field(i) - opts := tomlOptions(mtypef) + opts := tomlOptions(mtypef, e.annotation) if opts.include && (!opts.omitempty || !isZero(mvalf)) { val, err := e.valueToToml(mtypef.Type, mvalf) if err != nil { @@ -242,7 +337,26 @@ } } case reflect.Map: - for _, key := range mval.MapKeys() { + keys := mval.MapKeys() + if e.order == OrderPreserve && len(keys) > 0 { + // Sorting []reflect.Value is not straight forward. + // + // OrderPreserve will support deterministic results when string is used + // as the key to maps. 
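marshal.go gains an Order option on the Encoder: OrderAlphabetical stays the default, while OrderPreserve keeps struct-field order and, per the comment above, sorts string map keys for deterministic output. A hedged sketch of the option in use; the server struct and its values are invented for illustration:

```go
// Hypothetical struct; only the Order(toml.OrderPreserve) call is the new API.
package main

import (
    "bytes"
    "fmt"
    "log"

    "github.com/pelletier/go-toml"
)

type server struct {
    Port int    `toml:"port"`
    Addr string `toml:"addr"`
}

func main() {
    var buf bytes.Buffer
    enc := toml.NewEncoder(&buf).Order(toml.OrderPreserve)
    if err := enc.Encode(server{Port: 8080, Addr: "127.0.0.1"}); err != nil {
        log.Fatal(err)
    }
    // port is written before addr (declaration order), unlike the
    // alphabetical default.
    fmt.Print(buf.String())
}
```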
+ typ := keys[0].Type() + kind := keys[0].Kind() + if kind == reflect.String { + ikeys := make([]string, len(keys)) + for i := range keys { + ikeys[i] = keys[i].Interface().(string) + } + sort.Strings(ikeys) + for i := range ikeys { + keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) + } + } + } + for _, key := range keys { mvalf := mval.MapIndex(key) val, err := e.valueToToml(mtype.Elem(), mvalf) if err != nil { @@ -290,6 +404,7 @@ // Convert given marshal value to toml value func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + e.line++ if mtype.Kind() == reflect.Ptr { return e.valueToToml(mtype.Elem(), mval.Elem()) } @@ -298,15 +413,18 @@ return callCustomMarshaler(mval) case isTree(mtype): return e.valueToTree(mtype, mval) - case isTreeSlice(mtype): + case isTreeSequence(mtype): return e.valueToTreeSlice(mtype, mval) - case isOtherSlice(mtype): + case isOtherSequence(mtype): return e.valueToOtherSlice(mtype, mval) default: switch mtype.Kind() { case reflect.Bool: return mval.Bool(), nil case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { + return fmt.Sprint(mval), nil + } return mval.Int(), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return mval.Uint(), nil @@ -315,7 +433,7 @@ case reflect.String: return mval.String(), nil case reflect.Struct: - return mval.Interface().(time.Time), nil + return mval.Interface(), nil default: return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) } @@ -326,7 +444,7 @@ // Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for // sub-structs, and only definite types can be unmarshaled. func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t} + d := Decoder{tval: t, tagName: tagFieldName} return d.unmarshal(v) } @@ -334,8 +452,11 @@ // See Marshal() documentation for types mapping table. func (t *Tree) Marshal() ([]byte, error) { var buf bytes.Buffer - err := NewEncoder(&buf).Encode(t) - return buf.Bytes(), err + _, err := t.WriteTo(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil } // Unmarshal parses the TOML-encoded data and stores the result in the value @@ -347,6 +468,14 @@ // The following struct annotations are supported: // // toml:"Field" Overrides the field's name to map to. +// default:"foo" Provides a default value. +// +// For default values, only fields of the following types are supported: +// * string +// * bool +// * int +// * int64 +// * float64 // // See Marshal() documentation for types mapping table. func Unmarshal(data []byte, v interface{}) error { @@ -362,6 +491,7 @@ r io.Reader tval *Tree encOpts + tagName string } // NewDecoder returns a new decoder that reads from r. 
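The Unmarshal documentation above now advertises a default:"..." struct tag alongside toml:"Field", with defaults supported for string, bool, int, int64 and float64 fields. A hedged sketch of the documented behaviour; the config struct and values are invented:

```go
// Hypothetical config struct exercising the documented toml/default tags:
// keys present in the document win, missing keys fall back to the default tag.
package main

import (
    "fmt"
    "log"

    "github.com/pelletier/go-toml"
)

type config struct {
    Host string `toml:"host" default:"localhost"`
    Port int64  `toml:"port" default:"8080"`
}

func main() {
    var c config
    if err := toml.Unmarshal([]byte(`host = "example.org"`), &c); err != nil {
        log.Fatal(err)
    }
    fmt.Println(c.Host, c.Port) // example.org 8080
}
```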
@@ -369,6 +499,7 @@ return &Decoder{ r: r, encOpts: encOptsDefaults, + tagName: tagFieldName, } } @@ -385,13 +516,29 @@ return d.unmarshal(v) } +// SetTagName allows changing default tag "toml" +func (d *Decoder) SetTagName(v string) *Decoder { + d.tagName = v + return d +} + func (d *Decoder) unmarshal(v interface{}) error { mtype := reflect.TypeOf(v) - if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { - return errors.New("Only a pointer to struct can be unmarshaled from TOML") + if mtype.Kind() != reflect.Ptr { + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") } - sval, err := d.valueFromTree(mtype.Elem(), d.tval) + elem := mtype.Elem() + + switch elem.Kind() { + case reflect.Struct, reflect.Map: + default: + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + vv := reflect.ValueOf(v).Elem() + + sval, err := d.valueFromTree(elem, d.tval, &vv) if err != nil { return err } @@ -399,34 +546,92 @@ return nil } -// Convert toml tree to marshal struct or map, using marshal type -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { +// Convert toml tree to marshal struct or map, using marshal type. When mval1 +// is non-nil, merge fields into the given value instead of allocating a new one. +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) + return d.unwrapPointer(mtype, tval, mval1) } var mval reflect.Value switch mtype.Kind() { case reflect.Struct: - mval = reflect.New(mtype).Elem() + if mval1 != nil { + mval = *mval1 + } else { + mval = reflect.New(mtype).Elem() + } + for i := 0; i < mtype.NumField(); i++ { mtypef := mtype.Field(i) - opts := tomlOptions(mtypef) + an := annotation{tag: d.tagName} + opts := tomlOptions(mtypef, an) if opts.include { baseKey := opts.name - keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)} + keysToTry := []string{ + baseKey, + strings.ToLower(baseKey), + strings.ToTitle(baseKey), + strings.ToLower(string(baseKey[0])) + baseKey[1:], + } + + found := false for _, key := range keysToTry { exists := tval.Has(key) if !exists { continue } val := tval.Get(key) - mvalf, err := d.valueFromToml(mtypef.Type, val) + fval := mval.Field(i) + mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) if err != nil { return mval, formatError(err, tval.GetPosition(key)) } mval.Field(i).Set(mvalf) + found = true break } + + if !found && opts.defaultValue != "" { + mvalf := mval.Field(i) + var val interface{} + var err error + switch mvalf.Kind() { + case reflect.Bool: + val, err = strconv.ParseBool(opts.defaultValue) + if err != nil { + return mval.Field(i), err + } + case reflect.Int: + val, err = strconv.Atoi(opts.defaultValue) + if err != nil { + return mval.Field(i), err + } + case reflect.String: + val = opts.defaultValue + case reflect.Int64: + val, err = strconv.ParseInt(opts.defaultValue, 10, 64) + if err != nil { + return mval.Field(i), err + } + case reflect.Float64: + val, err = strconv.ParseFloat(opts.defaultValue, 64) + if err != nil { + return mval.Field(i), err + } + default: + return mval.Field(i), fmt.Errorf("unsuported field type for default option") + } + mval.Field(i).Set(reflect.ValueOf(val)) + } + + // save the old behavior above and try to check anonymous structs + if !found && opts.defaultValue == "" && mtypef.Anonymous && mtypef.Type.Kind() == reflect.Struct { + v, err 
:= d.valueFromTree(mtypef.Type, tval, nil) + if err != nil { + return v, err + } + mval.Field(i).Set(v) + } } } case reflect.Map: @@ -434,11 +639,11 @@ for _, key := range tval.Keys() { // TODO: path splits key val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val) + mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) if err != nil { return mval, formatError(err, tval.GetPosition(key)) } - mval.SetMapIndex(reflect.ValueOf(key), mvalf) + mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) } } return mval, nil @@ -448,7 +653,7 @@ func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { mval := reflect.MakeSlice(mtype, len(tval), len(tval)) for i := 0; i < len(tval); i++ { - val, err := d.valueFromTree(mtype.Elem(), tval[i]) + val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) if err != nil { return mval, err } @@ -461,7 +666,7 @@ func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { mval := reflect.MakeSlice(mtype, len(tval), len(tval)) for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i]) + val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) if err != nil { return mval, err } @@ -470,33 +675,63 @@ return mval, nil } -// Convert toml value to marshal value, using marshal type -func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { +// Convert toml value to marshal value, using marshal type. When mval1 is non-nil +// and the given type is a struct value, merge fields into it. +func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) + return d.unwrapPointer(mtype, tval, mval1) } - switch tval.(type) { + switch t := tval.(type) { case *Tree: + var mval11 *reflect.Value + if mtype.Kind() == reflect.Struct { + mval11 = mval1 + } + if isTree(mtype) { - return d.valueFromTree(mtype, tval.(*Tree)) + return d.valueFromTree(mtype, t, mval11) } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) case []*Tree: - if isTreeSlice(mtype) { - return d.valueFromTreeSlice(mtype, tval.([]*Tree)) + if isTreeSequence(mtype) { + return d.valueFromTreeSlice(mtype, t) } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) case []interface{}: - if isOtherSlice(mtype) { - return d.valueFromOtherSlice(mtype, tval.([]interface{})) + if isOtherSequence(mtype) { + return d.valueFromOtherSlice(mtype, t) } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) default: switch mtype.Kind() { case reflect.Bool, reflect.Struct: val := reflect.ValueOf(tval) - // if this passes for when mtype is reflect.Struct, tval is a time.Time + + switch val.Type() { + case localDateType: + localDate := val.Interface().(LocalDate) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil + } + case localDateTimeType: + localDateTime := val.Interface().(LocalDateTime) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date( + localDateTime.Date.Year, + localDateTime.Date.Month, + localDateTime.Date.Day, + localDateTime.Time.Hour, + localDateTime.Time.Minute, + localDateTime.Time.Second, + localDateTime.Time.Nanosecond, + time.Local)), nil + } + } + + // if this passes for when mtype is reflect.Struct, 
tval is a time.LocalTime if !val.Type().ConvertibleTo(mtype) { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } @@ -512,10 +747,17 @@ return val.Convert(mtype), nil case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: val := reflect.ValueOf(tval) + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { + d, err := time.ParseDuration(val.String()) + if err != nil { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err) + } + return reflect.ValueOf(d), nil + } if !val.Type().ConvertibleTo(mtype) { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) { + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(mtype).Int()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } @@ -525,10 +767,11 @@ if !val.Type().ConvertibleTo(mtype) { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if val.Int() < 0 { + + if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) { + if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Convert(mtype).Uint())) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } @@ -538,7 +781,7 @@ if !val.Type().ConvertibleTo(mtype) { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) { + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(mtype).Float()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } @@ -549,8 +792,15 @@ } } -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val, err := d.valueFromToml(mtype.Elem(), tval) +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + var melem *reflect.Value + + if mval1 != nil && !mval1.IsNil() && mtype.Elem().Kind() == reflect.Struct { + elem := mval1.Elem() + melem = &elem + } + + val, err := d.valueFromToml(mtype.Elem(), tval, melem) if err != nil { return reflect.ValueOf(nil), err } @@ -559,16 +809,25 @@ return mval, nil } -func tomlOptions(vf reflect.StructField) tomlOpts { - tag := vf.Tag.Get("toml") +func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { + tag := vf.Tag.Get(an.tag) parse := strings.Split(tag, ",") var comment string - if c := vf.Tag.Get("comment"); c != "" { + if c := vf.Tag.Get(an.comment); c != "" { comment = c } - commented, _ := strconv.ParseBool(vf.Tag.Get("commented")) - multiline, _ := strconv.ParseBool(vf.Tag.Get(tagKeyMultiline)) - result := tomlOpts{name: vf.Name, comment: comment, commented: commented, multiline: multiline, include: true, omitempty: false} + commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) + multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) + defaultValue := vf.Tag.Get(tagDefault) + result := tomlOpts{ + name: vf.Name, + comment: comment, + commented: commented, + multiline: multiline, + include: true, + omitempty: false, + 
defaultValue: defaultValue, + } if parse[0] != "" { if parse[0] == "-" && len(parse) == 1 { result.include = false diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic_lists] + floats = [12.3,45.6,78.9] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + ints = [8001,8001,8002] + uints = [5002,5003] + strings = ["One","Two","Three"] + +[[subdocptrs]] + name = "Second" + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.second] + name = "Second" + + [subdoc.first] + name = "First" + +[basic] + uint = 5001 + bool = true + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + date = 1979-05-27T07:32:00Z + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/marshal_test.toml --- a/vendor/github.com/pelletier/go-toml/marshal_test.toml Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml Sun Feb 16 18:54:01 2020 +0100 @@ -4,6 +4,7 @@ bool = true date = 1979-05-27T07:32:00Z float = 123.4 + float64 = 123.456782132399 int = 5000 string = "Bite me" uint = 5001 diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/parser.go --- a/vendor/github.com/pelletier/go-toml/parser.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/parser.go Sun Feb 16 18:54:01 2020 +0100 @@ -77,8 +77,10 @@ return p.parseAssign case tokenEOF: return nil + case tokenError: + p.raiseError(tok, "parsing error: %s", tok.String()) default: - p.raiseError(tok, "unexpected token") + p.raiseError(tok, "unexpected token %s", tok.typ) } return nil } @@ -165,6 +167,11 @@ key := p.getToken() p.assume(tokenEqual) + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err.Error()) + } + value := p.parseRvalue() var tableKey []string if len(p.currentTable) > 0 { @@ -173,6 +180,9 @@ tableKey = []string{} } + prefixKey := parsedKey[0 : len(parsedKey)-1] + tableKey = append(tableKey, prefixKey...) 
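parseAssign now runs each key through parseKey and treats everything before the last dot as a table prefix, creating intermediate tables when they do not exist yet (the rest of the hunk follows below). A hedged sketch of the dotted-key assignments this lets the parser accept; the document text is invented:

```go
// Illustrative only: dotted keys in plain assignments, which the updated
// parseAssign handles by creating the intermediate "physical" table.
package main

import (
    "fmt"
    "log"

    "github.com/pelletier/go-toml"
)

func main() {
    tree, err := toml.Load(`
physical.color = "orange"
physical.shape = "round"
`)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(tree.Get("physical.color")) // orange
    fmt.Println(tree.Get("physical.shape")) // round
}
```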
+ // find the table to assign, looking out for arrays of tables var targetNode *Tree switch node := p.tree.GetPath(tableKey).(type) { @@ -180,17 +190,19 @@ targetNode = node[len(node)-1] case *Tree: targetNode = node + case nil: + // create intermediate + if err := p.tree.createSubTree(tableKey, key.Position); err != nil { + p.raiseError(key, "could not create intermediate group: %s", err) + } + targetNode = p.tree.GetPath(tableKey).(*Tree) default: p.raiseError(key, "Unknown table type for path: %s", strings.Join(tableKey, ".")) } // assign value to the found table - keyVals := []string{key.val} - if len(keyVals) != 1 { - p.raiseError(key, "Invalid key") - } - keyVal := keyVals[0] + keyVal := parsedKey[len(parsedKey)-1] localKey := []string{keyVal} finalKey := append(tableKey, keyVal) if targetNode.GetPath(localKey) != nil { @@ -301,7 +313,41 @@ } return val case tokenDate: - val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC) + layout := time.RFC3339Nano + if !strings.Contains(tok.val, "T") { + layout = strings.Replace(layout, "T", " ", 1) + } + val, err := time.ParseInLocation(layout, tok.val, time.UTC) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLocalDate: + v := strings.Replace(tok.val, " ", "T", -1) + isDateTime := false + isTime := false + for _, c := range v { + if c == 'T' || c == 't' { + isDateTime = true + break + } + if c == ':' { + isTime = true + break + } + } + + var val interface{} + var err error + + if isDateTime { + val, err = ParseLocalDateTime(v) + } else if isTime { + val, err = ParseLocalTime(v) + } else { + val, err = ParseLocalDate(v) + } + if err != nil { p.raiseError(tok, "%s", err) } @@ -338,18 +384,21 @@ case tokenRightCurlyBrace: p.getToken() break Loop - case tokenKey: + case tokenKey, tokenInteger, tokenString: if !tokenIsComma(previous) && previous != nil { p.raiseError(follow, "comma expected between fields in inline table") } key := p.getToken() p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err) + } + value := p.parseRvalue() - tree.Set(key.val, value) + tree.SetPath(parsedKey, value) case tokenComma: - if previous == nil { - p.raiseError(follow, "inline table cannot start with a comma") - } if tokenIsComma(previous) { p.raiseError(follow, "need field between two commas in inline table") } diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/test.sh --- a/vendor/github.com/pelletier/go-toml/test.sh Wed Sep 18 19:17:42 2019 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,88 +0,0 @@ -#!/bin/bash -# fail out of the script if anything here fails -set -e -set -o pipefail - -# set the path to the present working directory -export GOPATH=`pwd` - -function git_clone() { - path=$1 - branch=$2 - version=$3 - if [ ! 
-d "src/$path" ]; then - mkdir -p src/$path - git clone https://$path.git src/$path - fi - pushd src/$path - git checkout "$branch" - git reset --hard "$version" - popd -} - -# Remove potential previous runs -rm -rf src test_program_bin toml-test - -go get github.com/pelletier/go-buffruneio -go get github.com/davecgh/go-spew/spew -go get gopkg.in/yaml.v2 -go get github.com/BurntSushi/toml - -# get code for BurntSushi TOML validation -# pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize) -git_clone github.com/BurntSushi/toml master HEAD -git_clone github.com/BurntSushi/toml-test master HEAD #was: 0.2.0 HEAD - -# build the BurntSushi test application -go build -o toml-test github.com/BurntSushi/toml-test - -# vendorize the current lib for testing -# NOTE: this basically mocks an install without having to go back out to github for code -mkdir -p src/github.com/pelletier/go-toml/cmd -mkdir -p src/github.com/pelletier/go-toml/query -cp *.go *.toml src/github.com/pelletier/go-toml -cp -R cmd/* src/github.com/pelletier/go-toml/cmd -cp -R query/* src/github.com/pelletier/go-toml/query -go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go - -# Run basic unit tests -go test github.com/pelletier/go-toml -covermode=count -coverprofile=coverage.out -go test github.com/pelletier/go-toml/cmd/tomljson -go test github.com/pelletier/go-toml/query - -# run the entire BurntSushi test suite -if [[ $# -eq 0 ]] ; then - echo "Running all BurntSushi tests" - ./toml-test ./test_program_bin | tee test_out -else - # run a specific test - test=$1 - test_path='src/github.com/BurntSushi/toml-test/tests' - valid_test="$test_path/valid/$test" - invalid_test="$test_path/invalid/$test" - - if [ -e "$valid_test.toml" ]; then - echo "Valid Test TOML for $test:" - echo "====" - cat "$valid_test.toml" - - echo "Valid Test JSON for $test:" - echo "====" - cat "$valid_test.json" - - echo "Go-TOML Output for $test:" - echo "====" - cat "$valid_test.toml" | ./test_program_bin - fi - - if [ -e "$invalid_test.toml" ]; then - echo "Invalid Test TOML for $test:" - echo "====" - cat "$invalid_test.toml" - - echo "Go-TOML Output for $test:" - echo "====" - echo "go-toml Output:" - cat "$invalid_test.toml" | ./test_program_bin - fi -fi diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/token.go --- a/vendor/github.com/pelletier/go-toml/token.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/token.go Sun Feb 16 18:54:01 2020 +0100 @@ -2,7 +2,6 @@ import ( "fmt" - "strconv" "unicode" ) @@ -35,6 +34,7 @@ tokenDoubleLeftBracket tokenDoubleRightBracket tokenDate + tokenLocalDate tokenKeyGroup tokenKeyGroupArray tokenComma @@ -68,7 +68,8 @@ ")", "]]", "[[", - "Date", + "LocalDate", + "LocalDate", "KeyGroup", "KeyGroupArray", ",", @@ -95,14 +96,6 @@ return "Unknown" } -func (t token) Int() int { - if result, err := strconv.Atoi(t.val); err != nil { - panic(err) - } else { - return result - } -} - func (t token) String() string { switch t.typ { case tokenEOF: diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/toml.go --- a/vendor/github.com/pelletier/go-toml/toml.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/toml.go Sun Feb 16 18:54:01 2020 +0100 @@ -27,9 +27,13 @@ } func newTree() *Tree { + return newTreeWithPosition(Position{}) +} + +func newTreeWithPosition(pos Position) *Tree { return &Tree{ values: make(map[string]interface{}), - position: Position{}, + position: pos, } } 
@@ -194,10 +198,10 @@ // formatting instructions to the key, that will be reused by Marshal(). func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { + for i, intermediateKey := range keys[:len(keys)-1] { nextTree, exists := subtree.values[intermediateKey] if !exists { - nextTree = newTree() + nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) subtree.values[intermediateKey] = nextTree // add new element here } switch node := nextTree.(type) { @@ -207,7 +211,7 @@ // go to most recent element if len(node) == 0 { // create element if it does not exist - subtree.values[intermediateKey] = append(node, newTree()) + subtree.values[intermediateKey] = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) } subtree = node[len(node)-1] } @@ -215,19 +219,21 @@ var toInsert interface{} - switch value.(type) { + switch v := value.(type) { case *Tree: - tt := value.(*Tree) - tt.comment = opts.Comment + v.comment = opts.Comment toInsert = value case []*Tree: toInsert = value case *tomlValue: - tt := value.(*tomlValue) - tt.comment = opts.Comment - toInsert = tt + v.comment = opts.Comment + toInsert = v default: - toInsert = &tomlValue{value: value, comment: opts.Comment, commented: opts.Commented, multiline: opts.Multiline} + toInsert = &tomlValue{value: value, + comment: opts.Comment, + commented: opts.Commented, + multiline: opts.Multiline, + position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} } subtree.values[keys[len(keys)-1]] = toInsert @@ -256,44 +262,35 @@ // SetPathWithComment is the same as SetPath, but allows you to provide comment // information to the key, that will be reused by Marshal(). func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTree() - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - subtree.values[intermediateKey] = append(node, newTree()) - } - subtree = node[len(node)-1] - } + t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) +} + +// Delete removes a key from the tree. +// Key is a dot-separated path (e.g. a.b.c). +func (t *Tree) Delete(key string) error { + keys, err := parseKey(key) + if err != nil { + return err } + return t.DeletePath(keys) +} - var toInsert interface{} - - switch value.(type) { +// DeletePath removes a key from the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). 
+func (t *Tree) DeletePath(keys []string) error { + keyLen := len(keys) + if keyLen == 1 { + delete(t.values, keys[0]) + return nil + } + tree := t.GetPath(keys[:keyLen-1]) + item := keys[keyLen-1] + switch node := tree.(type) { case *Tree: - tt := value.(*Tree) - tt.comment = comment - toInsert = value - case []*Tree: - toInsert = value - case *tomlValue: - tt := value.(*tomlValue) - tt.comment = comment - toInsert = tt - default: - toInsert = &tomlValue{value: value, comment: comment, commented: commented} + delete(node.values, item) + return nil } - - subtree.values[keys[len(keys)-1]] = toInsert + return errors.New("no such key to delete") } // createSubTree takes a tree and a key and create the necessary intermediate @@ -305,10 +302,10 @@ // Returns nil on success, error object on failure func (t *Tree) createSubTree(keys []string, pos Position) error { subtree := t - for _, intermediateKey := range keys { + for i, intermediateKey := range keys { nextTree, exists := subtree.values[intermediateKey] if !exists { - tree := newTree() + tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) tree.position = pos subtree.values[intermediateKey] = tree nextTree = tree @@ -337,10 +334,39 @@ err = errors.New(r.(string)) } }() + + if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { + b = b[4:] + } else if len(b) >= 3 && hasUTF8BOM3(b) { + b = b[3:] + } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { + b = b[2:] + } + tree = parseToml(lexToml(b)) return } +func hasUTF16BigEndianBOM2(b []byte) bool { + return b[0] == 0xFE && b[1] == 0xFF +} + +func hasUTF16LittleEndianBOM2(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE +} + +func hasUTF8BOM3(b []byte) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +func hasUTF32BigEndianBOM4(b []byte) bool { + return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF +} + +func hasUTF32LittleEndianBOM4(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 +} + // LoadReader creates a Tree from any io.Reader. func LoadReader(reader io.Reader) (tree *Tree, err error) { inputBytes, err := ioutil.ReadAll(reader) diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pelletier/go-toml/tomltree_write.go --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go Sun Feb 16 18:54:01 2020 +0100 @@ -5,6 +5,7 @@ "fmt" "io" "math" + "math/big" "reflect" "sort" "strconv" @@ -12,6 +13,18 @@ "time" ) +type valueComplexity int + +const ( + valueSimple valueComplexity = iota + 1 + valueComplex +) + +type sortNode struct { + key string + complexity valueComplexity +} + // Encodes a string to a TOML-compliant multi-line string value // This function is a clone of the existing encodeTomlString function, except that whitespace characters // are preserved. Quotation marks and backslashes are also not escaped. @@ -94,12 +107,20 @@ case int64: return strconv.FormatInt(value, 10), nil case float64: - // Ensure a round float does contain a decimal point. Otherwise feeding - // the output back to the parser would convert to an integer. 
+ // Default bit length is full 64 + bits := 64 + // Float panics if nan is used + if !math.IsNaN(value) { + // if 32 bit accuracy is enough to exactly show, use 32 + _, acc := big.NewFloat(value).Float32() + if acc == big.Exact { + bits = 32 + } + } if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil case string: if tv.multiline { return "\"\"\"\n" + encodeMultilineTomlString(value) + "\"\"\"", nil @@ -115,6 +136,12 @@ return "false", nil case time.Time: return value.Format(time.RFC3339), nil + case LocalDate: + return value.String(), nil + case LocalDateTime: + return value.String(), nil + case LocalTime: + return value.String(), nil case nil: return "", nil } @@ -153,117 +180,233 @@ return "", fmt.Errorf("unsupported value type %T: %v", v, v) } -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - simpleValuesKeys := make([]string, 0) - complexValuesKeys := make([]string, 0) +func getTreeArrayLine(trees []*Tree) (line int) { + // get lowest line number that is not 0 + for _, tv := range trees { + if tv.position.Line < line || line == 0 { + line = tv.position.Line + } + } + return +} + +func sortByLines(t *Tree) (vals []sortNode) { + var ( + line int + lines []int + tv *Tree + tom *tomlValue + node sortNode + ) + vals = make([]sortNode, 0) + m := make(map[int]sortNode) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree: + tv = v.(*Tree) + line = tv.position.Line + node = sortNode{key: k, complexity: valueComplex} + case []*Tree: + line = getTreeArrayLine(v.([]*Tree)) + node = sortNode{key: k, complexity: valueComplex} + default: + tom = v.(*tomlValue) + line = tom.position.Line + node = sortNode{key: k, complexity: valueSimple} + } + lines = append(lines, line) + vals = append(vals, node) + m[line] = node + } + sort.Ints(lines) + + for i, line := range lines { + vals[i] = m[line] + } + + return vals +} + +func sortAlphabetical(t *Tree) (vals []sortNode) { + var ( + node sortNode + simpVals []string + compVals []string + ) + vals = make([]sortNode, 0) + m := make(map[string]sortNode) for k := range t.values { v := t.values[k] switch v.(type) { case *Tree, []*Tree: - complexValuesKeys = append(complexValuesKeys, k) + node = sortNode{key: k, complexity: valueComplex} + compVals = append(compVals, node.key) default: - simpleValuesKeys = append(simpleValuesKeys, k) + node = sortNode{key: k, complexity: valueSimple} + simpVals = append(simpVals, node.key) } + vals = append(vals, node) + m[node.key] = node + } + + // Simples first to match previous implementation + sort.Strings(simpVals) + i := 0 + for _, key := range simpVals { + vals[i] = m[key] + i++ + } + + sort.Strings(compVals) + for _, key := range compVals { + vals[i] = m[key] + i++ + } + + return vals +} + +func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { + return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical) +} + +func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder) (int64, error) { + var orderedVals []sortNode + + switch ord { + case OrderPreserve: + orderedVals = 
sortByLines(t) + default: + orderedVals = sortAlphabetical(t) } - sort.Strings(simpleValuesKeys) - sort.Strings(complexValuesKeys) - - for _, k := range simpleValuesKeys { - v, ok := t.values[k].(*tomlValue) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } + for _, node := range orderedVals { + switch node.complexity { + case valueComplex: + k := node.key + v := t.values[k] - repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" + combinedKey := k + if keyspace != "" { + combinedKey = keyspace + "." + combinedKey } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc + var commented string + if t.commented { + commented = "# " } - } - var commented string - if v.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - } - - for _, k := range complexValuesKeys { - v := t.values[k] + switch node := v.(type) { + // node has to be of those two types given how keys are sorted above + case *Tree: + tv, ok := t.values[k].(*Tree) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + if tv.comment != "" { + comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + bytesCount, err = node.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord) + if err != nil { + return bytesCount, err + } + case []*Tree: + for _, subTree := range node { + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } - combinedKey := k - if keyspace != "" { - combinedKey = keyspace + "." 
+ combinedKey - } - var commented string - if t.commented { - commented = "# " - } - - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) + bytesCount, err = subTree.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord) + if err != nil { + return bytesCount, err + } + } + } + default: // Simple + k := node.key + v, ok := t.values[k].(*tomlValue) if !ok { return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + + repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + + if v.comment != "" { + comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) start := "# " if strings.HasPrefix(comment, "#") { start = "" } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") bytesCount += int64(writtenBytesCountComment) if errc != nil { return bytesCount, errc } } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + + var commented string + if v.commented { + commented = "# " + } + quotedKey := quoteKeyIfNeeded(k) + writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") bytesCount += int64(writtenBytesCount) if err != nil { return bytesCount, err } - bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - - bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - } } } return bytesCount, nil } +// quote a key if it does not fit the bare key format (A-Za-z0-9_-) +// quoted keys use the same rules as strings +func quoteKeyIfNeeded(k string) string { + // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain + // keys that have already been quoted. + // not an ideal situation, but good enough of a stop gap. + if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { + return k + } + isBare := true + for _, r := range k { + if !isValidBareChar(r) { + isBare = false + break + } + } + if isBare { + return k + } + return quoteKey(k) +} + +func quoteKey(k string) string { + return "\"" + encodeTomlString(k) + "\"" +} + func writeStrings(w io.Writer, s ...string) (int, error) { var n int for i := range s { @@ -286,12 +429,11 @@ // Output spans multiple lines, and is suitable for ingest by a TOML parser. // If the conversion cannot be performed, ToString returns a non-nil error. func (t *Tree) ToTomlString() (string, error) { - var buf bytes.Buffer - _, err := t.WriteTo(&buf) + b, err := t.Marshal() if err != nil { return "", err } - return buf.String(), nil + return string(b), nil } // String generates a human-readable representation of the current tree. 
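Beyond the writer changes above, this go-toml bump also brings in Tree.Delete/Tree.DeletePath and reimplements ToTomlString on top of Marshal(). A small, hedged usage sketch (the table and key names are made up):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`
[database]
user = "admin"
password = "hunter2"
`)
	if err != nil {
		panic(err)
	}

	// Delete takes a dot-separated path; DeletePath takes the same path
	// as a slice of elements.
	if err := tree.Delete("database.password"); err != nil {
		panic(err)
	}

	// ToTomlString is now a thin wrapper around Marshal().
	out, err := tree.ToTomlString()
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}
```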
diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pkg/errors/.travis.yml --- a/vendor/github.com/pkg/errors/.travis.yml Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pkg/errors/.travis.yml Sun Feb 16 18:54:01 2020 +0100 @@ -1,15 +1,10 @@ language: go go_import_path: github.com/pkg/errors go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - 1.11.x + - 1.12.x + - 1.13.x - tip script: - - go test -v ./... + - make check diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pkg/errors/Makefile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pkg/errors/Makefile Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,44 @@ +PKGS := github.com/pkg/errors +SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) +GO := go + +check: test vet gofmt misspell unconvert staticcheck ineffassign unparam + +test: + $(GO) test $(PKGS) + +vet: | test + $(GO) vet $(PKGS) + +staticcheck: + $(GO) get honnef.co/go/tools/cmd/staticcheck + staticcheck -checks all $(PKGS) + +misspell: + $(GO) get github.com/client9/misspell/cmd/misspell + misspell \ + -locale GB \ + -error \ + *.md *.go + +unconvert: + $(GO) get github.com/mdempsky/unconvert + unconvert -v $(PKGS) + +ineffassign: + $(GO) get github.com/gordonklaus/ineffassign + find $(SRCDIRS) -name '*.go' | xargs ineffassign + +pedantic: check errcheck + +unparam: + $(GO) get mvdan.cc/unparam + unparam ./... + +errcheck: + $(GO) get github.com/kisielk/errcheck + errcheck $(PKGS) + +gofmt: + @echo Checking code is gofmted + @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pkg/errors/README.md --- a/vendor/github.com/pkg/errors/README.md Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pkg/errors/README.md Sun Feb 16 18:54:01 2020 +0100 @@ -41,11 +41,18 @@ [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). +## Roadmap + +With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: + +- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) +- 1.0. Final release. + ## Contributing -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. +Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. -Before proposing a change, please discuss your change by raising an issue. +Before sending a PR, please discuss your change by raising an issue. ## License diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pkg/errors/errors.go --- a/vendor/github.com/pkg/errors/errors.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pkg/errors/errors.go Sun Feb 16 18:54:01 2020 +0100 @@ -82,7 +82,7 @@ // // if err, ok := err.(stackTracer); ok { // for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) +// fmt.Printf("%+s:%d\n", f, f) // } // } // @@ -159,6 +159,9 @@ func (w *withStack) Cause() error { return w.error } +// Unwrap provides compatibility for Go 1.13 error chains. 
+func (w *withStack) Unwrap() error { return w.error } + func (w *withStack) Format(s fmt.State, verb rune) { switch verb { case 'v': @@ -241,6 +244,9 @@ func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } func (w *withMessage) Cause() error { return w.cause } +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withMessage) Unwrap() error { return w.cause } + func (w *withMessage) Format(s fmt.State, verb rune) { switch verb { case 'v': diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pkg/errors/go113.go --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/pkg/errors/go113.go Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,38 @@ +// +build go1.13 + +package errors + +import ( + stderrors "errors" +) + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { return stderrors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +func As(err error, target interface{}) bool { return stderrors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return stderrors.Unwrap(err) +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/pkg/errors/stack.go --- a/vendor/github.com/pkg/errors/stack.go Wed Sep 18 19:17:42 2019 +0200 +++ b/vendor/github.com/pkg/errors/stack.go Sun Feb 16 18:54:01 2020 +0100 @@ -5,10 +5,13 @@ "io" "path" "runtime" + "strconv" "strings" ) // Frame represents a program counter inside a stack frame. +// For historical reasons if Frame is interpreted as a uintptr +// its value represents the program counter + 1. type Frame uintptr // pc returns the program counter for this frame; @@ -37,6 +40,15 @@ return line } +// name returns the name of this function, if known. +func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + // Format formats the frame according to the fmt.Formatter interface. 
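The errors.go and go113.go additions above are the notable part of the pkg/errors 0.9.x bump: wrapped errors now expose Unwrap and therefore participate in Go 1.13 error chains. A minimal sketch (the sentinel and messages are invented), relying on the go 1.13 directive this changeset adds to go.mod:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = errors.New("not found")

func loadConfig() error {
	// Wrap still records a stack trace and a message; the new Unwrap
	// methods let errors.Is/errors.As see through the wrapping.
	return errors.Wrap(errNotFound, "loading config")
}

func main() {
	err := loadConfig()
	fmt.Println(errors.Is(err, errNotFound))      // true (Go 1.13 style)
	fmt.Println(errors.Cause(err) == errNotFound) // true (pre-1.13 style still works)
}
```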
// // %s source file @@ -54,22 +66,16 @@ case 's': switch { case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) default: io.WriteString(s, path.Base(f.file())) } case 'd': - fmt.Fprintf(s, "%d", f.line()) + io.WriteString(s, strconv.Itoa(f.line())) case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) + io.WriteString(s, funcname(f.name())) case 'v': f.Format(s, 's') io.WriteString(s, ":") @@ -77,6 +83,16 @@ } } +// MarshalText formats a stacktrace Frame as a text string. The output is the +// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. +func (f Frame) MarshalText() ([]byte, error) { + name := f.name() + if name == "unknown" { + return []byte(name), nil + } + return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil +} + // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame @@ -94,18 +110,32 @@ switch { case s.Flag('+'): for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) + io.WriteString(s, "\n") + f.Format(s, verb) } case s.Flag('#'): fmt.Fprintf(s, "%#v", []Frame(st)) default: - fmt.Fprintf(s, "%v", []Frame(st)) + st.formatSlice(s, verb) } case 's': - fmt.Fprintf(s, "%s", []Frame(st)) + st.formatSlice(s, verb) } } +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. +func (st StackTrace) formatSlice(s fmt.State, verb rune) { + io.WriteString(s, "[") + for i, f := range st { + if i > 0 { + io.WriteString(s, " ") + } + f.Format(s, verb) + } + io.WriteString(s, "]") +} + // stack represents a stack of program counters. type stack []uintptr diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/russross/blackfriday/.gitignore --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/russross/blackfriday/.gitignore Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,8 @@ +*.out +*.swp +*.8 +*.6 +_obj +_test* +markdown +tags diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/russross/blackfriday/.travis.yml --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/russross/blackfriday/.travis.yml Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - "1.9.x" + - "1.10.x" + - tip +matrix: + fast_finish: true + allow_failures: + - go: tip +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/russross/blackfriday/LICENSE.txt --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/russross/blackfriday/LICENSE.txt Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. 
Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/russross/blackfriday/README.md --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/russross/blackfriday/README.md Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,369 @@ +Blackfriday +[![Build Status][BuildSVG]][BuildURL] +[![Godoc][GodocV2SVG]][GodocV2URL] +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. + +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with any modern Go release. With Go and git installed: + + go get -u gopkg.in/russross/blackfriday.v2 + +will download, compile, and install the package into your `$GOPATH` directory +hierarchy. + + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the +documentation is available at +https://godoc.org/gopkg.in/russross/blackfriday.v2. + +It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, +but we highly recommend using package management tool like [dep][7] or +[Glide][8] and make use of semantic versioning. With package management you +should import `github.com/russross/blackfriday` and specify that you're using +version 2.0.0. + +Version 2 offers a number of improvements over v1: + +* Cleaned up API +* A separate call to [`Parse`][4], which produces an abstract syntax tree for + the document +* Latest bug fixes +* Flexibility to easily add your own rendering extensions + +Potential drawbacks: + +* Our benchmarks show v2 to be slightly slower than v1. Currently in the + ballpark of around 15%. +* API breakage. If you can't afford modifying your code to adhere to the new API + and don't care too much about the new features, v2 is probably not for you. +* Several bug fixes are trailing behind and still need to be forward-ported to + v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for + tracking. + +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. 
Documentation for the legacy v1 can be found +here: https://godoc.org/github.com/russross/blackfriday + +### Known issue with `dep` + +There is a known problem with using Blackfriday v1 _transitively_ and `dep`. +Currently `dep` prioritizes semver versions over anything else, and picks the +latest one, plus it does not apply a `[[constraint]]` specifier to transitively +pulled in packages. So if you're using something that uses Blackfriday v1, but +that something does not use `dep` yet, you will get Blackfriday v2 pulled in and +your first dependency will fail to build. + +There are couple of fixes for it, documented here: +https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version + +Meanwhile, `dep` team is working on a more general solution to the constraints +on transitive dependencies problem: https://github.com/golang/dep/issues/1124. + + +Usage +----- + +### v1 + +For basic usage, it is as simple as getting your input into a byte +slice and calling: + + output := blackfriday.MarkdownBasic(input) + +This renders it with no extensions enabled. To get a more useful +feature set, use this instead: + + output := blackfriday.MarkdownCommon(input) + +### v2 + +For the most sensible markdown processing, it is as simple as getting your input +into a byte slice and calling: + +```go +output := blackfriday.Run(input) +``` + +Your input will be parsed and the output rendered with a set of most popular +extensions enabled. If you want the most basic feature set, corresponding with +the bare Markdown specification, use: + +```go +output := blackfriday.Run(input, blackfriday.WithNoExtensions()) +``` + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running Blackfriday's output +through HTML sanitizer such as [Bluemonday][5]. + +Here's an example of simple usage of Blackfriday together with Bluemonday: + +```go +import ( + "github.com/microcosm-cc/bluemonday" + "gopkg.in/russross/blackfriday.v2" +) + +// ... +unsafe := blackfriday.Run(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options, v1 + +If you want to customize the set of options, first get a renderer +(currently only the HTML output engine), then use it to +call the more general `Markdown` function. For examples, see the +implementations of `MarkdownBasic` and `MarkdownCommon` in +`markdown.go`. + +### Custom options, v2 + +If you want to customize the set of options, use `blackfriday.WithExtensions`, +`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. + +### `blackfriday-tool` + +You can also check out `blackfriday-tool` for a more complete example +of how to use it. Download and install it using: + + go get github.com/russross/blackfriday-tool + +This is a simple command-line tool that allows you to process a +markdown file using a standalone program. You can also browse the +source directly on github if you are just looking for some example +code: + +* + +Note that if you have not already done so, installing +`blackfriday-tool` will be sufficient to download and install +blackfriday in addition to the tool itself. The tool binary will be +installed in `$GOPATH/bin`. This is a statically-linked binary that +can be copied to wherever you need it without worrying about +dependencies and library versions. 
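The "Custom options, v1" section above describes the renderer-plus-Markdown flow without code. A hedged sketch against the v1 API vendored here (the flag choices are arbitrary examples, not a recommendation):

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	input := []byte("Hello, *world*!\n")

	// Build an HTML renderer, then call the general Markdown entry point
	// with an explicit extension set, as the v1 section above describes.
	htmlFlags := blackfriday.HTML_USE_XHTML | blackfriday.HTML_USE_SMARTYPANTS
	renderer := blackfriday.HtmlRenderer(htmlFlags, "", "")

	extensions := blackfriday.EXTENSION_TABLES | blackfriday.EXTENSION_FENCED_CODE
	output := blackfriday.Markdown(input, renderer, extensions)

	fmt.Println(string(output))
}
```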
+ +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. +This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. + + +Features +-------- + +All features of Sundown are supported, including: + +* **Compatibility**. The Markdown v1.0.3 test suite passes with + the `--tidy` option. Without `--tidy`, the differences are + mostly in whitespace and entity escaping, where blackfriday is + more consistent and cleaner. + +* **Common extensions**, including table support, fenced code + blocks, autolinks, strikethroughs, non-strict emphasis, etc. + +* **Safety**. Blackfriday is paranoid when parsing, making it safe + to feed untrusted user input without fear of bad things + happening. The test suite stress tests this and there are no + known inputs that make it crash. If you find one, please let me + know and send me the input that does it. + + NOTE: "safety" in this context means *runtime safety only*. In order to + protect yourself against JavaScript injection in untrusted content, see + [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). + +* **Fast processing**. It is fast enough to render on-demand in + most web applications without having to cache the output. + +* **Thread safety**. You can run multiple parsers in different + goroutines without ill effect. There is no dependence on global + shared state. + +* **Minimal dependencies**. Blackfriday only depends on standard + library packages in Go. The source code is pretty + self-contained, so it is easy to add to any project, including + Google App Engine projects. + +* **Standards compliant**. Output successfully validates using the + W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. + + +Extensions +---------- + +In addition to the standard markdown syntax, this package +implements the following extensions: + +* **Intra-word emphasis supression**. The `_` character is + commonly used inside words when discussing code, so having + markdown interpret it as an emphasis command is usually the + wrong thing. Blackfriday lets you treat all emphasis markers as + normal characters when they occur inside a word. + +* **Tables**. Tables can be created by drawing them in the input + using a simple syntax: + + ``` + Name | Age + --------|------ + Bob | 27 + Alice | 23 + ``` + +* **Fenced code blocks**. In addition to the normal 4-space + indentation to mark code blocks, you can explicitly mark them + and supply a language (to make syntax highlighting simple). Just + mark it like this: + + ``` go + func getTrue() bool { + return true + } + ``` + + You can use 3 or more backticks to mark the beginning of the + block, and the same number to mark the end of the block. 
+ + To preserve classes of fenced code blocks while using the bluemonday + HTML sanitizer, use the following policy: + + ``` go + p := bluemonday.UGCPolicy() + p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") + html := p.SanitizeBytes(unsafe) + ``` + +* **Definition lists**. A simple definition list is made of a single-line + term followed by a colon and the definition for that term. + + Cat + : Fluffy animal everyone likes + + Internet + : Vector of transmission for pictures of cats + + Terms must be separated from the previous definition by a blank line. + +* **Footnotes**. A marker in the text that will become a superscript number; + a footnote definition that will be placed in a list of footnotes at the + end of the document. A footnote looks like this: + + This is a footnote.[^1] + + [^1]: the footnote text. + +* **Autolinking**. Blackfriday can find URLs that have not been + explicitly marked as links and turn them into links. + +* **Strikethrough**. Use two tildes (`~~`) to mark text that + should be crossed out. + +* **Hard line breaks**. With this extension enabled (it is off by + default in the `MarkdownBasic` and `MarkdownCommon` convenience + functions), newlines in the input translate into line breaks in + the output. + +* **Smart quotes**. Smartypants-style punctuation substitution is + supported, turning normal double- and single-quote marks into + curly quotes, etc. + +* **LaTeX-style dash parsing** is an additional option, where `--` + is translated into `–`, and `---` is translated into + `—`. This differs from most smartypants processors, which + turn a single hyphen into an ndash and a double hyphen into an + mdash. + +* **Smart fractions**, where anything that looks like a fraction + is translated into suitable HTML (instead of just a few special + cases like most smartypant processors). For example, `4/5` + becomes `45`, which renders as + 45. + + +Other renderers +--------------- + +Blackfriday is structured to allow alternative rendering engines. Here +are a few of note: + +* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): + provides a GitHub Flavored Markdown renderer with fenced code block + highlighting, clickable heading anchor links. + + It's not customizable, and its goal is to produce HTML output + equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), + except the rendering is performed locally. + +* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, + but for markdown. + +* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): + renders output as LaTeX. + +* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience + integration with the [Chroma](https://github.com/alecthomas/chroma) code + highlighting library. bfchroma is only compatible with v2 of Blackfriday and + provides a drop-in renderer ready to use with Blackfriday, as well as + options and means for further customization. + + +TODO +---- + +* More unit testing +* Improve Unicode support. It does not understand all Unicode + rules (about what constitutes a letter, a punctuation symbol, + etc.), so it may fail to detect word boundaries correctly in + some instances. It is safe on all UTF-8 input. 
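The "Sanitized anchor names" section earlier in this README names SanitizedAnchorName without an example; a one-line sketch of the call (the heading text is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	// Produces the same anchor that EXTENSION_AUTO_HEADER_IDS would
	// generate for a heading with this text.
	fmt.Println(blackfriday.SanitizedAnchorName("Custom options, v1")) // custom-options-v1
}
```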
+ + +License +------- + +[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) + + + [1]: https://daringfireball.net/projects/markdown/ "Markdown" + [2]: https://golang.org/ "Go Language" + [3]: https://github.com/vmg/sundown "Sundown" + [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" + [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" + [6]: https://labix.org/gopkg.in "gopkg.in" + [7]: https://github.com/golang/dep/ "dep" + [8]: https://github.com/Masterminds/glide "Glide" + + [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master + [BuildURL]: https://travis-ci.org/russross/blackfriday + [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg + [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2 diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/russross/blackfriday/block.go --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/russross/blackfriday/block.go Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,1474 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + "strings" + "unicode" +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *parser) block(out *bytes.Buffer, data []byte) { + if len(data) == 0 || data[len(data)-1] != '\n' { + panic("block input is missing terminating newline") + } + + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed header: + // + // # Header 1 + // ## Header 2 + // ... + // ###### Header 6 + if p.isPrefixHeader(data) { + data = data[p.prefixHeader(out, data):] + continue + } + + // block of preformatted HTML: + // + //
+ // <div> + // ... + // </div>
+ if data[0] == '<' { + if i := p.html(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.flags&EXTENSION_TITLEBLOCK != 0 { + if data[0] == '%' { + if i := p.titleBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(out, data):] + continue + } + + // fenced code block: + // + // ``` go info string here + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.r.HRule(out) + var i int + for i = 0; data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(out, data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.flags&EXTENSION_TABLES != 0 { + if i := p.table(out, data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(out, data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_ORDERED):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headers, too + data = data[p.paragraph(out, data):] + } + + p.nesting-- +} + +func (p *parser) isPrefixHeader(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.flags&EXTENSION_SPACE_HEADERS != 0 { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + if data[level] != ' ' { + return false + } + } + return true +} + +func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.flags&EXTENSION_HEADER_IDS != 0 { + j, k := 0, 0 + // find start/end of header id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract header id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + work := func() bool { + p.inline(out, data[i:end]) + return true + } + p.r.Header(out, work, level, id) + } + return skip +} + +func (p *parser) isUnderlinedHeader(data []byte) int { + // test of level 1 header + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 1 + } else { + return 0 + } + } + + // test of level 2 header + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 2 + } else { + return 0 + } + } + + return 0 +} + +func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + p.r.TitleBlock(out, data) + + return len(data) +} + +func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(out, data, doRender); size > 0 { + return size + } + + // check for an
tag + if size := p.htmlHr(out, data, doRender); size > 0 { + return size + } + + // check for HTML CDATA + if size := p.htmlCDATA(out, data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + + return i +} + +func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int { + // html block needs to end with a blank line + if i := p.isEmpty(data[start:]); i > 0 { + size := start + i + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + return size + } + return 0 +} + +// HTML comment, lax form +func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int { + i := p.inlineHTMLComment(out, data) + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HTML CDATA section +func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int { + const cdataTag = "') { + i++ + } + i++ + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HR, which is the only self-closing block tag considered +func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + + i := 3 + for data[i] != '>' && data[i] != '\n' { + i++ + } + + if data[i] == '>' { + return p.renderHTMLBlock(out, data, i+1, doRender) + } + + return 0 +} + +func (p *parser) htmlFindTag(data []byte) (string, bool) { + i := 0 + for isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *parser) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*parser) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + return i + 1 +} + +func (*parser) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. +// If syntax is not nil, it gets set to the syntax specified in the fence line. +// A final newline is mandatory to recognize the fence line, unless newlineOptional is true. +func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the info string, and discard it if the caller doesn't care. 
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + + i++ + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + i = skipChar(data, i, ' ') + if i >= len(data) || data[i] != '\n' { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int { + var infoString string + beg, marker := isFenceLine(data, &infoString, "", false) + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + newlineOptional := !doRender + fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + p.r.BlockCode(out, work.Bytes(), infoString) + } + + return beg +} + +func (p *parser) table(out *bytes.Buffer, data []byte) int { + var header bytes.Buffer + i, columns := p.tableHeader(&header, data) + if i == 0 { + return 0 + } + + var body bytes.Buffer + + for i < len(data) { + pipes, rowStart := 0, i + for ; data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i++ + p.tableRow(&body, data[rowStart:i], columns, false) + } + + p.r.Table(out, header.Bytes(), body.Bytes(), columns) + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { + i := 0 + colCount := 1 + for i = 0; data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + header := data[:i+1] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]int, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_LEFT + dashes++ + } + for data[i] == '-' { + i++ + dashes++ + } + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_RIGHT + dashes++ + } + for data[i] == ' ' { + i++ + } + + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.tableRow(out, header, columns, true) + size = i + 1 + return +} + +func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { + i, col := 0, 0 + var rowWork bytes.Buffer + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for data[i] == ' ' { + i++ + } + + cellStart := i + + for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && data[cellEnd-1] == ' ' { + cellEnd-- + } + + var cellWork bytes.Buffer + p.inline(&cellWork, data[cellStart:cellEnd]) + + if header { + p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) + } else { + p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) + } + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + if header { + p.r.TableHeaderCell(&rowWork, nil, columns[col]) + } else { + p.r.TableCell(&rowWork, nil, columns[col]) + } + } + + // silently ignore rows with too many cells + + p.r.TableRow(out, rowWork.Bytes()) +} + +// returns blockquote prefix length +func (p *parser) quotePrefix(data []byte) int { + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + if data[i] == '>' { + if data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *parser) quote(out *bytes.Buffer, data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for
+		// fenced code and if one's found, incorporate it altogether,
+		// regardless of any contents inside it
+		for data[end] != '\n' {
+			if p.flags&EXTENSION_FENCED_CODE != 0 {
+				if i := p.fencedCodeBlock(out, data[end:], false); i > 0 {
+					// -1 to compensate for the extra end++ after the loop:
+					end += i - 1
+					break
+				}
+			}
+			end++
+		}
+		end++
+
+		if pre := p.quotePrefix(data[beg:]); pre > 0 {
+			// skip the prefix
+			beg += pre
+		} else if p.terminateBlockquote(data, beg, end) {
+			break
+		}
+
+		// this line is part of the blockquote
+		raw.Write(data[beg:end])
+		beg = end
+	}
+
+	var cooked bytes.Buffer
+	p.block(&cooked, raw.Bytes())
+	p.r.BlockQuote(out, cooked.Bytes())
+	return end
+}
+
+// returns prefix length for block code
+func (p *parser) codePrefix(data []byte) int {
+	if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
+		return 4
+	}
+	return 0
+}
+
+func (p *parser) code(out *bytes.Buffer, data []byte) int {
+	var work bytes.Buffer
+
+	i := 0
+	for i < len(data) {
+		beg := i
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+
+		blankline := p.isEmpty(data[beg:i]) > 0
+		if pre := p.codePrefix(data[beg:i]); pre > 0 {
+			beg += pre
+		} else if !blankline {
+			// non-empty, non-prefixed line breaks the pre
+			i = beg
+			break
+		}
+
+		// verbatim copy to the working buffer
+		if blankline {
+			work.WriteByte('\n')
+		} else {
+			work.Write(data[beg:i])
+		}
+	}
+
+	// trim all the \n off the end of work
+	workbytes := work.Bytes()
+	eol := len(workbytes)
+	for eol > 0 && workbytes[eol-1] == '\n' {
+		eol--
+	}
+	if eol != len(workbytes) {
+		work.Truncate(eol)
+	}
+
+	work.WriteByte('\n')
+
+	p.r.BlockCode(out, work.Bytes(), "")
+
+	return i
+}
+
+// returns unordered list item prefix
+func (p *parser) uliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// need a *, +, or - followed by a space
+	if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
+		data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// returns ordered list item prefix
+func (p *parser) oliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// count the digits
+	start := i
+	for data[i] >= '0' && data[i] <= '9' {
+		i++
+	}
+
+	// we need >= 1 digits followed by a dot and a space
+	if start == i || data[i] != '.' || data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// returns definition list item prefix
+func (p *parser) dliPrefix(data []byte) int {
+	i := 0
+
+	// need a ':' followed by a space
+	if data[i] != ':' || data[i+1] != ' ' {
+		return 0
+	}
+	for data[i] == ' ' {
+		i++
+	}
+	return i + 2
+}
+
+// parse ordered or unordered list block
+func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
+	i := 0
+	flags |= LIST_ITEM_BEGINNING_OF_LIST
+	work := func() bool {
+		for i < len(data) {
+			skip := p.listItem(out, data[i:], &flags)
+			i += skip
+
+			if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
+				break
+			}
+			flags &= ^LIST_ITEM_BEGINNING_OF_LIST
+		}
+		return true
+	}
+
+	p.r.List(out, work, flags)
+	return i
+}
+
+// Parse a single list item.
+// Assumes initial prefix is already removed if this is a sublist.
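+// The flags argument is updated in place; list() stops iterating once
+// LIST_ITEM_END_OF_LIST is set, and the updated flags are also passed on to
+// the renderer's ListItem call.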
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
+	// keep track of the indentation of the first line
+	itemIndent := 0
+	for itemIndent < 3 && data[itemIndent] == ' ' {
+		itemIndent++
+	}
+
+	i := p.uliPrefix(data)
+	if i == 0 {
+		i = p.oliPrefix(data)
+	}
+	if i == 0 {
+		i = p.dliPrefix(data)
+		// reset definition term flag
+		if i > 0 {
+			*flags &= ^LIST_TYPE_TERM
+		}
+	}
+	if i == 0 {
+		// if in definition list, set term flag and continue
+		if *flags&LIST_TYPE_DEFINITION != 0 {
+			*flags |= LIST_TYPE_TERM
+		} else {
+			return 0
+		}
+	}
+
+	// skip leading whitespace on first line
+	for data[i] == ' ' {
+		i++
+	}
+
+	// find the end of the line
+	line := i
+	for i > 0 && data[i-1] != '\n' {
+		i++
+	}
+
+	// get working buffer
+	var raw bytes.Buffer
+
+	// put the first line into the working buffer
+	raw.Write(data[line:i])
+	line = i
+
+	// process the following lines
+	containsBlankLine := false
+	sublist := 0
+	codeBlockMarker := ""
+
+gatherlines:
+	for line < len(data) {
+		i++
+
+		// find the end of this line
+		for data[i-1] != '\n' {
+			i++
+		}
+
+		// if it is an empty line, guess that it is part of this item
+		// and move on to the next line
+		if p.isEmpty(data[line:i]) > 0 {
+			containsBlankLine = true
+			raw.Write(data[line:i])
+			line = i
+			continue
+		}
+
+		// calculate the indentation
+		indent := 0
+		for indent < 4 && line+indent < i && data[line+indent] == ' ' {
+			indent++
+		}
+
+		chunk := data[line+indent : i]
+
+		if p.flags&EXTENSION_FENCED_CODE != 0 {
+			// determine if in or out of codeblock
+			// if in codeblock, ignore normal list processing
+			_, marker := isFenceLine(chunk, nil, codeBlockMarker, false)
+			if marker != "" {
+				if codeBlockMarker == "" {
+					// start of codeblock
+					codeBlockMarker = marker
+				} else {
+					// end of codeblock.
+					*flags |= LIST_ITEM_CONTAINS_BLOCK
+					codeBlockMarker = ""
+				}
+			}
+			// we are in a codeblock, write line, and continue
+			if codeBlockMarker != "" || marker != "" {
+				raw.Write(data[line+indent : i])
+				line = i
+				continue gatherlines
+			}
+		}
+
+		// evaluate how this line fits in
+		switch {
+		// is this a nested list item?
+		case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
+			p.oliPrefix(chunk) > 0 ||
+			p.dliPrefix(chunk) > 0:
+
+			if containsBlankLine {
+				// end the list if the type changed after a blank line
+				if indent <= itemIndent &&
+					((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) ||
+						(*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) {
+
+					*flags |= LIST_ITEM_END_OF_LIST
+					break gatherlines
+				}
+				*flags |= LIST_ITEM_CONTAINS_BLOCK
+			}
+
+			// to be a nested list, it must be indented more
+			// if not, it is the next item in the same list
+			if indent <= itemIndent {
+				break gatherlines
+			}
+
+			// is this the first item in the nested list?
+			if sublist == 0 {
+				sublist = raw.Len()
+			}
+
+		// is this a nested prefix header?
+		case p.isPrefixHeader(chunk):
+			// if the header is not indented, it is not nested in the list
+			// and thus ends the list
+			if containsBlankLine && indent < 4 {
+				*flags |= LIST_ITEM_END_OF_LIST
+				break gatherlines
+			}
+			*flags |= LIST_ITEM_CONTAINS_BLOCK
+
+		// anything following an empty line is only part
+		// of this item if it is indented 4 spaces
+		// (regardless of the indentation of the beginning of the item)
+		case containsBlankLine && indent < 4:
+			if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 {
+				// is the next item still a part of this list?
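+				// look ahead: the definition list only continues if an
+				// upcoming line starts with ':'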
+				next := i
+				for data[next] != '\n' {
+					next++
+				}
+				for next < len(data)-1 && data[next] == '\n' {
+					next++
+				}
+				if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
+					*flags |= LIST_ITEM_END_OF_LIST
+				}
+			} else {
+				*flags |= LIST_ITEM_END_OF_LIST
+			}
+			break gatherlines
+
+		// a blank line means this should be parsed as a block
+		case containsBlankLine:
+			*flags |= LIST_ITEM_CONTAINS_BLOCK
+		}
+
+		containsBlankLine = false
+
+		// add the line into the working buffer without prefix
+		raw.Write(data[line+indent : i])
+
+		line = i
+	}
+
+	// If we reached the end of the data, the Renderer.ListItem call we're going
+	// to make below is definitely the last in the list.
+	if line >= len(data) {
+		*flags |= LIST_ITEM_END_OF_LIST
+	}
+
+	rawBytes := raw.Bytes()
+
+	// render the contents of the list item
+	var cooked bytes.Buffer
+	if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
+		// intermediate render of block item, except for definition term
+		if sublist > 0 {
+			p.block(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.block(&cooked, rawBytes)
+		}
+	} else {
+		// intermediate render of inline item
+		if sublist > 0 {
+			p.inline(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.inline(&cooked, rawBytes)
+		}
+	}
+
+	// render the actual list item
+	cookedBytes := cooked.Bytes()
+	parsedEnd := len(cookedBytes)
+
+	// strip trailing newlines
+	for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' {
+		parsedEnd--
+	}
+	p.r.ListItem(out, cookedBytes[:parsedEnd], *flags)
+
+	return line
+}
+
+// render a single paragraph that has already been parsed out
+func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) {
+	if len(data) == 0 {
+		return
+	}
+
+	// trim leading spaces
+	beg := 0
+	for data[beg] == ' ' {
+		beg++
+	}
+
+	// trim trailing newline
+	end := len(data) - 1
+
+	// trim trailing spaces
+	for end > beg && data[end-1] == ' ' {
+		end--
+	}
+
+	work := func() bool {
+		p.inline(out, data[beg:end])
+		return true
+	}
+	p.r.Paragraph(out, work)
+}
+
+func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
+	// prev: index of 1st char of previous line
+	// line: index of 1st char of current line
+	// i: index of cursor/end of current line
+	var prev, line, i int
+
+	// keep going until we find something to mark the end of the paragraph
+	for i < len(data) {
+		// mark the beginning of the current line
+		prev = line
+		current := data[i:]
+		line = i
+
+		// did we find a blank line marking the end of the paragraph?
+		if n := p.isEmpty(current); n > 0 {
+			// is this blank line followed by a definition list item?
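+			// (a line starting with ':' right after the blank line turns the
+			// preceding line into a definition term; see the p.list call below)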
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + p.renderParagraph(out, data[:i]) + return i + n + } + + // an underline under some text marks a header, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeader(current); level > 0 { + // render the paragraph + p.renderParagraph(out, data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + // render the header + // this ugly double closure avoids forcing variables onto the heap + work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { + return func() bool { + pp.inline(o, d) + return true + } + }(out, p, data[prev:eol]) + + id := "" + if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + p.r.Header(out, work, level, id) + + // find the end of the underline + for data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + if data[i] == '<' && p.html(out, current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a prefixed header or a horizontal rule after this, paragraph is over + if p.isPrefixHeader(current) || p.isHRule(current) { + p.renderParagraph(out, data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.flags&EXTENSION_FENCED_CODE != 0 { + if p.fencedCodeBlock(out, current, false) > 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(current) != 0 { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + // if there's a list after this, paragraph is over + if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + for data[i] != '\n' { + i++ + } + i++ + } + + p.renderParagraph(out, data[:i]) + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/russross/blackfriday/doc.go --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/vendor/github.com/russross/blackfriday/doc.go Sun Feb 16 18:54:01 2020 +0100 @@ -0,0 +1,32 @@ +// Package blackfriday is a Markdown processor. +// +// It translates plain text with simple formatting rules into HTML or LaTeX. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. 
This algorithm is used to create
+// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The
+// algorithm is specified below, so that other packages can create
+// compatible anchor names and links to those anchors.
+//
+// The algorithm iterates over the input text, interpreted as UTF-8,
+// one Unicode code point (rune) at a time. All runes that are letters (category L)
+// or numbers (category N) are considered valid characters. They are mapped to
+// lower case, and included in the output. All other runes are considered
+// invalid characters. Invalid characters that precede the first valid character,
+// as well as invalid characters that follow the last valid character,
+// are dropped completely. All other sequences of invalid characters
+// between two valid characters are replaced with a single dash character '-'.
+//
+// SanitizedAnchorName exposes this functionality, and can be used to
+// create compatible links to the anchor names generated by blackfriday.
+// This algorithm is also implemented in a small standalone package at
+// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
+// that want a small package and don't need full functionality of blackfriday.
+package blackfriday

+// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
+// github.com/shurcooL/sanitized_anchor_name.
+// Otherwise, users of sanitized_anchor_name will get anchor names
+// that are incompatible with those generated by blackfriday.
diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/russross/blackfriday/go.mod
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/vendor/github.com/russross/blackfriday/go.mod	Sun Feb 16 18:54:01 2020 +0100
@@ -0,0 +1,1 @@
+module github.com/russross/blackfriday
diff -r c040f992052f -r 1c52a0eeb952 vendor/github.com/russross/blackfriday/html.go
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/vendor/github.com/russross/blackfriday/html.go	Sun Feb 16 18:54:01 2020 +0100
@@ -0,0 +1,938 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//

+//
+//
+// HTML rendering backend
+//
+//

+package blackfriday

+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)

+// Html renderer configuration options.
+const (
+	HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks
+	HTML_SKIP_STYLE            // skip embedded