Add obsolete tokens, resolve conflicts, bump ent
Signed-off-by: m.nabokikh <maksim.nabokikh@flant.com>
parent 24fa4def5b
commit 8553309db3
4  go.mod
@@ -3,12 +3,12 @@ module github.com/dexidp/dex
 go 1.16
 
 require (
-	entgo.io/ent v0.7.0
+	entgo.io/ent v0.8.0
 	github.com/AppsFlyer/go-sundheit v0.3.1
 	github.com/beevik/etree v1.1.0
 	github.com/coreos/go-oidc/v3 v3.0.0
 	github.com/dexidp/dex/api/v2 v2.0.0
-	github.com/felixge/httpsnoop v1.0.1
+	github.com/felixge/httpsnoop v1.0.2
 	github.com/ghodss/yaml v1.0.0
 	github.com/go-ldap/ldap/v3 v3.3.0
 	github.com/go-sql-driver/mysql v1.6.0

17  go.sum
@@ -38,6 +38,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+entgo.io/ent v0.8.0 h1:xirrW//1oda7pp0bz+XssSOv4/C3nmgYQOxjIfljFt8=
+entgo.io/ent v0.8.0/go.mod h1:KNjsukat/NJi6zJh1utwRadsbGOZsBbAZNDxkW7tMCc=
 github.com/AppsFlyer/go-sundheit v0.3.1 h1:Zqnr3wV3WQmXonc234k9XZAoV2KHUHw3osR5k2iHQZE=
 github.com/AppsFlyer/go-sundheit v0.3.1/go.mod h1:iZ8zWMS7idcvmqewf5mEymWWgoOiG/0WD4+aeh+heX4=
 github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28=
@@ -45,6 +47,8 @@ github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzS
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
+github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
@@ -128,8 +132,6 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/facebook/ent v0.5.3 h1:YT3Sl28n7gGGOkQeYgeJsZmizJ1Iiy7psgkOtEk0aq4=
-github.com/facebook/ent v0.5.3/go.mod h1:tlWP+qCd3x2EeO7B/EqlJQ4dWu/2IeYFhP/szzDKAi8=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o=
@@ -143,6 +145,7 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8=
 github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
+github.com/go-bindata/go-bindata v1.0.1-0.20190711162640-ee3c2418e368/go.mod h1:7xCgX1lzlrXPHkfvn3EhumqHkmSlzt8at9q7v0ax19c=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -154,7 +157,9 @@ github.com/go-ldap/ldap/v3 v3.3.0/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjR
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.1-0.20200311113236-681ffa848bae/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -232,6 +237,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -283,6 +290,7 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/jonboulle/clockwork v0.2.0 h1:J2SLSdy7HgElq8ekSl2Mxh6vrRNFxqbXGenYH2I02Vs=
@@ -311,6 +319,7 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lib/pq v1.10.1 h1:6VXZrLU0jHBYyAqrSPa+MgPfnSvTPuMgK+k0o5kVFWo=
 github.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
@@ -323,6 +332,8 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA=
 github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
@@ -336,6 +347,7 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
 github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -355,6 +367,7 @@ github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
 github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
 github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=

@@ -22,6 +22,7 @@ func (d *Database) CreateRefresh(refresh storage.RefreshToken) error {
 		SetConnectorID(refresh.ConnectorID).
 		SetConnectorData(refresh.ConnectorData).
 		SetToken(refresh.Token).
+		SetObsoleteToken(refresh.ObsoleteToken).
 		// Save utc time into database because ent doesn't support comparing dates with different timezones
 		SetLastUsed(refresh.LastUsed.UTC()).
 		SetCreatedAt(refresh.CreatedAt.UTC()).
@@ -94,6 +95,7 @@ func (d *Database) UpdateRefreshToken(id string, updater func(old storage.Refres
 		SetConnectorID(newtToken.ConnectorID).
 		SetConnectorData(newtToken.ConnectorData).
 		SetToken(newtToken.Token).
+		SetObsoleteToken(newtToken.ObsoleteToken).
 		// Save utc time into database because ent doesn't support comparing dates with different timezones
 		SetLastUsed(newtToken.LastUsed.UTC()).
 		SetCreatedAt(newtToken.CreatedAt.UTC()).
@@ -117,6 +117,7 @@ func toStorageRefreshToken(r *db.RefreshToken) storage.RefreshToken {
 	return storage.RefreshToken{
 		ID:            r.ID,
 		Token:         r.Token,
+		ObsoleteToken: r.ObsoleteToken,
 		CreatedAt:     r.CreatedAt,
 		LastUsed:      r.LastUsed,
 		ClientID:      r.ClientID,

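Aside, not part of the commit: the ObsoleteToken column wired in above keeps the previously issued refresh token alongside the current one. A minimal, self-contained Go sketch of the rotation rule this typically enables; the type and function names below are illustrative assumptions, not Dex's actual handler code.

package main

import "fmt"

// refreshRecord stands in for the stored refresh token; the real Dex type is
// storage.RefreshToken, which this commit extends with ObsoleteToken.
type refreshRecord struct {
	Token         string // currently valid value
	ObsoleteToken string // value issued one rotation earlier
}

// rotationAllows accepts a presented token if it matches either the current
// value or the immediately previous one, tolerating clients that retry after
// losing the response to a rotation.
func rotationAllows(r refreshRecord, presented string) bool {
	return presented == r.Token || presented == r.ObsoleteToken
}

func main() {
	r := refreshRecord{Token: "new-value", ObsoleteToken: "old-value"}
	fmt.Println(rotationAllows(r, "old-value")) // true
	fmt.Println(rotationAllows(r, "stale"))     // false
}
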
@@ -55,13 +55,13 @@ func (*AuthCode) scanValues(columns []string) ([]interface{}, error) {
 	for i := range columns {
 		switch columns[i] {
 		case authcode.FieldScopes, authcode.FieldClaimsGroups, authcode.FieldConnectorData:
-			values[i] = &[]byte{}
+			values[i] = new([]byte)
 		case authcode.FieldClaimsEmailVerified:
-			values[i] = &sql.NullBool{}
+			values[i] = new(sql.NullBool)
 		case authcode.FieldID, authcode.FieldClientID, authcode.FieldNonce, authcode.FieldRedirectURI, authcode.FieldClaimsUserID, authcode.FieldClaimsUsername, authcode.FieldClaimsEmail, authcode.FieldClaimsPreferredUsername, authcode.FieldConnectorID, authcode.FieldCodeChallenge, authcode.FieldCodeChallengeMethod:
-			values[i] = &sql.NullString{}
+			values[i] = new(sql.NullString)
 		case authcode.FieldExpiry:
-			values[i] = &sql.NullTime{}
+			values[i] = new(sql.NullTime)
 		default:
 			return nil, fmt.Errorf("unexpected column %q for type AuthCode", columns[i])
 		}

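Aside, not part of the commit: the regenerated scanValues only changes how the scan targets are spelled; both forms hand database/sql a pointer of the same type. A short self-contained illustration:

package main

import (
	"database/sql"
	"fmt"
)

func main() {
	// ent 0.7 generated pointers to composite literals, ent 0.8 uses new(...).
	// rows.Scan treats them identically because the pointer types match.
	oldStyle := &sql.NullString{}   // ent 0.7 spelling
	newStyle := new(sql.NullString) // ent 0.8 spelling
	fmt.Printf("%T %T\n", oldStyle, newStyle) // *sql.NullString *sql.NullString

	a := &[]byte{}   // points at an empty, non-nil slice
	b := new([]byte) // points at a nil slice
	fmt.Println(*a == nil, *b == nil) // false true
}
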
@@ -20,6 +20,7 @@ type AuthCodeQuery struct {
 	config
 	limit      *int
 	offset     *int
+	unique     *bool
 	order      []OrderFunc
 	fields     []string
 	predicates []predicate.AuthCode
@@ -46,6 +47,13 @@ func (acq *AuthCodeQuery) Offset(offset int) *AuthCodeQuery {
 	return acq
 }
 
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (acq *AuthCodeQuery) Unique(unique bool) *AuthCodeQuery {
+	acq.unique = &unique
+	return acq
+}
+
 // Order adds an order step to the query.
 func (acq *AuthCodeQuery) Order(o ...OrderFunc) *AuthCodeQuery {
 	acq.order = append(acq.order, o...)
@@ -352,6 +360,9 @@ func (acq *AuthCodeQuery) querySpec() *sqlgraph.QuerySpec {
 		From:   acq.sql,
 		Unique: true,
 	}
+	if unique := acq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
 	if fields := acq.fields; len(fields) > 0 {
 		_spec.Node.Columns = make([]string, 0, len(fields))
 		_spec.Node.Columns = append(_spec.Node.Columns, authcode.FieldID)
@@ -377,7 +388,7 @@ func (acq *AuthCodeQuery) querySpec() *sqlgraph.QuerySpec {
 	if ps := acq.order; len(ps) > 0 {
 		_spec.Order = func(selector *sql.Selector) {
 			for i := range ps {
-				ps[i](selector, authcode.ValidColumn)
+				ps[i](selector)
 			}
 		}
 	}
@@ -396,7 +407,7 @@ func (acq *AuthCodeQuery) sqlQuery(ctx context.Context) *sql.Selector {
 		p(selector)
 	}
 	for _, p := range acq.order {
-		p(selector, authcode.ValidColumn)
+		p(selector)
 	}
 	if offset := acq.offset; offset != nil {
 		// limit is mandatory for offset clause. We start
@@ -662,7 +673,7 @@ func (acgb *AuthCodeGroupBy) sqlQuery() *sql.Selector {
 	columns := make([]string, 0, len(acgb.fields)+len(acgb.fns))
 	columns = append(columns, acgb.fields...)
 	for _, fn := range acgb.fns {
-		columns = append(columns, fn(selector, authcode.ValidColumn))
+		columns = append(columns, fn(selector))
 	}
 	return selector.Select(columns...).GroupBy(acgb.fields...)
 }

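Aside, not part of the commit: ent 0.8 adds a Unique option to every generated query builder, propagated into querySpec above. A hedged usage sketch; the helper name and the storage/ent/db import path are assumptions for illustration.

package example

import (
	"context"

	"github.com/dexidp/dex/storage/ent/db"
)

// listAuthCodes disables the DISTINCT that the generated builder applies by
// default, so duplicate rows are returned as-is.
func listAuthCodes(ctx context.Context, client *db.Client) ([]*db.AuthCode, error) {
	return client.AuthCode.Query().
		Unique(false).
		All(ctx)
}
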
@@ -416,6 +416,7 @@ func (acu *AuthCodeUpdate) sqlSave(ctx context.Context) (n int, err error) {
 // AuthCodeUpdateOne is the builder for updating a single AuthCode entity.
 type AuthCodeUpdateOne struct {
 	config
+	fields   []string
 	hooks    []Hook
 	mutation *AuthCodeMutation
 }
@@ -557,6 +558,13 @@ func (acuo *AuthCodeUpdateOne) Mutation() *AuthCodeMutation {
 	return acuo.mutation
 }
 
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (acuo *AuthCodeUpdateOne) Select(field string, fields ...string) *AuthCodeUpdateOne {
+	acuo.fields = append([]string{field}, fields...)
+	return acuo
+}
+
 // Save executes the query and returns the updated AuthCode entity.
 func (acuo *AuthCodeUpdateOne) Save(ctx context.Context) (*AuthCode, error) {
 	var (
@@ -670,6 +678,18 @@ func (acuo *AuthCodeUpdateOne) sqlSave(ctx context.Context) (_node *AuthCode, er
 		return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing AuthCode.ID for update")}
 	}
 	_spec.Node.ID.Value = id
+	if fields := acuo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, authcode.FieldID)
+		for _, f := range fields {
+			if !authcode.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+			}
+			if f != authcode.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
 	if ps := acuo.mutation.predicates; len(ps) > 0 {
 		_spec.Predicate = func(selector *sql.Selector) {
 			for i := range ps {

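Aside, not part of the commit: the new Select on the update-one builders limits which columns are read back into the returned entity. A hedged sketch of a possible call site; the helper name, the field constants, the import paths, and the assumption that the RefreshToken builders gain the same methods elsewhere in this commit are all illustrative, not verified against the generated code.

package example

import (
	"context"

	"github.com/dexidp/dex/storage/ent/db"
	"github.com/dexidp/dex/storage/ent/db/refreshtoken"
)

// rotateToken stores a new token value, remembers the previous one as
// obsolete, and asks the builder to return only the two token columns.
func rotateToken(ctx context.Context, client *db.Client, id, next, previous string) (*db.RefreshToken, error) {
	return client.RefreshToken.UpdateOneID(id).
		SetToken(next).
		SetObsoleteToken(previous).
		Select(refreshtoken.FieldToken, refreshtoken.FieldObsoleteToken).
		Save(ctx)
}
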
@@ -63,13 +63,13 @@ func (*AuthRequest) scanValues(columns []string) ([]interface{}, error) {
 	for i := range columns {
 		switch columns[i] {
 		case authrequest.FieldScopes, authrequest.FieldResponseTypes, authrequest.FieldClaimsGroups, authrequest.FieldConnectorData:
-			values[i] = &[]byte{}
+			values[i] = new([]byte)
 		case authrequest.FieldForceApprovalPrompt, authrequest.FieldLoggedIn, authrequest.FieldClaimsEmailVerified:
-			values[i] = &sql.NullBool{}
+			values[i] = new(sql.NullBool)
 		case authrequest.FieldID, authrequest.FieldClientID, authrequest.FieldRedirectURI, authrequest.FieldNonce, authrequest.FieldState, authrequest.FieldClaimsUserID, authrequest.FieldClaimsUsername, authrequest.FieldClaimsEmail, authrequest.FieldClaimsPreferredUsername, authrequest.FieldConnectorID, authrequest.FieldCodeChallenge, authrequest.FieldCodeChallengeMethod:
-			values[i] = &sql.NullString{}
+			values[i] = new(sql.NullString)
 		case authrequest.FieldExpiry:
-			values[i] = &sql.NullTime{}
+			values[i] = new(sql.NullTime)
 		default:
 			return nil, fmt.Errorf("unexpected column %q for type AuthRequest", columns[i])
 		}

@@ -20,6 +20,7 @@ type AuthRequestQuery struct {
 	config
 	limit      *int
 	offset     *int
+	unique     *bool
 	order      []OrderFunc
 	fields     []string
 	predicates []predicate.AuthRequest
@@ -46,6 +47,13 @@ func (arq *AuthRequestQuery) Offset(offset int) *AuthRequestQuery {
 	return arq
 }
 
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (arq *AuthRequestQuery) Unique(unique bool) *AuthRequestQuery {
+	arq.unique = &unique
+	return arq
+}
+
 // Order adds an order step to the query.
 func (arq *AuthRequestQuery) Order(o ...OrderFunc) *AuthRequestQuery {
 	arq.order = append(arq.order, o...)
@@ -352,6 +360,9 @@ func (arq *AuthRequestQuery) querySpec() *sqlgraph.QuerySpec {
 		From:   arq.sql,
 		Unique: true,
 	}
+	if unique := arq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
 	if fields := arq.fields; len(fields) > 0 {
 		_spec.Node.Columns = make([]string, 0, len(fields))
 		_spec.Node.Columns = append(_spec.Node.Columns, authrequest.FieldID)
@@ -377,7 +388,7 @@ func (arq *AuthRequestQuery) querySpec() *sqlgraph.QuerySpec {
 	if ps := arq.order; len(ps) > 0 {
 		_spec.Order = func(selector *sql.Selector) {
 			for i := range ps {
-				ps[i](selector, authrequest.ValidColumn)
+				ps[i](selector)
 			}
 		}
 	}
@@ -396,7 +407,7 @@ func (arq *AuthRequestQuery) sqlQuery(ctx context.Context) *sql.Selector {
 		p(selector)
 	}
 	for _, p := range arq.order {
-		p(selector, authrequest.ValidColumn)
+		p(selector)
 	}
 	if offset := arq.offset; offset != nil {
 		// limit is mandatory for offset clause. We start
@@ -662,7 +673,7 @@ func (argb *AuthRequestGroupBy) sqlQuery() *sql.Selector {
 	columns := make([]string, 0, len(argb.fields)+len(argb.fns))
 	columns = append(columns, argb.fields...)
 	for _, fn := range argb.fns {
-		columns = append(columns, fn(selector, authrequest.ValidColumn))
+		columns = append(columns, fn(selector))
 	}
 	return selector.Select(columns...).GroupBy(argb.fields...)
 }

@@ -434,6 +434,7 @@ func (aru *AuthRequestUpdate) sqlSave(ctx context.Context) (n int, err error) {
 // AuthRequestUpdateOne is the builder for updating a single AuthRequest entity.
 type AuthRequestUpdateOne struct {
 	config
+	fields   []string
 	hooks    []Hook
 	mutation *AuthRequestMutation
 }
@@ -605,6 +606,13 @@ func (aruo *AuthRequestUpdateOne) Mutation() *AuthRequestMutation {
 	return aruo.mutation
 }
 
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (aruo *AuthRequestUpdateOne) Select(field string, fields ...string) *AuthRequestUpdateOne {
+	aruo.fields = append([]string{field}, fields...)
+	return aruo
+}
+
 // Save executes the query and returns the updated AuthRequest entity.
 func (aruo *AuthRequestUpdateOne) Save(ctx context.Context) (*AuthRequest, error) {
 	var (
@@ -672,6 +680,18 @@ func (aruo *AuthRequestUpdateOne) sqlSave(ctx context.Context) (_node *AuthReque
 		return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing AuthRequest.ID for update")}
 	}
 	_spec.Node.ID.Value = id
+	if fields := aruo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, authrequest.FieldID)
+		for _, f := range fields {
+			if !authrequest.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+			}
+			if f != authrequest.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
 	if ps := aruo.mutation.predicates; len(ps) > 0 {
 		_spec.Predicate = func(selector *sql.Selector) {
 			for i := range ps {

@@ -250,7 +250,9 @@ func (c *AuthCodeClient) DeleteOneID(id string) *AuthCodeDeleteOne {
 
 // Query returns a query builder for AuthCode.
 func (c *AuthCodeClient) Query() *AuthCodeQuery {
-	return &AuthCodeQuery{config: c.config}
+	return &AuthCodeQuery{
+		config: c.config,
+	}
 }
 
 // Get returns a AuthCode entity by its id.
@@ -338,7 +340,9 @@ func (c *AuthRequestClient) DeleteOneID(id string) *AuthRequestDeleteOne {
 
 // Query returns a query builder for AuthRequest.
 func (c *AuthRequestClient) Query() *AuthRequestQuery {
-	return &AuthRequestQuery{config: c.config}
+	return &AuthRequestQuery{
+		config: c.config,
+	}
 }
 
 // Get returns a AuthRequest entity by its id.
@@ -426,7 +430,9 @@ func (c *ConnectorClient) DeleteOneID(id string) *ConnectorDeleteOne {
 
 // Query returns a query builder for Connector.
 func (c *ConnectorClient) Query() *ConnectorQuery {
-	return &ConnectorQuery{config: c.config}
+	return &ConnectorQuery{
+		config: c.config,
+	}
 }
 
 // Get returns a Connector entity by its id.
@@ -514,7 +520,9 @@ func (c *DeviceRequestClient) DeleteOneID(id int) *DeviceRequestDeleteOne {
 
 // Query returns a query builder for DeviceRequest.
 func (c *DeviceRequestClient) Query() *DeviceRequestQuery {
-	return &DeviceRequestQuery{config: c.config}
+	return &DeviceRequestQuery{
+		config: c.config,
+	}
 }
 
 // Get returns a DeviceRequest entity by its id.
@@ -602,7 +610,9 @@ func (c *DeviceTokenClient) DeleteOneID(id int) *DeviceTokenDeleteOne {
 
 // Query returns a query builder for DeviceToken.
 func (c *DeviceTokenClient) Query() *DeviceTokenQuery {
-	return &DeviceTokenQuery{config: c.config}
+	return &DeviceTokenQuery{
+		config: c.config,
+	}
 }
 
 // Get returns a DeviceToken entity by its id.
@@ -690,7 +700,9 @@ func (c *KeysClient) DeleteOneID(id string) *KeysDeleteOne {
 
 // Query returns a query builder for Keys.
 func (c *KeysClient) Query() *KeysQuery {
-	return &KeysQuery{config: c.config}
+	return &KeysQuery{
+		config: c.config,
+	}
 }
 
 // Get returns a Keys entity by its id.
@@ -778,7 +790,9 @@ func (c *OAuth2ClientClient) DeleteOneID(id string) *OAuth2ClientDeleteOne {
 
 // Query returns a query builder for OAuth2Client.
 func (c *OAuth2ClientClient) Query() *OAuth2ClientQuery {
-	return &OAuth2ClientQuery{config: c.config}
+	return &OAuth2ClientQuery{
+		config: c.config,
+	}
 }
 
 // Get returns a OAuth2Client entity by its id.
@@ -866,7 +880,9 @@ func (c *OfflineSessionClient) DeleteOneID(id string) *OfflineSessionDeleteOne {
 
 // Query returns a query builder for OfflineSession.
 func (c *OfflineSessionClient) Query() *OfflineSessionQuery {
-	return &OfflineSessionQuery{config: c.config}
+	return &OfflineSessionQuery{
+		config: c.config,
+	}
 }
 
 // Get returns a OfflineSession entity by its id.
@@ -954,7 +970,9 @@ func (c *PasswordClient) DeleteOneID(id int) *PasswordDeleteOne {
 
 // Query returns a query builder for Password.
 func (c *PasswordClient) Query() *PasswordQuery {
-	return &PasswordQuery{config: c.config}
+	return &PasswordQuery{
+		config: c.config,
+	}
 }
 
 // Get returns a Password entity by its id.
@@ -1042,7 +1060,9 @@ func (c *RefreshTokenClient) DeleteOneID(id string) *RefreshTokenDeleteOne {
 
 // Query returns a query builder for RefreshToken.
 func (c *RefreshTokenClient) Query() *RefreshTokenQuery {
-	return &RefreshTokenQuery{config: c.config}
+	return &RefreshTokenQuery{
+		config: c.config,
+	}
 }
 
 // Get returns a RefreshToken entity by its id.

@@ -31,9 +31,9 @@ func (*Connector) scanValues(columns []string) ([]interface{}, error) {
 	for i := range columns {
 		switch columns[i] {
 		case connector.FieldConfig:
-			values[i] = &[]byte{}
+			values[i] = new([]byte)
 		case connector.FieldID, connector.FieldType, connector.FieldName, connector.FieldResourceVersion:
-			values[i] = &sql.NullString{}
+			values[i] = new(sql.NullString)
 		default:
 			return nil, fmt.Errorf("unexpected column %q for type Connector", columns[i])
 		}

@@ -20,6 +20,7 @@ type ConnectorQuery struct {
 	config
 	limit      *int
 	offset     *int
+	unique     *bool
 	order      []OrderFunc
 	fields     []string
 	predicates []predicate.Connector
@@ -46,6 +47,13 @@ func (cq *ConnectorQuery) Offset(offset int) *ConnectorQuery {
 	return cq
 }
 
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (cq *ConnectorQuery) Unique(unique bool) *ConnectorQuery {
+	cq.unique = &unique
+	return cq
+}
+
 // Order adds an order step to the query.
 func (cq *ConnectorQuery) Order(o ...OrderFunc) *ConnectorQuery {
 	cq.order = append(cq.order, o...)
@@ -352,6 +360,9 @@ func (cq *ConnectorQuery) querySpec() *sqlgraph.QuerySpec {
 		From:   cq.sql,
 		Unique: true,
 	}
+	if unique := cq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
 	if fields := cq.fields; len(fields) > 0 {
 		_spec.Node.Columns = make([]string, 0, len(fields))
 		_spec.Node.Columns = append(_spec.Node.Columns, connector.FieldID)
@@ -377,7 +388,7 @@ func (cq *ConnectorQuery) querySpec() *sqlgraph.QuerySpec {
 	if ps := cq.order; len(ps) > 0 {
 		_spec.Order = func(selector *sql.Selector) {
 			for i := range ps {
-				ps[i](selector, connector.ValidColumn)
+				ps[i](selector)
 			}
 		}
 	}
@@ -396,7 +407,7 @@ func (cq *ConnectorQuery) sqlQuery(ctx context.Context) *sql.Selector {
 		p(selector)
 	}
 	for _, p := range cq.order {
-		p(selector, connector.ValidColumn)
+		p(selector)
 	}
 	if offset := cq.offset; offset != nil {
 		// limit is mandatory for offset clause. We start
@@ -662,7 +673,7 @@ func (cgb *ConnectorGroupBy) sqlQuery() *sql.Selector {
 	columns := make([]string, 0, len(cgb.fields)+len(cgb.fns))
 	columns = append(columns, cgb.fields...)
 	for _, fn := range cgb.fns {
-		columns = append(columns, fn(selector, connector.ValidColumn))
+		columns = append(columns, fn(selector))
 	}
 	return selector.Select(columns...).GroupBy(cgb.fields...)
 }

@@ -187,6 +187,7 @@ func (cu *ConnectorUpdate) sqlSave(ctx context.Context) (n int, err error) {
 // ConnectorUpdateOne is the builder for updating a single Connector entity.
 type ConnectorUpdateOne struct {
 	config
+	fields   []string
 	hooks    []Hook
 	mutation *ConnectorMutation
 }
@@ -220,6 +221,13 @@ func (cuo *ConnectorUpdateOne) Mutation() *ConnectorMutation {
 	return cuo.mutation
 }
 
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (cuo *ConnectorUpdateOne) Select(field string, fields ...string) *ConnectorUpdateOne {
+	cuo.fields = append([]string{field}, fields...)
+	return cuo
+}
+
 // Save executes the query and returns the updated Connector entity.
 func (cuo *ConnectorUpdateOne) Save(ctx context.Context) (*Connector, error) {
 	var (
@@ -308,6 +316,18 @@ func (cuo *ConnectorUpdateOne) sqlSave(ctx context.Context) (_node *Connector, e
 		return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Connector.ID for update")}
 	}
 	_spec.Node.ID.Value = id
+	if fields := cuo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, connector.FieldID)
+		for _, f := range fields {
+			if !connector.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+			}
+			if f != connector.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
 	if ps := cuo.mutation.predicates; len(ps) > 0 {
 		_spec.Predicate = func(selector *sql.Selector) {
 			for i := range ps {

@@ -37,13 +37,13 @@ func (*DeviceRequest) scanValues(columns []string) ([]interface{}, error) {
 	for i := range columns {
 		switch columns[i] {
 		case devicerequest.FieldScopes:
-			values[i] = &[]byte{}
+			values[i] = new([]byte)
 		case devicerequest.FieldID:
-			values[i] = &sql.NullInt64{}
+			values[i] = new(sql.NullInt64)
 		case devicerequest.FieldUserCode, devicerequest.FieldDeviceCode, devicerequest.FieldClientID, devicerequest.FieldClientSecret:
-			values[i] = &sql.NullString{}
+			values[i] = new(sql.NullString)
 		case devicerequest.FieldExpiry:
-			values[i] = &sql.NullTime{}
+			values[i] = new(sql.NullTime)
 		default:
 			return nil, fmt.Errorf("unexpected column %q for type DeviceRequest", columns[i])
 		}

@@ -20,6 +20,7 @@ type DeviceRequestQuery struct {
 	config
 	limit      *int
 	offset     *int
+	unique     *bool
 	order      []OrderFunc
 	fields     []string
 	predicates []predicate.DeviceRequest
@@ -46,6 +47,13 @@ func (drq *DeviceRequestQuery) Offset(offset int) *DeviceRequestQuery {
 	return drq
 }
 
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (drq *DeviceRequestQuery) Unique(unique bool) *DeviceRequestQuery {
+	drq.unique = &unique
+	return drq
+}
+
 // Order adds an order step to the query.
 func (drq *DeviceRequestQuery) Order(o ...OrderFunc) *DeviceRequestQuery {
 	drq.order = append(drq.order, o...)
@@ -352,6 +360,9 @@ func (drq *DeviceRequestQuery) querySpec() *sqlgraph.QuerySpec {
 		From:   drq.sql,
 		Unique: true,
 	}
+	if unique := drq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
 	if fields := drq.fields; len(fields) > 0 {
 		_spec.Node.Columns = make([]string, 0, len(fields))
 		_spec.Node.Columns = append(_spec.Node.Columns, devicerequest.FieldID)
@@ -377,7 +388,7 @@ func (drq *DeviceRequestQuery) querySpec() *sqlgraph.QuerySpec {
 	if ps := drq.order; len(ps) > 0 {
 		_spec.Order = func(selector *sql.Selector) {
 			for i := range ps {
-				ps[i](selector, devicerequest.ValidColumn)
+				ps[i](selector)
 			}
 		}
 	}
@@ -396,7 +407,7 @@ func (drq *DeviceRequestQuery) sqlQuery(ctx context.Context) *sql.Selector {
 		p(selector)
 	}
 	for _, p := range drq.order {
-		p(selector, devicerequest.ValidColumn)
+		p(selector)
 	}
 	if offset := drq.offset; offset != nil {
 		// limit is mandatory for offset clause. We start
@@ -662,7 +673,7 @@ func (drgb *DeviceRequestGroupBy) sqlQuery() *sql.Selector {
 	columns := make([]string, 0, len(drgb.fields)+len(drgb.fns))
 	columns = append(columns, drgb.fields...)
 	for _, fn := range drgb.fns {
-		columns = append(columns, fn(selector, devicerequest.ValidColumn))
+		columns = append(columns, fn(selector))
 	}
 	return selector.Select(columns...).GroupBy(drgb.fields...)
 }

@@ -236,6 +236,7 @@ func (dru *DeviceRequestUpdate) sqlSave(ctx context.Context) (n int, err error)
 // DeviceRequestUpdateOne is the builder for updating a single DeviceRequest entity.
 type DeviceRequestUpdateOne struct {
 	config
+	fields   []string
 	hooks    []Hook
 	mutation *DeviceRequestMutation
 }
@@ -287,6 +288,13 @@ func (druo *DeviceRequestUpdateOne) Mutation() *DeviceRequestMutation {
 	return druo.mutation
 }
 
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (druo *DeviceRequestUpdateOne) Select(field string, fields ...string) *DeviceRequestUpdateOne {
+	druo.fields = append([]string{field}, fields...)
+	return druo
+}
+
 // Save executes the query and returns the updated DeviceRequest entity.
 func (druo *DeviceRequestUpdateOne) Save(ctx context.Context) (*DeviceRequest, error) {
 	var (
@@ -385,6 +393,18 @@ func (druo *DeviceRequestUpdateOne) sqlSave(ctx context.Context) (_node *DeviceR
 		return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing DeviceRequest.ID for update")}
 	}
 	_spec.Node.ID.Value = id
+	if fields := druo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, devicerequest.FieldID)
+		for _, f := range fields {
+			if !devicerequest.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+			}
+			if f != devicerequest.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
 	if ps := druo.mutation.predicates; len(ps) > 0 {
 		_spec.Predicate = func(selector *sql.Selector) {
 			for i := range ps {

@@ -36,13 +36,13 @@ func (*DeviceToken) scanValues(columns []string) ([]interface{}, error) {
 	for i := range columns {
 		switch columns[i] {
 		case devicetoken.FieldToken:
-			values[i] = &[]byte{}
+			values[i] = new([]byte)
 		case devicetoken.FieldID, devicetoken.FieldPollInterval:
-			values[i] = &sql.NullInt64{}
+			values[i] = new(sql.NullInt64)
 		case devicetoken.FieldDeviceCode, devicetoken.FieldStatus:
-			values[i] = &sql.NullString{}
+			values[i] = new(sql.NullString)
 		case devicetoken.FieldExpiry, devicetoken.FieldLastRequest:
-			values[i] = &sql.NullTime{}
+			values[i] = new(sql.NullTime)
 		default:
 			return nil, fmt.Errorf("unexpected column %q for type DeviceToken", columns[i])
 		}

@@ -20,6 +20,7 @@ type DeviceTokenQuery struct {
 	config
 	limit      *int
 	offset     *int
+	unique     *bool
 	order      []OrderFunc
 	fields     []string
 	predicates []predicate.DeviceToken
@@ -46,6 +47,13 @@ func (dtq *DeviceTokenQuery) Offset(offset int) *DeviceTokenQuery {
 	return dtq
 }
 
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (dtq *DeviceTokenQuery) Unique(unique bool) *DeviceTokenQuery {
+	dtq.unique = &unique
+	return dtq
+}
+
 // Order adds an order step to the query.
 func (dtq *DeviceTokenQuery) Order(o ...OrderFunc) *DeviceTokenQuery {
 	dtq.order = append(dtq.order, o...)
@@ -352,6 +360,9 @@ func (dtq *DeviceTokenQuery) querySpec() *sqlgraph.QuerySpec {
 		From:   dtq.sql,
 		Unique: true,
 	}
+	if unique := dtq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
 	if fields := dtq.fields; len(fields) > 0 {
 		_spec.Node.Columns = make([]string, 0, len(fields))
 		_spec.Node.Columns = append(_spec.Node.Columns, devicetoken.FieldID)
@@ -377,7 +388,7 @@ func (dtq *DeviceTokenQuery) querySpec() *sqlgraph.QuerySpec {
 	if ps := dtq.order; len(ps) > 0 {
 		_spec.Order = func(selector *sql.Selector) {
 			for i := range ps {
-				ps[i](selector, devicetoken.ValidColumn)
+				ps[i](selector)
 			}
 		}
 	}
@@ -396,7 +407,7 @@ func (dtq *DeviceTokenQuery) sqlQuery(ctx context.Context) *sql.Selector {
 		p(selector)
 	}
 	for _, p := range dtq.order {
-		p(selector, devicetoken.ValidColumn)
+		p(selector)
 	}
 	if offset := dtq.offset; offset != nil {
 		// limit is mandatory for offset clause. We start
@@ -662,7 +673,7 @@ func (dtgb *DeviceTokenGroupBy) sqlQuery() *sql.Selector {
 	columns := make([]string, 0, len(dtgb.fields)+len(dtgb.fns))
 	columns = append(columns, dtgb.fields...)
 	for _, fn := range dtgb.fns {
-		columns = append(columns, fn(selector, devicetoken.ValidColumn))
+		columns = append(columns, fn(selector))
 	}
 	return selector.Select(columns...).GroupBy(dtgb.fields...)
 }
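The hunks above give every generated query builder a Unique(bool) knob and the unique field behind it. A minimal usage sketch from calling code (not part of this commit; the package name, function name, client wiring, and ordering field are illustrative assumptions):

    package storagesketch

    import (
    	"context"

    	"github.com/dexidp/dex/storage/ent/db"
    	"github.com/dexidp/dex/storage/ent/db/devicetoken"
    )

    // listDeviceTokens is illustrative only: Unique(false) drops the implicit
    // SQL DISTINCT that the generated builder applies by default.
    func listDeviceTokens(ctx context.Context, client *db.Client) ([]*db.DeviceToken, error) {
    	return client.DeviceToken.Query().
    		Unique(false).
    		Order(db.Asc(devicetoken.FieldExpiry)).
    		All(ctx)
    }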
@@ -240,6 +240,7 @@ func (dtu *DeviceTokenUpdate) sqlSave(ctx context.Context) (n int, err error) {
 // DeviceTokenUpdateOne is the builder for updating a single DeviceToken entity.
 type DeviceTokenUpdateOne struct {
 	config
+	fields   []string
 	hooks    []Hook
 	mutation *DeviceTokenMutation
 }
@@ -298,6 +299,13 @@ func (dtuo *DeviceTokenUpdateOne) Mutation() *DeviceTokenMutation {
 	return dtuo.mutation
 }
 
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (dtuo *DeviceTokenUpdateOne) Select(field string, fields ...string) *DeviceTokenUpdateOne {
+	dtuo.fields = append([]string{field}, fields...)
+	return dtuo
+}
+
 // Save executes the query and returns the updated DeviceToken entity.
 func (dtuo *DeviceTokenUpdateOne) Save(ctx context.Context) (*DeviceToken, error) {
 	var (
@@ -386,6 +394,18 @@ func (dtuo *DeviceTokenUpdateOne) sqlSave(ctx context.Context) (_node *DeviceTok
 		return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing DeviceToken.ID for update")}
 	}
 	_spec.Node.ID.Value = id
+	if fields := dtuo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, devicetoken.FieldID)
+		for _, f := range fields {
+			if !devicetoken.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+			}
+			if f != devicetoken.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
 	if ps := dtuo.mutation.predicates; len(ps) > 0 {
 		_spec.Predicate = func(selector *sql.Selector) {
 			for i := range ps {
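The new Select method on the UpdateOne builders limits which columns are loaded back into the entity returned by Save. A hedged sketch of how that could be used (standalone illustration; the package, function name, and the "complete" status value are assumptions, not part of the commit):

    package storagesketch

    import (
    	"context"

    	"github.com/dexidp/dex/storage/ent/db"
    	"github.com/dexidp/dex/storage/ent/db/devicetoken"
    )

    // completeDeviceToken is illustrative only: Select restricts the columns
    // populated on the DeviceToken that Save returns.
    func completeDeviceToken(ctx context.Context, client *db.Client, id int) (string, error) {
    	dt, err := client.DeviceToken.UpdateOneID(id).
    		SetStatus("complete").
    		Select(devicetoken.FieldStatus).
    		Save(ctx)
    	if err != nil {
    		return "", err
    	}
    	return dt.Status, nil
    }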
@@ -10,6 +10,16 @@ import (
 	"entgo.io/ent/dialect"
 	"entgo.io/ent/dialect/sql"
 	"entgo.io/ent/dialect/sql/sqlgraph"
+	"github.com/dexidp/dex/storage/ent/db/authcode"
+	"github.com/dexidp/dex/storage/ent/db/authrequest"
+	"github.com/dexidp/dex/storage/ent/db/connector"
+	"github.com/dexidp/dex/storage/ent/db/devicerequest"
+	"github.com/dexidp/dex/storage/ent/db/devicetoken"
+	"github.com/dexidp/dex/storage/ent/db/keys"
+	"github.com/dexidp/dex/storage/ent/db/oauth2client"
+	"github.com/dexidp/dex/storage/ent/db/offlinesession"
+	"github.com/dexidp/dex/storage/ent/db/password"
+	"github.com/dexidp/dex/storage/ent/db/refreshtoken"
 )
 
 // ent aliases to avoid import conflicts in user's code.
@@ -25,36 +35,64 @@ type (
 )
 
 // OrderFunc applies an ordering on the sql selector.
-type OrderFunc func(*sql.Selector, func(string) bool)
+type OrderFunc func(*sql.Selector)
+
+// columnChecker returns a function indicates if the column exists in the given column.
+func columnChecker(table string) func(string) error {
+	checks := map[string]func(string) bool{
+		authcode.Table:       authcode.ValidColumn,
+		authrequest.Table:    authrequest.ValidColumn,
+		connector.Table:      connector.ValidColumn,
+		devicerequest.Table:  devicerequest.ValidColumn,
+		devicetoken.Table:    devicetoken.ValidColumn,
+		keys.Table:           keys.ValidColumn,
+		oauth2client.Table:   oauth2client.ValidColumn,
+		offlinesession.Table: offlinesession.ValidColumn,
+		password.Table:       password.ValidColumn,
+		refreshtoken.Table:   refreshtoken.ValidColumn,
+	}
+	check, ok := checks[table]
+	if !ok {
+		return func(string) error {
+			return fmt.Errorf("unknown table %q", table)
+		}
+	}
+	return func(column string) error {
+		if !check(column) {
+			return fmt.Errorf("unknown column %q for table %q", column, table)
+		}
+		return nil
+	}
+}
 
 // Asc applies the given fields in ASC order.
 func Asc(fields ...string) OrderFunc {
-	return func(s *sql.Selector, check func(string) bool) {
+	return func(s *sql.Selector) {
+		check := columnChecker(s.TableName())
 		for _, f := range fields {
-			if check(f) {
-				s.OrderBy(sql.Asc(f))
-			} else {
-				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("invalid field %q for ordering", f)})
+			if err := check(f); err != nil {
+				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("db: %w", err)})
 			}
+			s.OrderBy(sql.Asc(s.C(f)))
 		}
 	}
 }
 
 // Desc applies the given fields in DESC order.
 func Desc(fields ...string) OrderFunc {
-	return func(s *sql.Selector, check func(string) bool) {
+	return func(s *sql.Selector) {
+		check := columnChecker(s.TableName())
 		for _, f := range fields {
-			if check(f) {
-				s.OrderBy(sql.Desc(f))
-			} else {
-				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("invalid field %q for ordering", f)})
+			if err := check(f); err != nil {
+				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("db: %w", err)})
 			}
+			s.OrderBy(sql.Desc(s.C(f)))
 		}
 	}
 }
 
 // AggregateFunc applies an aggregation step on the group-by traversal/selector.
-type AggregateFunc func(*sql.Selector, func(string) bool) string
+type AggregateFunc func(*sql.Selector) string
 
 // As is a pseudo aggregation function for renaming another other functions with custom names. For example:
 //
@@ -63,23 +101,24 @@ type AggregateFunc func(*sql.Selector, func(string) bool) string
 //	Scan(ctx, &v)
 //
 func As(fn AggregateFunc, end string) AggregateFunc {
-	return func(s *sql.Selector, check func(string) bool) string {
+	return func(s *sql.Selector) string {
-		return sql.As(fn(s, check), end)
+		return sql.As(fn(s), end)
 	}
 }
 
 // Count applies the "count" aggregation function on each group.
 func Count() AggregateFunc {
-	return func(s *sql.Selector, _ func(string) bool) string {
+	return func(s *sql.Selector) string {
 		return sql.Count("*")
 	}
 }
 
 // Max applies the "max" aggregation function on the given field of each group.
 func Max(field string) AggregateFunc {
-	return func(s *sql.Selector, check func(string) bool) string {
+	return func(s *sql.Selector) string {
-		if !check(field) {
-			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
+		check := columnChecker(s.TableName())
+		if err := check(field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)})
 			return ""
 		}
 		return sql.Max(s.C(field))
@@ -88,9 +127,10 @@ func Max(field string) AggregateFunc {
 
 // Mean applies the "mean" aggregation function on the given field of each group.
 func Mean(field string) AggregateFunc {
-	return func(s *sql.Selector, check func(string) bool) string {
+	return func(s *sql.Selector) string {
-		if !check(field) {
-			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
+		check := columnChecker(s.TableName())
+		if err := check(field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)})
 			return ""
 		}
 		return sql.Avg(s.C(field))
@@ -99,9 +139,10 @@ func Mean(field string) AggregateFunc {
 
 // Min applies the "min" aggregation function on the given field of each group.
 func Min(field string) AggregateFunc {
-	return func(s *sql.Selector, check func(string) bool) string {
+	return func(s *sql.Selector) string {
-		if !check(field) {
-			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
+		check := columnChecker(s.TableName())
+		if err := check(field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)})
 			return ""
 		}
 		return sql.Min(s.C(field))
@@ -110,9 +151,10 @@ func Min(field string) AggregateFunc {
 
 // Sum applies the "sum" aggregation function on the given field of each group.
 func Sum(field string) AggregateFunc {
-	return func(s *sql.Selector, check func(string) bool) string {
+	return func(s *sql.Selector) string {
-		if !check(field) {
-			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("invalid field %q for grouping", field)})
+		check := columnChecker(s.TableName())
+		if err := check(field); err != nil {
+			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)})
 			return ""
 		}
 		return sql.Sum(s.C(field))
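With the ent v0.8.0 bump, OrderFunc and AggregateFunc no longer receive a ValidColumn callback; the helpers resolve a checker from the selector's table via columnChecker, which is why every generated call site in this commit drops the second argument. A hedged sketch of what a caller looks like after the change (standalone illustration; package and function names are assumptions):

    package storagesketch

    import (
    	"context"

    	"github.com/dexidp/dex/storage/ent/db"
    	"github.com/dexidp/dex/storage/ent/db/refreshtoken"
    )

    // refreshTokensPerClient is illustrative only: Count() is passed without the
    // old ValidColumn argument; column validation now happens inside the helper.
    func refreshTokensPerClient(ctx context.Context, client *db.Client) error {
    	var rows []struct {
    		ClientID string `json:"client_id"`
    		Count    int    `json:"count"`
    	}
    	return client.RefreshToken.Query().
    		GroupBy(refreshtoken.FieldClientID).
    		Aggregate(db.Count()).
    		Scan(ctx, &rows)
    }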
@@ -35,11 +35,11 @@ func (*Keys) scanValues(columns []string) ([]interface{}, error) {
 	for i := range columns {
 		switch columns[i] {
 		case keys.FieldVerificationKeys, keys.FieldSigningKey, keys.FieldSigningKeyPub:
-			values[i] = &[]byte{}
+			values[i] = new([]byte)
 		case keys.FieldID:
-			values[i] = &sql.NullString{}
+			values[i] = new(sql.NullString)
 		case keys.FieldNextRotation:
-			values[i] = &sql.NullTime{}
+			values[i] = new(sql.NullTime)
 		default:
 			return nil, fmt.Errorf("unexpected column %q for type Keys", columns[i])
 		}
@@ -20,6 +20,7 @@ type KeysQuery struct {
 	config
 	limit      *int
 	offset     *int
+	unique     *bool
 	order      []OrderFunc
 	fields     []string
 	predicates []predicate.Keys
@@ -46,6 +47,13 @@ func (kq *KeysQuery) Offset(offset int) *KeysQuery {
 	return kq
 }
 
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (kq *KeysQuery) Unique(unique bool) *KeysQuery {
+	kq.unique = &unique
+	return kq
+}
+
 // Order adds an order step to the query.
 func (kq *KeysQuery) Order(o ...OrderFunc) *KeysQuery {
 	kq.order = append(kq.order, o...)
@@ -352,6 +360,9 @@ func (kq *KeysQuery) querySpec() *sqlgraph.QuerySpec {
 		From:   kq.sql,
 		Unique: true,
 	}
+	if unique := kq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
 	if fields := kq.fields; len(fields) > 0 {
 		_spec.Node.Columns = make([]string, 0, len(fields))
 		_spec.Node.Columns = append(_spec.Node.Columns, keys.FieldID)
@@ -377,7 +388,7 @@ func (kq *KeysQuery) querySpec() *sqlgraph.QuerySpec {
 	if ps := kq.order; len(ps) > 0 {
 		_spec.Order = func(selector *sql.Selector) {
 			for i := range ps {
-				ps[i](selector, keys.ValidColumn)
+				ps[i](selector)
 			}
 		}
 	}
@@ -396,7 +407,7 @@ func (kq *KeysQuery) sqlQuery(ctx context.Context) *sql.Selector {
 		p(selector)
 	}
 	for _, p := range kq.order {
-		p(selector, keys.ValidColumn)
+		p(selector)
 	}
 	if offset := kq.offset; offset != nil {
 		// limit is mandatory for offset clause. We start
@@ -662,7 +673,7 @@ func (kgb *KeysGroupBy) sqlQuery() *sql.Selector {
 	columns := make([]string, 0, len(kgb.fields)+len(kgb.fns))
 	columns = append(columns, kgb.fields...)
 	for _, fn := range kgb.fns {
-		columns = append(columns, fn(selector, keys.ValidColumn))
+		columns = append(columns, fn(selector))
 	}
 	return selector.Select(columns...).GroupBy(kgb.fields...)
 }
@@ -169,6 +169,7 @@ func (ku *KeysUpdate) sqlSave(ctx context.Context) (n int, err error) {
 // KeysUpdateOne is the builder for updating a single Keys entity.
 type KeysUpdateOne struct {
 	config
+	fields   []string
 	hooks    []Hook
 	mutation *KeysMutation
 }
@@ -202,6 +203,13 @@ func (kuo *KeysUpdateOne) Mutation() *KeysMutation {
 	return kuo.mutation
 }
 
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (kuo *KeysUpdateOne) Select(field string, fields ...string) *KeysUpdateOne {
+	kuo.fields = append([]string{field}, fields...)
+	return kuo
+}
+
 // Save executes the query and returns the updated Keys entity.
 func (kuo *KeysUpdateOne) Save(ctx context.Context) (*Keys, error) {
 	var (
@@ -269,6 +277,18 @@ func (kuo *KeysUpdateOne) sqlSave(ctx context.Context) (_node *Keys, err error)
 		return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Keys.ID for update")}
 	}
 	_spec.Node.ID.Value = id
+	if fields := kuo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, keys.FieldID)
+		for _, f := range fields {
+			if !keys.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+			}
+			if f != keys.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
 	if ps := kuo.mutation.predicates; len(ps) > 0 {
 		_spec.Predicate = func(selector *sql.Selector) {
 			for i := range ps {
@@ -190,6 +190,7 @@ var (
 		{Name: "connector_id", Type: field.TypeString, Size: 2147483647, SchemaType: map[string]string{"sqlite3": "text"}},
 		{Name: "connector_data", Type: field.TypeBytes, Nullable: true},
 		{Name: "token", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"sqlite3": "text"}},
+		{Name: "obsolete_token", Type: field.TypeString, Size: 2147483647, Default: "", SchemaType: map[string]string{"sqlite3": "text"}},
 		{Name: "created_at", Type: field.TypeTime},
 		{Name: "last_used", Type: field.TypeTime},
 	}
@@ -6151,6 +6151,7 @@ type RefreshTokenMutation struct {
 	connector_id   *string
 	connector_data *[]byte
 	token          *string
+	obsolete_token *string
 	created_at     *time.Time
 	last_used      *time.Time
 	clearedFields  map[string]struct{}
@@ -6715,6 +6716,42 @@ func (m *RefreshTokenMutation) ResetToken() {
 	m.token = nil
 }
 
+// SetObsoleteToken sets the "obsolete_token" field.
+func (m *RefreshTokenMutation) SetObsoleteToken(s string) {
+	m.obsolete_token = &s
+}
+
+// ObsoleteToken returns the value of the "obsolete_token" field in the mutation.
+func (m *RefreshTokenMutation) ObsoleteToken() (r string, exists bool) {
+	v := m.obsolete_token
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldObsoleteToken returns the old "obsolete_token" field's value of the RefreshToken entity.
+// If the RefreshToken object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *RefreshTokenMutation) OldObsoleteToken(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, fmt.Errorf("OldObsoleteToken is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, fmt.Errorf("OldObsoleteToken requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldObsoleteToken: %w", err)
+	}
+	return oldValue.ObsoleteToken, nil
+}
+
+// ResetObsoleteToken resets all changes to the "obsolete_token" field.
+func (m *RefreshTokenMutation) ResetObsoleteToken() {
+	m.obsolete_token = nil
+}
+
 // SetCreatedAt sets the "created_at" field.
 func (m *RefreshTokenMutation) SetCreatedAt(t time.Time) {
 	m.created_at = &t
@@ -6801,7 +6838,7 @@ func (m *RefreshTokenMutation) Type() string {
 // order to get all numeric fields that were incremented/decremented, call
 // AddedFields().
 func (m *RefreshTokenMutation) Fields() []string {
-	fields := make([]string, 0, 14)
+	fields := make([]string, 0, 15)
 	if m.client_id != nil {
 		fields = append(fields, refreshtoken.FieldClientID)
 	}
@@ -6838,6 +6875,9 @@ func (m *RefreshTokenMutation) Fields() []string {
 	if m.token != nil {
 		fields = append(fields, refreshtoken.FieldToken)
 	}
+	if m.obsolete_token != nil {
+		fields = append(fields, refreshtoken.FieldObsoleteToken)
+	}
 	if m.created_at != nil {
 		fields = append(fields, refreshtoken.FieldCreatedAt)
 	}
@@ -6876,6 +6916,8 @@ func (m *RefreshTokenMutation) Field(name string) (ent.Value, bool) {
 		return m.ConnectorData()
 	case refreshtoken.FieldToken:
 		return m.Token()
+	case refreshtoken.FieldObsoleteToken:
+		return m.ObsoleteToken()
 	case refreshtoken.FieldCreatedAt:
 		return m.CreatedAt()
 	case refreshtoken.FieldLastUsed:
@@ -6913,6 +6955,8 @@ func (m *RefreshTokenMutation) OldField(ctx context.Context, name string) (ent.V
 		return m.OldConnectorData(ctx)
 	case refreshtoken.FieldToken:
 		return m.OldToken(ctx)
+	case refreshtoken.FieldObsoleteToken:
+		return m.OldObsoleteToken(ctx)
 	case refreshtoken.FieldCreatedAt:
 		return m.OldCreatedAt(ctx)
 	case refreshtoken.FieldLastUsed:
@@ -7010,6 +7054,13 @@ func (m *RefreshTokenMutation) SetField(name string, value ent.Value) error {
 		}
 		m.SetToken(v)
 		return nil
+	case refreshtoken.FieldObsoleteToken:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetObsoleteToken(v)
+		return nil
 	case refreshtoken.FieldCreatedAt:
 		v, ok := value.(time.Time)
 		if !ok {
@@ -7130,6 +7181,9 @@ func (m *RefreshTokenMutation) ResetField(name string) error {
 	case refreshtoken.FieldToken:
 		m.ResetToken()
 		return nil
+	case refreshtoken.FieldObsoleteToken:
+		m.ResetObsoleteToken()
+		return nil
 	case refreshtoken.FieldCreatedAt:
 		m.ResetCreatedAt()
 		return nil
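A hedged sketch of how the new mutation field is exercised from application code (standalone illustration; it assumes the SetToken and SetObsoleteToken setters that ent generates on the update builder alongside the mutation methods shown above, and the function name is made up):

    package storagesketch

    import (
    	"context"

    	"github.com/dexidp/dex/storage/ent/db"
    )

    // rotateRefreshToken is illustrative only: it stores the outgoing token value
    // as obsolete_token while installing the newly issued one.
    func rotateRefreshToken(ctx context.Context, client *db.Client, id, newToken, oldToken string) error {
    	_, err := client.RefreshToken.UpdateOneID(id).
    		SetToken(newToken).
    		SetObsoleteToken(oldToken).
    		Save(ctx)
    	return err
    }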
@@ -36,11 +36,11 @@ func (*OAuth2Client) scanValues(columns []string) ([]interface{}, error) {
 	for i := range columns {
 		switch columns[i] {
 		case oauth2client.FieldRedirectUris, oauth2client.FieldTrustedPeers:
-			values[i] = &[]byte{}
+			values[i] = new([]byte)
 		case oauth2client.FieldPublic:
-			values[i] = &sql.NullBool{}
+			values[i] = new(sql.NullBool)
 		case oauth2client.FieldID, oauth2client.FieldSecret, oauth2client.FieldName, oauth2client.FieldLogoURL:
-			values[i] = &sql.NullString{}
+			values[i] = new(sql.NullString)
 		default:
 			return nil, fmt.Errorf("unexpected column %q for type OAuth2Client", columns[i])
 		}
@@ -20,6 +20,7 @@ type OAuth2ClientQuery struct {
 	config
 	limit      *int
 	offset     *int
+	unique     *bool
 	order      []OrderFunc
 	fields     []string
 	predicates []predicate.OAuth2Client
@@ -46,6 +47,13 @@ func (oq *OAuth2ClientQuery) Offset(offset int) *OAuth2ClientQuery {
 	return oq
 }
 
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (oq *OAuth2ClientQuery) Unique(unique bool) *OAuth2ClientQuery {
+	oq.unique = &unique
+	return oq
+}
+
 // Order adds an order step to the query.
 func (oq *OAuth2ClientQuery) Order(o ...OrderFunc) *OAuth2ClientQuery {
 	oq.order = append(oq.order, o...)
@@ -352,6 +360,9 @@ func (oq *OAuth2ClientQuery) querySpec() *sqlgraph.QuerySpec {
 		From:   oq.sql,
 		Unique: true,
 	}
+	if unique := oq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
 	if fields := oq.fields; len(fields) > 0 {
 		_spec.Node.Columns = make([]string, 0, len(fields))
 		_spec.Node.Columns = append(_spec.Node.Columns, oauth2client.FieldID)
@@ -377,7 +388,7 @@ func (oq *OAuth2ClientQuery) querySpec() *sqlgraph.QuerySpec {
 	if ps := oq.order; len(ps) > 0 {
 		_spec.Order = func(selector *sql.Selector) {
 			for i := range ps {
-				ps[i](selector, oauth2client.ValidColumn)
+				ps[i](selector)
 			}
 		}
 	}
@@ -396,7 +407,7 @@ func (oq *OAuth2ClientQuery) sqlQuery(ctx context.Context) *sql.Selector {
 		p(selector)
 	}
 	for _, p := range oq.order {
-		p(selector, oauth2client.ValidColumn)
+		p(selector)
 	}
 	if offset := oq.offset; offset != nil {
 		// limit is mandatory for offset clause. We start
@@ -662,7 +673,7 @@ func (ogb *OAuth2ClientGroupBy) sqlQuery() *sql.Selector {
 	columns := make([]string, 0, len(ogb.fields)+len(ogb.fns))
 	columns = append(columns, ogb.fields...)
 	for _, fn := range ogb.fns {
-		columns = append(columns, fn(selector, oauth2client.ValidColumn))
+		columns = append(columns, fn(selector))
 	}
 	return selector.Select(columns...).GroupBy(ogb.fields...)
 }
@@ -242,6 +242,7 @@ func (ou *OAuth2ClientUpdate) sqlSave(ctx context.Context) (n int, err error) {
 // OAuth2ClientUpdateOne is the builder for updating a single OAuth2Client entity.
 type OAuth2ClientUpdateOne struct {
 	config
+	fields   []string
 	hooks    []Hook
 	mutation *OAuth2ClientMutation
 }
@@ -299,6 +300,13 @@ func (ouo *OAuth2ClientUpdateOne) Mutation() *OAuth2ClientMutation {
 	return ouo.mutation
 }
 
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (ouo *OAuth2ClientUpdateOne) Select(field string, fields ...string) *OAuth2ClientUpdateOne {
+	ouo.fields = append([]string{field}, fields...)
+	return ouo
+}
+
 // Save executes the query and returns the updated OAuth2Client entity.
 func (ouo *OAuth2ClientUpdateOne) Save(ctx context.Context) (*OAuth2Client, error) {
 	var (
@@ -392,6 +400,18 @@ func (ouo *OAuth2ClientUpdateOne) sqlSave(ctx context.Context) (_node *OAuth2Cli
 		return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing OAuth2Client.ID for update")}
 	}
 	_spec.Node.ID.Value = id
+	if fields := ouo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, oauth2client.FieldID)
+		for _, f := range fields {
+			if !oauth2client.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+			}
+			if f != oauth2client.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
 	if ps := ouo.mutation.predicates; len(ps) > 0 {
 		_spec.Predicate = func(selector *sql.Selector) {
 			for i := range ps {
@@ -31,9 +31,9 @@ func (*OfflineSession) scanValues(columns []string) ([]interface{}, error) {
 	for i := range columns {
 		switch columns[i] {
 		case offlinesession.FieldRefresh, offlinesession.FieldConnectorData:
-			values[i] = &[]byte{}
+			values[i] = new([]byte)
 		case offlinesession.FieldID, offlinesession.FieldUserID, offlinesession.FieldConnID:
-			values[i] = &sql.NullString{}
+			values[i] = new(sql.NullString)
 		default:
 			return nil, fmt.Errorf("unexpected column %q for type OfflineSession", columns[i])
 		}
@@ -20,6 +20,7 @@ type OfflineSessionQuery struct {
 	config
 	limit      *int
 	offset     *int
+	unique     *bool
 	order      []OrderFunc
 	fields     []string
 	predicates []predicate.OfflineSession
@@ -46,6 +47,13 @@ func (osq *OfflineSessionQuery) Offset(offset int) *OfflineSessionQuery {
 	return osq
 }
 
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (osq *OfflineSessionQuery) Unique(unique bool) *OfflineSessionQuery {
+	osq.unique = &unique
+	return osq
+}
+
 // Order adds an order step to the query.
 func (osq *OfflineSessionQuery) Order(o ...OrderFunc) *OfflineSessionQuery {
 	osq.order = append(osq.order, o...)
@@ -352,6 +360,9 @@ func (osq *OfflineSessionQuery) querySpec() *sqlgraph.QuerySpec {
 		From:   osq.sql,
 		Unique: true,
 	}
+	if unique := osq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
 	if fields := osq.fields; len(fields) > 0 {
 		_spec.Node.Columns = make([]string, 0, len(fields))
 		_spec.Node.Columns = append(_spec.Node.Columns, offlinesession.FieldID)
@@ -377,7 +388,7 @@ func (osq *OfflineSessionQuery) querySpec() *sqlgraph.QuerySpec {
 	if ps := osq.order; len(ps) > 0 {
 		_spec.Order = func(selector *sql.Selector) {
 			for i := range ps {
-				ps[i](selector, offlinesession.ValidColumn)
+				ps[i](selector)
 			}
 		}
 	}
@@ -396,7 +407,7 @@ func (osq *OfflineSessionQuery) sqlQuery(ctx context.Context) *sql.Selector {
 		p(selector)
 	}
 	for _, p := range osq.order {
-		p(selector, offlinesession.ValidColumn)
+		p(selector)
 	}
 	if offset := osq.offset; offset != nil {
 		// limit is mandatory for offset clause. We start
@@ -662,7 +673,7 @@ func (osgb *OfflineSessionGroupBy) sqlQuery() *sql.Selector {
 	columns := make([]string, 0, len(osgb.fields)+len(osgb.fns))
 	columns = append(columns, osgb.fields...)
 	for _, fn := range osgb.fns {
-		columns = append(columns, fn(selector, offlinesession.ValidColumn))
+		columns = append(columns, fn(selector))
 	}
 	return selector.Select(columns...).GroupBy(osgb.fields...)
 }
@@ -199,6 +199,7 @@ func (osu *OfflineSessionUpdate) sqlSave(ctx context.Context) (n int, err error)
 // OfflineSessionUpdateOne is the builder for updating a single OfflineSession entity.
 type OfflineSessionUpdateOne struct {
 	config
+	fields   []string
 	hooks    []Hook
 	mutation *OfflineSessionMutation
 }
@@ -238,6 +239,13 @@ func (osuo *OfflineSessionUpdateOne) Mutation() *OfflineSessionMutation {
 	return osuo.mutation
 }
 
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (osuo *OfflineSessionUpdateOne) Select(field string, fields ...string) *OfflineSessionUpdateOne {
+	osuo.fields = append([]string{field}, fields...)
+	return osuo
+}
+
 // Save executes the query and returns the updated OfflineSession entity.
 func (osuo *OfflineSessionUpdateOne) Save(ctx context.Context) (*OfflineSession, error) {
 	var (
@@ -326,6 +334,18 @@ func (osuo *OfflineSessionUpdateOne) sqlSave(ctx context.Context) (_node *Offlin
 		return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing OfflineSession.ID for update")}
 	}
 	_spec.Node.ID.Value = id
+	if fields := osuo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, offlinesession.FieldID)
+		for _, f := range fields {
+			if !offlinesession.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+			}
+			if f != offlinesession.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
 	if ps := osuo.mutation.predicates; len(ps) > 0 {
 		_spec.Predicate = func(selector *sql.Selector) {
 			for i := range ps {
@@ -31,11 +31,11 @@ func (*Password) scanValues(columns []string) ([]interface{}, error) {
 	for i := range columns {
 		switch columns[i] {
 		case password.FieldHash:
-			values[i] = &[]byte{}
+			values[i] = new([]byte)
 		case password.FieldID:
-			values[i] = &sql.NullInt64{}
+			values[i] = new(sql.NullInt64)
 		case password.FieldEmail, password.FieldUsername, password.FieldUserID:
-			values[i] = &sql.NullString{}
+			values[i] = new(sql.NullString)
 		default:
 			return nil, fmt.Errorf("unexpected column %q for type Password", columns[i])
 		}
@@ -20,6 +20,7 @@ type PasswordQuery struct {
 	config
 	limit      *int
 	offset     *int
+	unique     *bool
 	order      []OrderFunc
 	fields     []string
 	predicates []predicate.Password
@@ -46,6 +47,13 @@ func (pq *PasswordQuery) Offset(offset int) *PasswordQuery {
 	return pq
 }
 
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (pq *PasswordQuery) Unique(unique bool) *PasswordQuery {
+	pq.unique = &unique
+	return pq
+}
+
 // Order adds an order step to the query.
 func (pq *PasswordQuery) Order(o ...OrderFunc) *PasswordQuery {
 	pq.order = append(pq.order, o...)
@@ -352,6 +360,9 @@ func (pq *PasswordQuery) querySpec() *sqlgraph.QuerySpec {
 		From:   pq.sql,
 		Unique: true,
 	}
+	if unique := pq.unique; unique != nil {
+		_spec.Unique = *unique
+	}
 	if fields := pq.fields; len(fields) > 0 {
 		_spec.Node.Columns = make([]string, 0, len(fields))
 		_spec.Node.Columns = append(_spec.Node.Columns, password.FieldID)
@@ -377,7 +388,7 @@ func (pq *PasswordQuery) querySpec() *sqlgraph.QuerySpec {
 	if ps := pq.order; len(ps) > 0 {
 		_spec.Order = func(selector *sql.Selector) {
 			for i := range ps {
-				ps[i](selector, password.ValidColumn)
+				ps[i](selector)
 			}
 		}
 	}
@@ -396,7 +407,7 @@ func (pq *PasswordQuery) sqlQuery(ctx context.Context) *sql.Selector {
 		p(selector)
 	}
 	for _, p := range pq.order {
-		p(selector, password.ValidColumn)
+		p(selector)
 	}
 	if offset := pq.offset; offset != nil {
 		// limit is mandatory for offset clause. We start
@@ -662,7 +673,7 @@ func (pgb *PasswordGroupBy) sqlQuery() *sql.Selector {
 	columns := make([]string, 0, len(pgb.fields)+len(pgb.fns))
 	columns = append(columns, pgb.fields...)
 	for _, fn := range pgb.fns {
-		columns = append(columns, fn(selector, password.ValidColumn))
+		columns = append(columns, fn(selector))
 	}
 	return selector.Select(columns...).GroupBy(pgb.fields...)
 }
@@ -192,6 +192,7 @@ func (pu *PasswordUpdate) sqlSave(ctx context.Context) (n int, err error) {
 // PasswordUpdateOne is the builder for updating a single Password entity.
 type PasswordUpdateOne struct {
 	config
+	fields   []string
 	hooks    []Hook
 	mutation *PasswordMutation
 }
@@ -225,6 +226,13 @@ func (puo *PasswordUpdateOne) Mutation() *PasswordMutation {
 	return puo.mutation
 }
 
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (puo *PasswordUpdateOne) Select(field string, fields ...string) *PasswordUpdateOne {
+	puo.fields = append([]string{field}, fields...)
+	return puo
+}
+
 // Save executes the query and returns the updated Password entity.
 func (puo *PasswordUpdateOne) Save(ctx context.Context) (*Password, error) {
 	var (
@@ -318,6 +326,18 @@ func (puo *PasswordUpdateOne) sqlSave(ctx context.Context) (_node *Password, err
 		return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing Password.ID for update")}
 	}
 	_spec.Node.ID.Value = id
+	if fields := puo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, password.FieldID)
+		for _, f := range fields {
+			if !password.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+			}
+			if f != password.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
 	if ps := puo.mutation.predicates; len(ps) > 0 {
 		_spec.Predicate = func(selector *sql.Selector) {
 			for i := range ps {
@@ -41,6 +41,8 @@ type RefreshToken struct {
 	ConnectorData *[]byte `json:"connector_data,omitempty"`
 	// Token holds the value of the "token" field.
 	Token string `json:"token,omitempty"`
+	// ObsoleteToken holds the value of the "obsolete_token" field.
+	ObsoleteToken string `json:"obsolete_token,omitempty"`
 	// CreatedAt holds the value of the "created_at" field.
 	CreatedAt time.Time `json:"created_at,omitempty"`
 	// LastUsed holds the value of the "last_used" field.
@@ -53,13 +55,13 @@ func (*RefreshToken) scanValues(columns []string) ([]interface{}, error) {
 	for i := range columns {
 		switch columns[i] {
 		case refreshtoken.FieldScopes, refreshtoken.FieldClaimsGroups, refreshtoken.FieldConnectorData:
-			values[i] = &[]byte{}
+			values[i] = new([]byte)
 		case refreshtoken.FieldClaimsEmailVerified:
-			values[i] = &sql.NullBool{}
+			values[i] = new(sql.NullBool)
-		case refreshtoken.FieldID, refreshtoken.FieldClientID, refreshtoken.FieldNonce, refreshtoken.FieldClaimsUserID, refreshtoken.FieldClaimsUsername, refreshtoken.FieldClaimsEmail, refreshtoken.FieldClaimsPreferredUsername, refreshtoken.FieldConnectorID, refreshtoken.FieldToken:
+		case refreshtoken.FieldID, refreshtoken.FieldClientID, refreshtoken.FieldNonce, refreshtoken.FieldClaimsUserID, refreshtoken.FieldClaimsUsername, refreshtoken.FieldClaimsEmail, refreshtoken.FieldClaimsPreferredUsername, refreshtoken.FieldConnectorID, refreshtoken.FieldToken, refreshtoken.FieldObsoleteToken:
-			values[i] = &sql.NullString{}
+			values[i] = new(sql.NullString)
 		case refreshtoken.FieldCreatedAt, refreshtoken.FieldLastUsed:
-			values[i] = &sql.NullTime{}
+			values[i] = new(sql.NullTime)
 		default:
 			return nil, fmt.Errorf("unexpected column %q for type RefreshToken", columns[i])
 		}
@@ -159,6 +161,12 @@ func (rt *RefreshToken) assignValues(columns []string, values []interface{}) err
 			} else if value.Valid {
 				rt.Token = value.String
 			}
+		case refreshtoken.FieldObsoleteToken:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field obsolete_token", values[i])
+			} else if value.Valid {
+				rt.ObsoleteToken = value.String
+			}
 		case refreshtoken.FieldCreatedAt:
 			if value, ok := values[i].(*sql.NullTime); !ok {
 				return fmt.Errorf("unexpected type %T for field created_at", values[i])
@@ -225,6 +233,8 @@ func (rt *RefreshToken) String() string {
 	}
 	builder.WriteString(", token=")
 	builder.WriteString(rt.Token)
+	builder.WriteString(", obsolete_token=")
+	builder.WriteString(rt.ObsoleteToken)
 	builder.WriteString(", created_at=")
 	builder.WriteString(rt.CreatedAt.Format(time.ANSIC))
 	builder.WriteString(", last_used=")
@@ -35,6 +35,8 @@ const (
 	FieldConnectorData = "connector_data"
 	// FieldToken holds the string denoting the token field in the database.
 	FieldToken = "token"
+	// FieldObsoleteToken holds the string denoting the obsolete_token field in the database.
+	FieldObsoleteToken = "obsolete_token"
 	// FieldCreatedAt holds the string denoting the created_at field in the database.
 	FieldCreatedAt = "created_at"
 	// FieldLastUsed holds the string denoting the last_used field in the database.
@@ -58,6 +60,7 @@ var Columns = []string{
 	FieldConnectorID,
 	FieldConnectorData,
 	FieldToken,
+	FieldObsoleteToken,
 	FieldCreatedAt,
 	FieldLastUsed,
 }
@@ -89,6 +92,8 @@ var (
 	ConnectorIDValidator func(string) error
 	// DefaultToken holds the default value on creation for the "token" field.
 	DefaultToken string
+	// DefaultObsoleteToken holds the default value on creation for the "obsolete_token" field.
+	DefaultObsoleteToken string
 	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
 	DefaultCreatedAt func() time.Time
 	// DefaultLastUsed holds the default value on creation for the "last_used" field.
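The constants above, together with the predicate helpers added in the where.go hunks that follow, let callers look a refresh token up by its previous value, presumably so a rotation retry that still presents the old token can be matched. A minimal sketch (standalone illustration; package and function names are assumptions):

    package storagesketch

    import (
    	"context"

    	"github.com/dexidp/dex/storage/ent/db"
    	"github.com/dexidp/dex/storage/ent/db/refreshtoken"
    )

    // getByObsoleteToken is illustrative only: it uses the ObsoleteToken equality
    // predicate added below to find the row whose previous token matches.
    func getByObsoleteToken(ctx context.Context, client *db.Client, token string) (*db.RefreshToken, error) {
    	return client.RefreshToken.Query().
    		Where(refreshtoken.ObsoleteToken(token)).
    		Only(ctx)
    }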
@ -162,6 +162,13 @@ func Token(v string) predicate.RefreshToken {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ObsoleteToken applies equality check predicate on the "obsolete_token" field. It's identical to ObsoleteTokenEQ.
|
||||||
|
func ObsoleteToken(v string) predicate.RefreshToken {
|
||||||
|
return predicate.RefreshToken(func(s *sql.Selector) {
|
||||||
|
s.Where(sql.EQ(s.C(FieldObsoleteToken), v))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||||
func CreatedAt(v time.Time) predicate.RefreshToken {
|
func CreatedAt(v time.Time) predicate.RefreshToken {
|
||||||
return predicate.RefreshToken(func(s *sql.Selector) {
|
return predicate.RefreshToken(func(s *sql.Selector) {
|
||||||
@ -1196,6 +1203,117 @@ func TokenContainsFold(v string) predicate.RefreshToken {
    })
}

+// ObsoleteTokenEQ applies the EQ predicate on the "obsolete_token" field.
+func ObsoleteTokenEQ(v string) predicate.RefreshToken {
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       s.Where(sql.EQ(s.C(FieldObsoleteToken), v))
+   })
+}
+
+// ObsoleteTokenNEQ applies the NEQ predicate on the "obsolete_token" field.
+func ObsoleteTokenNEQ(v string) predicate.RefreshToken {
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       s.Where(sql.NEQ(s.C(FieldObsoleteToken), v))
+   })
+}
+
+// ObsoleteTokenIn applies the In predicate on the "obsolete_token" field.
+func ObsoleteTokenIn(vs ...string) predicate.RefreshToken {
+   v := make([]interface{}, len(vs))
+   for i := range v {
+       v[i] = vs[i]
+   }
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       // if no arguments were provided, append the FALSE constant,
+       // since we can't apply "IN ()". This will make this predicate falsy.
+       if len(v) == 0 {
+           s.Where(sql.False())
+           return
+       }
+       s.Where(sql.In(s.C(FieldObsoleteToken), v...))
+   })
+}
+
+// ObsoleteTokenNotIn applies the NotIn predicate on the "obsolete_token" field.
+func ObsoleteTokenNotIn(vs ...string) predicate.RefreshToken {
+   v := make([]interface{}, len(vs))
+   for i := range v {
+       v[i] = vs[i]
+   }
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       // if no arguments were provided, append the FALSE constant,
+       // since we can't apply "IN ()". This will make this predicate falsy.
+       if len(v) == 0 {
+           s.Where(sql.False())
+           return
+       }
+       s.Where(sql.NotIn(s.C(FieldObsoleteToken), v...))
+   })
+}
+
+// ObsoleteTokenGT applies the GT predicate on the "obsolete_token" field.
+func ObsoleteTokenGT(v string) predicate.RefreshToken {
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       s.Where(sql.GT(s.C(FieldObsoleteToken), v))
+   })
+}
+
+// ObsoleteTokenGTE applies the GTE predicate on the "obsolete_token" field.
+func ObsoleteTokenGTE(v string) predicate.RefreshToken {
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       s.Where(sql.GTE(s.C(FieldObsoleteToken), v))
+   })
+}
+
+// ObsoleteTokenLT applies the LT predicate on the "obsolete_token" field.
+func ObsoleteTokenLT(v string) predicate.RefreshToken {
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       s.Where(sql.LT(s.C(FieldObsoleteToken), v))
+   })
+}
+
+// ObsoleteTokenLTE applies the LTE predicate on the "obsolete_token" field.
+func ObsoleteTokenLTE(v string) predicate.RefreshToken {
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       s.Where(sql.LTE(s.C(FieldObsoleteToken), v))
+   })
+}
+
+// ObsoleteTokenContains applies the Contains predicate on the "obsolete_token" field.
+func ObsoleteTokenContains(v string) predicate.RefreshToken {
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       s.Where(sql.Contains(s.C(FieldObsoleteToken), v))
+   })
+}
+
+// ObsoleteTokenHasPrefix applies the HasPrefix predicate on the "obsolete_token" field.
+func ObsoleteTokenHasPrefix(v string) predicate.RefreshToken {
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       s.Where(sql.HasPrefix(s.C(FieldObsoleteToken), v))
+   })
+}
+
+// ObsoleteTokenHasSuffix applies the HasSuffix predicate on the "obsolete_token" field.
+func ObsoleteTokenHasSuffix(v string) predicate.RefreshToken {
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       s.Where(sql.HasSuffix(s.C(FieldObsoleteToken), v))
+   })
+}
+
+// ObsoleteTokenEqualFold applies the EqualFold predicate on the "obsolete_token" field.
+func ObsoleteTokenEqualFold(v string) predicate.RefreshToken {
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       s.Where(sql.EqualFold(s.C(FieldObsoleteToken), v))
+   })
+}
+
+// ObsoleteTokenContainsFold applies the ContainsFold predicate on the "obsolete_token" field.
+func ObsoleteTokenContainsFold(v string) predicate.RefreshToken {
+   return predicate.RefreshToken(func(s *sql.Selector) {
+       s.Where(sql.ContainsFold(s.C(FieldObsoleteToken), v))
+   })
+}
+
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.RefreshToken {
    return predicate.RefreshToken(func(s *sql.Selector) {
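Note (not part of this commit): a sketch of how these generated predicates can be combined when looking a token up during refresh, reusing the imports and the assumed `client`/`ctx` from the first sketch; the helper name and the either-column lookup are illustrative assumptions, not dex's actual storage code:

    // findByEitherToken matches a refresh token by its current value or by the
    // value it held before the last rotation (its obsolete_token).
    func findByEitherToken(ctx context.Context, client *db.Client, tok string) (*db.RefreshToken, error) {
        return client.RefreshToken.
            Query().
            Where(refreshtoken.Or(
                refreshtoken.Token(tok),
                refreshtoken.ObsoleteToken(tok),
            )).
            Only(ctx)
    }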
@ -108,6 +108,20 @@ func (rtc *RefreshTokenCreate) SetNillableToken(s *string) *RefreshTokenCreate {
    return rtc
}

+// SetObsoleteToken sets the "obsolete_token" field.
+func (rtc *RefreshTokenCreate) SetObsoleteToken(s string) *RefreshTokenCreate {
+   rtc.mutation.SetObsoleteToken(s)
+   return rtc
+}
+
+// SetNillableObsoleteToken sets the "obsolete_token" field if the given value is not nil.
+func (rtc *RefreshTokenCreate) SetNillableObsoleteToken(s *string) *RefreshTokenCreate {
+   if s != nil {
+       rtc.SetObsoleteToken(*s)
+   }
+   return rtc
+}
+
// SetCreatedAt sets the "created_at" field.
func (rtc *RefreshTokenCreate) SetCreatedAt(t time.Time) *RefreshTokenCreate {
    rtc.mutation.SetCreatedAt(t)

@ -202,6 +216,10 @@ func (rtc *RefreshTokenCreate) defaults() {
        v := refreshtoken.DefaultToken
        rtc.mutation.SetToken(v)
    }
+   if _, ok := rtc.mutation.ObsoleteToken(); !ok {
+       v := refreshtoken.DefaultObsoleteToken
+       rtc.mutation.SetObsoleteToken(v)
+   }
    if _, ok := rtc.mutation.CreatedAt(); !ok {
        v := refreshtoken.DefaultCreatedAt()
        rtc.mutation.SetCreatedAt(v)

@ -271,6 +289,9 @@ func (rtc *RefreshTokenCreate) check() error {
    if _, ok := rtc.mutation.Token(); !ok {
        return &ValidationError{Name: "token", err: errors.New("db: missing required field \"token\"")}
    }
+   if _, ok := rtc.mutation.ObsoleteToken(); !ok {
+       return &ValidationError{Name: "obsolete_token", err: errors.New("db: missing required field \"obsolete_token\"")}
+   }
    if _, ok := rtc.mutation.CreatedAt(); !ok {
        return &ValidationError{Name: "created_at", err: errors.New("db: missing required field \"created_at\"")}
    }

@ -407,6 +428,14 @@ func (rtc *RefreshTokenCreate) createSpec() (*RefreshToken, *sqlgraph.CreateSpec
        })
        _node.Token = value
    }
+   if value, ok := rtc.mutation.ObsoleteToken(); ok {
+       _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
+           Type: field.TypeString,
+           Value: value,
+           Column: refreshtoken.FieldObsoleteToken,
+       })
+       _node.ObsoleteToken = value
+   }
    if value, ok := rtc.mutation.CreatedAt(); ok {
        _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
            Type: field.TypeTime,
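Note (not part of this commit): on the create builder the new field behaves like token — if SetObsoleteToken is never called, defaults() fills in DefaultObsoleteToken ("") before check() runs, so the field is always present. A sketch under the same assumptions as above; the SetID setter for the string ID field is assumed from the generated builder, and the other required RefreshToken fields are deliberately omitted:

    // createToken stores a new refresh token; obsolete_token is left at its
    // default ("") because the token has not been rotated yet. Other required
    // RefreshToken fields are omitted from this sketch for brevity.
    func createToken(ctx context.Context, client *db.Client, id, token string) (*db.RefreshToken, error) {
        return client.RefreshToken.
            Create().
            SetID(id).
            SetToken(token).
            Save(ctx)
    }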
@ -20,6 +20,7 @@ type RefreshTokenQuery struct {
    config
    limit *int
    offset *int
+   unique *bool
    order []OrderFunc
    fields []string
    predicates []predicate.RefreshToken

@ -46,6 +47,13 @@ func (rtq *RefreshTokenQuery) Offset(offset int) *RefreshTokenQuery {
    return rtq
}

+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (rtq *RefreshTokenQuery) Unique(unique bool) *RefreshTokenQuery {
+   rtq.unique = &unique
+   return rtq
+}
+
// Order adds an order step to the query.
func (rtq *RefreshTokenQuery) Order(o ...OrderFunc) *RefreshTokenQuery {
    rtq.order = append(rtq.order, o...)
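Note (not part of this commit): Unique is new query-builder surface from the ent v0.8.0 codegen; queries stay DISTINCT by default and Unique(false) opts out, which the querySpec change in the next hunk wires through. A sketch, same assumptions as above:

    // allTokensNoDistinct loads refresh tokens without the DISTINCT clause the
    // generated query would otherwise add.
    func allTokensNoDistinct(ctx context.Context, client *db.Client) ([]*db.RefreshToken, error) {
        return client.RefreshToken.
            Query().
            Unique(false).
            All(ctx)
    }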
@ -352,6 +360,9 @@ func (rtq *RefreshTokenQuery) querySpec() *sqlgraph.QuerySpec {
        From: rtq.sql,
        Unique: true,
    }
+   if unique := rtq.unique; unique != nil {
+       _spec.Unique = *unique
+   }
    if fields := rtq.fields; len(fields) > 0 {
        _spec.Node.Columns = make([]string, 0, len(fields))
        _spec.Node.Columns = append(_spec.Node.Columns, refreshtoken.FieldID)

@ -377,7 +388,7 @@ func (rtq *RefreshTokenQuery) querySpec() *sqlgraph.QuerySpec {
    if ps := rtq.order; len(ps) > 0 {
        _spec.Order = func(selector *sql.Selector) {
            for i := range ps {
-               ps[i](selector, refreshtoken.ValidColumn)
+               ps[i](selector)
            }
        }
    }

@ -396,7 +407,7 @@ func (rtq *RefreshTokenQuery) sqlQuery(ctx context.Context) *sql.Selector {
        p(selector)
    }
    for _, p := range rtq.order {
-       p(selector, refreshtoken.ValidColumn)
+       p(selector)
    }
    if offset := rtq.offset; offset != nil {
        // limit is mandatory for offset clause. We start

@ -662,7 +673,7 @@ func (rtgb *RefreshTokenGroupBy) sqlQuery() *sql.Selector {
    columns := make([]string, 0, len(rtgb.fields)+len(rtgb.fns))
    columns = append(columns, rtgb.fields...)
    for _, fn := range rtgb.fns {
-       columns = append(columns, fn(selector, refreshtoken.ValidColumn))
+       columns = append(columns, fn(selector))
    }
    return selector.Select(columns...).GroupBy(rtgb.fields...)
}
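Note (not part of this commit): the three one-line changes above come from the ent v0.8.0 codegen, where order and aggregate functions no longer receive the column-validator argument; callers keep using the generated helpers unchanged. A sketch, same assumptions as above, with db.Desc assumed to be the generated ordering helper:

    // recentTokens returns the ten most recently used refresh tokens.
    func recentTokens(ctx context.Context, client *db.Client) ([]*db.RefreshToken, error) {
        return client.RefreshToken.
            Query().
            Order(db.Desc(refreshtoken.FieldLastUsed)).
            Limit(10).
            All(ctx)
    }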
@ -133,6 +133,20 @@ func (rtu *RefreshTokenUpdate) SetNillableToken(s *string) *RefreshTokenUpdate {
    return rtu
}

+// SetObsoleteToken sets the "obsolete_token" field.
+func (rtu *RefreshTokenUpdate) SetObsoleteToken(s string) *RefreshTokenUpdate {
+   rtu.mutation.SetObsoleteToken(s)
+   return rtu
+}
+
+// SetNillableObsoleteToken sets the "obsolete_token" field if the given value is not nil.
+func (rtu *RefreshTokenUpdate) SetNillableObsoleteToken(s *string) *RefreshTokenUpdate {
+   if s != nil {
+       rtu.SetObsoleteToken(*s)
+   }
+   return rtu
+}
+
// SetCreatedAt sets the "created_at" field.
func (rtu *RefreshTokenUpdate) SetCreatedAt(t time.Time) *RefreshTokenUpdate {
    rtu.mutation.SetCreatedAt(t)
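Note (not part of this commit): a sketch of a rotation-style update with the new setters — the old token value is kept in obsolete_token so a client retrying with it can still be recognised; the helper itself is illustrative, not dex's actual storage code:

    // rotateToken swaps in a new token value and remembers the previous one.
    func rotateToken(ctx context.Context, client *db.Client, id, oldToken, newToken string) error {
        _, err := client.RefreshToken.
            Update().
            Where(refreshtoken.ID(id)).
            SetToken(newToken).
            SetObsoleteToken(oldToken).
            Save(ctx)
        return err
    }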
@ -378,6 +392,13 @@ func (rtu *RefreshTokenUpdate) sqlSave(ctx context.Context) (n int, err error) {
            Column: refreshtoken.FieldToken,
        })
    }
+   if value, ok := rtu.mutation.ObsoleteToken(); ok {
+       _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+           Type: field.TypeString,
+           Value: value,
+           Column: refreshtoken.FieldObsoleteToken,
+       })
+   }
    if value, ok := rtu.mutation.CreatedAt(); ok {
        _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
            Type: field.TypeTime,

@ -406,6 +427,7 @@ func (rtu *RefreshTokenUpdate) sqlSave(ctx context.Context) (n int, err error) {
// RefreshTokenUpdateOne is the builder for updating a single RefreshToken entity.
type RefreshTokenUpdateOne struct {
    config
+   fields []string
    hooks []Hook
    mutation *RefreshTokenMutation
}

@ -516,6 +538,20 @@ func (rtuo *RefreshTokenUpdateOne) SetNillableToken(s *string) *RefreshTokenUpda
    return rtuo
}

+// SetObsoleteToken sets the "obsolete_token" field.
+func (rtuo *RefreshTokenUpdateOne) SetObsoleteToken(s string) *RefreshTokenUpdateOne {
+   rtuo.mutation.SetObsoleteToken(s)
+   return rtuo
+}
+
+// SetNillableObsoleteToken sets the "obsolete_token" field if the given value is not nil.
+func (rtuo *RefreshTokenUpdateOne) SetNillableObsoleteToken(s *string) *RefreshTokenUpdateOne {
+   if s != nil {
+       rtuo.SetObsoleteToken(*s)
+   }
+   return rtuo
+}
+
// SetCreatedAt sets the "created_at" field.
func (rtuo *RefreshTokenUpdateOne) SetCreatedAt(t time.Time) *RefreshTokenUpdateOne {
    rtuo.mutation.SetCreatedAt(t)

@ -549,6 +585,13 @@ func (rtuo *RefreshTokenUpdateOne) Mutation() *RefreshTokenMutation {
    return rtuo.mutation
}

+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (rtuo *RefreshTokenUpdateOne) Select(field string, fields ...string) *RefreshTokenUpdateOne {
+   rtuo.fields = append([]string{field}, fields...)
+   return rtuo
+}
+
// Save executes the query and returns the updated RefreshToken entity.
func (rtuo *RefreshTokenUpdateOne) Save(ctx context.Context) (*RefreshToken, error) {
    var (
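Note (not part of this commit): Select on the update-one builder is also new in the v0.8.0 codegen; it limits which columns are scanned back into the entity returned by Save. A sketch, same assumptions as above:

    // markObsolete records the previous token value on a single entity and only
    // reads the obsolete_token column back from the database.
    func markObsolete(ctx context.Context, client *db.Client, id, oldToken string) (*db.RefreshToken, error) {
        return client.RefreshToken.
            UpdateOneID(id).
            SetObsoleteToken(oldToken).
            Select(refreshtoken.FieldObsoleteToken).
            Save(ctx)
    }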
@ -657,6 +700,18 @@ func (rtuo *RefreshTokenUpdateOne) sqlSave(ctx context.Context) (_node *RefreshT
        return nil, &ValidationError{Name: "ID", err: fmt.Errorf("missing RefreshToken.ID for update")}
    }
    _spec.Node.ID.Value = id
+   if fields := rtuo.fields; len(fields) > 0 {
+       _spec.Node.Columns = make([]string, 0, len(fields))
+       _spec.Node.Columns = append(_spec.Node.Columns, refreshtoken.FieldID)
+       for _, f := range fields {
+           if !refreshtoken.ValidColumn(f) {
+               return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
+           }
+           if f != refreshtoken.FieldID {
+               _spec.Node.Columns = append(_spec.Node.Columns, f)
+           }
+       }
+   }
    if ps := rtuo.mutation.predicates; len(ps) > 0 {
        _spec.Predicate = func(selector *sql.Selector) {
            for i := range ps {

@ -766,6 +821,13 @@ func (rtuo *RefreshTokenUpdateOne) sqlSave(ctx context.Context) (_node *RefreshT
            Column: refreshtoken.FieldToken,
        })
    }
+   if value, ok := rtuo.mutation.ObsoleteToken(); ok {
+       _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
+           Type: field.TypeString,
+           Value: value,
+           Column: refreshtoken.FieldObsoleteToken,
+       })
+   }
    if value, ok := rtuo.mutation.CreatedAt(); ok {
        _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
            Type: field.TypeTime,
@ -214,12 +214,16 @@ func init() {
    refreshtokenDescToken := refreshtokenFields[12].Descriptor()
    // refreshtoken.DefaultToken holds the default value on creation for the token field.
    refreshtoken.DefaultToken = refreshtokenDescToken.Default.(string)
+   // refreshtokenDescObsoleteToken is the schema descriptor for obsolete_token field.
+   refreshtokenDescObsoleteToken := refreshtokenFields[13].Descriptor()
+   // refreshtoken.DefaultObsoleteToken holds the default value on creation for the obsolete_token field.
+   refreshtoken.DefaultObsoleteToken = refreshtokenDescObsoleteToken.Default.(string)
    // refreshtokenDescCreatedAt is the schema descriptor for created_at field.
-   refreshtokenDescCreatedAt := refreshtokenFields[13].Descriptor()
+   refreshtokenDescCreatedAt := refreshtokenFields[14].Descriptor()
    // refreshtoken.DefaultCreatedAt holds the default value on creation for the created_at field.
    refreshtoken.DefaultCreatedAt = refreshtokenDescCreatedAt.Default.(func() time.Time)
    // refreshtokenDescLastUsed is the schema descriptor for last_used field.
-   refreshtokenDescLastUsed := refreshtokenFields[14].Descriptor()
+   refreshtokenDescLastUsed := refreshtokenFields[15].Descriptor()
    // refreshtoken.DefaultLastUsed holds the default value on creation for the last_used field.
    refreshtoken.DefaultLastUsed = refreshtokenDescLastUsed.Default.(func() time.Time)
    // refreshtokenDescID is the schema descriptor for id field.

@ -5,6 +5,6 @@ package runtime
// The schema-stitching logic is generated in github.com/dexidp/dex/storage/ent/db/runtime.go

const (
-   Version = "v0.7.0" // Version of ent codegen.
+   Version = "v0.8.0" // Version of ent codegen.
-   Sum = "h1:E3EjO0cUL61DvUg5ZEZdxa4yTL+4SuZv0LqBExo8CQA=" // Sum of ent codegen.
+   Sum = "h1:xirrW//1oda7pp0bz+XssSOv4/C3nmgYQOxjIfljFt8=" // Sum of ent codegen.
)
@ -24,7 +24,8 @@ create table refresh_token
    token text default '' not null,
    created_at timestamp default '0001-01-01 00:00:00 UTC' not null,
    last_used timestamp default '0001-01-01 00:00:00 UTC' not null,
-   claims_preferred_username text default '' not null
+   claims_preferred_username text default '' not null,
+   obsolete_token text default ''
);
*/

@ -75,6 +76,9 @@ func (RefreshToken) Fields() []ent.Field {
        field.Text("token").
            SchemaType(textSchema).
            Default(""),
+       field.Text("obsolete_token").
+           SchemaType(textSchema).
+           Default(""),

        field.Time("created_at").
            Default(time.Now),
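Note (not part of this commit): with the schema field defaulting to "", rows that have never been rotated keep an empty obsolete_token (assuming the auto-migration backfills existing rows with the schema default), so readers can treat emptiness as a "never rotated" signal. A final sketch, same assumptions as above:

    // hasBeenRotated reports whether the given refresh token has ever been rotated.
    func hasBeenRotated(ctx context.Context, client *db.Client, id string) (bool, error) {
        rt, err := client.RefreshToken.Get(ctx, id)
        if err != nil {
            return false, err
        }
        return rt.ObsoleteToken != "", nil
    }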