Merge pull request #1072 from ericchiang/k8s-test

*: run kubernetes tests in travis
rithu leena john, 2017-10-31 10:34:26 -07:00, committed by GitHub
commit 42ef8fd802
No known key found for this signature in database (GPG Key ID: 4AEE18F83AFDEB23)
5 changed files with 122 additions and 9 deletions


@@ -8,6 +8,7 @@ go:
 services:
   - postgresql
+  - docker
 
 env:
   - DEX_POSTGRES_DATABASE=postgres DEX_POSTGRES_USER=postgres DEX_POSTGRES_HOST="localhost" DEX_LDAP_TESTS=1 DEBIAN_FRONTEND=noninteractive
@@ -21,6 +22,7 @@ install:
 script:
   - make testall
+  - ./scripts/test-k8s.sh
 
 notifications:
   email: false


@@ -2,7 +2,7 @@
 ## Kubernetes
 
-Kubernetes tests will only run if the `DEX_KUBECONFIG` environment variable is set.
+Kubernetes tests run against a Kubernetes API server and are enabled by the `DEX_KUBECONFIG` environment variable:
 
 ```
 $ export DEX_KUBECONFIG=~/.kube/config
@@ -10,7 +10,11 @@ $ go test -v -i ./storage/kubernetes
 $ go test -v ./storage/kubernetes
 ```
 
-Because third party resource creation isn't synchronized, it's expected that the tests fail the first time. Fear not, and just run them again.
+These tests can be executed locally using Docker by running the following script:
+
+```
+$ ./scripts/test-k8s.sh
+```
 
 ## Postgres
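As background for the documentation change above: the `DEX_KUBECONFIG` gate usually takes the form of an environment check at the top of the test suite. The snippet below is a minimal sketch of that pattern, assuming a hypothetical test function; it is not dex's actual test code, and the skip message is illustrative.

```
package kubernetes_test

import (
	"os"
	"testing"
)

// TestKubernetesStorage sketches how a test suite can gate itself on
// DEX_KUBECONFIG: when the variable is unset there is no API server to talk
// to, so the Kubernetes storage tests are skipped rather than failed.
func TestKubernetesStorage(t *testing.T) {
	kubeconfig := os.Getenv("DEX_KUBECONFIG")
	if kubeconfig == "" {
		t.Skip("DEX_KUBECONFIG not set, skipping Kubernetes storage tests")
	}
	// The real tests would build a storage config from this path and run the
	// conformance suite against it.
	_ = kubeconfig
}
```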

scripts/test-k8s.sh (new executable file)

@@ -0,0 +1,56 @@
#!/bin/bash -e

TEMPDIR=$( mktemp -d )

# Write a minimal kubeconfig pointing at the local, insecure API server.
cat << EOF > $TEMPDIR/kubeconfig
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    server: http://localhost:8080
users:
- name: local
  user:
contexts:
- context:
    cluster: local
    user: local
EOF

# Remove the containers and the temp directory on exit.
cleanup () {
    docker rm -f $( cat $TEMPDIR/etcd )
    docker rm -f $( cat $TEMPDIR/kube-apiserver )
    rm -rf $TEMPDIR
}

trap "{ CODE=$?; cleanup ; exit $CODE; }" EXIT

# Start etcd as the API server's backing store.
docker run \
    --cidfile=$TEMPDIR/etcd \
    -d \
    --net=host \
    gcr.io/google_containers/etcd:3.1.10 \
    etcd

# Start a Kubernetes API server listening insecurely on localhost:8080.
docker run \
    --cidfile=$TEMPDIR/kube-apiserver \
    -d \
    -v $TEMPDIR:/var/run/kube-test:ro \
    --net=host \
    gcr.io/google_containers/kube-apiserver-amd64:v1.7.4 \
    kube-apiserver \
    --etcd-servers=http://localhost:2379 \
    --service-cluster-ip-range=10.0.0.1/16 \
    --insecure-bind-address=0.0.0.0 \
    --insecure-port=8080

# Block until the API server's health check responds.
until $(curl --output /dev/null --silent --head --fail http://localhost:8080/healthz); do
    printf '.'
    sleep 1
done

echo "API server ready"

# Point the storage tests at the local cluster and run them.
export DEX_KUBECONFIG=$TEMPDIR/kubeconfig
go test -v -i ./storage/kubernetes
go test -v ./storage/kubernetes


@@ -157,7 +157,11 @@ func closeResp(r *http.Response) {
 }
 
 func (c *client) get(resource, name string, v interface{}) error {
-	url := c.urlFor(c.apiVersion, c.namespace, resource, name)
+	return c.getResource(c.apiVersion, c.namespace, resource, name, v)
+}
+
+func (c *client) getResource(apiVersion, namespace, resource, name string, v interface{}) error {
+	url := c.urlFor(apiVersion, namespace, resource, name)
 	resp, err := c.client.Get(url)
 	if err != nil {
 		return err
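The point of the new `getResource` helper is that, unlike `get`, it does not assume the client's own API group and namespace: CustomResourceDefinitions live under `apiextensions.k8s.io/v1beta1` and are cluster scoped, so they are fetched with an empty namespace. The sketch below only illustrates the resulting URL shapes; `buildPath` is a hypothetical stand-in, not dex's `urlFor`, and the resource names are examples.

```
package main

import (
	"fmt"
	"strings"
)

// buildPath is a hypothetical illustration of how a namespaced versus a
// cluster-scoped lookup differ once turned into an API path. An empty
// namespace yields a cluster-scoped path, which is what getResource relies
// on when it reads CustomResourceDefinitions.
func buildPath(apiVersion, namespace, resource, name string) string {
	parts := []string{"apis", apiVersion}
	if namespace != "" {
		parts = append(parts, "namespaces", namespace)
	}
	parts = append(parts, resource, name)
	return "/" + strings.Join(parts, "/")
}

func main() {
	// Namespaced dex resource (the case get() continues to cover).
	fmt.Println(buildPath("dex.coreos.com/v1", "default", "authcodes", "example"))
	// Cluster-scoped CRD (the case isCRDReady needs).
	fmt.Println(buildPath("apiextensions.k8s.io/v1beta1", "", "customresourcedefinitions", "authcodes.dex.coreos.com"))
}
```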


@@ -53,9 +53,9 @@ func (c *Config) Open(logger logrus.FieldLogger) (storage.Storage, error) {
 // open returns a kubernetes client, initializing the third party resources used
 // by dex.
 //
-// errOnResources controls if errors creating the resources cause this method to return
+// waitForResources controls if errors creating the resources cause this method to return
 // immediately (used during testing), or if the client will asynchronously retry.
-func (c *Config) open(logger logrus.FieldLogger, errOnResources bool) (*client, error) {
+func (c *Config) open(logger logrus.FieldLogger, waitForResources bool) (*client, error) {
 	if c.InCluster && (c.KubeConfigFile != "") {
 		return nil, errors.New("cannot specify both 'inCluster' and 'kubeConfigFile'")
 	}
@@ -87,7 +87,7 @@ func (c *Config) open(logger logrus.FieldLogger, errOnResources bool) (*client, error) {
 	logger.Info("creating custom Kubernetes resources")
 	if !cli.registerCustomResources(c.UseTPR) {
-		if errOnResources {
+		if waitForResources {
 			cancel()
 			return nil, fmt.Errorf("failed creating custom resources")
 		}
@@ -111,6 +111,13 @@ func (c *Config) open(logger logrus.FieldLogger, errOnResources bool) (*client, error) {
 		}()
 	}
 
+	if waitForResources {
+		if err := cli.waitForCRDs(ctx); err != nil {
+			cancel()
+			return nil, err
+		}
+	}
+
 	// If the client is closed, stop trying to create resources.
 	cli.cancel = cancel
 	return cli, nil
@@ -123,9 +130,6 @@ func (c *Config) open(logger logrus.FieldLogger, errOnResources bool) (*client, error) {
 // It logs all errors, returning true if the resources were created successfully.
 //
 // Creating a custom resource does not mean that they'll be immediately available.
-//
-// TODO(ericchiang): Provide an option to wait for the resources to actually
-// be available.
 func (cli *client) registerCustomResources(useTPR bool) (ok bool) {
 	ok = true
 	length := len(customResourceDefinitions)
@@ -165,6 +169,49 @@ func (cli *client) registerCustomResources(useTPR bool) (ok bool) {
 	return ok
 }
 
+// waitForCRDs waits for all CRDs to be in a ready state, and is used
+// by the tests to synchronize before running conformance.
+func (cli *client) waitForCRDs(ctx context.Context) error {
+	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
+	defer cancel()
+
+	for _, crd := range customResourceDefinitions {
+		for {
+			err := cli.isCRDReady(crd.Name)
+			if err == nil {
+				break
+			}
+
+			cli.logger.Errorf("checking CRD: %v", err)
+
+			select {
+			case <-ctx.Done():
+				return errors.New("timed out waiting for CRDs to be available")
+			case <-time.After(time.Millisecond * 100):
+			}
+		}
+	}
+	return nil
+}
+
+// isCRDReady determines if a CRD is ready by inspecting its conditions.
+func (cli *client) isCRDReady(name string) error {
+	var r k8sapi.CustomResourceDefinition
+	err := cli.getResource("apiextensions.k8s.io/v1beta1", "", "customresourcedefinitions", name, &r)
+	if err != nil {
+		return fmt.Errorf("get crd %s: %v", name, err)
+	}
+
+	conds := make(map[string]string) // For debugging, keep the conditions around.
+	for _, c := range r.Status.Conditions {
+		if c.Type == k8sapi.Established && c.Status == k8sapi.ConditionTrue {
+			return nil
+		}
+		conds[string(c.Type)] = string(c.Status)
+	}
+	return fmt.Errorf("crd %s not ready %#v", name, conds)
+}
+
 func (cli *client) Close() error {
 	if cli.cancel != nil {
 		cli.cancel()
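The new `waitForCRDs` loop is a standard poll-with-deadline pattern: retry a readiness check every 100ms until it passes or an overall 30-second timeout expires. Below is a standalone sketch of that pattern under stated assumptions; the readiness check is a stand-in, not dex's `isCRDReady`, and `waitUntilReady` is a hypothetical helper, not part of the diff.

```
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitUntilReady mirrors the shape of waitForCRDs: retry check every 100ms
// until it succeeds, bounded by a 30 second deadline on the context.
func waitUntilReady(ctx context.Context, check func() error) error {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	for {
		if err := check(); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return errors.New("timed out waiting for resource to be available")
		case <-time.After(100 * time.Millisecond):
		}
	}
}

func main() {
	start := time.Now()
	// Stand-in check that becomes ready after half a second; isCRDReady would
	// instead fetch the CRD and look for the Established condition.
	err := waitUntilReady(context.Background(), func() error {
		if time.Since(start) < 500*time.Millisecond {
			return errors.New("not established yet")
		}
		return nil
	})
	fmt.Println("ready:", err == nil)
}
```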