Unverified Commit 92f748fc by Kubernetes Prow Robot Committed by GitHub

Merge pull request #29 from kmova/enable-prow

Setup automated builds using the release-tools
parents 7d91f3d8 e8341368
./release-tools/cloudbuild.sh
\ No newline at end of file
# Copyright 2017-2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/distroless/static:latest
LABEL maintainers="Kubernetes Authors"
LABEL description="NFS subdir external provisioner"
ARG binary=./bin/nfs-subdir-external-provisioner
COPY ${binary} /nfs-subdir-external-provisioner
ENTRYPOINT ["/nfs-subdir-external-provisioner"]
@@ -12,38 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-ifeq ($(REGISTRY),)
-REGISTRY = quay.io/external_storage/
-endif
-ifeq ($(VERSION),)
-VERSION = latest
-endif
-IMAGE = $(REGISTRY)nfs-subdir-external-provisioner:$(VERSION)
-IMAGE_ARM = $(REGISTRY)nfs-subdir-external-provisioner-arm:$(VERSION)
-MUTABLE_IMAGE = $(REGISTRY)nfs-subdir-external-provisioner:latest
-MUTABLE_IMAGE_ARM = $(REGISTRY)nfs-subdir-external-provisioner-arm:latest
-all: build image build_arm image_arm
-container: build image build_arm image_arm
-build:
-	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o docker/x86_64/nfs-subdir-external-provisioner ./cmd/nfs-subdir-external-provisioner
-build_arm:
-	CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -a -ldflags '-extldflags "-static"' -o docker/arm/nfs-subdir-external-provisioner ./cmd/nfs-subdir-external-provisioner
-image:
-	docker build -t $(MUTABLE_IMAGE) docker/x86_64
-	docker tag $(MUTABLE_IMAGE) $(IMAGE)
-image_arm:
-	docker run --rm --privileged multiarch/qemu-user-static:register --reset
-	docker build -t $(MUTABLE_IMAGE_ARM) docker/arm
-	docker tag $(MUTABLE_IMAGE_ARM) $(IMAGE_ARM)
-push:
-	docker push $(IMAGE)
-	docker push $(MUTABLE_IMAGE)
-	docker push $(IMAGE_ARM)
-	docker push $(MUTABLE_IMAGE_ARM)
+CMDS=nfs-subdir-external-provisioner
+all: build
+include release-tools/build.make
+BUILD_PLATFORMS=linux amd64; linux arm64 -arm64; linux ppc64le -ppc64le; linux s390x -s390x
@@ -6,9 +6,11 @@ Note: This repository is being migrated from https://github.com/kubernetes-incub
 ```sh
 make build
-# Set a custom image registry to push the container image
-# Example REGISTRY="quay.io/myorg"
-make image
+make container
+# `nfs-subdir-external-provisioner:latest` will be created.
+# To upload this to your custom registry, say `quay.io/myorg`, you can use
+# docker tag nfs-subdir-external-provisioner:latest quay.io/myorg/nfs-subdir-external-provisioner:latest
+# docker push quay.io/myorg/nfs-subdir-external-provisioner:latest
 ```
 ## How to deploy nfs-client to your cluster
......
./release-tools/cloudbuild.yaml
\ No newline at end of file
@@ -151,6 +151,7 @@ github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7U
 github.com/heketi/utils v0.0.0-20170317161834-435bc5bdfa64/go.mod h1:RYlF4ghFZPPmk2TC5REt5OFwvfb6lzxFWrTWB+qs28s=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
......
#! /bin/bash -e
#
# This is for testing csi-release-tools itself in Prow. All other
# repos use prow.sh for that, but as csi-release-tools isn't a normal
# repo with some Go code in it, it has a custom Prow test script.
./verify-shellcheck.sh "$(pwd)"
# Contributing Guidelines
Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
## Getting Started
We have full documentation on how to get started contributing here:
<!---
If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources
-->
- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
## Mentorship
- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
<!---
Custom Information - if you're copying this template for the first time you can add custom content here, for example:
## Contact Information
- [Slack channel](https://kubernetes.slack.com/messages/kubernetes-users) - Replace `kubernetes-users` with your slack channel string, this will send users directly to your channel.
- [Mailing list](URL)
-->
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
approvers:
- saad-ali
- msau42
- pohly
reviewers:
- saad-ali
- msau42
- pohly
# [csi-release-tools](https://github.com/kubernetes-csi/csi-release-tools)
These build and test rules can be shared between different Go projects
without modifications. Customization for the different projects happens
in the top-level Makefile.
The rules include support for building and pushing Docker images, with
the following features:
- one or more commands and images per project
- push canary and/or tagged release images
- automatically derive the image tag(s) from repo tags
- the source code revision is stored in a "revision" image label
- never overwrites an existing release image
Usage
-----
The expected repository layout is:
- `cmd/*/*.go` - source code for each command
- `cmd/*/Dockerfile` - Dockerfile for each command, or a
Dockerfile in the root when only building a single command
- `Makefile` - includes `release-tools/build.make` and sets
configuration variables
- `.travis.yml` - a symlink to `release-tools/.travis.yml`
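For a repo that builds a single command, that layout might look like this (the
command name `my-sidecar` is only an illustration):

```
cmd/my-sidecar/main.go        # source code of the command
cmd/my-sidecar/Dockerfile     # image build file for that command
Makefile                      # sets CMDS=my-sidecar, includes release-tools/build.make
.travis.yml -> release-tools/.travis.yml
release-tools/                # these shared rules, added via git subtree
```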
To create a release, tag a certain revision with a name that
starts with `v`, for example `v1.0.0`, then `make push`
while that commit is checked out.
It does not matter on which branch that revision exists, i.e. it is
possible to create releases directly from master. A release branch can
still be created for maintenance releases later if needed.
Release branches are expected to be named `release-x.y` for releases
`x.y.z`. Building from such a branch creates `x.y-canary`
images. Building from master creates the main `canary` image.
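As a quick illustration of that flow (the version number and remote name are
only examples):

``` bash
# Tag the revision that should become the release and push the tag.
git tag v1.0.0
git push origin v1.0.0

# With that commit checked out, build and push the release image(s).
make push

# The same "make push" from a release-1.0 branch would produce 1.0-canary
# images, and from master it produces the plain canary image.
```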
Sharing and updating
--------------------
[`git subtree`](https://github.com/git/git/blob/master/contrib/subtree/git-subtree.txt)
is the recommended way of maintaining a copy of the rules inside the
`release-tools` directory of a project. This way, it is possible to make
changes also locally, test them and then push them back to the shared
repository at a later time.
Cheat sheet:
- `git subtree add --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - add release tools to a repo which does not have them yet (only once)
- `git subtree pull --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - update local copy to latest upstream (whenever upstream changes)
- edit, `git commit`, `git subtree push --prefix=release-tools git@github.com:<user>/csi-release-tools.git <my-new-or-existing-branch>` - push to a new branch before submitting a PR
verify-shellcheck.sh
--------------------
The [verify-shellcheck.sh](./verify-shellcheck.sh) script in this repo
is a stripped down copy of the [corresponding
script](https://github.com/kubernetes/kubernetes/blob/release-1.14/hack/verify-shellcheck.sh)
in the Kubernetes repository. It can be used to check for certain
errors in shell scripts, like missing quotation marks. The default
`test-shellcheck` target in [build.make](./build.make) only checks the
scripts in this directory. Components can add more directories to
`TEST_SHELLCHECK_DIRS` to check other scripts as well.
End-to-end testing
------------------
A repo that wants to opt into testing via Prow must set up a top-level
`.prow.sh`. Typically that will source `prow.sh` and then transfer
control to it:
``` bash
#! /bin/bash -e
. release-tools/prow.sh
main
```
All Kubernetes-CSI repos are expected to switch to Prow. For details
on what is enabled in Prow, see
https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-csi
Test results for periodic jobs are visible in
https://testgrid.k8s.io/sig-storage-csi-ci
It is possible to reproduce the Prow testing locally on a suitable machine:
- Linux host
- Docker installed
- code to be tested checked out in `$GOPATH/src/<import path>`
- `cd $GOPATH/src/<import path> && ./.prow.sh`
Beware that the script intentionally doesn't clean up after itself and
modifies the content of `$GOPATH`, in particular the `kubernetes` and
`kind` repositories there. It is better to run it in an empty, disposable
`$GOPATH`.
When it terminates, the following command can be used to get access to
the Kubernetes cluster that was brought up for testing (assuming that
this step succeeded):
    export KUBECONFIG="$(kind get kubeconfig-path --name="csi-prow")"
It is possible to control the execution via environment variables. See
`prow.sh` for details. Particularly useful is testing against different
Kubernetes releases:
    CSI_PROW_KUBERNETES_VERSION=1.13.3 ./.prow.sh
    CSI_PROW_KUBERNETES_VERSION=latest ./.prow.sh
Dependencies and vendoring
--------------------------
Most projects will (eventually) use `go mod` to manage
dependencies. `dep` is also still supported by `csi-release-tools`,
but not documented here because it's not recommended anymore.
The usual instructions for using [go
modules](https://github.com/golang/go/wiki/Modules) apply. Here's a cheat sheet
for some of the relevant commands:
- list available updates: `GO111MODULE=on go list -u -m all`
- update or add a single dependency: `GO111MODULE=on go get <package>`
- update all dependencies to their next minor or patch release:
`GO111MODULE=on go get -u ./...` (add `-u=patch` to limit to patch
releases)
- lock onto a specific version: `GO111MODULE=on go get <package>@<version>`
- clean up `go.mod`: `GO111MODULE=on go mod tidy`
- update vendor directory: `GO111MODULE=on go mod vendor`
`GO111MODULE=on` can be left out when using Go >= 1.13 or when the
source is checked out outside of `$GOPATH`.
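For example, a typical dependency bump might look like this (the module path
and version are placeholders):

``` bash
# Update one dependency, then clean up go.mod and refresh the vendor directory.
GO111MODULE=on go get example.com/some/module@v1.2.3
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
```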
`go mod tidy` must be used to ensure that the listed dependencies are
really still needed. Changing import statements or a tentative `go
get` can result in stale dependencies.
The `test-vendor` target verifies that `go mod tidy` was used when run locally or in a
pre-merge CI job. If a `vendor` directory is present, it will also
verify that its content is up-to-date.
The `vendor` directory is optional. It is still present in projects
because it avoids downloading sources during CI builds. If this is no
longer deemed necessary, then a project can also remove the directory.
Conversion of a repository that uses `dep` to `go mod` can be done with:
    GO111MODULE=on go mod init
    release-tools/go-get-kubernetes.sh <current Kubernetes version from Gopkg.toml>
    GO111MODULE=on go mod tidy
    GO111MODULE=on go mod vendor
    git rm -f Gopkg.toml Gopkg.lock
    git add go.mod go.sum vendor
### Updating Kubernetes dependencies
When using packages that are part of the Kubernetes source code, the
commands above are not enough because the [lack of semantic
versioning](https://github.com/kubernetes/kubernetes/issues/72638)
prevents `go mod` from finding newer releases. Importing directly from
`kubernetes/kubernetes` also needs `replace` statements to override
the fake `v0.0.0` versions
(https://github.com/kubernetes/kubernetes/issues/79384). The
`go-get-kubernetes.sh` script can be used to update all packages in
lockstep to a different Kubernetes version. Example usage:
```
$ ./release-tools/go-get-kubernetes.sh 1.16.4
```
# Release Process
No tagged releases are planned at this point. The intention is to keep
the master branch in a state such that it can be used for all
supported branches in downstream repos which use these files.
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Team to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
saad-ali
msau42
# Sidecar Release Process
This page describes the process for releasing a kubernetes-csi sidecar.
## Prerequisites
The release manager must:
* Be a member of the kubernetes-csi organization. Open an
[issue](https://github.com/kubernetes/org/issues/new?assignees=&labels=area%2Fgithub-membership&template=membership.md&title=REQUEST%3A+New+membership+for+%3Cyour-GH-handle%3E) in
kubernetes/org to request membership
* Be a top level approver for the repository. To become a top level approver,
the candidate must demonstrate ownership and deep knowledge of the repository
through active maintenance, responding to and fixing issues, reviewing PRs, and
test triage.
* Be part of the maintainers or admin group for the repository. The admin group is a
superset of maintainers; only the maintainers level is required for cutting a
release. Membership can be requested by submitting a PR to kubernetes/org.
[Example](https://github.com/kubernetes/org/pull/1467)
## Updating CI Jobs
Whenever a new Kubernetes minor version is released, our kubernetes-csi CI jobs
must be updated.
[Our CI jobs](https://k8s-testgrid.appspot.com/sig-storage-csi-ci) have the
naming convention `<hostpath-deployment-version>-on-<kubernetes-version>`.
1. Jobs should be actively monitored to find and fix failures in sidecars and
infrastructure changes early in the development cycle. Test failures are sent
to kubernetes-sig-storage-test-failures@googlegroups.com.
1. "-on-master" jobs are the closest reflection to the new Kubernetes version.
1. Fixes to our prow.sh CI script can be tested in the [CSI hostpath
repo](https://github.com/kubernetes-csi/csi-driver-host-path) by modifying
[prow.sh](https://github.com/kubernetes-csi/csi-driver-host-path/blob/master/release-tools/prow.sh)
along with any overrides in
[.prow.sh](https://github.com/kubernetes-csi/csi-driver-host-path/blob/master/.prow.sh)
to mirror the failing environment. Once e2e tests are passing (verify-unit tests
will fail), the prow.sh changes can be submitted to [csi-release-tools](https://github.com/kubernetes-csi/csi-release-tools).
1. Changes can then be updated in all the sidecar repos and hostpath driver repo
by following the [update
instructions](https://github.com/kubernetes-csi/csi-release-tools/blob/master/README.md#sharing-and-updating).
1. New pull and CI jobs are configured by adding new K8s versions to the top of
[gen-jobs.sh](https://github.com/kubernetes/test-infra/blob/master/config/jobs/kubernetes-csi/gen-jobs.sh).
New pull jobs that have not yet been verified should initially be made optional by
setting the new K8s version as
[experimental](https://github.com/kubernetes/test-infra/blob/a1858f46d6014480b130789df58b230a49203a64/config/jobs/kubernetes-csi/gen-jobs.sh#L40).
1. Once new pull and CI jobs have been verified, and the new Kubernetes version
is released, we can make the optional jobs required, and also remove the
Kubernetes versions that are no longer supported.
## Release Process
1. Identify all issues and ongoing PRs that should go into the release, and
drive them to resolution.
1. Download v2.8+ [K8s release notes
generator](https://github.com/kubernetes/release/tree/master/cmd/release-notes)
1. Generate release notes for the release. Replace arguments with the relevant
information.
* Clean up old cached information (also needed if you are generating release
notes for multiple repos)
```bash
rm -rf /tmp/k8s-repo
```
* For new minor releases on master:
```bash
GITHUB_TOKEN=<token> release-notes --discover=mergebase-to-latest
--github-org=kubernetes-csi --github-repo=external-provisioner
--required-author="" --output out.md
```
* For new patch releases on a release branch:
```bash
GITHUB_TOKEN=<token> release-notes --discover=patch-to-latest --branch=release-1.1
--github-org=kubernetes-csi --github-repo=external-provisioner
--required-author="" --output out.md
```
1. Compare the generated output to the new commits for the release to check if
any notable change missed a release note.
1. Reword release notes as needed. Make sure to check notes for breaking
changes and deprecations.
1. If the release is a new major/minor version, create a new `CHANGELOG-<major>.<minor>.md`
file. Otherwise, add the release notes to the top of the existing CHANGELOG
file for that minor version.
1. Submit a PR for the CHANGELOG changes.
1. Submit a PR for README changes, in particular, Compatibility, Feature status,
and any other sections that may need updating.
1. Check that all [canary CI
jobs](https://k8s-testgrid.appspot.com/sig-storage-csi-ci) are passing,
and that test coverage is adequate for the changes that are going into the release.
1. Make sure that no new PRs have merged in the meantime, and no PRs are in
flight and soon to be merged.
1. Create a new release following a previous release as a template. Be sure to select the correct
branch. This requires GitHub release permissions, as described in the prerequisites.
[external-provisioner example](https://github.com/kubernetes-csi/external-provisioner/releases/new)
1. If the release was a new major/minor version, create a new `release-<minor>`
branch at that commit.
1. Check [image build status](https://k8s-testgrid.appspot.com/sig-storage-image-build).
1. Promote images from k8s-staging-sig-storage to k8s.gcr.io/sig-storage. From
the [k8s image
repo](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io/images/k8s-staging-sig-storage),
run `./generate.sh > images.yaml`, and send a PR with the updated images.
Once merged, the image promoter will copy the images from staging to prod.
1. Update [kubernetes-csi/docs](https://github.com/kubernetes-csi/docs) sidecar
and feature pages with the new released version.
1. After all the sidecars have been released, update
CSI hostpath driver with the new sidecars in the [CSI repo](https://github.com/kubernetes-csi/csi-driver-host-path/tree/master/deploy)
and [k/k
in-tree](https://github.com/kubernetes/kubernetes/tree/master/test/e2e/testing-manifests/storage-csi/hostpath/hostpath)
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: build-% build container-% container push-% push clean test
# A space-separated list of all commands in the repository, must be
# set in main Makefile of a repository.
# CMDS=
# This is the default. It can be overridden in the main Makefile after
# including build.make.
REGISTRY_NAME?=quay.io/k8scsi
# Can be set to -mod=vendor to ensure that the "vendor" directory is used.
GOFLAGS_VENDOR=
# Revision that gets built into each binary via the main.version
# string. Uses the `git describe` output based on the most recent
# version tag with a short revision suffix or, if nothing has been
# tagged yet, just the revision.
#
# Beware that tags may also be missing in shallow clones as done by
# some CI systems (like TravisCI, which pulls only 50 commits).
REV=$(shell git describe --long --tags --match='v*' --dirty 2>/dev/null || git rev-list -n1 HEAD)
# A space-separated list of image tags under which the current build is to be pushed.
# Determined dynamically.
IMAGE_TAGS=
# A "canary" image gets built if the current commit is the head of the remote "master" branch.
# That branch does not exist when building some other branch in TravisCI.
IMAGE_TAGS+=$(shell if [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 origin/master 2>/dev/null)" ]; then echo "canary"; fi)
# A "X.Y.Z-canary" image gets built if the current commit is the head of a "origin/release-X.Y.Z" branch.
# The actual suffix does not matter, only the "release-" prefix is checked.
IMAGE_TAGS+=$(shell git branch -r --points-at=HEAD | grep 'origin/release-' | grep -v -e ' -> ' | sed -e 's;.*/release-\(.*\);\1-canary;')
# A release image "vX.Y.Z" gets built if there is a tag of that format for the current commit.
# --abbrev=0 suppresses long format, only showing the closest tag.
IMAGE_TAGS+=$(shell tagged="$$(git describe --tags --match='v*' --abbrev=0)"; if [ "$$tagged" ] && [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 $$tagged)" ]; then echo $$tagged; fi)
# Images are named after the command contained in them.
IMAGE_NAME=$(REGISTRY_NAME)/$*
ifdef V
# Adding "-alsologtostderr" assumes that all test binaries contain glog. This is not guaranteed.
TESTARGS = -v -args -alsologtostderr -v 5
else
TESTARGS =
endif
# Specific packages can be excluded from each of the tests below by setting the *_FILTER_CMD variables
# to something like "| grep -v 'github.com/kubernetes-csi/project/pkg/foobar'". See usage below.
# BUILD_PLATFORMS contains a set of <os> <arch> <suffix> triplets,
# separated by semicolon. An empty variable or empty entry (= just a
# semicolon) builds for the default platform of the current Go
# toolchain.
BUILD_PLATFORMS =
# Add go ldflags using LDFLAGS at the time of compilation.
IMPORTPATH_LDFLAGS = -X main.version=$(REV)
EXT_LDFLAGS = -extldflags "-static"
LDFLAGS =
FULL_LDFLAGS = $(LDFLAGS) $(IMPORTPATH_LDFLAGS) $(EXT_LDFLAGS)
# This builds each command (= the sub-directories of ./cmd) for the target platform(s)
# defined by BUILD_PLATFORMS.
$(CMDS:%=build-%): build-%: check-go-version-go
mkdir -p bin
echo '$(BUILD_PLATFORMS)' | tr ';' '\n' | while read -r os arch suffix; do \
if ! (set -x; CGO_ENABLED=0 GOOS="$$os" GOARCH="$$arch" go build $(GOFLAGS_VENDOR) -a -ldflags '$(FULL_LDFLAGS)' -o "./bin/$*$$suffix" ./cmd/$*); then \
echo "Building $* for GOOS=$$os GOARCH=$$arch failed, see error(s) above."; \
exit 1; \
fi; \
done
$(CMDS:%=container-%): container-%: build-%
docker build -t $*:latest -f $(shell if [ -e ./cmd/$*/Dockerfile ]; then echo ./cmd/$*/Dockerfile; else echo Dockerfile; fi) --label revision=$(REV) .
$(CMDS:%=push-%): push-%: container-%
set -ex; \
push_image () { \
docker tag $*:latest $(IMAGE_NAME):$$tag; \
docker push $(IMAGE_NAME):$$tag; \
}; \
for tag in $(IMAGE_TAGS); do \
if [ "$$tag" = "canary" ] || echo "$$tag" | grep -q -e '-canary$$'; then \
: "creating or overwriting canary image"; \
push_image; \
elif docker pull $(IMAGE_NAME):$$tag 2>&1 | tee /dev/stderr | grep -q "manifest for $(IMAGE_NAME):$$tag not found"; then \
: "creating release image"; \
push_image; \
else \
: "release image $(IMAGE_NAME):$$tag already exists, skipping push"; \
fi; \
done
build: $(CMDS:%=build-%)
container: $(CMDS:%=container-%)
push: $(CMDS:%=push-%)
# Additional parameters are needed when pushing to a local registry,
# see https://github.com/docker/buildx/issues/94.
# However, that then runs into https://github.com/docker/cli/issues/2396.
#
# What works for local testing is:
# make push-multiarch PULL_BASE_REF=master REGISTRY_NAME=<your account on dockerhub.io> BUILD_PLATFORMS="linux amd64; windows amd64 .exe; linux ppc64le -ppc64le; linux s390x -s390x"
DOCKER_BUILDX_CREATE_ARGS ?=
# This target builds a multiarch image for one command using Moby BuildKit builder toolkit.
# Docker Buildx is included in Docker 19.03.
#
# ./cmd/<command>/Dockerfile[.Windows] is used if found, otherwise Dockerfile[.Windows].
# It is currently optional: if no such file exists, Windows images are not included,
# even when Windows is listed in BUILD_PLATFORMS. That way, projects can test that
# Windows binaries can be built before adding a Dockerfile for it.
#
# BUILD_PLATFORMS determines which individual images are included in the multiarch image.
# PULL_BASE_REF must be set to 'master', 'release-x.y', or a tag name, and determines
# the tag for the resulting multiarch image.
$(CMDS:%=push-multiarch-%): push-multiarch-%: check-pull-base-ref build-%
set -ex; \
DOCKER_CLI_EXPERIMENTAL=enabled; \
export DOCKER_CLI_EXPERIMENTAL; \
docker buildx create $(DOCKER_BUILDX_CREATE_ARGS) --use --name multiarchimage-buildertest; \
trap "docker buildx rm multiarchimage-buildertest" EXIT; \
dockerfile_linux=$$(if [ -e ./cmd/$*/Dockerfile ]; then echo ./cmd/$*/Dockerfile; else echo Dockerfile; fi); \
dockerfile_windows=$$(if [ -e ./cmd/$*/Dockerfile.Windows ]; then echo ./cmd/$*/Dockerfile.Windows; else echo Dockerfile.Windows; fi); \
if [ '$(BUILD_PLATFORMS)' ]; then build_platforms='$(BUILD_PLATFORMS)'; else build_platforms="linux amd64"; fi; \
if ! [ -f "$$dockerfile_windows" ]; then \
build_platforms="$$(echo "$$build_platforms" | sed -e 's/windows *[^ ]* *.exe//g' -e 's/; *;/;/g')"; \
fi; \
pushMultiArch () { \
tag=$$1; \
echo "$$build_platforms" | tr ';' '\n' | while read -r os arch suffix; do \
docker buildx build --push \
--tag $(IMAGE_NAME):$$arch-$$os-$$tag \
--platform=$$os/$$arch \
--file $$(eval echo \$${dockerfile_$$os}) \
--build-arg binary=./bin/$*$$suffix \
--label revision=$(REV) \
.; \
done; \
images=$$(echo "$$build_platforms" | tr ';' '\n' | while read -r os arch suffix; do echo $(IMAGE_NAME):$$arch-$$os-$$tag; done); \
docker manifest create --amend $(IMAGE_NAME):$$tag $$images; \
docker manifest push -p $(IMAGE_NAME):$$tag; \
}; \
if [ $(PULL_BASE_REF) = "master" ]; then \
: "creating or overwriting canary image"; \
pushMultiArch canary; \
elif echo $(PULL_BASE_REF) | grep -q -e 'release-*' ; then \
: "creating or overwriting canary image for release branch"; \
release_canary_tag=$$(echo $(PULL_BASE_REF) | cut -f2 -d '-')-canary; \
pushMultiArch $$release_canary_tag; \
elif docker pull $(IMAGE_NAME):$(PULL_BASE_REF) 2>&1 | tee /dev/stderr | grep -q "manifest for $(IMAGE_NAME):$(PULL_BASE_REF) not found"; then \
: "creating release image"; \
pushMultiArch $(PULL_BASE_REF); \
else \
: "ERROR: release image $(IMAGE_NAME):$(PULL_BASE_REF) already exists: a new tag is required!"; \
exit 1; \
fi
.PHONY: check-pull-base-ref
check-pull-base-ref:
if ! [ "$(PULL_BASE_REF)" ]; then \
echo >&2 "ERROR: PULL_BASE_REF must be set to 'master', 'release-x.y', or a tag name."; \
exit 1; \
fi
.PHONY: push-multiarch
push-multiarch: $(CMDS:%=push-multiarch-%)
clean:
-rm -rf bin
test: check-go-version-go
.PHONY: test-go
test: test-go
test-go:
@ echo; echo "### $@:"
go test $(GOFLAGS_VENDOR) `go list $(GOFLAGS_VENDOR) ./... | grep -v -e 'vendor' -e '/test/e2e$$' $(TEST_GO_FILTER_CMD)` $(TESTARGS)
.PHONY: test-vet
test: test-vet
test-vet:
@ echo; echo "### $@:"
go vet $(GOFLAGS_VENDOR) `go list $(GOFLAGS_VENDOR) ./... | grep -v vendor $(TEST_VET_FILTER_CMD)`
.PHONY: test-fmt
test: test-fmt
test-fmt:
@ echo; echo "### $@:"
files=$$(find . -name '*.go' | grep -v './vendor' $(TEST_FMT_FILTER_CMD)); \
if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \
echo "formatting errors:"; \
gofmt -d $$files; \
false; \
fi
# This test only runs when dep >= 0.5 is installed, which is the case for the CI setup.
# When using 'go mod', we allow the test to be skipped in the Prow CI under some special
# circumstances, because it depends on accessing all remote repos and thus
# running it all the time would defeat the purpose of vendoring:
# - not handling a PR or
# - the fabricated merge commit leaves go.mod, go.sum and vendor dir unchanged
# - release-tools also didn't change (changing rules or Go version might lead to
# a different result and thus must be tested)
# - import statements not changed (because if they change, go.mod might have to be updated)
#
# "git diff" is intelligent enough to annotate changes inside the "import" block in
# the start of the diff hunk:
#
# diff --git a/rpc/common.go b/rpc/common.go
# index bb4a5c4..5fa4271 100644
# --- a/rpc/common.go
# +++ b/rpc/common.go
# @@ -21,7 +21,6 @@ import (
# "fmt"
# "time"
#
# - "google.golang.org/grpc"
# "google.golang.org/grpc/codes"
# "google.golang.org/grpc/status"
#
# We rely on that to find such changes.
#
# Vendoring is optional when using go.mod.
.PHONY: test-vendor
test: test-vendor
test-vendor:
@ echo; echo "### $@:"
@ ./release-tools/verify-vendor.sh
.PHONY: test-subtree
test: test-subtree
test-subtree:
@ echo; echo "### $@:"
./release-tools/verify-subtree.sh release-tools
# Components can extend the set of directories which must pass shellcheck.
# The default is to check only the release-tools directory itself.
TEST_SHELLCHECK_DIRS=release-tools
.PHONY: test-shellcheck
test: test-shellcheck
test-shellcheck:
@ echo; echo "### $@:"
@ ret=0; \
if ! command -v docker; then \
echo "skipped, no Docker"; \
exit 0; \
fi; \
for dir in $(abspath $(TEST_SHELLCHECK_DIRS)); do \
echo; \
echo "$$dir:"; \
./release-tools/verify-shellcheck.sh "$$dir" || ret=1; \
done; \
exit $$ret
# Targets in the makefile can depend on check-go-version-<path to go binary>
# to trigger a warning if the x.y version of that binary does not match
# what the project uses. Make ensures that this is only checked once per
# invocation.
.PHONY: check-go-version-%
check-go-version-%:
./release-tools/verify-go-version.sh "$*"
#! /bin/bash
# shellcheck disable=SC1091
. release-tools/prow.sh
gcr_cloud_build
# A configuration file for multi-arch image building with the Google cloud build service.
#
# Repos using this file must:
# - import csi-release-tools
# - add a symlink cloudbuild.yaml -> release-tools/cloudbuild.yaml
# - add a .cloudbuild.sh which can be a custom file or a symlink
# to release-tools/cloudbuild.sh
# - accept "binary" as build argument in their Dockerfile(s) (see
# https://github.com/pohly/node-driver-registrar/blob/3018101987b0bb6da2a2657de607174d6e3728f7/Dockerfile#L4-L6)
# because binaries will get built for different architectures and then
# get copied from the build host into the container image
#
# See https://github.com/kubernetes/test-infra/blob/master/config/jobs/image-pushing/README.md
# for more details on image pushing process in Kubernetes.
#
# To promote release images, see https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io/images/k8s-staging-sig-storage.
# This must be specified in seconds. If omitted, defaults to 600s (10 mins).
# Building three images in external-snapshotter takes roughly half an hour,
# sometimes more.
timeout: 3600s
# This prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
# or any new substitutions added in the future.
options:
substitution_option: ALLOW_LOOSE
steps:
# The image must contain bash and curl. Ideally it should also contain
# the desired version of Go (currently defined in release-tools/travis.yml),
# but that just speeds up the build and is not required.
- name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20200421-a2bf5f8'
entrypoint: ./.cloudbuild.sh
env:
- GIT_TAG=${_GIT_TAG}
- PULL_BASE_REF=${_PULL_BASE_REF}
- REGISTRY_NAME=gcr.io/${_STAGING_PROJECT}
- HOME=/root
substitutions:
# _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
# can be used as a substitution.
_GIT_TAG: '12345'
# _PULL_BASE_REF will contain the ref that was pushed to trigger this build -
# a branch like 'master' or 'release-0.2', or a tag like 'v0.2'.
_PULL_BASE_REF: 'master'
# The default gcr.io staging project for Kubernetes-CSI
# (=> https://console.cloud.google.com/gcr/images/k8s-staging-sig-storage/GLOBAL).
# Might be overridden in the Prow build job for a repo which wants
# images elsewhere.
_STAGING_PROJECT: 'k8s-staging-sig-storage'
# Kubernetes Community Code of Conduct
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This command filters a JUnit file such that only tests with a name
matching a regular expression are passed through. By concatenating
multiple input files it is possible to merge them into a single file.
*/
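// Example invocation (the regular expression and file names below are only
// illustrations):
//
//   go run filter-junit.go -t='external-storage' -o=junit_filtered.xml junit_01.xml junit_02.xml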
package main
import (
"encoding/xml"
"flag"
"io/ioutil"
"os"
"regexp"
)
var (
output = flag.String("o", "-", "junit file to write, - for stdout")
tests = flag.String("t", "", "regular expression matching the test names that are to be included in the output")
)
/*
* TestSuite represents a JUnit file. Due to how encoding/xml works, we have to
* represent all fields that we want to be passed through. It's therefore
* not a complete solution, but good enough for Ginkgo + Spyglass.
*/
type TestSuite struct {
XMLName string `xml:"testsuite"`
TestCases []TestCase `xml:"testcase"`
}
type TestCase struct {
Name string `xml:"name,attr"`
Time string `xml:"time,attr"`
SystemOut string `xml:"system-out,omitempty"`
Failure string `xml:"failure,omitempty"`
Skipped SkipReason `xml:"skipped,omitempty"`
}
// SkipReason deals with the special <skipped></skipped>:
// if present, we must re-encode it, even if empty.
type SkipReason string
func (s *SkipReason) UnmarshalText(text []byte) error {
*s = SkipReason(text)
if *s == "" {
*s = " "
}
return nil
}
func (s SkipReason) MarshalText() ([]byte, error) {
if s == " " {
return []byte{}, nil
}
return []byte(s), nil
}
func main() {
var junit TestSuite
var data []byte
flag.Parse()
re := regexp.MustCompile(*tests)
// Read all input files.
for _, input := range flag.Args() {
if input == "-" {
// Read all of stdin; a Read into the nil "data" slice would read nothing.
var err error
data, err = ioutil.ReadAll(os.Stdin)
if err != nil {
panic(err)
}
} else {
var err error
data, err = ioutil.ReadFile(input)
if err != nil {
panic(err)
}
}
if err := xml.Unmarshal(data, &junit); err != nil {
panic(err)
}
}
// Keep only matching testcases. Testcases skipped in all test runs are only stored once.
filtered := map[string]TestCase{}
for _, testcase := range junit.TestCases {
if !re.MatchString(testcase.Name) {
continue
}
entry, ok := filtered[testcase.Name]
if !ok || // not present yet
entry.Skipped != "" && testcase.Skipped == "" { // replaced skipped test with real test run
filtered[testcase.Name] = testcase
}
}
junit.TestCases = nil
for _, testcase := range filtered {
junit.TestCases = append(junit.TestCases, testcase)
}
// Re-encode.
data, err := xml.MarshalIndent(junit, "", " ")
if err != nil {
panic(err)
}
// Write to output.
if *output == "-" {
if _, err := os.Stdout.Write(data); err != nil {
panic(err)
}
} else {
if err := ioutil.WriteFile(*output, data, 0644); err != nil {
panic(err)
}
}
}
#!/usr/bin/env bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script can be used while converting a repo from "dep" to "go mod"
# by calling it after "go mod init" or to update the Kubernetes packages
# in a repo that has already been converted. Only packages that are
# part of kubernetes/kubernetes and thus part of a Kubernetes release
# are modified. Other k8s.io packages (like k8s.io/klog, k8s.io/utils)
# need to be updated separately.
set -o pipefail
cmd=$0
function help () {
echo "$cmd <kubernetes version = x.y.z> - update all components from kubernetes/kubernetes to that version"
}
if [ $# -ne 1 ]; then
help
exit 1
fi
case "$1" in -h|--help|help) help; exit 0;; esac
die () {
echo >&2 "$@"
exit 1
}
k8s="$1"
# If the repo imports k8s.io/kubernetes (directly or indirectly), then
# "go mod" will try to find "v0.0.0" versions because
# k8s.io/kubernetes has those in its go.mod file
# (https://github.com/kubernetes/kubernetes/blob/2bd9643cee5b3b3a5ecbd3af49d09018f0773c77/go.mod#L146-L157).
# (https://github.com/kubernetes/kubernetes/issues/79384).
#
# We need to replicate the replace statements to override those fake
# versions also in our go.mod file (idea and some code from
# https://github.com/kubernetes/kubernetes/issues/79384#issuecomment-521493597).
mods=$( (set -x; curl --silent --show-error --fail "https://raw.githubusercontent.com/kubernetes/kubernetes/v${k8s}/go.mod") |
sed -n 's|.*k8s.io/\(.*\) => ./staging/src/k8s.io/.*|k8s.io/\1|p'
) || die "failed to determine Kubernetes staging modules"
for mod in $mods; do
# The presence of a potentially incomplete go.mod file affects this command,
# so move elsewhere.
modinfo=$(set -x; cd /; env GO111MODULE=on go mod download -json "$mod@kubernetes-${k8s}") ||
die "failed to determine version of $mod: $modinfo"
v=$(echo "$modinfo" | sed -n 's|.*"Version": "\(.*\)".*|\1|p')
(set -x; env GO111MODULE=on go mod edit "-replace=$mod=$mod@$v") || die "'go mod edit' failed"
done
packages=
# Beware that we have to work with packages, not modules (i.e. no -m
# flag), because some modules trigger a "no Go code except tests"
# error. Getting their packages works.
if ! packages=$( (set -x; env GO111MODULE=on go list all) | grep ^k8s.io/ | sed -e 's; *;;'); then
cat >&2 <<EOF
Warning: "GO111MODULE=on go list all" failed, trying individual packages instead.
EOF
if ! packages=$( (set -x; env GO111MODULE=on go list -f '{{ join .Deps "\n" }}' ./...) | grep ^k8s.io/); then
cat >&2 <<EOF
ERROR: could not obtain package list, both of these commands failed:
GO111MODULE=on go list all
GO111MODULE=on go list -f '{{ join .Deps "\n" }}' ./...
EOF
exit 1
fi
fi
deps=
for package in $packages; do
# Some k8s.io packages do not come from Kubernetes staging and
# thus have different versioning (or none at all...). We need to
# skip those. We know what packages are from staging because we
# now have "replace" statements for them in go.mod.
#
# shellcheck disable=SC2001
module=$(echo "$package" | sed -e 's;k8s.io/\([^/]*\)/.*;k8s.io/\1;')
if grep -q -w "$module *=>" go.mod; then
deps="$deps $(echo "$package" | sed -e "s;\$;@kubernetes-$k8s;" -e 's;^k8s.io/kubernetes\(/.*\)@kubernetes-;k8s.io/kubernetes\1@v;')"
fi
done
# shellcheck disable=SC2086
(set -x; env GO111MODULE=on go get $deps 2>&1) || die "go get failed"
echo "SUCCESS"
#! /bin/bash
#
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs inside a Prow job. It can run unit tests ("make test")
# and E2E testing. This E2E testing covers different scenarios (see
# https://github.com/kubernetes/enhancements/pull/807):
# - running the stable hostpath example against a Kubernetes release
# - running the canary hostpath example against a Kubernetes release
# - building the component in the current repo and running the
# stable hostpath example with that one component replaced against
# a Kubernetes release
#
# The intended usage of this script is that individual repos import
# csi-release-tools, then link their top-level prow.sh to this or
# include it in that file. When including it, several of the variables
# can be overridden in the top-level prow.sh to customize the script
# for the repo.
#
# The expected environment is:
# - $GOPATH/src/<import path> for the repository that is to be tested,
# with PR branch merged (when testing a PR)
# - optional: bazel installed (when testing against Kubernetes master),
# must be recent enough for Kubernetes master
# - running on linux-amd64
# - kind (https://github.com/kubernetes-sigs/kind) installed
# - optional: Go already installed
RELEASE_TOOLS_ROOT="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
REPO_DIR="$(pwd)"
# Sets the default value for a variable if not set already and logs the value.
# Any variable set this way is usually something that a repo's .prow.sh
# or the job can set.
configvar () {
# Ignore: Word is of the form "A"B"C" (B indicated). Did you mean "ABC" or "A\"B\"C"?
# shellcheck disable=SC2140
eval : \$\{"$1":="\$2"\}
eval echo "\$3:" "$1=\${$1}"
}
# Prints the value of a variable + version suffix, falling back to variable + "LATEST".
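# For example (the variable name CSI_PROW_FOO is only an illustration): with
# var=CSI_PROW_FOO and version=1_17, this prints $CSI_PROW_FOO_1_17 if that is
# set, otherwise $CSI_PROW_FOO_LATEST.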
get_versioned_variable () {
local var="$1"
local version="$2"
local value
eval value="\${${var}_${version}}"
if ! [ "$value" ]; then
eval value="\${${var}_LATEST}"
fi
echo "$value"
}
configvar CSI_PROW_BUILD_PLATFORMS "linux amd64; windows amd64 .exe; linux ppc64le -ppc64le; linux s390x -s390x; linux arm64 -arm64" "Go target platforms (= GOOS + GOARCH) and file suffix of the resulting binaries"
# If we have a vendor directory, then use it. We must be careful to only
# use this for "make" invocations inside the project's repo itself because
# setting it globally can break other go usages (like "go get <some command>"
# which is disabled with GOFLAGS=-mod=vendor).
configvar GOFLAGS_VENDOR "$( [ -d vendor ] && echo '-mod=vendor' )" "Go flags for using the vendor directory"
# Go versions can be specified separately for different tasks.
# If the pre-installed Go is missing or a different
# version, the required version here will get installed
# from https://golang.org/dl/.
go_from_travis_yml () {
grep "^ *- go:" "${RELEASE_TOOLS_ROOT}/travis.yml" | sed -e 's/.*go: *//'
}
configvar CSI_PROW_GO_VERSION_BUILD "$(go_from_travis_yml)" "Go version for building the component" # depends on component's source code
configvar CSI_PROW_GO_VERSION_E2E "" "override Go version for building the Kubernetes E2E test suite" # normally doesn't need to be set, see install_e2e
configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below
configvar CSI_PROW_GO_VERSION_KIND "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on CSI_PROW_KIND_VERSION below
configvar CSI_PROW_GO_VERSION_GINKGO "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building ginkgo" # depends on CSI_PROW_GINKGO_VERSION below
# kind version to use. If the pre-installed version is different,
# the desired version is downloaded from https://github.com/kubernetes-sigs/kind/releases
# (if available), otherwise it is built from source.
configvar CSI_PROW_KIND_VERSION "v0.9.0" "kind"
# kind images to use. Must match the kind version.
# The release notes of each kind release list the supported images.
configvar CSI_PROW_KIND_IMAGES "kindest/node:v1.19.1@sha256:98cf5288864662e37115e362b23e4369c8c4a408f99cbc06e58ac30ddc721600
kindest/node:v1.18.8@sha256:f4bcc97a0ad6e7abaf3f643d890add7efe6ee4ab90baeb374b4f41a4c95567eb
kindest/node:v1.17.11@sha256:5240a7a2c34bf241afb54ac05669f8a46661912eab05705d660971eeb12f6555
kindest/node:v1.16.15@sha256:a89c771f7de234e6547d43695c7ab047809ffc71a0c3b65aa54eda051c45ed20
kindest/node:v1.15.12@sha256:d9b939055c1e852fe3d86955ee24976cab46cba518abcb8b13ba70917e6547a6
kindest/node:v1.14.10@sha256:ce4355398a704fca68006f8a29f37aafb49f8fc2f64ede3ccd0d9198da910146
kindest/node:v1.13.12@sha256:1c1a48c2bfcbae4d5f4fa4310b5ed10756facad0b7a2ca93c7a4b5bae5db29f5" "kind images"
# Use kind node-image --type=bazel by default, but allow disabling it.
configvar CSI_PROW_USE_BAZEL true "use Bazel during 'kind node-image' invocation"
# ginkgo test runner version to use. If the pre-installed version is
# different, the desired version is built from source.
configvar CSI_PROW_GINKGO_VERSION v1.7.0 "Ginkgo"
# Ginkgo runs the E2E test in parallel. The default is based on the number
# of CPUs, but typically this can be set to something higher in the job.
configvar CSI_PROW_GINKO_PARALLEL "-p" "Ginko parallelism parameter(s)"
# Enables building the code in the repository. On by default, can be
# disabled in jobs which only use pre-built components.
configvar CSI_PROW_BUILD_JOB true "building code in repo enabled"
# Kubernetes version to test against. This must be a version number
# (like 1.13.3), "latest" (builds Kubernetes from the master branch)
# or "release-x.yy" (builds Kubernetes from a release branch).
#
# The patch version is only relevant for picking the E2E test suite
# that is used for testing. The script automatically picks
# the kind images for the major/minor version of Kubernetes
# that the kind release supports.
#
# This can also be a version that had not been released yet at the time
# the settings below were chosen. The script will then
# use the same settings as for "latest" Kubernetes. This works
# as long as there are no breaking changes in Kubernetes, like
# deprecating or changing the implementation of an alpha feature.
configvar CSI_PROW_KUBERNETES_VERSION 1.17.0 "Kubernetes"
# CSI_PROW_KUBERNETES_VERSION reduced to first two version numbers and
# with underscore (1_13 instead of 1.13.3) and in uppercase (LATEST
# instead of latest).
#
# This is used to derive the right defaults for the variables below
# when a Prow job just defines the Kubernetes version.
csi_prow_kubernetes_version_suffix="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | tr . _ | tr '[:lower:]' '[:upper:]' | sed -e 's/^RELEASE-//' -e 's/\([0-9]*\)_\([0-9]*\).*/\1_\2/')"
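# Illustration of the transformation above (comment only):
#   1.17.0       -> 1_17
#   release-1.19 -> 1_19
#   latest       -> LATEST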
# Work directory. It has to allow running executables, therefore /tmp
# is avoided. Cleaning up after the script is intentionally left to
# the caller.
configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csiprow.XXXXXXXXXX")" "work directory"
# By default, this script tests sidecars with the CSI hostpath driver,
# using the install_csi_driver function. That function depends on
# a deployment script that it searches for in several places:
#
# - The "deploy" directory in the current repository: this is useful
# for the situation that a component becomes incompatible with the
# shared deployment, because then it can (temporarily!) provide its
# own example until the shared one can be updated; it's also how
# csi-driver-host-path itself provides the example.
#
# - CSI_PROW_DRIVER_VERSION of the CSI_PROW_DRIVER_REPO is checked
# out: this allows other repos to reference a version of the example
# that is known to be compatible.
#
# - The <driver repo>/deploy directory can have multiple sub-directories,
# each with different deployments (stable set of images for Kubernetes 1.13,
# stable set of images for Kubernetes 1.14, canary for latest Kubernetes, etc.).
# This is necessary because there may be incompatible changes in the
# "API" of a component (for example, its command line options or RBAC rules)
# or in its support for different Kubernetes versions (CSIDriverInfo as
# CRD in Kubernetes 1.13 vs builtin API in Kubernetes 1.14).
#
# When testing an update for a component in a PR job, the
# CSI_PROW_DEPLOYMENT variable can be set in the
# .prow.sh of each component when there are breaking changes
# that require using a non-default deployment. The default
# is a deployment named "kubernetes-x.yy" (if available),
# otherwise "kubernetes-latest".
# "none" disables the deployment of the hostpath driver.
#
# When no deploy script is found (nothing in `deploy` directory,
# CSI_PROW_DRIVER_REPO=none), nothing gets deployed.
#
# If the deployment script is called with CSI_PROW_TEST_DRIVER=<file name> as
# environment variable, then it must write a suitable test driver configuration
# into that file in addition to installing the driver.
configvar CSI_PROW_DRIVER_VERSION "v1.3.0" "CSI driver version"
configvar CSI_PROW_DRIVER_REPO https://github.com/kubernetes-csi/csi-driver-host-path "CSI driver repo"
configvar CSI_PROW_DEPLOYMENT "" "deployment"
# The install_csi_driver function may work also for other CSI drivers,
# as long as they follow the conventions of the CSI hostpath driver.
# If they don't, then a different install function can be provided in
# a .prow.sh file and this config variable can be overridden.
configvar CSI_PROW_DRIVER_INSTALL "install_csi_driver" "name of the shell function which installs the CSI driver"
# If CSI_PROW_DRIVER_CANARY is set (typically to "canary", but it may
# also be a specific version tag), the deployed driver images are
# overridden with that tag. Usually empty. CSI_PROW_HOSTPATH_CANARY is
# accepted as an alternative name because some test-infra jobs
# still use that name.
configvar CSI_PROW_DRIVER_CANARY "${CSI_PROW_HOSTPATH_CANARY}" "driver image override for canary images"
# Image registry to use for canary images.
# Only valid if CSI_PROW_DRIVER_CANARY == "canary".
configvar CSI_PROW_DRIVER_CANARY_REGISTRY "gcr.io/k8s-staging-sig-storage" "registry for canary images"
# The E2E testing can come from an arbitrary repo. The expectation is that
# the repo supports "go test ./test/e2e -args --storage.testdriver" (https://github.com/kubernetes/kubernetes/pull/72836)
# after setting KUBECONFIG. As a special case, if the repository is Kubernetes,
# then `make WHAT=test/e2e/e2e.test` is called first to ensure that
# all generated files are present.
#
# CSI_PROW_E2E_REPO=none disables E2E testing.
tag_from_version () {
version="$1"
shift
case "$version" in
latest) echo "master";;
release-*) echo "$version";;
*) echo "v$version";;
esac
}
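# Illustration (comment only, not executed):
#   tag_from_version 1.17.0       -> v1.17.0
#   tag_from_version latest       -> master
#   tag_from_version release-1.19 -> release-1.19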
configvar CSI_PROW_E2E_VERSION "$(tag_from_version "${CSI_PROW_KUBERNETES_VERSION}")" "E2E version"
configvar CSI_PROW_E2E_REPO "https://github.com/kubernetes/kubernetes" "E2E repo"
configvar CSI_PROW_E2E_IMPORT_PATH "k8s.io/kubernetes" "E2E package"
# csi-sanity testing from the csi-test repo can be run against the installed
# CSI driver. For this to work, deploying the driver must expose the Unix domain
# socket csi.sock as a TCP service for use by the csi-sanity command, which runs outside
# of the cluster. The alternative would have been to (cross-)compile csi-sanity
# and install it inside the cluster, which is not necessarily easier.
configvar CSI_PROW_SANITY_REPO https://github.com/kubernetes-csi/csi-test "csi-test repo"
configvar CSI_PROW_SANITY_VERSION 5421d9f3c37be3b95b241b44a094a3db11bee789 "csi-test version" # latest master
configvar CSI_PROW_SANITY_IMPORT_PATH github.com/kubernetes-csi/csi-test "csi-test package"
configvar CSI_PROW_SANITY_SERVICE "hostpath-service" "Kubernetes TCP service name that exposes csi.sock"
configvar CSI_PROW_SANITY_POD "csi-hostpathplugin-0" "Kubernetes pod with CSI driver"
configvar CSI_PROW_SANITY_CONTAINER "hostpath" "Kubernetes container with CSI driver"
# The version of dep to use for 'make test-vendor'. Ignored if the project doesn't
# use dep. Only binary releases of dep are supported (https://github.com/golang/dep/releases).
configvar CSI_PROW_DEP_VERSION v0.5.1 "golang dep version to be used for vendor checking"
# Each job can run one or more of the following tests, identified by
# a single word:
# - unit testing
# - parallel excluding alpha features
# - serial excluding alpha features
# - parallel, only alpha feature
# - serial, only alpha features
# - sanity
#
# Unknown or unsupported entries are ignored.
#
# Testing of alpha features is only supported for CSI_PROW_KUBERNETES_VERSION=latest
# because CSI_PROW_E2E_ALPHA and CSI_PROW_E2E_ALPHA_GATES are not set for
# older Kubernetes releases. The script supports that; it just isn't done because
# it is not needed and would cause additional maintenance effort.
#
# Sanity testing with csi-sanity only covers the CSI driver itself and
# thus only makes sense in repos which provide their own CSI
# driver. Repos can enable sanity testing by setting
# CSI_PROW_TESTS_SANITY=sanity.
configvar CSI_PROW_TESTS "unit parallel serial $(if [ "${CSI_PROW_KUBERNETES_VERSION}" = "latest" ]; then echo parallel-alpha serial-alpha; fi) sanity" "tests to run"
tests_enabled () {
local t1 t2
# We want word-splitting here, so ignore: Quote to prevent word splitting, or split robustly with mapfile or read -a.
# shellcheck disable=SC2206
local tests=(${CSI_PROW_TESTS})
for t1 in "$@"; do
for t2 in "${tests[@]}"; do
if [ "$t1" = "$t2" ]; then
return
fi
done
done
return 1
}
sanity_enabled () {
[ "${CSI_PROW_TESTS_SANITY}" = "sanity" ] && tests_enabled "sanity"
}
tests_need_kind () {
tests_enabled "parallel" "serial" "serial-alpha" "parallel-alpha" ||
sanity_enabled
}
tests_need_non_alpha_cluster () {
tests_enabled "parallel" "serial" ||
sanity_enabled
}
tests_need_alpha_cluster () {
tests_enabled "parallel-alpha" "serial-alpha"
}
# Regex for non-alpha, feature-tagged tests that should be run.
#
configvar CSI_PROW_E2E_FOCUS_LATEST '\[Feature:VolumeSnapshotDataSource\]' "non-alpha, feature-tagged tests for latest Kubernetes version"
configvar CSI_PROW_E2E_FOCUS "$(get_versioned_variable CSI_PROW_E2E_FOCUS "${csi_prow_kubernetes_version_suffix}")" "non-alpha, feature-tagged tests"
# Serial vs. parallel is always determined by these regular expressions.
# Individual regular expressions are separated by spaces for readability
# and are expected not to contain spaces. Use dots instead. The complete
# regex for Ginkgo will be created by joining the individual terms.
configvar CSI_PROW_E2E_SERIAL '\[Serial\] \[Disruptive\]' "tags for serial E2E tests"
regex_join () {
echo "$@" | sed -e 's/ */|/g' -e 's/^|*//' -e 's/|*$//' -e 's/^$/this-matches-nothing/g'
}
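# Illustration (comment only, not executed): regex_join '\[Serial\]' '\[Disruptive\]'
# prints '\[Serial\]|\[Disruptive\]'; joining an empty list yields
# 'this-matches-nothing' so the resulting regex never matches by accident.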
# Which tests are alpha depends on the Kubernetes version. We could
# use the same E2E tests for all Kubernetes versions. This would have
# the advantage that new tests can be applied to older versions
# without having to backport tests.
#
# But the feature tag gets removed from E2E tests when the corresponding
# feature becomes beta, so we would have to track which tests were
# alpha in previous Kubernetes releases. This was considered too
# error prone. Therefore we use E2E tests that match the Kubernetes
# version that is getting tested.
configvar CSI_PROW_E2E_ALPHA_LATEST '\[Feature:' "alpha tests for latest Kubernetes version" # there's no need to update this; adding a new case for CSI_PROW_E2E for a new Kubernetes release is enough
configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi_prow_kubernetes_version_suffix}")" "alpha tests"
# After the parallel E2E test without alpha features, a test cluster
# with alpha features is brought up and tests that were previously
# disabled are run. The alpha gates in each release have to be listed
# explicitly. If none are set (= variable empty), alpha testing
# is skipped.
#
# Testing against "latest" Kubernetes is problematic because some alpha
# features which used to work might stop working or change their behavior
# such that the current tests no longer pass. If that happens,
# kubernetes-csi components must be updated, either by disabling
# the failing test for "latest" or by updating the test and not running
# it anymore for older releases.
configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'GenericEphemeralVolume=true,CSIStorageCapacity=true' "alpha feature gates for latest Kubernetes"
configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates"
# Which external-snapshotter tag to use for the snapshotter CRD and snapshot-controller deployment
default_csi_snapshotter_version () {
if [ "${CSI_PROW_KUBERNETES_VERSION}" = "latest" ] || [ "${CSI_PROW_DRIVER_CANARY}" = "canary" ]; then
echo "master"
else
echo "v3.0.2"
fi
}
configvar CSI_SNAPSHOTTER_VERSION "$(default_csi_snapshotter_version)" "external-snapshotter version tag"
# Some tests are known to be unusable in a KinD cluster. For example,
# stopping kubelet with "ssh <node IP> systemctl stop kubelet" simply
# doesn't work. Such tests should be written in a way that they verify
# whether they can run with the current cluster provider, but until
# they are, we filter them out by name. Like the other test selection
# variables, this is again a space separated list of regular expressions.
#
# "different node" test skips can be removed once
# https://github.com/kubernetes/kubernetes/pull/82678 has been backported
# to all the K8s versions we test against.
configvar CSI_PROW_E2E_SKIP 'Disruptive|different\s+node' "tests that need to be skipped"
# This is the directory for additional result files. Usually set by Prow, but
# if not (for example, when invoking manually) it defaults to the work directory.
configvar ARTIFACTS "${CSI_PROW_WORK}/artifacts" "artifacts"
mkdir -p "${ARTIFACTS}"
run () {
echo "$(date) $(go version | sed -e 's/.*version \(go[^ ]*\).*/\1/') $(if [ "$(pwd)" != "${REPO_DIR}" ]; then pwd; fi)\$" "$@" >&2
"$@"
}
info () {
echo >&2 INFO: "$@"
}
warn () {
echo >&2 WARNING: "$@"
}
die () {
echo >&2 ERROR: "$@"
exit 1
}
# For additional tools.
CSI_PROW_BIN="${CSI_PROW_WORK}/bin"
mkdir -p "${CSI_PROW_BIN}"
PATH="${CSI_PROW_BIN}:$PATH"
# Ensure that PATH has the desired version of the Go tools, then run command given as argument.
# Empty parameter uses the already installed Go. In Prow, that version is kept up-to-date by
# bumping the container image regularly.
run_with_go () {
local version
version="$1"
shift
if ! [ "$version" ] || go version 2>/dev/null | grep -q "go$version"; then
run "$@"
else
if ! [ -d "${CSI_PROW_WORK}/go-$version" ]; then
run curl --fail --location "https://dl.google.com/go/go$version.linux-amd64.tar.gz" | tar -C "${CSI_PROW_WORK}" -zxf - || die "installation of Go $version failed"
mv "${CSI_PROW_WORK}/go" "${CSI_PROW_WORK}/go-$version"
fi
PATH="${CSI_PROW_WORK}/go-$version/bin:$PATH" run "$@"
fi
}
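# Illustration (comment only, not executed): "run_with_go 1.15 go build ./..."
# uses the pre-installed Go if it already is version 1.15, otherwise it downloads
# Go 1.15 into the work directory once and prepends it to PATH for this call.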
# Ensure that we have the desired version of kind.
install_kind () {
if kind --version 2>/dev/null | grep -q " ${CSI_PROW_KIND_VERSION}$"; then
return
fi
if run curl --fail --location -o "${CSI_PROW_WORK}/bin/kind" "https://github.com/kubernetes-sigs/kind/releases/download/${CSI_PROW_KIND_VERSION}/kind-linux-amd64"; then
chmod u+x "${CSI_PROW_WORK}/bin/kind"
else
git_checkout https://github.com/kubernetes-sigs/kind "${GOPATH}/src/sigs.k8s.io/kind" "${CSI_PROW_KIND_VERSION}" --depth=1 &&
(cd "${GOPATH}/src/sigs.k8s.io/kind" && make install INSTALL_DIR="${CSI_PROW_WORK}/bin")
fi
}
# Ensure that we have the desired version of the ginkgo test runner.
install_ginkgo () {
# CSI_PROW_GINKGO_VERSION contains the tag with a "v" prefix; the command line output does not.
if [ "v$(ginkgo version 2>/dev/null | sed -e 's/.* //')" = "${CSI_PROW_GINKGO_VERSION}" ]; then
return
fi
git_checkout https://github.com/onsi/ginkgo "$GOPATH/src/github.com/onsi/ginkgo" "${CSI_PROW_GINKGO_VERSION}" --depth=1 &&
# We have to get dependencies and hence can't call just "go build".
run_with_go "${CSI_PROW_GO_VERSION_GINKGO}" go get github.com/onsi/ginkgo/ginkgo || die "building ginkgo failed" &&
mv "$GOPATH/bin/ginkgo" "${CSI_PROW_BIN}"
}
# Ensure that we have the desired version of dep.
install_dep () {
if dep version 2>/dev/null | grep -q "version:.*${CSI_PROW_DEP_VERSION}$"; then
return
fi
run curl --fail --location -o "${CSI_PROW_WORK}/bin/dep" "https://github.com/golang/dep/releases/download/v0.5.4/dep-linux-amd64" &&
chmod u+x "${CSI_PROW_WORK}/bin/dep"
}
# This checks out a repo ("https://github.com/kubernetes/kubernetes")
# in a certain location ("$GOPATH/src/k8s.io/kubernetes") at
# a certain revision (a hex commit hash, v1.13.1, master). It's okay
# for that directory to exist already.
git_checkout () {
local repo path revision
repo="$1"
shift
path="$1"
shift
revision="$1"
shift
mkdir -p "$path"
if ! [ -d "$path/.git" ]; then
run git init "$path"
fi
if (cd "$path" && run git fetch "$@" "$repo" "$revision"); then
(cd "$path" && run git checkout FETCH_HEAD) || die "checking out $repo $revision failed"
else
# Might have been because fetching by revision is not
# supported by GitHub (https://github.com/isaacs/github/issues/436).
# Fall back to fetching everything.
(cd "$path" && run git fetch "$repo" '+refs/heads/*:refs/remotes/csiprow/heads/*' '+refs/tags/*:refs/tags/*') || die "fetching $repo failed"
(cd "$path" && run git checkout "$revision") || die "checking out $repo $revision failed"
fi
# This is useful for local testing or when switching between different revisions in the same
# repo.
(cd "$path" && run git clean -fdx) || die "failed to clean $path"
}
# This clones a repo ("https://github.com/kubernetes/kubernetes")
# in a certain location ("$GOPATH/src/k8s.io/kubernetes") at
# the head of a specific branch (e.g., release-1.13 or master).
# The directory cannot exist.
git_clone_branch () {
local repo path branch parent
repo="$1"
shift
path="$1"
shift
branch="$1"
shift
parent="$(dirname "$path")"
mkdir -p "$parent"
(cd "$parent" && run git clone --single-branch --branch "$branch" "$repo" "$path") || die "cloning $repo" failed
# This is useful for local testing or when switching between different revisions in the same
# repo.
(cd "$path" && run git clean -fdx) || die "failed to clean $path"
}
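# Turns feature gates in the format foo=true,bar=false into indented
# "name: value" lines for the featureGates section of the kind cluster
# configuration generated in start_cluster below.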
list_gates () (
set -f; IFS=','
# Ignore: Double quote to prevent globbing and word splitting.
# shellcheck disable=SC2086
set -- $1
while [ "$1" ]; do
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
echo "$1" | sed -e 's/ *\([^ =]*\) *= *\([^ ]*\) */ \1: \2/'
shift
done
)
# Turn feature gates in the format foo=true,bar=false into
# a YAML map with the corresponding API groups for use
# with https://kind.sigs.k8s.io/docs/user/configuration/#runtime-config
list_api_groups () (
set -f; IFS=','
# Ignore: Double quote to prevent globbing and word splitting.
# shellcheck disable=SC2086
set -- $1
while [ "$1" ]; do
if [ "$1" = 'CSIStorageCapacity=true' ]; then
echo ' "storage.k8s.io/v1alpha1": "true"'
fi
shift
done
)
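# Illustration (comment only, not executed):
#   list_api_groups 'GenericEphemeralVolume=true,CSIStorageCapacity=true'
# emits the runtime-config entry for "storage.k8s.io/v1alpha1" because the
# CSIStorageCapacity gate needs that alpha API group; other gates produce no output.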
go_version_for_kubernetes () (
local path="$1"
local version="$2"
local go_version
# We use the minimal Go version specified for each K8S release (= minimum_go_version in hack/lib/golang.sh).
# More recent versions might also work, but we don't want to count on that.
go_version="$(grep minimum_go_version= "$path/hack/lib/golang.sh" | sed -e 's/.*=go//')"
if ! [ "$go_version" ]; then
die "Unable to determine Go version for Kubernetes $version from hack/lib/golang.sh."
fi
# Strip the trailing .0. Kubernetes includes it, Go itself doesn't.
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
go_version="$(echo "$go_version" | sed -e 's/\.0$//')"
echo "$go_version"
)
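# Illustration (comment only, not executed): for a checkout whose
# hack/lib/golang.sh contains minimum_go_version=go1.15.0, the function above
# prints "1.15" (the trailing .0 is stripped).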
csi_prow_kind_have_kubernetes=false
# Brings up a Kubernetes cluster and sets KUBECONFIG.
# Accepts additional feature gates in the form gate1=true|false,gate2=...
start_cluster () {
local image gates
gates="$1"
if kind get clusters | grep -q csi-prow; then
run kind delete cluster --name=csi-prow || die "kind delete failed"
fi
# Try to find a pre-built kind image if asked to use a specific version.
if ! [[ "${CSI_PROW_KUBERNETES_VERSION}" =~ ^release-|^latest$ ]]; then
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
major_minor=$(echo "${CSI_PROW_KUBERNETES_VERSION}" | sed -e 's/^\([0-9]*\)\.\([0-9]*\).*/\1.\2/')
for i in ${CSI_PROW_KIND_IMAGES}; do
if echo "$i" | grep -q "kindest/node:v${major_minor}"; then
image="$i"
break
fi
done
fi
# Need to build from source?
if ! [ "$image" ]; then
if ! ${csi_prow_kind_have_kubernetes}; then
local version="${CSI_PROW_KUBERNETES_VERSION}"
if [ "$version" = "latest" ]; then
version=master
fi
if ${CSI_PROW_USE_BAZEL}; then
type="bazel"
else
type="docker"
fi
git_clone_branch https://github.com/kubernetes/kubernetes "${CSI_PROW_WORK}/src/kubernetes" "$version" || die "checking out Kubernetes $version failed"
go_version="$(go_version_for_kubernetes "${CSI_PROW_WORK}/src/kubernetes" "$version")" || die "cannot proceed without knowing Go version for Kubernetes"
# Changing into the Kubernetes source code directory is a workaround for https://github.com/kubernetes-sigs/kind/issues/1910
(cd "${CSI_PROW_WORK}/src/kubernetes" && run_with_go "$go_version" kind build node-image --image csiprow/node:latest --type="$type" --kube-root "${CSI_PROW_WORK}/src/kubernetes") || die "'kind build node-image' failed"
csi_prow_kind_have_kubernetes=true
fi
image="csiprow/node:latest"
fi
cat >"${CSI_PROW_WORK}/kind-config.yaml" <<EOF
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
featureGates:
$(list_gates "$gates")
runtimeConfig:
$(list_api_groups "$gates")
EOF
info "kind-config.yaml:"
cat "${CSI_PROW_WORK}/kind-config.yaml"
if ! run kind create cluster --name csi-prow --config "${CSI_PROW_WORK}/kind-config.yaml" --wait 5m --image "$image"; then
warn "Cluster creation failed. Will try again with higher verbosity."
info "Available Docker images:"
docker image ls
if ! run kind --loglevel debug create cluster --retain --name csi-prow --config "${CSI_PROW_WORK}/kind-config.yaml" --wait 5m --image "$image"; then
run kind export logs --name csi-prow "$ARTIFACTS/kind-cluster"
die "Cluster creation failed again, giving up. See the 'kind-cluster' artifact directory for additional logs."
fi
fi
export KUBECONFIG="${HOME}/.kube/config"
}
# Deletes kind cluster inside a prow job
delete_cluster_inside_prow_job() {
# Inside a real Prow job it is better to clean up at runtime
# instead of leaving that to the Prow job cleanup code
# because the latter sometimes times out (https://github.com/kubernetes-csi/csi-release-tools/issues/24#issuecomment-554765872).
if [ "$JOB_NAME" ]; then
if kind get clusters | grep -q csi-prow; then
run kind delete cluster --name=csi-prow || die "kind delete failed"
fi
unset KUBECONFIG
fi
}
# Looks for the deployment as specified by CSI_PROW_DEPLOYMENT and CSI_PROW_KUBERNETES_VERSION
# in the given directory.
find_deployment () {
local dir file
dir="$1"
# Fixed deployment name? Use it if it exists, otherwise fail.
if [ "${CSI_PROW_DEPLOYMENT}" ]; then
file="$dir/${CSI_PROW_DEPLOYMENT}/deploy.sh"
if ! [ -e "$file" ]; then
return 1
fi
echo "$file"
return 0
fi
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
file="$dir/kubernetes-$(echo "${CSI_PROW_KUBERNETES_VERSION}" | sed -e 's/\([0-9]*\)\.\([0-9]*\).*/\1.\2/')/deploy.sh"
if ! [ -e "$file" ]; then
file="$dir/kubernetes-latest/deploy.sh"
if ! [ -e "$file" ]; then
return 1
fi
fi
echo "$file"
}
# This installs the CSI driver. It's called with a list of env variables
# that override the default images. CSI_PROW_DRIVER_CANARY overrides all
# image versions with that canary version.
install_csi_driver () {
local images deploy_driver
images="$*"
if [ "${CSI_PROW_DEPLOYMENT}" = "none" ]; then
return 1
fi
if ${CSI_PROW_BUILD_JOB}; then
# Ignore: Double quote to prevent globbing and word splitting.
# Ignore: To read lines rather than words, pipe/redirect to a 'while read' loop.
# shellcheck disable=SC2086 disable=SC2013
for i in $(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//'); do
kind load docker-image --name csi-prow $i:csiprow || die "could not load the $i:latest image into the kind cluster"
done
fi
if deploy_driver="$(find_deployment "$(pwd)/deploy")"; then
:
elif [ "${CSI_PROW_DRIVER_REPO}" = "none" ]; then
return 1
else
git_checkout "${CSI_PROW_DRIVER_REPO}" "${CSI_PROW_WORK}/csi-driver" "${CSI_PROW_DRIVER_VERSION}" --depth=1 || die "checking out CSI driver repo failed"
if deploy_driver="$(find_deployment "${CSI_PROW_WORK}/csi-driver/deploy")"; then
:
else
die "deploy.sh not found in ${CSI_PROW_DRIVER_REPO} ${CSI_PROW_DRIVER_VERSION}. To disable E2E testing, set CSI_PROW_DRIVER_REPO=none"
fi
fi
if [ "${CSI_PROW_DRIVER_CANARY}" != "stable" ]; then
if [ "${CSI_PROW_DRIVER_CANARY}" == "canary" ]; then
images="$images IMAGE_TAG=${CSI_PROW_DRIVER_CANARY} IMAGE_REGISTRY=${CSI_PROW_DRIVER_CANARY_REGISTRY}"
else
images="$images IMAGE_TAG=${CSI_PROW_DRIVER_CANARY}"
fi
fi
# Ignore: Double quote to prevent globbing and word splitting.
# It's intentional here for $images.
# shellcheck disable=SC2086
if ! run env "CSI_PROW_TEST_DRIVER=${CSI_PROW_WORK}/test-driver.yaml" $images "${deploy_driver}"; then
# Collect information about failed deployment before failing.
collect_cluster_info
(start_loggers >/dev/null; wait)
info "For container output see job artifacts."
die "deploying the CSI driver with ${deploy_driver} failed"
fi
}
# Installs all necessary snapshotter CRDs
install_snapshot_crds() {
# Wait until volumesnapshot CRDs are in place.
CRD_BASE_DIR="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/client/config/crd"
if [[ ${REPO_DIR} == *"external-snapshotter"* ]]; then
CRD_BASE_DIR="${REPO_DIR}/client/config/crd"
fi
echo "Installing snapshot CRDs from ${CRD_BASE_DIR}"
kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotclasses.yaml" --validate=false
kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshots.yaml" --validate=false
kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotcontents.yaml" --validate=false
cnt=0
until kubectl get volumesnapshotclasses.snapshot.storage.k8s.io \
&& kubectl get volumesnapshots.snapshot.storage.k8s.io \
&& kubectl get volumesnapshotcontents.snapshot.storage.k8s.io; do
if [ $cnt -gt 30 ]; then
echo >&2 "ERROR: snapshot CRDs not ready after over 1 min"
exit 1
fi
echo "$(date +%H:%M:%S)" "waiting for snapshot CRDs, attempt #$cnt"
cnt=$((cnt + 1))
sleep 2
done
}
# Install snapshot controller and associated RBAC, retrying until the pod is running.
install_snapshot_controller() {
CONTROLLER_DIR="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}"
if [[ ${REPO_DIR} == *"external-snapshotter"* ]]; then
CONTROLLER_DIR="${REPO_DIR}"
fi
SNAPSHOT_RBAC_YAML="${CONTROLLER_DIR}/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml"
echo "kubectl apply -f ${SNAPSHOT_RBAC_YAML}"
# Ignore: Double quote to prevent globbing and word splitting.
# shellcheck disable=SC2086
kubectl apply -f ${SNAPSHOT_RBAC_YAML}
cnt=0
until kubectl get clusterrolebinding snapshot-controller-role; do
if [ $cnt -gt 30 ]; then
echo "Cluster role bindings:"
kubectl describe clusterrolebinding
echo >&2 "ERROR: snapshot controller RBAC not ready after over 5 min"
exit 1
fi
echo "$(date +%H:%M:%S)" "waiting for snapshot RBAC setup complete, attempt #$cnt"
cnt=$((cnt + 1))
sleep 10
done
SNAPSHOT_CONTROLLER_YAML="${CONTROLLER_DIR}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml"
if [[ ${REPO_DIR} == *"external-snapshotter"* ]]; then
# snapshot-controller image built from the PR will get a "csiprow" tag.
# Load it into the "kind" cluster so that we can deploy it.
NEW_TAG="csiprow"
NEW_IMG="snapshot-controller:${NEW_TAG}"
echo "kind load docker-image --name csi-prow ${NEW_IMG}"
kind load docker-image --name csi-prow ${NEW_IMG} || die "could not load the snapshot-controller:csiprow image into the kind cluster"
# deploy snapshot-controller
echo "Deploying snapshot-controller"
# Replace image in SNAPSHOT_CONTROLLER_YAML with snapshot-controller:csiprow and deploy
# NOTE: This logic is similar to the logic here:
# https://github.com/kubernetes-csi/csi-driver-host-path/blob/v1.4.0/deploy/util/deploy-hostpath.sh#L155
# Ignore: Double quote to prevent globbing and word splitting.
# shellcheck disable=SC2086
# Ignore: Use find instead of ls to better handle non-alphanumeric filenames.
# shellcheck disable=SC2012
for i in $(ls ${SNAPSHOT_CONTROLLER_YAML} | sort); do
echo " $i"
# Ignore: Useless cat. Consider 'cmd < file | ..' or 'cmd file | ..' instead.
# shellcheck disable=SC2002
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
modified="$(cat "$i" | while IFS= read -r line; do
nocomments="$(echo "$line" | sed -e 's/ *#.*$//')"
if echo "$nocomments" | grep -q '^[[:space:]]*image:[[:space:]]*'; then
# Split 'image: k8s.gcr.io/sig-storage/snapshot-controller:v3.0.0'
# into image (snapshot-controller:v3.0.0),
# name (snapshot-controller),
# tag (v3.0.0).
image=$(echo "$nocomments" | sed -e 's;.*image:[[:space:]]*;;')
name=$(echo "$image" | sed -e 's;.*/\([^:]*\).*;\1;')
tag=$(echo "$image" | sed -e 's;.*:;;')
# Now replace registry and/or tag
NEW_TAG="csiprow"
line="$(echo "$nocomments" | sed -e "s;$image;${name}:${NEW_TAG};")"
echo " using $line" >&2
fi
echo "$line"
done)"
if ! echo "$modified" | kubectl apply -f -; then
echo "modified version of $i:"
echo "$modified"
exit 1
fi
echo "kubectl apply -f ${SNAPSHOT_CONTROLLER_YAML}(modified)"
done
else
echo "kubectl apply -f ${CONTROLLER_DIR}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml"
kubectl apply -f "${CONTROLLER_DIR}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml"
fi
cnt=0
expected_running_pods=$(curl https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/"${CSI_SNAPSHOTTER_VERSION}"/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | grep replicas | cut -d ':' -f 2-)
while [ "$(kubectl get pods -l app=snapshot-controller | grep 'Running' -c)" -lt "$expected_running_pods" ]; do
if [ $cnt -gt 30 ]; then
echo "snapshot-controller pod status:"
kubectl describe pods -l app=snapshot-controller
echo >&2 "ERROR: snapshot controller not ready after over 5 min"
exit 1
fi
echo "$(date +%H:%M:%S)" "waiting for snapshot controller deployment to complete, attempt #$cnt"
cnt=$((cnt + 1))
sleep 10
done
}
# collect logs and cluster status (like the version of all components, Kubernetes version, test version)
collect_cluster_info () {
cat <<EOF
=========================================================
Kubernetes:
$(kubectl version)
Driver installation in default namespace:
$(kubectl get all)
Images in cluster:
REPOSITORY TAG REVISION
$(
# Here we iterate over all images that are in use and print some information about them.
# The "revision" label is where our build process puts the version number and revision,
# which is always unique, in contrast to the tag (think "canary"...).
docker exec csi-prow-control-plane docker image ls --format='{{.Repository}} {{.Tag}} {{.ID}}' | grep -e csi -e hostpath | while read -r repo tag id; do
echo "$repo" "$tag" "$(docker exec csi-prow-control-plane docker image inspect --format='{{ index .Config.Labels "revision"}}' "$id")"
done
)
=========================================================
EOF
}
# Gets logs of all containers in all namespaces. When passed -f, kubectl will
# keep running and capture new output. Prints the pid of all background processes.
# The caller must kill (when using -f) and/or wait for them.
#
# May be called multiple times and thus appends.
start_loggers () {
kubectl get pods --all-namespaces -o go-template --template='{{range .items}}{{.metadata.namespace}} {{.metadata.name}} {{range .spec.containers}}{{.name}} {{end}}{{"\n"}}{{end}}' | while read -r namespace pod containers; do
for container in $containers; do
mkdir -p "${ARTIFACTS}/$namespace/$pod"
kubectl logs -n "$namespace" "$@" "$pod" "$container" >>"${ARTIFACTS}/$namespace/$pod/$container.log" &
echo "$!"
done
done
}
# Makes the E2E test suite binary available as "${CSI_PROW_WORK}/e2e.test".
install_e2e () {
if [ -e "${CSI_PROW_WORK}/e2e.test" ]; then
return
fi
git_checkout "${CSI_PROW_E2E_REPO}" "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}" --depth=1 &&
if [ "${CSI_PROW_E2E_IMPORT_PATH}" = "k8s.io/kubernetes" ]; then
go_version="${CSI_PROW_GO_VERSION_E2E:-$(go_version_for_kubernetes "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}")}" &&
run_with_go "$go_version" make WHAT=test/e2e/e2e.test "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" &&
ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/e2e.test" "${CSI_PROW_WORK}"
else
run_with_go "${CSI_PROW_GO_VERSION_E2E}" go test -c -o "${CSI_PROW_WORK}/e2e.test" "${CSI_PROW_E2E_IMPORT_PATH}/test/e2e"
fi
}
# Makes the csi-sanity test suite binary available as
# "${CSI_PROW_WORK}/csi-sanity".
install_sanity () (
if [ -e "${CSI_PROW_WORK}/csi-sanity" ]; then
return
fi
git_checkout "${CSI_PROW_SANITY_REPO}" "${GOPATH}/src/${CSI_PROW_SANITY_IMPORT_PATH}" "${CSI_PROW_SANITY_VERSION}" --depth=1 || die "checking out csi-sanity failed"
run_with_go "${CSI_PROW_GO_VERSION_SANITY}" go test -c -o "${CSI_PROW_WORK}/csi-sanity" "${CSI_PROW_SANITY_IMPORT_PATH}/cmd/csi-sanity" || die "building csi-sanity failed"
)
# Captures pod output while running some other command.
run_with_loggers () (
loggers=$(start_loggers -f)
trap 'kill $loggers' EXIT
run "$@"
)
# Invokes the filter-junit.go tool.
run_filter_junit () {
run_with_go "${CSI_PROW_GO_VERSION_BUILD}" go run "${RELEASE_TOOLS_ROOT}/filter-junit.go" "$@"
}
# Runs the E2E test suite in a sub-shell.
run_e2e () (
name="$1"
shift
install_e2e || die "building e2e.test failed"
install_ginkgo || die "installing ginkgo failed"
# Rename, merge and filter JUnit files. Necessary in case that we run the E2E suite again
# and to avoid the large number of "skipped" tests that we get from using
# the full Kubernetes E2E testsuite while only running a few tests.
move_junit () {
if ls "${ARTIFACTS}"/junit_[0-9]*.xml 2>/dev/null >/dev/null; then
run_filter_junit -t="External Storage" -o "${ARTIFACTS}/junit_${name}.xml" "${ARTIFACTS}"/junit_[0-9]*.xml && rm -f "${ARTIFACTS}"/junit_[0-9]*.xml
fi
}
trap move_junit EXIT
cd "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" &&
run_with_loggers ginkgo -v "$@" "${CSI_PROW_WORK}/e2e.test" -- -report-dir "${ARTIFACTS}" -storage.testdriver="${CSI_PROW_WORK}/test-driver.yaml"
)
# Run csi-sanity against installed CSI driver.
run_sanity () (
install_sanity || die "installing csi-sanity failed"
cat >"${CSI_PROW_WORK}/mkdir_in_pod.sh" <<EOF
#!/bin/sh
kubectl exec "${CSI_PROW_SANITY_POD}" -c "${CSI_PROW_SANITY_CONTAINER}" -- mkdir "\$@" && echo "\$@"
EOF
# Using "rm -rf" as fallback for "rmdir" is a workaround for:
# Node Service
# should work
# /nvme/gopath.tmp/src/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go:624
# STEP: reusing connection to CSI driver at dns:///172.17.0.2:30896
# STEP: creating mount and staging directories
# STEP: creating a single node writer volume
# STEP: getting a node id
# STEP: node staging volume
# STEP: publishing the volume on a node
# STEP: cleaning up calling nodeunpublish
# STEP: cleaning up calling nodeunstage
# STEP: cleaning up deleting the volume
# cleanup: deleting sanity-node-full-35A55673-604D59E1 = 5211b280-4fad-11e9-8127-0242dfe2bdaf
# cleanup: warning: NodeUnpublishVolume: rpc error: code = NotFound desc = volume id 5211b280-4fad-11e9-8127-0242dfe2bdaf does not exit in the volumes list
# rmdir: '/tmp/mount': Directory not empty
# command terminated with exit code 1
#
# Somehow the mount directory was not empty. All tests after that
# failed in "mkdir". This only occurred once, so its uncertain
# why it happened.
cat >"${CSI_PROW_WORK}/rmdir_in_pod.sh" <<EOF
#!/bin/sh
if ! kubectl exec "${CSI_PROW_SANITY_POD}" -c "${CSI_PROW_SANITY_CONTAINER}" -- rmdir "\$@"; then
kubectl exec "${CSI_PROW_SANITY_POD}" -c "${CSI_PROW_SANITY_CONTAINER}" -- rm -rf "\$@"
exit 1
fi
EOF
chmod u+x "${CSI_PROW_WORK}"/*dir_in_pod.sh
# This cannot run in parallel, because -csi.junitfile output
# from different Ginkgo nodes would go to the same file. Also the
# staging and target directories are the same.
run_with_loggers "${CSI_PROW_WORK}/csi-sanity" \
-ginkgo.v \
-csi.junitfile "${ARTIFACTS}/junit_sanity.xml" \
-csi.endpoint "dns:///$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' csi-prow-control-plane):$(kubectl get "services/${CSI_PROW_SANITY_SERVICE}" -o "jsonpath={..nodePort}")" \
-csi.stagingdir "/tmp/staging" \
-csi.mountdir "/tmp/mount" \
-csi.createstagingpathcmd "${CSI_PROW_WORK}/mkdir_in_pod.sh" \
-csi.createmountpathcmd "${CSI_PROW_WORK}/mkdir_in_pod.sh" \
-csi.removestagingpathcmd "${CSI_PROW_WORK}/rmdir_in_pod.sh" \
-csi.removemountpathcmd "${CSI_PROW_WORK}/rmdir_in_pod.sh" \
)
ascii_to_xml () {
# We must escape special characters and remove escape sequences
# (no good representation in the simple XML that we generate
# here). filter-junit.go would choke on them during decoding, even
# when disabling strict parsing.
sed -e 's/&/&amp;/g' -e 's/</\&lt;/g' -e 's/>/\&gt;/g' -e 's/\x1B...//g'
}
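# Illustration (comment only, not executed): piping "1 < 2 & 3 > 2" through
# ascii_to_xml yields "1 &lt; 2 &amp; 3 &gt; 2"; ANSI escape sequences
# (ESC plus the following three characters) are stripped entirely.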
# The "make test" output starts each test with "### <test-target>:"
# and then ends when the next test starts or with "make: ***
# [<test-target>] Error 1" when there was a failure. Here we read each
# line of that output, split it up into individual tests and generate
# a make-test.xml file in JUnit format.
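# Illustration (comment only, not executed): hypothetical "make test" output like
#   ### test-fmt:
#   needs formatting: pkg/foo.go
#   make: *** [test-fmt] Error 1
# becomes a <testcase name="test-fmt"> entry in junit_make_test.xml whose
# <system-out> and <failure> elements contain the captured output.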
make_test_to_junit () {
local ret out testname testoutput
ret=0
# Plain make-test.xml was not delivered as text/xml by the web
# server and ignored by spyglass. It seems that the name has to
# match junit*.xml.
out="${ARTIFACTS}/junit_make_test.xml"
testname=
echo "<testsuite>" >>"$out"
while IFS= read -r line; do
echo "$line" # pass through
if echo "$line" | grep -q "^### [^ ]*:$"; then
if [ "$testname" ]; then
# previous test was successful
echo " </system-out>" >>"$out"
echo " </testcase>" >>"$out"
fi
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
#
# start new test
testname="$(echo "$line" | sed -e 's/^### \([^ ]*\):$/\1/')"
testoutput=
echo " <testcase name=\"$testname\">" >>"$out"
echo " <system-out>" >>"$out"
elif echo "$line" | grep -q '^make: .*Error [0-9]*$'; then
if [ "$testname" ]; then
# Ignore: Consider using { cmd1; cmd2; } >> file instead of individual redirects.
# shellcheck disable=SC2129
#
# end test with failure
echo " </system-out>" >>"$out"
# Include the same text as in <system-out> also in <failure>,
# because then it is easier to view in spyglass (shown directly
# instead of having to click through to stdout).
echo " <failure>" >>"$out"
echo -n "$testoutput" | ascii_to_xml >>"$out"
echo " </failure>" >>"$out"
echo " </testcase>" >>"$out"
fi
# remember failure for exit code
ret=1
# not currently inside a test
testname=
else
if [ "$testname" ]; then
# Test output.
echo "$line" | ascii_to_xml >>"$out"
testoutput="$testoutput$line
"
fi
fi
done
# if still in a test, close it now
if [ "$testname" ]; then
echo " </system-out>" >>"$out"
echo " </testcase>" >>"$out"
fi
echo "</testsuite>" >>"$out"
# this makes the error more visible in spyglass
if [ "$ret" -ne 0 ]; then
echo "ERROR: 'make test' failed"
return 1
fi
}
# version_gt returns true if arg1 is greater than arg2.
#
# This function expects versions to be one of the following formats:
# X.Y.Z, release-X.Y.Z, vX.Y.Z
#
# where X,Y, and Z are any number.
#
# Partial versions (1.2, release-1.2) work as well.
# The follow substrings are stripped before version comparison:
# - "v"
# - "release-"
# - "kubernetes-"
#
# Usage:
# version_gt release-1.3 v1.2.0 (returns true)
# version_gt v1.1.1 v1.2.0 (returns false)
# version_gt 1.1.1 v1.2.0 (returns false)
# version_gt 1.3.1 v1.2.0 (returns true)
# version_gt 1.1.1 release-1.2.0 (returns false)
# version_gt 1.2.0 1.2.2 (returns false)
function version_gt() {
versions=$(for ver in "$@"; do ver=${ver#release-}; ver=${ver#kubernetes-}; echo "${ver#v}"; done)
greaterVersion=${1#"release-"};
greaterVersion=${greaterVersion#"kubernetes-"};
greaterVersion=${greaterVersion#"v"};
test "$(printf '%s' "$versions" | sort -V | head -n 1)" != "$greaterVersion"
}
main () {
local images ret
ret=0
images=
if ${CSI_PROW_BUILD_JOB}; then
# A successful build is required for testing.
run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make all "GOFLAGS_VENDOR=${GOFLAGS_VENDOR}" "BUILD_PLATFORMS=${CSI_PROW_BUILD_PLATFORMS}" || die "'make all' failed"
# We don't want test failures to prevent E2E testing below, because the failure
# might have been minor or unavoidable, for example when experimenting with
# changes in "release-tools" in a PR (that fails the "is release-tools unmodified"
# test).
if tests_enabled "unit"; then
if [ -f Gopkg.toml ] && ! install_dep; then
warn "installing 'dep' failed, cannot test vendoring"
ret=1
fi
if ! run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make -k test "GOFLAGS_VENDOR=${GOFLAGS_VENDOR}" 2>&1 | make_test_to_junit; then
warn "'make test' failed, proceeding anyway"
ret=1
fi
fi
# Required for E2E testing.
run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make container "GOFLAGS_VENDOR=${GOFLAGS_VENDOR}" || die "'make container' failed"
fi
if tests_need_kind; then
install_kind || die "installing kind failed"
if ${CSI_PROW_BUILD_JOB}; then
cmds="$(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//')"
# Get the image that was just built (if any) from the
# top-level Makefile CMDS variable and set the
# deploy.sh env variables for it. We also need to
# side-load those images into the cluster.
for i in $cmds; do
e=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr - _)
images="$images ${e}_REGISTRY=none ${e}_TAG=csiprow"
# We must avoid the tag "latest" because that implies
# always pulling the image
# (https://github.com/kubernetes-sigs/kind/issues/328).
docker tag "$i:latest" "$i:csiprow" || die "tagging the locally built container image for $i failed"
# For components with multiple cmds, the RBAC file should be in the following format:
# rbac-$cmd.yaml
# If this file cannot be found, we can default to the standard location:
# deploy/kubernetes/rbac.yaml
rbac_file_path=$(find . -type f -name "rbac-$i.yaml")
if [ "$rbac_file_path" == "" ]; then
rbac_file_path="$(pwd)/deploy/kubernetes/rbac.yaml"
fi
if [ -e "$rbac_file_path" ]; then
# This is one of those components which has its own RBAC rules (like external-provisioner).
# We are testing a locally built image and also want to test with the current,
# potentially modified RBAC rules.
e=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr - _)
images="$images ${e}_RBAC=$rbac_file_path"
fi
done
fi
if tests_need_non_alpha_cluster; then
start_cluster || die "starting the non-alpha cluster failed"
# Install necessary snapshot CRDs and snapshot controller
install_snapshot_crds
install_snapshot_controller
# Installing the driver might be disabled.
if ${CSI_PROW_DRIVER_INSTALL} "$images"; then
collect_cluster_info
if sanity_enabled; then
if ! run_sanity; then
ret=1
fi
fi
if tests_enabled "parallel"; then
# Ignore: Double quote to prevent globbing and word splitting.
# shellcheck disable=SC2086
if ! run_e2e parallel ${CSI_PROW_GINKO_PARALLEL} \
-focus="External.Storage" \
-skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then
warn "E2E parallel failed"
ret=1
fi
# Run tests that are feature tagged, but non-alpha
# Ignore: Double quote to prevent globbing and word splitting.
# shellcheck disable=SC2086
if ! run_e2e parallel-features ${CSI_PROW_GINKO_PARALLEL} \
-focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_FOCUS}"))" \
-skip="$(regex_join "${CSI_PROW_E2E_SERIAL}")"; then
warn "E2E parallel features failed"
ret=1
fi
fi
if tests_enabled "serial"; then
if ! run_e2e serial \
-focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_SERIAL}"))" \
-skip="$(regex_join "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then
warn "E2E serial failed"
ret=1
fi
fi
fi
delete_cluster_inside_prow_job
fi
if tests_need_alpha_cluster && [ "${CSI_PROW_E2E_ALPHA_GATES}" ]; then
# Need to (re)create the cluster.
start_cluster "${CSI_PROW_E2E_ALPHA_GATES}" || die "starting alpha cluster failed"
# Install necessary snapshot CRDs and snapshot controller
install_snapshot_crds
install_snapshot_controller
# Installing the driver might be disabled.
if ${CSI_PROW_DRIVER_INSTALL} "$images"; then
collect_cluster_info
if tests_enabled "parallel-alpha"; then
# Ignore: Double quote to prevent globbing and word splitting.
# shellcheck disable=SC2086
if ! run_e2e parallel-alpha ${CSI_PROW_GINKO_PARALLEL} \
-focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_ALPHA}"))" \
-skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_SKIP}")"; then
warn "E2E parallel alpha failed"
ret=1
fi
fi
if tests_enabled "serial-alpha"; then
if ! run_e2e serial-alpha \
-focus="External.Storage.*(($(regex_join "${CSI_PROW_E2E_SERIAL}")).*($(regex_join "${CSI_PROW_E2E_ALPHA}"))|($(regex_join "${CSI_PROW_E2E_ALPHA}")).*($(regex_join "${CSI_PROW_E2E_SERIAL}")))" \
-skip="$(regex_join "${CSI_PROW_E2E_SKIP}")"; then
warn "E2E serial alpha failed"
ret=1
fi
fi
fi
delete_cluster_inside_prow_job
fi
fi
# Merge all junit files into one. This gets rid of duplicated "skipped" tests.
if ls "${ARTIFACTS}"/junit_*.xml 2>/dev/null >&2; then
run_filter_junit -o "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"/junit_*.xml && rm "${ARTIFACTS}"/junit_*.xml && mv "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"
fi
return "$ret"
}
# This function can be called by a repo's top-level cloudbuild.sh:
# it handles environment set up in the GCR cloud build and then
# invokes "make push-multiarch" to do the actual image building.
gcr_cloud_build () {
# Register gcloud as a Docker credential helper.
# Required for "docker buildx build --push".
gcloud auth configure-docker
if find . -name Dockerfile | grep -v ^./vendor | xargs --no-run-if-empty cat | grep -q ^RUN; then
# Needed for "RUN" steps on non-linux/amd64 platforms.
# See https://github.com/multiarch/qemu-user-static#getting-started
(set -x; docker run --rm --privileged multiarch/qemu-user-static --reset -p yes)
fi
# Extract tag-n-hash value from GIT_TAG (form vYYYYMMDD-tag-n-hash) for REV value.
REV=v$(echo "$GIT_TAG" | cut -f3- -d 'v')
run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make push-multiarch REV="${REV}" REGISTRY_NAME="${REGISTRY_NAME}" BUILD_PLATFORMS="${CSI_PROW_BUILD_PLATFORMS}"
}
language: go
sudo: required
services:
- docker
git:
depth: false
matrix:
include:
- go: 1.15
before_script:
- mkdir -p bin
- wget https://github.com/golang/dep/releases/download/v0.5.1/dep-linux-amd64 -O bin/dep
- chmod u+x bin/dep
- export PATH=$PWD/bin:$PATH
script:
- make -k all test GOFLAGS_VENDOR=$( [ -d vendor ] && echo '-mod=vendor' )
after_success:
- if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" quay.io;
make push GOFLAGS_VENDOR=$( [ -d vendor ] && echo '-mod=vendor' );
fi
#!/usr/bin/env bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ -f Gopkg.toml ]; then
echo "Repo uses 'dep' for vendoring."
(set -x; dep ensure)
elif [ -f go.mod ]; then
release-tools/verify-go-version.sh "go"
(set -x; env GO111MODULE=on go mod tidy && env GO111MODULE=on go mod vendor)
fi
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function kube::util::sourced_variable {
# Call this function to tell shellcheck that a variable is supposed to
# be used from other calling context. This helps quiet an "unused
# variable" warning from shellcheck and also document your code.
true
}
kube::util::sortable_date() {
date "+%Y%m%d-%H%M%S"
}
# arguments: target, item1, item2, item3, ...
# returns 0 if target is in the given items, 1 otherwise.
kube::util::array_contains() {
local search="$1"
local element
shift
for element; do
if [[ "${element}" == "${search}" ]]; then
return 0
fi
done
return 1
}
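# Illustration (comment only, not executed):
#   kube::util::array_contains "b" "a" "b" "c"   # returns 0 (found)
#   kube::util::array_contains "d" "a" "b" "c"   # returns 1 (not found)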
# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
kube::util::trap_add() {
local trap_add_cmd
trap_add_cmd=$1
shift
for trap_add_name in "$@"; do
local existing_cmd
local new_cmd
# Grab the currently defined trap commands for this trap
existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}')
if [[ -z "${existing_cmd}" ]]; then
new_cmd="${trap_add_cmd}"
else
new_cmd="${trap_add_cmd};${existing_cmd}"
fi
# Assign the test. Disable the shellcheck warning telling that trap
# commands should be single quoted to avoid evaluating them at this
# point instead evaluating them at run time. The logic of adding new
# commands to a single trap requires them to be evaluated right away.
# shellcheck disable=SC2064
trap "${new_cmd}" "${trap_add_name}"
done
}
kube::util::download_file() {
local -r url=$1
local -r destination_file=$2
rm "${destination_file}" 2&> /dev/null || true
for i in $(seq 5)
do
if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then
echo "Downloading ${url} failed. $((5-i)) retries left."
sleep 1
else
echo "Downloading ${url} succeed"
return 0
fi
done
return 1
}
# Wait for background jobs to finish. Return with
# an error status if any of the jobs failed.
kube::util::wait-for-jobs() {
local fail=0
local job
for job in $(jobs -p); do
wait "${job}" || fail=$((fail + 1))
done
return ${fail}
}
# kube::util::join <delim> <list...>
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: kube::util::join , a b c
# -> a,b,c
function kube::util::join {
local IFS="$1"
shift
echo "$*"
}
# kube::util::check-file-in-alphabetical-order <file>
# Check that the file is in alphabetical order
#
function kube::util::check-file-in-alphabetical-order {
local failure_file="$1"
if ! diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then
{
echo
echo "${failure_file} is not in alphabetical order. Please sort it:"
echo
echo " LC_ALL=C sort -o ${failure_file} ${failure_file}"
echo
} >&2
false
fi
}
# Some useful colors.
if [[ -z "${color_start-}" ]]; then
declare -r color_start="\033["
declare -r color_red="${color_start}0;31m"
declare -r color_yellow="${color_start}0;33m"
declare -r color_green="${color_start}0;32m"
declare -r color_blue="${color_start}1;34m"
declare -r color_cyan="${color_start}1;36m"
declare -r color_norm="${color_start}0m"
kube::util::sourced_variable "${color_start}"
kube::util::sourced_variable "${color_red}"
kube::util::sourced_variable "${color_yellow}"
kube::util::sourced_variable "${color_green}"
kube::util::sourced_variable "${color_blue}"
kube::util::sourced_variable "${color_cyan}"
kube::util::sourced_variable "${color_norm}"
fi
# ex: ts=2 sw=2 et filetype=sh
#!/usr/bin/env bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
GO="$1"
if [ ! "$GO" ]; then
echo >&2 "usage: $0 <path to go binary>"
exit 1
fi
die () {
echo "ERROR: $*"
exit 1
}
version=$("$GO" version) || die "determining version of $GO failed"
# shellcheck disable=SC2001
majorminor=$(echo "$version" | sed -e 's/.*go\([0-9]*\)\.\([0-9]*\).*/\1.\2/')
# shellcheck disable=SC2001
expected=$(grep "^ *- go:" "release-tools/travis.yml" | sed -e 's/.*go: *\([0-9]*\)\.\([0-9]*\).*/\1.\2/')
if [ "$majorminor" != "$expected" ]; then
cat >&2 <<EOF
======================================================
WARNING
This project is tested with Go v$expected.
Your current Go version is v$majorminor.
This may or may not be close enough.
In particular test-gofmt and test-vendor
are known to be sensitive to the version of
Go.
======================================================
EOF
fi
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# The csi-release-tools directory.
TOOLS="$(dirname "${BASH_SOURCE[0]}")"
. "${TOOLS}/util.sh"
# Directory to check. Default is the parent of the tools themselves.
ROOT="${1:-${TOOLS}/..}"
# required version for this script, if not installed on the host we will
# use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE
SHELLCHECK_VERSION="0.6.0"
# upstream shellcheck latest stable image as of January 10th, 2019
SHELLCHECK_IMAGE="koalaman/shellcheck-alpine:v0.6.0@sha256:7d4d712a2686da99d37580b4e2f45eb658b74e4b01caf67c1099adc294b96b52"
# fixed name for the shellcheck docker container so we can reliably clean it up
SHELLCHECK_CONTAINER="k8s-shellcheck"
# disabled lints
disabled=(
# this lint disallows non-constant source, which we use extensively without
# any known bugs
1090
# this lint prefers command -v to which, they are not the same
2230
)
# comma separate for passing to shellcheck
join_by() {
local IFS="$1";
shift;
echo "$*";
}
SHELLCHECK_DISABLED="$(join_by , "${disabled[@]}")"
readonly SHELLCHECK_DISABLED
# creates the shellcheck container for later use
create_container () {
# TODO(bentheelder): this is a performance hack, we create the container with
# a sleep MAX_INT32 so that it is effectively paused.
# We then repeatedly exec to it to run each shellcheck, and later rm it when
# we're done.
# This is much faster than creating a container for each shellcheck
# call ...
docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${ROOT}:${ROOT}" -w "${ROOT}" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647
}
# removes the shellcheck container
remove_container () {
docker rm -f "${SHELLCHECK_CONTAINER}" &> /dev/null || true
}
# ensure we're linting the source tree
cd "${ROOT}"
# find all shell scripts excluding ./_*, ./.git/*, ./vendor*,
# and anything git-ignored
all_shell_scripts=()
while IFS=$'\n' read -r script;
do git check-ignore -q "$script" || all_shell_scripts+=("$script");
done < <(find . -name "*.sh" \
-not \( \
-path ./_\* -o \
-path ./.git\* -o \
-path ./vendor\* \
\))
# detect if the host machine has the required shellcheck version installed
# if so, we will use that instead.
HAVE_SHELLCHECK=false
if which shellcheck &>/dev/null; then
detected_version="$(shellcheck --version | grep 'version: .*')"
if [[ "${detected_version}" = "version: ${SHELLCHECK_VERSION}" ]]; then
HAVE_SHELLCHECK=true
fi
fi
# tell the user which we've selected and possibly set up the container
if ${HAVE_SHELLCHECK}; then
echo "Using host shellcheck ${SHELLCHECK_VERSION} binary."
else
echo "Using shellcheck ${SHELLCHECK_VERSION} docker image."
# remove any previous container, ensure we will attempt to cleanup on exit,
# and create the container
remove_container
kube::util::trap_add 'remove_container' EXIT
if ! output="$(create_container 2>&1)"; then
{
echo "Failed to create shellcheck container with output: "
echo ""
echo "${output}"
} >&2
exit 1
fi
fi
# lint each script, tracking failures
errors=()
for f in "${all_shell_scripts[@]}"; do
set +o errexit
if ${HAVE_SHELLCHECK}; then
failedLint=$(shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}")
else
failedLint=$(docker exec -t ${SHELLCHECK_CONTAINER} \
shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}")
fi
set -o errexit
if [[ -n "${failedLint}" ]]; then
errors+=( "${failedLint}" )
fi
done
# Check to be sure all the packages that should pass lint are.
if [ ${#errors[@]} -eq 0 ]; then
echo 'Congratulations! All shell files are passing lint.'
else
{
echo "Errors from shellcheck:"
for err in "${errors[@]}"; do
echo "$err"
done
echo
echo 'Please review the above warnings. You can test via "./hack/verify-shellcheck"'
echo 'If the above warnings do not make sense, you can exempt them from shellcheck'
echo 'checking by adding the "shellcheck disable" directive'
echo '(https://github.com/koalaman/shellcheck/wiki/Directive#disable).'
echo
} >&2
false
fi
#! /bin/sh -e
#
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script verifies that the content of a directory managed
# by "git subtree" has not been modified locally. It does that
# by looking for commits that modify the files with the
# subtree prefix (aka directory) while ignoring merge
# commits. Merge commits are where "git subtree" pulls the
# upstream files into the directory.
#
# Theoretically a developer can subvert this check by modifying files
# in a merge commit, but in practice that shouldn't happen.
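# Example invocation (assuming this file is saved as verify-subtree.sh; in
# this repository the subtree directory pulled in by "git subtree" is
# release-tools):
#   ./verify-subtree.sh release-tools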
DIR="$1"
if [ ! "$DIR" ]; then
echo "usage: $0 <directory>" >&2
exit 1
fi
REV=$(git log -n1 --remove-empty --format=format:%H --no-merges -- "$DIR")
if [ "$REV" ]; then
echo "Directory '$DIR' contains non-upstream changes:"
echo
git log --no-merges -- "$DIR"
exit 1
else
echo "$DIR is a clean copy of upstream."
fi
#!/usr/bin/env bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ -f Gopkg.toml ]; then
echo "Repo uses 'dep' for vendoring."
case "$(dep version 2>/dev/null | grep 'version *:')" in
*v0.[56789]*)
if dep check; then
echo "vendor up-to-date"
else
exit 1
fi
;;
*) echo "skipping check, dep >= 0.5 required";;
esac
elif [ -f go.mod ]; then
echo "Repo uses 'go mod'."
# shellcheck disable=SC2235
if [ "${JOB_NAME}" ] &&
( [ "${JOB_TYPE}" != "presubmit" ] ||
[ "$( (git diff "${PULL_BASE_SHA}..HEAD" -- go.mod go.sum vendor release-tools;
git diff "${PULL_BASE_SHA}..HEAD" | grep -e '^@@.*@@ import (' -e '^[+-]import') |
wc -l)" -eq 0 ] ); then
echo "Skipping vendor check because the Prow pre-submit job does not affect dependencies."
elif ! (set -x; env GO111MODULE=on go mod tidy); then
echo "ERROR: vendor check failed."
exit 1
elif [ "$(git status --porcelain -- go.mod go.sum | wc -l)" -gt 0 ]; then
echo "ERROR: go module files *not* up-to-date, they did get modified by 'GO111MODULE=on go mod tidy':";
git diff -- go.mod go.sum
exit 1
elif [ -d vendor ]; then
if ! (set -x; env GO111MODULE=on go mod vendor); then
echo "ERROR: vendor check failed."
exit 1
elif [ "$(git status --porcelain -- vendor | wc -l)" -gt 0 ]; then
echo "ERROR: vendor directory *not* up-to-date, it did get modified by 'GO111MODULE=on go mod vendor':"
git status -- vendor
git diff -- vendor
exit 1
else
echo "Go dependencies and vendor directory up-to-date."
fi
else
echo "Go dependencies up-to-date."
fi
fi
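# Typical local usage (assuming this file is installed as
# release-tools/verify-vendor.sh): run it from the repository root with a
# clean working tree, since the check relies on "git status --porcelain" to
# detect changes made by "go mod tidy" and "go mod vendor":
#   ./release-tools/verify-vendor.sh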