package cache
import (
"fmt"
"net/url"
"sync"
"time"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
type Adapter interface {
GetDownloadURL() *url.URL
GetUploadURL() *url.URL
}
type Factory func(config *common.CacheConfig, timeout time.Duration, objectName string) (Adapter, error)
type FactoriesMap struct {
internal map[string]Factory
lock sync.RWMutex
}
func (m *FactoriesMap) Register(typeName string, factory Factory) error {
m.lock.Lock()
defer m.lock.Unlock()
if m.internal == nil {
m.internal = make(map[string]Factory)
}
if _, ok := m.internal[typeName]; ok {
return fmt.Errorf("adapter %q already registered", typeName)
}
m.internal[typeName] = factory
return nil
}
func (m *FactoriesMap) Find(typeName string) (Factory, error) {
m.lock.RLock()
defer m.lock.RUnlock()
factory := m.internal[typeName]
if factory == nil {
return nil, fmt.Errorf("factory for cache adapter %q was not registered", typeName)
}
return factory, nil
}
var factories = &FactoriesMap{}
func Factories() *FactoriesMap {
return factories
}
func CreateAdapter(cacheConfig *common.CacheConfig, timeout time.Duration, objectName string) (Adapter, error) {
create, err := Factories().Find(cacheConfig.Type)
if err != nil {
return nil, fmt.Errorf("cache factory not found: %w", err)
}
adapter, err := create(cacheConfig, timeout, objectName)
if err != nil {
return nil, fmt.Errorf("cache adapter could not be initialized: %w", err)
}
return adapter, nil
}
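// A minimal sketch (hypothetical, not part of the original source) of how an
// adapter plugs into the registry above. The "noop" adapter and its package
// are invented for illustration; the real gcs and s3 packages below follow
// the same init()-registration pattern.
package noop
import (
"net/url"
"time"
"gitlab.com/gitlab-org/gitlab-runner/cache"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
type noopAdapter struct{}
// GetDownloadURL returns no URL, which callers treat as a skipped cache.
func (a *noopAdapter) GetDownloadURL() *url.URL { return nil }
// GetUploadURL returns no URL, which callers treat as a skipped cache.
func (a *noopAdapter) GetUploadURL() *url.URL { return nil }
// New satisfies the cache.Factory signature.
func New(_ *common.CacheConfig, _ time.Duration, _ string) (cache.Adapter, error) {
return &noopAdapter{}, nil
}
func init() {
if err := cache.Factories().Register("noop", New); err != nil {
panic(err)
}
}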
package cache
import (
"fmt"
"net/url"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
var createAdapter = CreateAdapter
func getCacheConfig(build *common.Build) *common.CacheConfig {
if build == nil || build.Runner == nil || build.Runner.Cache == nil {
return nil
}
return build.Runner.Cache
}
func generateBaseObjectName(build *common.Build, config *common.CacheConfig) string {
runnerSegment := ""
if !config.GetShared() {
runnerSegment = path.Join("runner", build.Runner.ShortDescription())
}
return path.Join(config.GetPath(), runnerSegment, "project", strconv.Itoa(build.JobInfo.ProjectID))
}
func generateObjectName(build *common.Build, config *common.CacheConfig, key string) (string, error) {
if key == "" {
return "", nil
}
basePath := generateBaseObjectName(build, config)
fullPath := path.Join(basePath, key)
relative, err := filepath.Rel(basePath, fullPath)
if err != nil {
return "", fmt.Errorf("cache path correctness check failed with: %w", err)
}
if strings.HasPrefix(relative, ".."+string(filepath.Separator)) {
return "", fmt.Errorf("computed cache path outside of project bucket. Please remove `../` from cache key")
}
return fullPath, nil
}
func onAdapter(build *common.Build, key string, handler func(adapter Adapter) *url.URL) *url.URL {
config := getCacheConfig(build)
if config == nil {
logrus.Warning("Cache config not defined. Skipping cache operation.")
return nil
}
objectName, err := generateObjectName(build, config, key)
if err != nil {
logrus.WithError(err).Error("Error while generating cache bucket.")
return nil
}
if objectName == "" {
logrus.Warning("Empty cache key. Skipping adapter selection.")
return nil
}
adapter, err := createAdapter(config, build.GetBuildTimeout(), objectName)
if err != nil {
logrus.WithError(err).Error("Could not create cache adapter")
return nil
}
if adapter == nil {
return nil
}
return handler(adapter)
}
func GetCacheDownloadURL(build *common.Build, key string) *url.URL {
return onAdapter(build, key, func(adapter Adapter) *url.URL {
return adapter.GetDownloadURL()
})
}
func GetCacheUploadURL(build *common.Build, key string) *url.URL {
return onAdapter(build, key, func(adapter Adapter) *url.URL {
return adapter.GetUploadURL()
})
}
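// A self-contained sketch (hypothetical) of the traversal guard used by
// generateObjectName above: filepath.Rel reveals keys that escape the
// project prefix, which is why `../` keys are rejected.
package main
import (
"fmt"
"path"
"path/filepath"
"strings"
)
func main() {
base := "runner/abcdef12/project/42"
for _, key := range []string{"cache-default", "../../99/steal"} {
full := path.Join(base, key)
rel, err := filepath.Rel(base, full)
escapes := err == nil && strings.HasPrefix(rel, ".."+string(filepath.Separator))
fmt.Printf("key=%q escapes=%v\n", key, escapes)
}
}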
package gcs
import (
"fmt"
"net/http"
"net/url"
"time"
"cloud.google.com/go/storage"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/cache"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
type signedURLGenerator func(bucket string, name string, opts *storage.SignedURLOptions) (string, error)
type gcsAdapter struct {
timeout time.Duration
config *common.CacheGCSConfig
objectName string
generateSignedURL signedURLGenerator
credentialsResolver credentialsResolver
}
func (a *gcsAdapter) GetDownloadURL() *url.URL {
return a.presignURL(http.MethodGet, "")
}
func (a *gcsAdapter) GetUploadURL() *url.URL {
return a.presignURL(http.MethodPut, "application/octet-stream")
}
func (a *gcsAdapter) presignURL(method string, contentType string) *url.URL {
err := a.credentialsResolver.Resolve()
if err != nil {
logrus.Errorf("error while resolving GCS credentials: %v", err)
return nil
}
credentials := a.credentialsResolver.Credentials()
var privateKey []byte
if credentials.PrivateKey != "" {
privateKey = []byte(credentials.PrivateKey)
}
if a.config.BucketName == "" {
logrus.Error("BucketName can't be empty")
return nil
}
rawURL, err := a.generateSignedURL(a.config.BucketName, a.objectName, &storage.SignedURLOptions{
GoogleAccessID: credentials.AccessID,
PrivateKey: privateKey,
Method: method,
Expires: time.Now().Add(a.timeout),
ContentType: contentType,
})
if err != nil {
logrus.Errorf("error while generating GCS pre-signed URL: %v", err)
return nil
}
URL, err := url.Parse(rawURL)
if err != nil {
logrus.Errorf("error while parsing generated URL: %v", err)
return nil
}
return URL
}
func New(config *common.CacheConfig, timeout time.Duration, objectName string) (cache.Adapter, error) {
gcs := config.GCS
if gcs == nil {
return nil, fmt.Errorf("missing GCS configuration")
}
cr, err := credentialsResolverInitializer(gcs)
if err != nil {
return nil, fmt.Errorf("error while initializing GCS credentials resolver: %w", err)
}
a := &gcsAdapter{
config: gcs,
timeout: timeout,
objectName: objectName,
generateSignedURL: storage.SignedURL,
credentialsResolver: cr,
}
return a, nil
}
func init() {
err := cache.Factories().Register("gcs", New)
if err != nil {
panic(err)
}
}
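// Usage sketch (hypothetical; the import path of this package is assumed to be
// cache/gcs): the blank import runs init() above, registering the "gcs"
// factory, after which cache.CreateAdapter resolves it by type name. The
// bucket name and credentials file are placeholders.
package main
import (
"fmt"
"time"
"gitlab.com/gitlab-org/gitlab-runner/cache"
_ "gitlab.com/gitlab-org/gitlab-runner/cache/gcs"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
func main() {
cfg := &common.CacheConfig{
Type: "gcs",
GCS: &common.CacheGCSConfig{
BucketName: "runner-cache",
CredentialsFile: "/etc/gitlab-runner/service-account.json",
},
}
adapter, err := cache.CreateAdapter(cfg, time.Hour, "project/42/default")
if err != nil {
fmt.Println("adapter error:", err)
return
}
fmt.Println(adapter.GetUploadURL())
}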
package gcs
import (
"encoding/json"
"fmt"
"io/ioutil"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
type credentialsResolver interface {
Credentials() *common.CacheGCSCredentials
Resolve() error
}
const TypeServiceAccount = "service_account"
type credentialsFile struct {
Type string `json:"type"`
ClientEmail string `json:"client_email"`
PrivateKey string `json:"private_key"`
}
type defaultCredentialsResolver struct {
config *common.CacheGCSConfig
credentials *common.CacheGCSCredentials
}
func (cr *defaultCredentialsResolver) Credentials() *common.CacheGCSCredentials {
return cr.credentials
}
func (cr *defaultCredentialsResolver) Resolve() error {
if cr.config.CredentialsFile != "" {
return cr.readCredentialsFromFile()
}
return cr.readCredentialsFromConfig()
}
func (cr *defaultCredentialsResolver) readCredentialsFromFile() error {
data, err := ioutil.ReadFile(cr.config.CredentialsFile)
if err != nil {
return fmt.Errorf("error while reading credentials file: %w", err)
}
var credentialsFileContent credentialsFile
err = json.Unmarshal(data, &credentialsFileContent)
if err != nil {
return fmt.Errorf("error while parsing credentials file: %w", err)
}
if credentialsFileContent.Type != TypeServiceAccount {
return fmt.Errorf("unsupported credentials file type: %s", credentialsFileContent.Type)
}
logrus.Debugln("Credentials loaded from file. Skipping direct settings from Runner configuration file")
cr.credentials.AccessID = credentialsFileContent.ClientEmail
cr.credentials.PrivateKey = credentialsFileContent.PrivateKey
return nil
}
func (cr *defaultCredentialsResolver) readCredentialsFromConfig() error {
if cr.config.AccessID == "" || cr.config.PrivateKey == "" {
return fmt.Errorf("GCS config present, but credentials are not configured")
}
cr.credentials.AccessID = cr.config.AccessID
cr.credentials.PrivateKey = cr.config.PrivateKey
return nil
}
func newDefaultCredentialsResolver(config *common.CacheGCSConfig) (*defaultCredentialsResolver, error) {
if config == nil {
return nil, fmt.Errorf("config can't be nil")
}
credentials := &defaultCredentialsResolver{
config: config,
credentials: &common.CacheGCSCredentials{},
}
return credentials, nil
}
var credentialsResolverInitializer = newDefaultCredentialsResolver
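// A self-contained sketch (hypothetical) of the minimal service-account key
// file accepted by readCredentialsFromFile above; the email and key material
// are placeholders.
package main
import (
"encoding/json"
"fmt"
)
func main() {
data := []byte(`{
"type": "service_account",
"client_email": "runner@example.iam.gserviceaccount.com",
"private_key": "-----BEGIN PRIVATE KEY-----\n..."
}`)
var cf struct {
Type string `json:"type"`
ClientEmail string `json:"client_email"`
PrivateKey string `json:"private_key"`
}
if err := json.Unmarshal(data, &cf); err != nil {
panic(err)
}
fmt.Println(cf.Type, cf.ClientEmail)
}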
package s3
import (
"fmt"
"net/url"
"time"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/cache"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
type s3Adapter struct {
timeout time.Duration
config *common.CacheS3Config
objectName string
client minioClient
}
func (a *s3Adapter) GetDownloadURL() *url.URL {
URL, err := a.client.PresignedGetObject(a.config.BucketName, a.objectName, a.timeout, nil)
if err != nil {
logrus.WithError(err).Error("error while generating S3 pre-signed URL")
return nil
}
return URL
}
func (a *s3Adapter) GetUploadURL() *url.URL {
URL, err := a.client.PresignedPutObject(a.config.BucketName, a.objectName, a.timeout)
if err != nil {
logrus.WithError(err).Error("error while generating S3 pre-signed URL")
return nil
}
return URL
}
func New(config *common.CacheConfig, timeout time.Duration, objectName string) (cache.Adapter, error) {
s3 := config.S3
if s3 == nil {
return nil, fmt.Errorf("missing S3 configuration")
}
client, err := newMinioClient(s3)
if err != nil {
return nil, fmt.Errorf("error while creating S3 cache storage client: %w", err)
}
a := &s3Adapter{
config: s3,
timeout: timeout,
objectName: objectName,
client: client,
}
return a, nil
}
func init() {
err := cache.Factories().Register("s3", New)
if err != nil {
panic(err)
}
}
package s3
import (
"bytes"
"encoding/xml"
"io/ioutil"
"net/http"
)
type bucketLocationTripper struct {
bucketLocation string
}
// The Minio Golang library always attempts to query the bucket location and
// currently has no way of statically setting that value. To avoid that
// lookup, the Runner cache uses the library only to generate the URLs,
// forgoing the library's API for uploading and downloading files. The custom
// RoundTripper stubs out any network requests that would normally be made via
// the library.
func (b *bucketLocationTripper) RoundTrip(req *http.Request) (res *http.Response, err error) {
var buffer bytes.Buffer
err = xml.NewEncoder(&buffer).Encode(b.bucketLocation)
if err != nil {
return
}
res = &http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(&buffer),
}
return
}
func (b *bucketLocationTripper) CancelRequest(req *http.Request) {
// Do nothing
}
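// A sketch (hypothetical, would live in a _test.go file of this package) that
// exercises the stubbed RoundTrip directly; no network I/O takes place, and
// the canned body is the XML encoding of the configured location.
package s3
import (
"fmt"
"io/ioutil"
"net/http"
)
func ExampleBucketLocationTripper() {
tripper := &bucketLocationTripper{bucketLocation: "eu-west-1"}
req, _ := http.NewRequest(http.MethodGet, "https://s3.amazonaws.com/bucket?location=", nil)
res, _ := tripper.RoundTrip(req)
body, _ := ioutil.ReadAll(res.Body)
fmt.Println(res.StatusCode, string(body))
// Output: 200 <string>eu-west-1</string>
}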
package s3
import (
"net/url"
"time"
"github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/credentials"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
const DefaultAWSS3Server = "s3.amazonaws.com"
type minioClient interface {
PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (*url.URL, error)
PresignedPutObject(bucketName string, objectName string, expires time.Duration) (*url.URL, error)
}
var newMinio = minio.New
var newMinioWithCredentials = minio.NewWithCredentials
var newMinioClient = func(s3 *common.CacheS3Config) (minioClient, error) {
var client *minio.Client
var err error
if s3.ShouldUseIAMCredentials() {
iam := credentials.NewIAM("")
client, err = newMinioWithCredentials(DefaultAWSS3Server, iam, true, "")
} else {
client, err = newMinio(s3.ServerAddress, s3.AccessKey, s3.SecretKey, !s3.Insecure)
}
if err != nil {
return nil, err
}
client.SetCustomTransport(&bucketLocationTripper{
bucketLocation: s3.BucketLocation,
})
return client, nil
}
package commands
import (
"fmt"
"net/http"
"regexp"
"strings"
"sync"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
"gitlab.com/gitlab-org/gitlab-runner/session"
"github.com/prometheus/client_golang/prometheus"
)
var numBuildsDesc = prometheus.NewDesc(
"gitlab_runner_jobs",
"The current number of running builds.",
[]string{"runner", "state", "stage", "executor_stage"},
nil,
)
var requestConcurrencyDesc = prometheus.NewDesc(
"gitlab_runner_request_concurrency",
"The current number of concurrent requests for a new job",
[]string{"runner"},
nil,
)
var requestConcurrencyExceededDesc = prometheus.NewDesc(
"gitlab_runner_request_concurrency_exceeded_total",
"Counter tracking exceeding of request concurrency",
[]string{"runner"},
nil,
)
type statePermutation struct {
runner string
buildState common.BuildRuntimeState
buildStage common.BuildStage
executorStage common.ExecutorStage
}
func newStatePermutationFromBuild(build *common.Build) statePermutation {
return statePermutation{
runner: build.Runner.ShortDescription(),
buildState: build.CurrentState,
buildStage: build.CurrentStage,
executorStage: build.CurrentExecutorStage(),
}
}
type runnerCounter struct {
builds int
requests int
requestConcurrencyExceeded int
}
type buildsHelper struct {
counters map[string]*runnerCounter
builds []*common.Build
lock sync.Mutex
jobsTotal *prometheus.CounterVec
jobDurationHistogram *prometheus.HistogramVec
}
func (b *buildsHelper) getRunnerCounter(runner *common.RunnerConfig) *runnerCounter {
if b.counters == nil {
b.counters = make(map[string]*runnerCounter)
}
counter := b.counters[runner.Token]
if counter == nil {
counter = &runnerCounter{}
b.counters[runner.Token] = counter
}
return counter
}
func (b *buildsHelper) findSessionByURL(url string) *session.Session {
b.lock.Lock()
defer b.lock.Unlock()
for _, build := range b.builds {
if strings.HasPrefix(url, build.Session.Endpoint+"/") {
return build.Session
}
}
return nil
}
func (b *buildsHelper) acquireBuild(runner *common.RunnerConfig) bool {
b.lock.Lock()
defer b.lock.Unlock()
counter := b.getRunnerCounter(runner)
if runner.Limit > 0 && counter.builds >= runner.Limit {
// Too many builds
return false
}
counter.builds++
return true
}
func (b *buildsHelper) releaseBuild(runner *common.RunnerConfig) bool {
b.lock.Lock()
defer b.lock.Unlock()
counter := b.getRunnerCounter(runner)
if counter.builds > 0 {
counter.builds--
return true
}
return false
}
func (b *buildsHelper) acquireRequest(runner *common.RunnerConfig) bool {
b.lock.Lock()
defer b.lock.Unlock()
counter := b.getRunnerCounter(runner)
if counter.requests >= runner.GetRequestConcurrency() {
counter.requestConcurrencyExceeded++
return false
}
counter.requests++
return true
}
func (b *buildsHelper) releaseRequest(runner *common.RunnerConfig) bool {
b.lock.Lock()
defer b.lock.Unlock()
counter := b.getRunnerCounter(runner)
if counter.requests > 0 {
counter.requests--
return true
}
return false
}
func (b *buildsHelper) addBuild(build *common.Build) {
if build == nil {
return
}
b.lock.Lock()
defer b.lock.Unlock()
runners := make(map[int]bool)
projectRunners := make(map[int]bool)
for _, otherBuild := range b.builds {
if otherBuild.Runner.Token != build.Runner.Token {
continue
}
runners[otherBuild.RunnerID] = true
if otherBuild.JobInfo.ProjectID != build.JobInfo.ProjectID {
continue
}
projectRunners[otherBuild.ProjectRunnerID] = true
}
for {
if !runners[build.RunnerID] {
break
}
build.RunnerID++
}
for {
if !projectRunners[build.ProjectRunnerID] {
break
}
build.ProjectRunnerID++
}
b.builds = append(b.builds, build)
b.jobsTotal.WithLabelValues(build.Runner.ShortDescription()).Inc()
}
func (b *buildsHelper) removeBuild(deleteBuild *common.Build) bool {
b.lock.Lock()
defer b.lock.Unlock()
b.jobDurationHistogram.WithLabelValues(deleteBuild.Runner.ShortDescription()).Observe(deleteBuild.Duration().Seconds())
for idx, build := range b.builds {
if build == deleteBuild {
b.builds = append(b.builds[0:idx], b.builds[idx+1:]...)
return true
}
}
return false
}
func (b *buildsHelper) buildsCount() int {
b.lock.Lock()
defer b.lock.Unlock()
return len(b.builds)
}
func (b *buildsHelper) statesAndStages() map[statePermutation]int {
b.lock.Lock()
defer b.lock.Unlock()
data := make(map[statePermutation]int)
for _, build := range b.builds {
data[newStatePermutationFromBuild(build)]++
}
return data
}
func (b *buildsHelper) runnersCounters() map[string]*runnerCounter {
b.lock.Lock()
defer b.lock.Unlock()
data := make(map[string]*runnerCounter)
for token, counter := range b.counters {
data[helpers.ShortenToken(token)] = counter
}
return data
}
// Describe implements prometheus.Collector.
func (b *buildsHelper) Describe(ch chan<- *prometheus.Desc) {
ch <- numBuildsDesc
ch <- requestConcurrencyDesc
ch <- requestConcurrencyExceededDesc
b.jobsTotal.Describe(ch)
b.jobDurationHistogram.Describe(ch)
}
// Collect implements prometheus.Collector.
func (b *buildsHelper) Collect(ch chan<- prometheus.Metric) {
builds := b.statesAndStages()
for state, count := range builds {
ch <- prometheus.MustNewConstMetric(
numBuildsDesc,
prometheus.GaugeValue,
float64(count),
state.runner,
string(state.buildState),
string(state.buildStage),
string(state.executorStage),
)
}
counters := b.runnersCounters()
for runner, counter := range counters {
ch <- prometheus.MustNewConstMetric(
requestConcurrencyDesc,
prometheus.GaugeValue,
float64(counter.requests),
runner,
)
ch <- prometheus.MustNewConstMetric(
requestConcurrencyExceededDesc,
prometheus.CounterValue,
float64(counter.requestConcurrencyExceeded),
runner,
)
}
b.jobsTotal.Collect(ch)
b.jobDurationHistogram.Collect(ch)
}
func (b *buildsHelper) ListJobsHandler(w http.ResponseWriter, r *http.Request) {
version := r.URL.Query().Get("v")
if version == "" {
version = "1"
}
handlers := map[string]http.HandlerFunc{
"1": b.listJobsHandlerV1,
"2": b.listJobsHandlerV2,
}
handler, ok := handlers[version]
if !ok {
w.WriteHeader(http.StatusNotFound)
fmt.Fprintf(w, "Request version %q not supported", version)
return
}
w.Header().Add("X-List-Version", version)
w.Header().Add("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
handler(w, r)
}
func (b *buildsHelper) listJobsHandlerV1(w http.ResponseWriter, r *http.Request) {
b.lock.Lock()
defer b.lock.Unlock()
for _, job := range b.builds {
fmt.Fprintf(
w,
"id=%d url=%s state=%s stage=%s executor_stage=%s\n",
job.ID, job.RepoCleanURL(),
job.CurrentState, job.CurrentStage, job.CurrentExecutorStage(),
)
}
}
func (b *buildsHelper) listJobsHandlerV2(w http.ResponseWriter, r *http.Request) {
b.lock.Lock()
defer b.lock.Unlock()
for _, job := range b.builds {
url := CreateJobURL(job.RepoCleanURL(), job.ID)
fmt.Fprintf(
w,
"url=%s state=%s stage=%s executor_stage=%s duration=%s\n",
url, job.CurrentState, job.CurrentStage, job.CurrentExecutorStage(), job.Duration(),
)
}
}
func CreateJobURL(projectURL string, jobID int) string {
r := regexp.MustCompile(`\.git$`)
URL := r.ReplaceAllString(projectURL, "")
return fmt.Sprintf("%s/-/jobs/%d", URL, jobID)
}
func newBuildsHelper() buildsHelper {
return buildsHelper{
jobsTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "gitlab_runner_jobs_total",
Help: "Total number of handled jobs",
},
[]string{"runner"},
),
jobDurationHistogram: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "gitlab_runner_job_duration_seconds",
Help: "Histogram of job durations",
Buckets: []float64{30, 60, 300, 600, 1800, 3600, 7200, 10800, 18000, 36000},
},
[]string{"runner"},
),
}
}
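// A sketch (hypothetical, test-style) of the ListJobsHandler contract above:
// known versions are echoed back in X-List-Version, unknown versions yield a
// 404 response.
package commands
import (
"fmt"
"net/http"
"net/http/httptest"
)
func demoListJobsHandler() {
b := newBuildsHelper()
req := httptest.NewRequest(http.MethodGet, "/debug/jobs/list?v=2", nil)
rec := httptest.NewRecorder()
b.ListJobsHandler(rec, req)
fmt.Println(rec.Code, rec.Header().Get("X-List-Version")) // 200 2
}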
package commands
import (
"fmt"
"net"
"os"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/network"
)
func getDefaultConfigFile() string {
return filepath.Join(getDefaultConfigDirectory(), "config.toml")
}
func getDefaultCertificateDirectory() string {
return filepath.Join(getDefaultConfigDirectory(), "certs")
}
type configOptions struct {
config *common.Config
ConfigFile string `short:"c" long:"config" env:"CONFIG_FILE" description:"Config file"`
}
func (c *configOptions) saveConfig() error {
return c.config.SaveConfig(c.ConfigFile)
}
func (c *configOptions) loadConfig() error {
config := common.NewConfig()
err := config.LoadConfig(c.ConfigFile)
if err != nil {
return err
}
c.config = config
return nil
}
func (c *configOptions) RunnerByName(name string) (*common.RunnerConfig, error) {
if c.config == nil {
return nil, fmt.Errorf("config has not been loaded")
}
for _, runner := range c.config.Runners {
if runner.Name == name {
return runner, nil
}
}
return nil, fmt.Errorf("could not find a runner with the name '%s'", name)
}
type configOptionsWithListenAddress struct {
configOptions
ListenAddress string `long:"listen-address" env:"LISTEN_ADDRESS" description:"Metrics / pprof server listening address"`
}
func (c *configOptionsWithListenAddress) listenAddress() (string, error) {
address := c.config.ListenAddress
if c.ListenAddress != "" {
address = c.ListenAddress
}
if address == "" {
return "", nil
}
_, port, err := net.SplitHostPort(address)
if err != nil && !strings.Contains(err.Error(), "missing port in address") {
return "", err
}
if len(port) == 0 {
return fmt.Sprintf("%s:%d", address, common.DefaultMetricsServerPort), nil
}
return address, nil
}
func init() {
configFile := os.Getenv("CONFIG_FILE")
if configFile == "" {
err := os.Setenv("CONFIG_FILE", getDefaultConfigFile())
if err != nil {
logrus.WithError(err).Fatal("Couldn't set CONFIG_FILE environment variable")
}
}
network.CertificateDirectory = getDefaultCertificateDirectory()
}
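// A sketch (hypothetical) of the listen-address fallback above: a bare host
// gains the default metrics port, while a full host:port is returned as-is.
package commands
import (
"fmt"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
func demoListenAddress() {
c := &configOptionsWithListenAddress{}
c.config = common.NewConfig()
c.ListenAddress = "localhost"
addr, err := c.listenAddress()
fmt.Println(addr, err) // localhost:<DefaultMetricsServerPort> <nil>
}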
// +build linux darwin freebsd openbsd
package commands
import (
"os"
"path/filepath"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
)
func getDefaultConfigDirectory() string {
if os.Getuid() == 0 {
return "/etc/gitlab-runner"
} else if homeDir := helpers.GetHomeDir(); homeDir != "" {
return filepath.Join(homeDir, ".gitlab-runner")
} else if currentDir := helpers.GetCurrentWorkingDirectory(); currentDir != "" {
return currentDir
}
panic("Cannot get default config file location")
}
package commands
import (
"os"
"os/exec"
"strings"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/ayufan/golang-cli-helpers"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/gitlab_ci_yaml_parser"
// Force to load all executors, executes init() on them
_ "gitlab.com/gitlab-org/gitlab-runner/executors/custom"
_ "gitlab.com/gitlab-org/gitlab-runner/executors/docker"
_ "gitlab.com/gitlab-org/gitlab-runner/executors/parallels"
_ "gitlab.com/gitlab-org/gitlab-runner/executors/shell"
_ "gitlab.com/gitlab-org/gitlab-runner/executors/ssh"
_ "gitlab.com/gitlab-org/gitlab-runner/executors/virtualbox"
)
type ExecCommand struct {
common.RunnerSettings
Job string
Timeout int `long:"timeout" description:"Job execution timeout (in seconds)"`
}
func (c *ExecCommand) runCommand(name string, arg ...string) (string, error) {
cmd := exec.Command(name, arg...)
cmd.Env = os.Environ()
cmd.Stderr = os.Stderr
result, err := cmd.Output()
return string(result), err
}
func (c *ExecCommand) createBuild(repoURL string, abortSignal chan os.Signal) (build *common.Build, err error) {
// Check if we have uncommitted changes
_, err = c.runCommand("git", "diff", "--quiet", "HEAD")
if err != nil {
logrus.Warningln("You most probably have uncommitted changes.")
logrus.Warningln("These changes will not be tested.")
}
// Parse Git settings
sha, err := c.runCommand("git", "rev-parse", "HEAD")
if err != nil {
return
}
beforeSha, err := c.runCommand("git", "rev-parse", "HEAD~1")
if err != nil {
beforeSha = "0000000000000000000000000000000000000000"
}
refName, err := c.runCommand("git", "rev-parse", "--abbrev-ref", "HEAD")
if err != nil {
return
}
jobResponse := common.JobResponse{
ID: 1,
Token: "",
AllowGitFetch: false,
JobInfo: common.JobInfo{
Name: "",
Stage: "",
ProjectID: 1,
ProjectName: "",
},
GitInfo: common.GitInfo{
RepoURL: repoURL,
Ref: strings.TrimSpace(refName),
Sha: strings.TrimSpace(sha),
BeforeSha: strings.TrimSpace(beforeSha),
},
RunnerInfo: common.RunnerInfo{
Timeout: c.getTimeout(),
},
}
runner := &common.RunnerConfig{
RunnerSettings: c.RunnerSettings,
}
build, err = common.NewBuild(jobResponse, runner, abortSignal, nil)
return
}
func (c *ExecCommand) getTimeout() int {
if c.Timeout > 0 {
return c.Timeout
}
return common.DefaultExecTimeout
}
func (c *ExecCommand) Execute(context *cli.Context) {
wd, err := os.Getwd()
if err != nil {
logrus.Fatalln(err)
}
switch len(context.Args()) {
case 1:
c.Job = context.Args().Get(0)
default:
cli.ShowSubcommandHelp(context)
os.Exit(1)
return
}
c.Executor = context.Command.Name
abortSignal := make(chan os.Signal)
doneSignal := make(chan int, 1)
go waitForInterrupts(nil, abortSignal, doneSignal, nil)
// Add self-volume to docker
if c.RunnerSettings.Docker == nil {
c.RunnerSettings.Docker = &common.DockerConfig{}
}
c.RunnerSettings.Docker.Volumes = append(c.RunnerSettings.Docker.Volumes, wd+":"+wd+":ro")
// Create build
build, err := c.createBuild(wd, abortSignal)
if err != nil {
logrus.Fatalln(err)
}
parser := gitlab_ci_yaml_parser.NewGitLabCiYamlParser(c.Job)
err = parser.ParseYaml(&build.JobResponse)
if err != nil {
logrus.Fatalln(err)
}
err = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})
if err != nil {
logrus.Fatalln(err)
}
}
func init() {
cmd := &ExecCommand{}
flags := clihelpers.GetFlagsFromStruct(cmd)
cliCmd := cli.Command{
Name: "exec",
Usage: "execute a build locally",
}
for _, executor := range common.GetExecutors() {
subCmd := cli.Command{
Name: executor,
Usage: "use " + executor + " executor",
Action: cmd.Execute,
Flags: flags,
}
cliCmd.Subcommands = append(cliCmd.Subcommands, subCmd)
}
common.RegisterCommand(cliCmd)
}
package commands
import (
"sync"
"time"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
type healthData struct {
failures int
lastCheck time.Time
}
type healthHelper struct {
healthy map[string]*healthData
healthyLock sync.Mutex
}
func (mr *healthHelper) getHealth(id string) *healthData {
if mr.healthy == nil {
mr.healthy = map[string]*healthData{}
}
health := mr.healthy[id]
if health == nil {
health = &healthData{
lastCheck: time.Now(),
}
mr.healthy[id] = health
}
return health
}
func (mr *healthHelper) isHealthy(id string) bool {
mr.healthyLock.Lock()
defer mr.healthyLock.Unlock()
health := mr.getHealth(id)
if health.failures < common.HealthyChecks {
return true
}
if time.Since(health.lastCheck) > common.HealthCheckInterval*time.Second {
logrus.Errorln("Runner", id, "is not healthy, but will be checked!")
health.failures = 0
health.lastCheck = time.Now()
return true
}
return false
}
func (mr *healthHelper) makeHealthy(id string, healthy bool) {
mr.healthyLock.Lock()
defer mr.healthyLock.Unlock()
health := mr.getHealth(id)
if healthy {
health.failures = 0
health.lastCheck = time.Now()
} else {
health.failures++
if health.failures >= common.HealthyChecks {
logrus.Errorln("Runner", id, "is not healthy and will be disabled!")
}
}
}
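// A sketch (hypothetical) of the failure bookkeeping above: after
// common.HealthyChecks consecutive failures, a runner is reported unhealthy
// until common.HealthCheckInterval has elapsed.
package commands
import (
"fmt"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
func demoHealthHelper() {
h := &healthHelper{}
for i := 0; i < common.HealthyChecks; i++ {
h.makeHealthy("runner-1", false)
}
fmt.Println(h.isHealthy("runner-1")) // false until the check interval passes
}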
package helpers
import (
"io/ioutil"
"os"
"time"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/archives"
"gitlab.com/gitlab-org/gitlab-runner/log"
"gitlab.com/gitlab-org/gitlab-runner/network"
)
type ArtifactsDownloaderCommand struct {
common.JobCredentials
retryHelper
network common.Network
}
func (c *ArtifactsDownloaderCommand) download(file string) error {
switch c.network.DownloadArtifacts(c.JobCredentials, file) {
case common.DownloadSucceeded:
return nil
case common.DownloadNotFound:
return os.ErrNotExist
case common.DownloadForbidden:
return os.ErrPermission
case common.DownloadFailed:
return retryableErr{err: os.ErrInvalid}
default:
return os.ErrInvalid
}
}
func (c *ArtifactsDownloaderCommand) Execute(context *cli.Context) {
log.SetRunnerFormatter()
if len(c.URL) == 0 || len(c.Token) == 0 {
logrus.Fatalln("Missing runner credentials")
}
if c.ID <= 0 {
logrus.Fatalln("Missing build ID")
}
// Create temporary file
file, err := ioutil.TempFile("", "artifacts")
if err != nil {
logrus.Fatalln(err)
}
file.Close()
defer os.Remove(file.Name())
// Download artifacts file
err = c.doRetry(func() error {
return c.download(file.Name())
})
if err != nil {
logrus.Fatalln(err)
}
// Extract artifacts file
err = archives.ExtractZipFile(file.Name())
if err != nil {
logrus.Fatalln(err)
}
}
func init() {
common.RegisterCommand2("artifacts-downloader", "download and extract build artifacts (internal)", &ArtifactsDownloaderCommand{
network: network.NewGitLabClient(),
retryHelper: retryHelper{
Retry: 2,
RetryTime: time.Second,
},
})
}
package helpers
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/archives"
"gitlab.com/gitlab-org/gitlab-runner/log"
"gitlab.com/gitlab-org/gitlab-runner/network"
)
const DefaultUploadName = "default"
type ArtifactsUploaderCommand struct {
common.JobCredentials
fileArchiver
retryHelper
network common.Network
Name string `long:"name" description:"The name of the archive"`
ExpireIn string `long:"expire-in" description:"When to expire artifacts"`
Format common.ArtifactFormat `long:"artifact-format" description:"Format of generated artifacts"`
Type string `long:"artifact-type" description:"Type of generated artifacts"`
}
func (c *ArtifactsUploaderCommand) generateZipArchive(w *io.PipeWriter) {
err := archives.CreateZipArchive(w, c.sortedFiles())
w.CloseWithError(err)
}
func (c *ArtifactsUploaderCommand) generateGzipStream(w *io.PipeWriter) {
err := archives.CreateGzipArchive(w, c.sortedFiles())
w.CloseWithError(err)
}
func (c *ArtifactsUploaderCommand) openRawStream() (io.ReadCloser, error) {
fileNames := c.sortedFiles()
if len(fileNames) > 1 {
return nil, errors.New("only one file can be send as raw")
}
return os.Open(fileNames[0])
}
func (c *ArtifactsUploaderCommand) createReadStream() (string, io.ReadCloser, error) {
if len(c.files) == 0 {
return "", nil, nil
}
name := filepath.Base(c.Name)
if name == "" || name == "." {
name = DefaultUploadName
}
switch c.Format {
case common.ArtifactFormatZip, common.ArtifactFormatDefault:
pr, pw := io.Pipe()
go c.generateZipArchive(pw)
return name + ".zip", pr, nil
case common.ArtifactFormatGzip:
pr, pw := io.Pipe()
go c.generateGzipStream(pw)
return name + ".gz", pr, nil
case common.ArtifactFormatRaw:
file, err := c.openRawStream()
return name, file, err
default:
return "", nil, fmt.Errorf("unsupported archive format: %s", c.Format)
}
}
func (c *ArtifactsUploaderCommand) createAndUpload() error {
artifactsName, stream, err := c.createReadStream()
if err != nil {
return err
}
if stream == nil {
logrus.Errorln("No files to upload")
return nil
}
defer stream.Close()
// Create the archive
options := common.ArtifactsOptions{
BaseName: artifactsName,
ExpireIn: c.ExpireIn,
Format: c.Format,
Type: c.Type,
}
// Upload the data
switch c.network.UploadRawArtifacts(c.JobCredentials, stream, options) {
case common.UploadSucceeded:
return nil
case common.UploadForbidden:
return os.ErrPermission
case common.UploadTooLarge:
return errors.New("too large")
case common.UploadFailed:
return retryableErr{err: os.ErrInvalid}
default:
return os.ErrInvalid
}
}
func (c *ArtifactsUploaderCommand) Execute(*cli.Context) {
log.SetRunnerFormatter()
if len(c.URL) == 0 || len(c.Token) == 0 {
logrus.Fatalln("Missing runner credentials")
}
if c.ID <= 0 {
logrus.Fatalln("Missing build ID")
}
// Enumerate files
err := c.enumerate()
if err != nil {
logrus.Fatalln(err)
}
// If the upload fails, exit with a non-zero exit code to indicate an issue
err = c.doRetry(c.createAndUpload)
if err != nil {
logrus.Fatalln(err)
}
}
func init() {
common.RegisterCommand2("artifacts-uploader", "create and upload build artifacts (internal)", &ArtifactsUploaderCommand{
network: network.NewGitLabClient(),
retryHelper: retryHelper{
Retry: 2,
RetryTime: time.Second,
},
Name: "artifacts",
})
}
package helpers
import (
"net/http"
"os"
"path/filepath"
"time"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/archives"
"gitlab.com/gitlab-org/gitlab-runner/helpers/url"
"gitlab.com/gitlab-org/gitlab-runner/log"
)
type CacheArchiverCommand struct {
fileArchiver
retryHelper
File string `long:"file" description:"The path to the file"`
URL string `long:"url" description:"URL of remote cache resource"`
Timeout int `long:"timeout" description:"Overall timeout for cache uploading request (in minutes)"`
client *CacheClient
}
func (c *CacheArchiverCommand) getClient() *CacheClient {
if c.client == nil {
c.client = NewCacheClient(c.Timeout)
}
return c.client
}
func (c *CacheArchiverCommand) upload() error {
logrus.Infoln("Uploading", filepath.Base(c.File), "to", url_helpers.CleanURL(c.URL))
file, err := os.Open(c.File)
if err != nil {
return err
}
defer file.Close()
fi, err := file.Stat()
if err != nil {
return err
}
req, err := http.NewRequest("PUT", c.URL, file)
if err != nil {
return retryableErr{err: err}
}
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Last-Modified", fi.ModTime().Format(http.TimeFormat))
req.ContentLength = fi.Size()
resp, err := c.getClient().Do(req)
if err != nil {
return retryableErr{err: err}
}
defer resp.Body.Close()
return retryOnServerError(resp)
}
func (c *CacheArchiverCommand) Execute(*cli.Context) {
log.SetRunnerFormatter()
if c.File == "" {
logrus.Fatalln("Missing --file")
}
// Enumerate files
err := c.enumerate()
if err != nil {
logrus.Fatalln(err)
}
// Check if list of files changed
if !c.isFileChanged(c.File) {
logrus.Infoln("Archive is up to date!")
return
}
// Create archive
err = archives.CreateZipFile(c.File, c.sortedFiles())
if err != nil {
logrus.Fatalln(err)
}
// Upload archive if needed
if c.URL != "" {
err := c.doRetry(c.upload)
if err != nil {
logrus.Fatalln(err)
}
} else {
logrus.Infoln("No URL provided, cache will be not uploaded to shared cache server. Cache will be stored only locally.")
}
}
func init() {
common.RegisterCommand2("cache-archiver", "create and upload cache artifacts (internal)", &CacheArchiverCommand{
retryHelper: retryHelper{
Retry: 2,
RetryTime: time.Second,
},
})
}
package helpers
import (
"net"
"net/http"
"time"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
type CacheClient struct {
http.Client
}
func (c *CacheClient) prepareClient(timeout int) {
if timeout > 0 {
c.Timeout = time.Duration(timeout) * time.Minute
} else {
c.Timeout = time.Duration(common.DefaultCacheRequestTimeout) * time.Minute
}
}
func (c *CacheClient) prepareTransport() {
c.Transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 10 * time.Second,
ResponseHeaderTimeout: 30 * time.Second,
}
}
func NewCacheClient(timeout int) *CacheClient {
client := &CacheClient{}
client.prepareClient(timeout)
client.prepareTransport()
return client
}
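// Usage sketch (hypothetical): CacheClient embeds http.Client, so the standard
// request API applies; the URL below is a placeholder.
package helpers
import (
"fmt"
"net/http"
)
func demoCacheClient() {
client := NewCacheClient(10) // overall timeout of 10 minutes
req, err := http.NewRequest(http.MethodGet, "https://cache.example.com/key", nil)
if err != nil {
fmt.Println(err)
return
}
resp, err := client.Do(req)
if err != nil {
fmt.Println(err)
return
}
defer resp.Body.Close()
fmt.Println(resp.Status)
}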
package helpers
import (
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"time"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/archives"
url_helpers "gitlab.com/gitlab-org/gitlab-runner/helpers/url"
"gitlab.com/gitlab-org/gitlab-runner/log"
)
type CacheExtractorCommand struct {
retryHelper
File string `long:"file" description:"The file containing your cache artifacts"`
URL string `long:"url" description:"URL of remote cache resource"`
Timeout int `long:"timeout" description:"Overall timeout for cache downloading request (in minutes)"`
client *CacheClient
}
func (c *CacheExtractorCommand) getClient() *CacheClient {
if c.client == nil {
c.client = NewCacheClient(c.Timeout)
}
return c.client
}
func checkIfUpToDate(path string, resp *http.Response) (bool, time.Time) {
fi, _ := os.Lstat(path)
date, _ := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
return fi != nil && !date.After(fi.ModTime()), date
}
func (c *CacheExtractorCommand) download() error {
err := os.MkdirAll(filepath.Dir(c.File), 0700)
if err != nil {
return err
}
resp, err := c.getCache()
if err != nil {
return err
}
defer resp.Body.Close()
upToDate, date := checkIfUpToDate(c.File, resp)
if upToDate {
logrus.Infoln(filepath.Base(c.File), "is up to date")
return nil
}
file, err := ioutil.TempFile(filepath.Dir(c.File), "cache")
if err != nil {
return err
}
defer os.Remove(file.Name())
defer file.Close()
logrus.Infoln("Downloading", filepath.Base(c.File), "from", url_helpers.CleanURL(c.URL))
_, err = io.Copy(file, resp.Body)
if err != nil {
return retryableErr{err: err}
}
err = os.Chtimes(file.Name(), time.Now(), date)
if err != nil {
return err
}
err = file.Close()
if err != nil {
return err
}
err = os.Rename(file.Name(), c.File)
if err != nil {
return err
}
return nil
}
func (c *CacheExtractorCommand) getCache() (*http.Response, error) {
resp, err := c.getClient().Get(c.URL)
if err != nil {
return nil, retryableErr{err: err}
}
if resp.StatusCode == http.StatusNotFound {
resp.Body.Close()
return nil, os.ErrNotExist
}
return resp, retryOnServerError(resp)
}
func (c *CacheExtractorCommand) Execute(context *cli.Context) {
log.SetRunnerFormatter()
if len(c.File) == 0 {
logrus.Fatalln("Missing cache file")
}
if c.URL != "" {
err := c.doRetry(c.download)
if err != nil {
logrus.Fatalln(err)
}
} else {
logrus.Infoln("No URL provided, cache will not be downloaded from shared cache server. Instead a local version of cache will be extracted.")
}
err := archives.ExtractZipFile(c.File)
if err != nil && !os.IsNotExist(err) {
logrus.Fatalln(err)
}
}
func init() {
common.RegisterCommand2("cache-extractor", "download and extract cache artifacts (internal)", &CacheExtractorCommand{
retryHelper: retryHelper{
Retry: 2,
RetryTime: time.Second,
},
})
}
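// A sketch (hypothetical, test-style) of checkIfUpToDate above: when no local
// file exists, the cache is never considered up to date and a download follows.
package helpers
import (
"fmt"
"net/http"
"time"
)
func demoCheckIfUpToDate() {
resp := &http.Response{Header: http.Header{}}
resp.Header.Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
upToDate, _ := checkIfUpToDate("does-not-exist", resp)
fmt.Println(upToDate) // false
}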
package helpers
import (
"os"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
// CacheInitCommand takes one or more directory/file paths and initializes them
// correctly for use as cache. Each path is passed as a separate positional
// argument, which keeps paths containing spaces intact.
type CacheInitCommand struct{}
func (c *CacheInitCommand) Execute(ctx *cli.Context) {
if ctx.NArg() == 0 {
logrus.Fatal("No arguments passed, at least 1 path is required.")
}
for _, path := range ctx.Args() {
err := os.Chmod(path, os.ModePerm)
if err != nil {
logrus.WithError(err).Error("failed to chmod path")
}
}
}
func init() {
common.RegisterCommand2("cache-init", "changed permissions for cache paths (internal)", &CacheInitCommand{})
}
package helpers
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"time"
"github.com/sirupsen/logrus"
)
type fileArchiver struct {
Paths []string `long:"path" description:"Add paths to archive"`
Untracked bool `long:"untracked" description:"Add git untracked files"`
Verbose bool `long:"verbose" description:"Detailed information"`
wd string
files map[string]os.FileInfo
}
func (c *fileArchiver) isChanged(modTime time.Time) bool {
for _, info := range c.files {
if modTime.Before(info.ModTime()) {
return true
}
}
return false
}
func (c *fileArchiver) isFileChanged(fileName string) bool {
ai, err := os.Stat(fileName)
if ai != nil {
if !c.isChanged(ai.ModTime()) {
return false
}
} else if !os.IsNotExist(err) {
logrus.Warningln(err)
}
return true
}
func (c *fileArchiver) sortedFiles() []string {
files := make([]string, len(c.files))
i := 0
for file := range c.files {
files[i] = file
i++
}
sort.Strings(files)
return files
}
func (c *fileArchiver) add(path string) (err error) {
// Always use slashes
path = filepath.ToSlash(path)
// Check if the file exists
info, err := os.Lstat(path)
if err == nil {
c.files[path] = info
}
return
}
func (c *fileArchiver) process(match string) bool {
var absolute, relative string
var err error
absolute, err = filepath.Abs(match)
if err == nil {
// Let's try to find a real relative path to an absolute from working directory
relative, err = filepath.Rel(c.wd, absolute)
}
if err == nil {
// Process path only if it lives in our build directory
if !strings.HasPrefix(relative, ".."+string(filepath.Separator)) {
err = c.add(relative)
} else {
err = errors.New("not supported: outside build directory")
}
}
if err == nil {
return true
} else if os.IsNotExist(err) {
// We hide the error that file doesn't exist
return false
}
logrus.Warningf("%s: %v", match, err)
return false
}
func (c *fileArchiver) processPaths() {
for _, path := range c.Paths {
matches, err := filepath.Glob(path)
if err != nil {
logrus.Warningf("%s: %v", path, err)
continue
}
found := 0
for _, match := range matches {
err := filepath.Walk(match, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if c.process(path) {
found++
}
return nil
})
if err != nil {
logrus.Warningln("Walking", match, err)
}
}
if found == 0 {
logrus.Warningf("%s: no matching files", path)
} else {
logrus.Infof("%s: found %d matching files", path, found)
}
}
}
func (c *fileArchiver) processUntracked() {
if !c.Untracked {
return
}
found := 0
var output bytes.Buffer
cmd := exec.Command("git", "ls-files", "-o", "-z")
cmd.Env = os.Environ()
cmd.Stdout = &output
cmd.Stderr = os.Stderr
logrus.Debugln("Executing command:", strings.Join(cmd.Args, " "))
err := cmd.Run()
if err == nil {
reader := bufio.NewReader(&output)
for {
line, err := reader.ReadString(0)
if err == io.EOF {
break
} else if err != nil {
logrus.Warningln(err)
break
}
if c.process(line[:len(line)-1]) {
found++
}
}
if found == 0 {
logrus.Warningf("untracked: no files")
} else {
logrus.Infof("untracked: found %d files", found)
}
} else {
logrus.Warningf("untracked: %v", err)
}
}
func (c *fileArchiver) enumerate() error {
wd, err := os.Getwd()
if err != nil {
return fmt.Errorf("Failed to get current working directory: %w", err)
}
c.wd = wd
c.files = make(map[string]os.FileInfo)
c.processPaths()
c.processUntracked()
return nil
}
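// Usage sketch (hypothetical): enumerating files below the working directory
// and reading back the sorted result; the glob pattern is a placeholder.
package helpers
import "fmt"
func demoFileArchiver() {
a := &fileArchiver{Paths: []string{"*.txt"}, Verbose: true}
if err := a.enumerate(); err != nil {
fmt.Println(err)
return
}
fmt.Println(a.sortedFiles())
}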
package helpers
import (
"fmt"
"net"
"os"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
type HealthCheckCommand struct{}
func (c *HealthCheckCommand) Execute(ctx *cli.Context) {
var addr, port string
for _, e := range os.Environ() {
parts := strings.SplitN(e, "=", 2)
if len(parts) != 2 {
continue
} else if strings.HasSuffix(parts[0], "_TCP_ADDR") {
addr = parts[1]
} else if strings.HasSuffix(parts[0], "_TCP_PORT") {
port = parts[1]
}
}
if addr == "" || port == "" {
logrus.Fatalln("No HOST or PORT found")
}
fmt.Fprintf(os.Stdout, "waiting for TCP connection to %s:%s...", addr, port)
for {
conn, err := net.Dial("tcp", net.JoinHostPort(addr, port))
if err != nil {
time.Sleep(time.Second)
continue
}
conn.Close()
return
}
}
func init() {
common.RegisterCommand2("health-check", "check health for a specific address", &HealthCheckCommand{})
}
package helpers
import (
"fmt"
"net/http"
"time"
"github.com/sirupsen/logrus"
)
type retryHelper struct {
Retry int `long:"retry" description:"How many times to retry upload"`
RetryTime time.Duration `long:"retry-time" description:"How long to wait between retries"`
}
// retryableErr indicates that an error can be retried. To specify that an error
// can be retried simply wrap the original error. For example:
//
// retryableErr{err: errors.New("some error")}
type retryableErr struct {
err error
}
func (e retryableErr) Error() string {
return e.err.Error()
}
func (r *retryHelper) doRetry(handler func() error) error {
err := handler()
for i := 0; i < r.Retry; i++ {
if _, ok := err.(retryableErr); !ok {
return err
}
time.Sleep(r.RetryTime)
logrus.WithError(err).Warningln("Retrying...")
err = handler()
}
return err
}
// retryOnServerError inspects the response and decides whether the resulting
// error should be of type retryableErr. Any 5xx status code yields a
// retryableErr.
func retryOnServerError(resp *http.Response) error {
if resp.StatusCode/100 == 2 {
return nil
}
resp.Body.Close()
err := fmt.Errorf("received: %s", resp.Status)
if resp.StatusCode/100 == 5 {
err = retryableErr{err: err}
}
return err
}
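// A sketch (hypothetical) of the retry contract above: only errors wrapped in
// retryableErr are retried, up to Retry additional attempts.
package helpers
import (
"errors"
"fmt"
"time"
)
func demoDoRetry() {
r := retryHelper{Retry: 2, RetryTime: 10 * time.Millisecond}
attempts := 0
err := r.doRetry(func() error {
attempts++
if attempts < 3 {
return retryableErr{err: errors.New("transient")}
}
return nil
})
fmt.Println(attempts, err) // 3 <nil>
}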
package commands
import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
type ListCommand struct {
configOptions
}
func (c *ListCommand) Execute(context *cli.Context) {
err := c.loadConfig()
if err != nil {
logrus.Warningln(err)
return
}
logrus.WithFields(logrus.Fields{
"ConfigFile": c.ConfigFile,
}).Println("Listing configured runners")
for _, runner := range c.config.Runners {
logrus.WithFields(logrus.Fields{
"Executor": runner.RunnerSettings.Executor,
"Token": runner.RunnerCredentials.Token,
"URL": runner.RunnerCredentials.URL,
}).Println(runner.Name)
}
}
func init() {
common.RegisterCommand2("list", "List all configured runners", &ListCommand{})
}
package commands
import (
"errors"
"fmt"
"net"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"runtime"
"syscall"
"time"
service "github.com/ayufan/golang-kardianos-service"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
"gitlab.com/gitlab-org/gitlab-runner/helpers/certificate"
prometheus_helper "gitlab.com/gitlab-org/gitlab-runner/helpers/prometheus"
"gitlab.com/gitlab-org/gitlab-runner/helpers/sentry"
service_helpers "gitlab.com/gitlab-org/gitlab-runner/helpers/service"
"gitlab.com/gitlab-org/gitlab-runner/log"
"gitlab.com/gitlab-org/gitlab-runner/network"
"gitlab.com/gitlab-org/gitlab-runner/session"
)
var (
concurrentDesc = prometheus.NewDesc(
"gitlab_runner_concurrent",
"The current value of concurrent setting",
nil,
nil,
)
limitDesc = prometheus.NewDesc(
"gitlab_runner_limit",
"The current value of concurrent setting",
[]string{"runner"},
nil,
)
)
type RunCommand struct {
configOptionsWithListenAddress
network common.Network
healthHelper
buildsHelper buildsHelper
ServiceName string `short:"n" long:"service" description:"Use different names for different services"`
WorkingDirectory string `short:"d" long:"working-directory" description:"Specify custom working directory"`
User string `short:"u" long:"user" description:"Use specific user to execute shell scripts"`
Syslog bool `long:"syslog" description:"Log to system service logger" env:"LOG_SYSLOG"`
sentryLogHook sentry.LogHook
prometheusLogHook prometheus_helper.LogHook
failuresCollector *prometheus_helper.FailuresCollector
networkRequestStatusesCollector prometheus.Collector
sessionServer *session.Server
// abortBuilds is used to abort running builds
abortBuilds chan os.Signal
// runSignal is used to abort current operation (scaling workers, waiting for config)
runSignal chan os.Signal
// reloadSignal is used to trigger forceful config reload
reloadSignal chan os.Signal
// stopSignals is used to catch signals sent to the process: SIGTERM, SIGQUIT, Interrupt, Kill
stopSignals chan os.Signal
// stopSignal is used to preserve the signal that was used to stop the
// process. In case of SIGQUIT it waits for all builds to finish and shuts
// down the session server gracefully.
stopSignal os.Signal
// runFinished is used to notify that run() did finish
runFinished chan bool
currentWorkers int
}
func (mr *RunCommand) log() *logrus.Entry {
return logrus.WithField("builds", mr.buildsHelper.buildsCount())
}
// Start is the method implementing `github.com/ayufan/golang-kardianos-service`.`Interface`
// interface. It's responsible for a non-blocking initialization of the process. When it exits,
// the main control flow is passed to runWait() configured as service's RunWait method. Take a look
// into Execute() for details.
func (mr *RunCommand) Start(_ service.Service) error {
mr.abortBuilds = make(chan os.Signal)
mr.runSignal = make(chan os.Signal, 1)
mr.reloadSignal = make(chan os.Signal, 1)
mr.runFinished = make(chan bool, 1)
mr.stopSignals = make(chan os.Signal)
mr.log().Info("Starting multi-runner from ", mr.ConfigFile, "...")
userModeWarning(false)
if len(mr.WorkingDirectory) > 0 {
err := os.Chdir(mr.WorkingDirectory)
if err != nil {
return err
}
}
err := mr.loadConfig()
if err != nil {
return err
}
// Start should not block. Do the actual work async.
go mr.run()
return nil
}
func (mr *RunCommand) loadConfig() error {
err := mr.configOptions.loadConfig()
if err != nil {
return err
}
// Set log level
err = mr.updateLoggingConfiguration()
if err != nil {
return err
}
// pass user to execute scripts as specific user
if mr.User != "" {
mr.config.User = mr.User
}
mr.healthy = nil
mr.log().Println("Configuration loaded")
mr.log().Debugln(helpers.ToYAML(mr.config))
// initialize sentry
if mr.config.SentryDSN != nil {
var err error
mr.sentryLogHook, err = sentry.NewLogHook(*mr.config.SentryDSN)
if err != nil {
mr.log().WithError(err).Errorln("Sentry failure")
}
} else {
mr.sentryLogHook = sentry.LogHook{}
}
return nil
}
func (mr *RunCommand) updateLoggingConfiguration() error {
reloadNeeded := false
if mr.config.LogLevel != nil && !log.Configuration().IsLevelSetWithCli() {
err := log.Configuration().SetLevel(*mr.config.LogLevel)
if err != nil {
return err
}
reloadNeeded = true
}
if mr.config.LogFormat != nil && !log.Configuration().IsFormatSetWithCli() {
err := log.Configuration().SetFormat(*mr.config.LogFormat)
if err != nil {
return err
}
reloadNeeded = true
}
if reloadNeeded {
log.Configuration().ReloadConfiguration()
}
return nil
}
// run is the main method of RunCommand. It's started asynchronously by services support
// through `Start` method and is responsible for initializing all goroutines handling
// concurrent, multi-runner execution of jobs.
// When mr.stopSignal is broadcast (after `Stop` is called by services support)
// this method waits for all workers to be terminated and closes the mr.runFinished
// channel, which is the signal that the command was properly terminated (this is the only
// valid, properly terminated exit flow for `gitlab-runner run`).
func (mr *RunCommand) run() {
mr.setupMetricsAndDebugServer()
mr.setupSessionServer()
runners := make(chan *common.RunnerConfig)
go mr.feedRunners(runners)
signal.Notify(mr.stopSignals, syscall.SIGQUIT, syscall.SIGTERM, os.Interrupt, os.Kill)
signal.Notify(mr.reloadSignal, syscall.SIGHUP)
startWorker := make(chan int)
stopWorker := make(chan bool)
go mr.startWorkers(startWorker, stopWorker, runners)
workerIndex := 0
// Update number of workers and reload configuration.
// Exits when mr.runSignal receives a signal.
for mr.stopSignal == nil {
signaled := mr.updateWorkers(&workerIndex, startWorker, stopWorker)
if signaled != nil {
break
}
signaled = mr.updateConfig()
if signaled != nil {
break
}
}
// Wait for workers to shutdown
for mr.currentWorkers > 0 {
stopWorker <- true
mr.currentWorkers--
}
mr.log().Info("All workers stopped. Can exit now")
close(mr.runFinished)
}
func (mr *RunCommand) setupMetricsAndDebugServer() {
listenAddress, err := mr.listenAddress()
if err != nil {
mr.log().Errorf("invalid listen address: %s", err.Error())
return
}
if listenAddress == "" {
mr.log().Info("listen_address not defined, metrics & debug endpoints disabled")
return
}
// We separate out the listener creation here so that we can return an error if
// the provided address is invalid or there is some other listener error.
listener, err := net.Listen("tcp", listenAddress)
if err != nil {
mr.log().WithError(err).Fatal("Failed to create listener for metrics server")
}
mux := http.NewServeMux()
go func() {
err := http.Serve(listener, mux)
if err != nil {
mr.log().WithError(err).Fatal("Metrics server terminated")
}
}()
mr.serveMetrics(mux)
mr.serveDebugData(mux)
mr.servePprof(mux)
mr.log().
WithField("address", listenAddress).
Info("Metrics server listening")
}
func (mr *RunCommand) serveMetrics(mux *http.ServeMux) {
registry := prometheus.NewRegistry()
// Metrics about the runner's business logic.
registry.MustRegister(&mr.buildsHelper)
registry.MustRegister(mr)
// Metrics about API connections
registry.MustRegister(mr.networkRequestStatusesCollector)
// Metrics about jobs failures
registry.MustRegister(mr.failuresCollector)
// Metrics about caught errors
registry.MustRegister(&mr.prometheusLogHook)
// Metrics about the program's build version.
registry.MustRegister(common.AppVersion.NewMetricsCollector())
// Go-specific metrics about the process (GC stats, goroutines, etc.).
registry.MustRegister(prometheus.NewGoCollector())
// Go-unrelated process metrics (memory usage, file descriptors, etc.).
registry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
// Register all executor provider collectors
for _, provider := range common.GetExecutorProviders() {
if collector, ok := provider.(prometheus.Collector); ok && collector != nil {
registry.MustRegister(collector)
}
}
mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
}
func (mr *RunCommand) serveDebugData(mux *http.ServeMux) {
mux.HandleFunc("/debug/jobs/list", mr.buildsHelper.ListJobsHandler)
}
func (mr *RunCommand) servePprof(mux *http.ServeMux) {
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
}
func (mr *RunCommand) setupSessionServer() {
if mr.config.SessionServer.ListenAddress == "" {
mr.log().Info("[session_server].listen_address not defined, session endpoints disabled")
return
}
var err error
mr.sessionServer, err = session.NewServer(
session.ServerConfig{
AdvertiseAddress: mr.config.SessionServer.AdvertiseAddress,
ListenAddress: mr.config.SessionServer.ListenAddress,
ShutdownTimeout: common.ShutdownTimeout * time.Second,
},
mr.log(),
certificate.X509Generator{},
mr.buildsHelper.findSessionByURL,
)
if err != nil {
mr.log().WithError(err).Fatal("Failed to create session server")
}
go func() {
err := mr.sessionServer.Start()
if err != nil {
mr.log().WithError(err).Fatal("Session server terminated")
}
}()
mr.log().
WithField("address", mr.config.SessionServer.ListenAddress).
Info("Session server listening")
}
// feedRunners works until a stop signal has been received.
// It is responsible for feeding the runners (workers) to the channel, which
// asynchronously ends with job requests being made and jobs being executed
// by concurrent workers.
// This is also the place where check interval is calculated and
// applied.
func (mr *RunCommand) feedRunners(runners chan *common.RunnerConfig) {
for mr.stopSignal == nil {
mr.log().Debugln("Feeding runners to channel")
config := mr.config
// If there are no runners, wait the full interval before checking again
if len(config.Runners) == 0 {
time.Sleep(config.GetCheckInterval())
continue
}
interval := config.GetCheckInterval() / time.Duration(len(config.Runners))
// Feed each runner, waiting the computed interval between feeds
for _, runner := range config.Runners {
mr.feedRunner(runner, runners)
time.Sleep(interval)
}
}
mr.log().
WithField("StopSignal", mr.stopSignal).
Debug("Stopping feeding runners to channel")
}
func (mr *RunCommand) feedRunner(runner *common.RunnerConfig, runners chan *common.RunnerConfig) {
if !mr.isHealthy(runner.UniqueID()) {
return
}
runners <- runner
}
// startWorkers is responsible for starting the workers (up to the number
// defined by `concurrent`) and assigning a runner processing method to them.
func (mr *RunCommand) startWorkers(startWorker chan int, stopWorker chan bool, runners chan *common.RunnerConfig) {
for mr.stopSignal == nil {
id := <-startWorker
go mr.processRunners(id, stopWorker, runners)
}
}
// processRunners is responsible for processing a Runner on a worker (when it
// receives a runner sent to the channel by feedRunners) and for terminating the
// worker (when it receives a signal on the stopWorker chan, provided by updateWorkers)
func (mr *RunCommand) processRunners(id int, stopWorker chan bool, runners chan *common.RunnerConfig) {
mr.log().
WithField("worker", id).
Debugln("Starting worker")
for mr.stopSignal == nil {
select {
case runner := <-runners:
err := mr.processRunner(id, runner, runners)
if err != nil {
mr.log().
WithFields(logrus.Fields{
"runner": runner.ShortDescription(),
"executor": runner.Executor,
}).
WithError(err).
Warn("Failed to process runner")
}
// force GC cycle after processing build
runtime.GC()
case <-stopWorker:
mr.log().
WithField("worker", id).
Debugln("Stopping worker")
return
}
}
<-stopWorker
}
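// A minimal, self-contained sketch (hypothetical, not part of the original
// flow) of the start/stop channel pattern that startWorkers, processRunners
// and the shutdown loop in run() implement above: workers consume from one
// channel until a stop request arrives on another.
func demoWorkerChannels() {
runners := make(chan string)
stopWorker := make(chan bool)
worker := func(id int) {
for {
select {
case r := <-runners:
fmt.Printf("worker %d processing runner %s\n", id, r)
case <-stopWorker:
return
}
}
}
go worker(0)
go worker(1)
for _, r := range []string{"a", "b", "c", "d"} {
runners <- r
}
stopWorker <- true
stopWorker <- true
}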
// processRunner is responsible for handling one job on a specified runner.
// First it acquires the Build to check if `limit` was met. If the runner is still
// within capacity, it creates the debug session (for the debug terminal), triggers
// a job request to the configured GitLab instance and finally creates and finishes
// the job.
// To speed up job handling, before starting the job this method "requeues" the
// runner to another worker (by feeding the channel normally handled by feedRunners).
func (mr *RunCommand) processRunner(id int, runner *common.RunnerConfig, runners chan *common.RunnerConfig) (err error) {
provider := common.GetExecutor(runner.Executor)
if provider == nil {
return
}
executorData, err := provider.Acquire(runner)
if err != nil {
return fmt.Errorf("failed to update executor: %w", err)
}
defer provider.Release(runner, executorData)
if !mr.buildsHelper.acquireBuild(runner) {
logrus.WithFields(logrus.Fields{
"runner": runner.ShortDescription(),
"worker": id,
}).Debug("Failed to request job, runner limit met")
return
}
defer mr.buildsHelper.releaseBuild(runner)
buildSession, sessionInfo, err := mr.createSession(provider)
if err != nil {
return
}
// Receive a new build
trace, jobData, err := mr.requestJob(runner, sessionInfo)
if err != nil || jobData == nil {
return
}
defer func() {
if err != nil {
fmt.Fprintln(trace, err.Error())
trace.Fail(err, common.RunnerSystemFailure)
} else {
trace.Fail(nil, common.NoneFailure)
}
}()
// Create a new build
build, err := common.NewBuild(*jobData, runner, mr.abortBuilds, executorData)
if err != nil {
return
}
build.Session = buildSession
build.ArtifactUploader = mr.network.UploadRawArtifacts
// Add the build to the list of builds so numbers can be assigned
mr.buildsHelper.addBuild(build)
defer mr.buildsHelper.removeBuild(build)
// Let a different worker process the same runner again,
// to speed up taking the builds
mr.requeueRunner(runner, runners)
// Process a build
return build.Run(mr.config, trace)
}
// createSession checks whether a debug server is supported by the configured
// executor and whether the debug server was configured. If both requirements
// are met, it creates a debug session that will be assigned to the newly
// created job.
func (mr *RunCommand) createSession(provider common.ExecutorProvider) (*session.Session, *common.SessionInfo, error) {
var features common.FeaturesInfo
if err := provider.GetFeatures(&features); err != nil {
return nil, nil, err
}
if mr.sessionServer == nil || !features.Session {
return nil, nil, nil
}
sess, err := session.NewSession(mr.log())
if err != nil {
return nil, nil, err
}
sessionInfo := &common.SessionInfo{
URL: mr.sessionServer.AdvertiseAddress + sess.Endpoint,
Certificate: string(mr.sessionServer.CertificatePublicKey),
Authorization: sess.Token,
}
return sess, sessionInfo, err
}
// requestJob checks whether the runner can send another concurrent request
// to GitLab; if not, the return value is nil.
func (mr *RunCommand) requestJob(runner *common.RunnerConfig, sessionInfo *common.SessionInfo) (common.JobTrace, *common.JobResponse, error) {
if !mr.buildsHelper.acquireRequest(runner) {
mr.log().WithField("runner", runner.ShortDescription()).
Debugln("Failed to request job: runner requestConcurrency meet")
return nil, nil, nil
}
defer mr.buildsHelper.releaseRequest(runner)
jobData, healthy := mr.network.RequestJob(*runner, sessionInfo)
mr.makeHealthy(runner.UniqueID(), healthy)
if jobData == nil {
return nil, nil, nil
}
// Make sure to always close the output (the job trace)
jobCredentials := &common.JobCredentials{
ID: jobData.ID,
Token: jobData.Token,
}
trace, err := mr.network.ProcessJob(*runner, jobCredentials)
if err != nil {
jobInfo := common.UpdateJobInfo{
ID: jobCredentials.ID,
State: common.Failed,
FailureReason: common.RunnerSystemFailure,
}
// send failure once
mr.network.UpdateJob(*runner, jobCredentials, jobInfo)
return nil, nil, err
}
trace.SetFailuresCollector(mr.failuresCollector)
return trace, jobData, nil
}
// requeueRunner feeds the runners channel in a non-blocking way. This
// replicates the behavior of feedRunners and speeds up job handling. If the
// channel is full, the method just exits without blocking.
func (mr *RunCommand) requeueRunner(runner *common.RunnerConfig, runners chan *common.RunnerConfig) {
runnerLog := mr.log().WithField("runner", runner.ShortDescription())
select {
case runners <- runner:
runnerLog.Debugln("Requeued the runner")
default:
runnerLog.Debugln("Failed to requeue the runner")
}
}
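// The select-with-default construct used above is the standard Go idiom for
// a non-blocking channel send. A self-contained sketch (not part of the
// original source) using a throwaway channel:
func exampleNonBlockingSend() bool {
ch := make(chan int, 1)
select {
case ch <- 1:
return true // value enqueued
default:
return false // channel full; give up instead of blocking
}
}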
// updateWorkers, called periodically from run(), is responsible for scaling
// the pool of workers. A worker here is not a `[[runners]]` entry, but a
// "slot" that will use one of the runners to request and handle a job.
// The size of the workers pool is controlled by the `concurrent` setting;
// this method ensures that `concurrent` is the upper bound on the number of
// jobs that the GitLab Runner process handles concurrently.
func (mr *RunCommand) updateWorkers(workerIndex *int, startWorker chan int, stopWorker chan bool) os.Signal {
concurrentLimit := mr.config.Concurrent
if concurrentLimit < 1 {
mr.log().Fatalln("Concurrent is less than 1 - no jobs will be processed")
}
for mr.currentWorkers > concurrentLimit {
// Too many workers. Trigger stop on one of them
// or exit if a termination signal was broadcast.
select {
case stopWorker <- true:
case signaled := <-mr.runSignal:
return signaled
}
mr.currentWorkers--
}
for mr.currentWorkers < concurrentLimit {
// Too few workers. Trigger a creation of a new one
// or exit if a termination signal was broadcast.
select {
case startWorker <- *workerIndex:
case signaled := <-mr.runSignal:
return signaled
}
mr.currentWorkers++
*workerIndex++
}
return nil
}
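// The two loops above implement a simple reconciliation: the worker count is
// moved toward `concurrent` one slot at a time. The same logic against plain
// integers (a sketch with hypothetical values; no channels or signals):
func exampleReconcileWorkers(current, concurrent int) int {
for current > concurrent {
current-- // would send to stopWorker
}
for current < concurrent {
current++ // would send to startWorker and advance the worker index
}
return current
}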
func (mr *RunCommand) updateConfig() os.Signal {
select {
case <-time.After(common.ReloadConfigInterval * time.Second):
err := mr.checkConfig()
if err != nil {
mr.log().Errorln("Failed to load config", err)
}
case <-mr.reloadSignal:
err := mr.loadConfig()
if err != nil {
mr.log().Errorln("Failed to load config", err)
}
case signaled := <-mr.runSignal:
return signaled
}
return nil
}
func (mr *RunCommand) checkConfig() (err error) {
info, err := os.Stat(mr.ConfigFile)
if err != nil {
return err
}
if !mr.config.ModTime.Before(info.ModTime()) {
return nil
}
err = mr.loadConfig()
if err != nil {
mr.log().Errorln("Failed to load config", err)
// don't reload the same file
mr.config.ModTime = info.ModTime()
return
}
return nil
}
// Stop implements the Interface of `github.com/ayufan/golang-kardianos-service`.
// It's responsible for triggering the process stop.
// First it starts a goroutine that begins broadcasting the interrupt signal
// (used to stop the workers scaling goroutine).
// Next it triggers the graceful shutdown, which is handled only if a proper
// signal was used.
// At the end it triggers the forceful shutdown, which handles forceful
// process termination.
func (mr *RunCommand) Stop(_ service.Service) error {
go mr.interruptRun()
defer func() {
if mr.sessionServer != nil {
mr.sessionServer.Close()
}
}()
err := mr.handleGracefulShutdown()
if err == nil {
return nil
}
mr.log().
WithError(err).
Warning("Graceful shutdown not finished properly")
err = mr.handleForcefulShutdown()
if err == nil {
return nil
}
mr.log().
WithError(err).
Warning("Forceful shutdown not finished properly")
return err
}
// interruptRun broadcasts the interrupt signal, which causes the workers
// scaling goroutine to exit.
func (mr *RunCommand) interruptRun() {
mr.log().Debug("Broadcasting interrupt signal")
// Pump interrupt signal
for {
mr.runSignal <- mr.stopSignal
}
}
// handleGracefulShutdown is responsible for handling the "graceful" strategy
// of exiting. It's executed only when a specific signal is used to terminate
// the process.
// At this moment feedRunners() should have exited and workers scaling is
// being terminated. This means that no new jobs will be requested.
// handleGracefulShutdown() ensures that the process will not exit until
// `mr.runFinished` is closed, i.e. until all jobs have finished and all
// workers have terminated. It may however exit if another signal - other
// than the gracefulShutdown signal - is received.
func (mr *RunCommand) handleGracefulShutdown() error {
// Loop as long as the saved stop signal is SIGQUIT (the graceful shutdown signal)
for mr.stopSignal == syscall.SIGQUIT {
mr.log().
WithField("StopSignal", mr.stopSignal).
Warning("Starting graceful shutdown, waiting for builds to finish")
// Wait either for a new signal or for the builds to finish
select {
case mr.stopSignal = <-mr.stopSignals:
// We received a new signal
case <-mr.runFinished:
// Everything finished we can exit now
return nil
}
}
return fmt.Errorf("received: %v", mr.stopSignal)
}
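// In practice, the graceful path above is entered by sending SIGQUIT to the
// runner process, e.g. from a shell (hypothetical PID):
//
//	kill -SIGQUIT <runner-pid>
//
// Any other signal received while waiting breaks the loop and makes Stop
// fall through to handleForcefulShutdown.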
// handleForcefulShutdown is executed if handleGracefulShutdown exited with an
// error (which means that a signal forcing shutdown was used instead of the
// signal specific to graceful shutdown).
// It calls mr.abortAllBuilds, which broadcasts an abort signal that finally
// ends with the termination of the jobs.
// Next it waits for one of the following events:
// 1. Another signal was sent to the process, which is handled as a force
//    exit and triggers exit of the method, and finally process termination
//    without waiting for anything else.
// 2. ShutdownTimeout is exceeded. If waiting for shutdown takes more than the
//    defined time, the process is forcefully terminated just as if a second
//    signal had been sent.
// 3. mr.runFinished was closed, which means that all termination was done
//    properly.
//
// After this method exits, Stop returns its error and finally the
// `github.com/ayufan/golang-kardianos-service` service mechanism will finish
// process execution.
func (mr *RunCommand) handleForcefulShutdown() error {
mr.log().
WithField("StopSignal", mr.stopSignal).
Warning("Starting forceful shutdown")
go mr.abortAllBuilds()
// Wait for graceful shutdown or abort after timeout
for {
select {
case mr.stopSignal = <-mr.stopSignals:
return fmt.Errorf("forced exit: %v", mr.stopSignal)
case <-time.After(common.ShutdownTimeout * time.Second):
return errors.New("shutdown timed out")
case <-mr.runFinished:
// Everything finished we can exit now
return nil
}
}
}
// abortAllBuilds broadcasts the abort signal, which results in all currently
// executing jobs being interrupted and terminated.
func (mr *RunCommand) abortAllBuilds() {
mr.log().Debug("Broadcasting job abort signal")
// Pump signal to abort all current builds
for {
mr.abortBuilds <- mr.stopSignal
}
}
func (mr *RunCommand) Execute(_ *cli.Context) {
svcConfig := &service.Config{
Name: mr.ServiceName,
DisplayName: mr.ServiceName,
Description: defaultDescription,
Arguments: []string{"run"},
Option: service.KeyValue{
"RunWait": mr.runWait,
},
}
svc, err := service_helpers.New(mr, svcConfig)
if err != nil {
logrus.WithError(err).
Fatalln("Service creation failed")
}
if mr.Syslog {
log.SetSystemLogger(logrus.StandardLogger(), svc)
}
logrus.AddHook(&mr.sentryLogHook)
logrus.AddHook(&mr.prometheusLogHook)
err = svc.Run()
if err != nil {
logrus.WithError(err).
Fatal("Service run failed")
}
}
// runWait is the blocking mechanism for the
// `github.com/ayufan/golang-kardianos-service` service. It's started after
// Start exits and blocks the control flow. When it exits, Stop is executed
// and service shutdown should be handled.
// For Runner it waits for the stopSignal to be received by the process. When
// that happens, the signal is saved in mr.stopSignal and runWait() exits,
// triggering the shutdown handling.
func (mr *RunCommand) runWait() {
mr.log().Debugln("Waiting for stop signal")
// Save the stop signal and exit to execute Stop()
mr.stopSignal = <-mr.stopSignals
}
// Describe implements prometheus.Collector.
func (mr *RunCommand) Describe(ch chan<- *prometheus.Desc) {
ch <- concurrentDesc
ch <- limitDesc
}
// Collect implements prometheus.Collector.
func (mr *RunCommand) Collect(ch chan<- prometheus.Metric) {
config := mr.config
ch <- prometheus.MustNewConstMetric(
concurrentDesc,
prometheus.GaugeValue,
float64(config.Concurrent),
)
for _, runner := range config.Runners {
ch <- prometheus.MustNewConstMetric(
limitDesc,
prometheus.GaugeValue,
float64(runner.Limit),
runner.ShortDescription(),
)
}
}
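// Since RunCommand implements both Describe and Collect, it satisfies the
// prometheus.Collector interface and can be registered directly. A minimal
// sketch, assuming mr is a fully initialized *RunCommand:
//
//	registry := prometheus.NewRegistry()
//	registry.MustRegister(mr)
//
// After registration, the concurrent gauge and the per-runner limit gauges
// are emitted on every scrape.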
func init() {
requestStatusesCollector := network.NewAPIRequestStatusesMap()
common.RegisterCommand2("run", "run multi runner service", &RunCommand{
ServiceName: defaultServiceName,
network: network.NewGitLabClientWithRequestStatusesMap(requestStatusesCollector),
networkRequestStatusesCollector: requestStatusesCollector,
prometheusLogHook: prometheus_helper.NewLogHook(),
failuresCollector: prometheus_helper.NewFailuresCollector(),
buildsHelper: newBuildsHelper(),
})
}
package commands
import (
"bufio"
"fmt"
"os"
"os/signal"
"runtime"
"strings"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh"
"gitlab.com/gitlab-org/gitlab-runner/network"
)
type configTemplate struct {
*common.Config
ConfigFile string `long:"config" env:"TEMPLATE_CONFIG_FILE" description:"Path to the configuration template file"`
}
func (c *configTemplate) Enabled() bool {
return c.ConfigFile != ""
}
func (c *configTemplate) MergeTo(config *common.RunnerConfig) error {
err := c.loadConfigTemplate()
if err != nil {
return errors.Wrap(err, "couldn't load configuration template file")
}
if len(c.Runners) != 1 {
return errors.New("configuration template must contain exactly one [[runners]] entry")
}
err = mergo.Merge(config, c.Runners[0])
if err != nil {
return errors.Wrap(err, "error while merging configuration with configuration template")
}
return nil
}
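// A hypothetical template file consumed by MergeTo could look like this
// (TOML; the check above requires exactly one [[runners]] entry):
//
//	[[runners]]
//	  executor = "docker"
//	  [runners.docker]
//	    image = "ruby:2.6"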
func (c *configTemplate) loadConfigTemplate() error {
config := common.NewConfig()
err := config.LoadConfig(c.ConfigFile)
if err != nil {
return err
}
c.Config = config
return nil
}
type RegisterCommand struct {
context *cli.Context
network common.Network
reader *bufio.Reader
registered bool
configOptions
ConfigTemplate configTemplate `namespace:"template"`
TagList string `long:"tag-list" env:"RUNNER_TAG_LIST" description:"Tag list"`
NonInteractive bool `short:"n" long:"non-interactive" env:"REGISTER_NON_INTERACTIVE" description:"Run registration unattended"`
LeaveRunner bool `long:"leave-runner" env:"REGISTER_LEAVE_RUNNER" description:"Don't remove runner if registration fails"`
RegistrationToken string `short:"r" long:"registration-token" env:"REGISTRATION_TOKEN" description:"Runner's registration token"`
RunUntagged bool `long:"run-untagged" env:"REGISTER_RUN_UNTAGGED" description:"Register to run untagged builds; defaults to 'true' when 'tag-list' is empty"`
Locked bool `long:"locked" env:"REGISTER_LOCKED" description:"Lock Runner for current project, defaults to 'true'"`
AccessLevel string `long:"access-level" env:"REGISTER_ACCESS_LEVEL" description:"Set access_level of the runner to not_protected or ref_protected; defaults to not_protected"`
MaximumTimeout int `long:"maximum-timeout" env:"REGISTER_MAXIMUM_TIMEOUT" description:"What is the maximum timeout (in seconds) that will be set for job when using this Runner"`
Paused bool `long:"paused" env:"REGISTER_PAUSED" description:"Set Runner to be paused, defaults to 'false'"`
// TODO: Remove in 13.0 https://gitlab.com/gitlab-org/gitlab-runner/issues/6404
DockerServices []string `long:"docker-services" json:"docker-services" env:"DOCKER_SERVICES" description:"DEPRECATED: will be removed in 13.0. Add a service that is started with the container from the main register command"`
common.RunnerConfig
}
type AccessLevel string
const (
NotProtected AccessLevel = "not_protected"
RefProtected AccessLevel = "ref_protected"
)
const (
defaultDockerWindowCacheDir = "c:\\cache"
)
func (s *RegisterCommand) askOnce(prompt string, result *string, allowEmpty bool) bool {
println(prompt)
if *result != "" {
print("["+*result, "]: ")
}
if s.reader == nil {
s.reader = bufio.NewReader(os.Stdin)
}
data, _, err := s.reader.ReadLine()
if err != nil {
panic(err)
}
newResult := string(data)
newResult = strings.TrimSpace(newResult)
if newResult != "" {
*result = newResult
return true
}
if allowEmpty || *result != "" {
return true
}
return false
}
func (s *RegisterCommand) ask(key, prompt string, allowEmptyOptional ...bool) string {
allowEmpty := len(allowEmptyOptional) > 0 && allowEmptyOptional[0]
result := s.context.String(key)
result = strings.TrimSpace(result)
if s.NonInteractive || prompt == "" {
if result == "" && !allowEmpty {
logrus.Panicln("The", key, "needs to be entered")
}
return result
}
for {
if s.askOnce(prompt, &result, allowEmpty) {
break
}
}
return result
}
func (s *RegisterCommand) askExecutor() {
for {
names := common.GetExecutors()
executors := strings.Join(names, ", ")
s.Executor = s.ask("executor", "Please enter the executor: "+executors+":", true)
if common.GetExecutor(s.Executor) != nil {
return
}
message := "Invalid executor specified"
if s.NonInteractive {
logrus.Panicln(message)
} else {
logrus.Errorln(message)
}
}
}
func (s *RegisterCommand) askDocker() {
s.askBasicDocker("ruby:2.6")
for _, volume := range s.Docker.Volumes {
parts := strings.Split(volume, ":")
if parts[len(parts)-1] == "/cache" {
return
}
}
s.Docker.Volumes = append(s.Docker.Volumes, "/cache")
}
func (s *RegisterCommand) askDockerWindows() {
s.askBasicDocker("mcr.microsoft.com/windows/servercore:1809")
for _, volume := range s.Docker.Volumes {
// This does not cover all the possibilities since we don't have access
// to the volume parsing package, because it's internal.
if strings.Contains(volume, defaultDockerWindowCacheDir) {
return
}
}
s.Docker.Volumes = append(s.Docker.Volumes, defaultDockerWindowCacheDir)
}
func (s *RegisterCommand) askBasicDocker(exampleHelperImage string) {
if s.Docker == nil {
s.Docker = &common.DockerConfig{}
}
s.Docker.Image = s.ask("docker-image", fmt.Sprintf("Please enter the default Docker image (e.g. %s):", exampleHelperImage))
}
func (s *RegisterCommand) askParallels() {
s.Parallels.BaseName = s.ask("parallels-base-name", "Please enter the Parallels VM (e.g. my-vm):")
}
func (s *RegisterCommand) askVirtualBox() {
s.VirtualBox.BaseName = s.ask("virtualbox-base-name", "Please enter the VirtualBox VM (e.g. my-vm):")
}
func (s *RegisterCommand) askSSHServer() {
s.SSH.Host = s.ask("ssh-host", "Please enter the SSH server address (e.g. my.server.com):")
s.SSH.Port = s.ask("ssh-port", "Please enter the SSH server port (e.g. 22):", true)
}
func (s *RegisterCommand) askSSHLogin() {
s.SSH.User = s.ask("ssh-user", "Please enter the SSH user (e.g. root):")
s.SSH.Password = s.ask("ssh-password", "Please enter the SSH password (e.g. docker.io):", true)
s.SSH.IdentityFile = s.ask("ssh-identity-file", "Please enter path to SSH identity file (e.g. /home/user/.ssh/id_rsa):", true)
}
func (s *RegisterCommand) addRunner(runner *common.RunnerConfig) {
s.config.Runners = append(s.config.Runners, runner)
}
func (s *RegisterCommand) askRunner() {
s.URL = s.ask("url", "Please enter the gitlab-ci coordinator URL (e.g. https://gitlab.com/):")
if s.Token != "" {
logrus.Infoln("Token specified trying to verify runner...")
logrus.Warningln("If you want to register use the '-r' instead of '-t'.")
if !s.network.VerifyRunner(s.RunnerCredentials) {
logrus.Panicln("Failed to verify this runner. Perhaps you are having network problems")
}
} else {
// we store the registration token as the token, since we pass it to RunnerCredentials
s.Token = s.ask("registration-token", "Please enter the gitlab-ci token for this runner:")
s.Name = s.ask("name", "Please enter the gitlab-ci description for this runner:")
s.TagList = s.ask("tag-list", "Please enter the gitlab-ci tags for this runner (comma separated):", true)
if s.TagList == "" {
s.RunUntagged = true
}
parameters := common.RegisterRunnerParameters{
Description: s.Name,
Tags: s.TagList,
Locked: s.Locked,
AccessLevel: s.AccessLevel,
RunUntagged: s.RunUntagged,
MaximumTimeout: s.MaximumTimeout,
Active: !s.Paused,
}
result := s.network.RegisterRunner(s.RunnerCredentials, parameters)
if result == nil {
logrus.Panicln("Failed to register this runner. Perhaps you are having network problems")
}
s.Token = result.Token
s.registered = true
}
}
func (s *RegisterCommand) askExecutorOptions() {
kubernetes := s.Kubernetes
machine := s.Machine
docker := s.Docker
ssh := s.SSH
parallels := s.Parallels
virtualbox := s.VirtualBox
custom := s.Custom
s.Kubernetes = nil
s.Machine = nil
s.Docker = nil
s.SSH = nil
s.Parallels = nil
s.VirtualBox = nil
s.Custom = nil
s.Referees = nil
executorFns := map[string]func(){
"kubernetes": func() {
s.Kubernetes = kubernetes
},
"docker+machine": func() {
s.Machine = machine
s.Docker = docker
s.askDocker()
},
"docker-ssh+machine": func() {
s.Machine = machine
s.Docker = docker
s.SSH = ssh
s.askDocker()
s.askSSHLogin()
},
"docker": func() {
s.Docker = docker
s.askDocker()
},
"docker-windows": func() {
s.Docker = docker
s.askDockerWindows()
},
"docker-ssh": func() {
s.Docker = docker
s.SSH = ssh
s.askDocker()
s.askSSHLogin()
},
"ssh": func() {
s.SSH = ssh
s.askSSHServer()
s.askSSHLogin()
},
"parallels": func() {
s.SSH = ssh
s.Parallels = parallels
s.askParallels()
s.askSSHServer()
},
"virtualbox": func() {
s.SSH = ssh
s.VirtualBox = virtualbox
s.askVirtualBox()
s.askSSHLogin()
},
"shell": func() {
if runtime.GOOS == "windows" && s.RunnerConfig.Shell == "" {
s.Shell = "powershell"
}
},
"custom": func() {
s.Custom = custom
},
}
executorFn, ok := executorFns[s.Executor]
if ok {
executorFn()
}
}
func (s *RegisterCommand) Execute(context *cli.Context) {
userModeWarning(true)
s.context = context
err := s.loadConfig()
if err != nil {
logrus.Panicln(err)
}
validAccessLevels := []AccessLevel{NotProtected, RefProtected}
if !accessLevelValid(validAccessLevels, AccessLevel(s.AccessLevel)) {
logrus.Panicln("Given access-level is not valid. " +
"Please refer to gitlab-runner register -h for the correct options.")
}
s.askRunner()
if !s.LeaveRunner {
defer func() {
// De-register runner on panic
if r := recover(); r != nil {
if s.registered {
s.network.UnregisterRunner(s.RunnerCredentials)
}
// pass panic to next defer
panic(r)
}
}()
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
go func() {
signal := <-signals
s.network.UnregisterRunner(s.RunnerCredentials)
logrus.Fatalf("RECEIVED SIGNAL: %v", signal)
}()
}
if s.config.Concurrent < s.Limit {
logrus.Warningf("Specified limit (%d) larger then current concurrent limit (%d). Concurrent limit will not be enlarged.", s.Limit, s.config.Concurrent)
}
s.askExecutor()
s.askExecutorOptions()
s.transformDockerServices(s.DockerServices)
s.mergeTemplate()
s.addRunner(&s.RunnerConfig)
err = s.saveConfig()
if err != nil {
logrus.Panicln(err)
}
logrus.Printf("Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!")
}
// TODO: Remove in 13.0 https://gitlab.com/gitlab-org/gitlab-runner/issues/6404
//
// transformDockerServices takes the value from `DockerServices`
// and converts each entry into a `common.DockerService` definition.
//
// This keeps backward compatibility: when the user passes
// `--docker-services alpine:3.11 --docker-services ruby:3.10`, each value is
// parsed correctly into a service definition.
func (s *RegisterCommand) transformDockerServices(services []string) {
for _, service := range services {
s.Docker.Services = append(
s.Docker.Services,
&common.DockerService{
Service: common.Service{Name: service},
},
)
}
}
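// For illustration, two hypothetical flag values and the definitions the
// loop above appends for them:
//
//	transformDockerServices([]string{"alpine:3.11", "postgres:12"})
//	// s.Docker.Services gains:
//	//   &common.DockerService{Service: common.Service{Name: "alpine:3.11"}}
//	//   &common.DockerService{Service: common.Service{Name: "postgres:12"}}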
func (s *RegisterCommand) mergeTemplate() {
if !s.ConfigTemplate.Enabled() {
return
}
logrus.Infof("Merging configuration from template file %q", s.ConfigTemplate.ConfigFile)
err := s.ConfigTemplate.MergeTo(&s.RunnerConfig)
if err != nil {
logrus.WithError(err).Fatal("Could not handle configuration merging from template file")
}
}
func getHostname() string {
hostname, _ := os.Hostname()
return hostname
}
func newRegisterCommand() *RegisterCommand {
return &RegisterCommand{
RunnerConfig: common.RunnerConfig{
Name: getHostname(),
RunnerSettings: common.RunnerSettings{
Kubernetes: &common.KubernetesConfig{},
Cache: &common.CacheConfig{},
Machine: &common.DockerMachine{},
Docker: &common.DockerConfig{},
SSH: &ssh.Config{},
Parallels: &common.ParallelsConfig{},
VirtualBox: &common.VirtualBoxConfig{},
},
},
Locked: true,
Paused: false,
network: network.NewGitLabClient(),
}
}
func accessLevelValid(levels []AccessLevel, givenLevel AccessLevel) bool {
if givenLevel == "" {
return true
}
for _, level := range levels {
if givenLevel == level {
return true
}
}
return false
}
func init() {
common.RegisterCommand2("register", "register a new runner", newRegisterCommand())
}
package commands
import (
"fmt"
"os"
"runtime"
"github.com/ayufan/golang-kardianos-service"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
"gitlab.com/gitlab-org/gitlab-runner/helpers/service"
)
const (
defaultServiceName = "gitlab-runner"
defaultDescription = "GitLab Runner"
)
type NullService struct {
}
func (n *NullService) Start(s service.Service) error {
return nil
}
func (n *NullService) Stop(s service.Service) error {
return nil
}
func runServiceInstall(s service.Service, c *cli.Context) error {
if user := c.String("user"); user == "" && os.Getuid() == 0 {
logrus.Fatal("Please specify user that will run gitlab-runner service")
}
if configFile := c.String("config"); configFile != "" {
// try to load existing config
config := common.NewConfig()
err := config.LoadConfig(configFile)
if err != nil {
return err
}
// save config for the first time
if !config.Loaded {
err = config.SaveConfig(configFile)
if err != nil {
return err
}
}
}
return service.Control(s, "install")
}
func runServiceStatus(displayName string, s service.Service) error {
err := s.Status()
if err == nil {
fmt.Println(displayName+":", "Service is running!")
} else {
fmt.Fprintln(os.Stderr, displayName+":", err)
os.Exit(1)
}
return nil
}
func getServiceArguments(c *cli.Context) (arguments []string) {
if wd := c.String("working-directory"); wd != "" {
arguments = append(arguments, "--working-directory", wd)
}
if config := c.String("config"); config != "" {
arguments = append(arguments, "--config", config)
}
if sn := c.String("service"); sn != "" {
arguments = append(arguments, "--service", sn)
}
syslog := !c.IsSet("syslog") || c.Bool("syslog")
if syslog {
arguments = append(arguments, "--syslog")
}
return
}
func createServiceConfig(c *cli.Context) (svcConfig *service.Config) {
svcConfig = &service.Config{
Name: c.String("service"),
DisplayName: c.String("service"),
Description: defaultDescription,
Arguments: []string{"run"},
}
svcConfig.Arguments = append(svcConfig.Arguments, getServiceArguments(c)...)
switch runtime.GOOS {
case "linux":
if os.Getuid() != 0 {
logrus.Fatal("Please run the commands as root")
}
if user := c.String("user"); user != "" {
svcConfig.Arguments = append(svcConfig.Arguments, "--user", user)
}
case "darwin":
svcConfig.Option = service.KeyValue{
"KeepAlive": true,
"RunAtLoad": true,
"UserService": os.Getuid() != 0,
}
if user := c.String("user"); user != "" {
if os.Getuid() == 0 {
svcConfig.Arguments = append(svcConfig.Arguments, "--user", user)
} else {
logrus.Fatalln("The --user is not supported for non-root users")
}
}
case "windows":
svcConfig.Option = service.KeyValue{
"Password": c.String("password"),
}
svcConfig.UserName = c.String("user")
}
return
}
func RunServiceControl(c *cli.Context) {
svcConfig := createServiceConfig(c)
s, err := service_helpers.New(&NullService{}, svcConfig)
if err != nil {
logrus.Fatal(err)
}
switch c.Command.Name {
case "install":
err = runServiceInstall(s, c)
case "status":
err = runServiceStatus(svcConfig.DisplayName, s)
default:
err = service.Control(s, c.Command.Name)
}
if err != nil {
logrus.Fatal(err)
}
}
func getFlags() []cli.Flag {
return []cli.Flag{
cli.StringFlag{
Name: "service, n",
Value: defaultServiceName,
Usage: "Specify service name to use",
},
}
}
func getInstallFlags() []cli.Flag {
installFlags := getFlags()
installFlags = append(installFlags, cli.StringFlag{
Name: "working-directory, d",
Value: helpers.GetCurrentWorkingDirectory(),
Usage: "Specify custom root directory where all data are stored",
})
installFlags = append(installFlags, cli.StringFlag{
Name: "config, c",
Value: getDefaultConfigFile(),
Usage: "Specify custom config file",
})
installFlags = append(installFlags, cli.BoolFlag{
Name: "syslog",
Usage: "Setup system logging integration",
})
if runtime.GOOS == "windows" {
installFlags = append(installFlags, cli.StringFlag{
Name: "user, u",
Value: "",
Usage: "Specify user-name to secure the runner",
})
installFlags = append(installFlags, cli.StringFlag{
Name: "password, p",
Value: "",
Usage: "Specify user password to install service (required)",
})
} else if os.Getuid() == 0 {
installFlags = append(installFlags, cli.StringFlag{
Name: "user, u",
Value: "",
Usage: "Specify user-name to secure the runner",
})
}
return installFlags
}
func init() {
flags := getFlags()
installFlags := getInstallFlags()
common.RegisterCommand(cli.Command{
Name: "install",
Usage: "install service",
Action: RunServiceControl,
Flags: installFlags,
})
common.RegisterCommand(cli.Command{
Name: "uninstall",
Usage: "uninstall service",
Action: RunServiceControl,
Flags: flags,
})
common.RegisterCommand(cli.Command{
Name: "start",
Usage: "start service",
Action: RunServiceControl,
Flags: flags,
})
common.RegisterCommand(cli.Command{
Name: "stop",
Usage: "stop service",
Action: RunServiceControl,
Flags: flags,
})
common.RegisterCommand(cli.Command{
Name: "restart",
Usage: "restart service",
Action: RunServiceControl,
Flags: flags,
})
common.RegisterCommand(cli.Command{
Name: "status",
Usage: "get status of a service",
Action: RunServiceControl,
Flags: flags,
})
}
package commands
import (
"os"
"os/signal"
"syscall"
"time"
"github.com/sirupsen/logrus"
"github.com/tevino/abool"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/network"
)
type RunSingleCommand struct {
common.RunnerConfig
network common.Network
WaitTimeout int `long:"wait-timeout" description:"How long to wait in seconds before receiving the first job"`
lastBuild time.Time
runForever bool
MaxBuilds int `long:"max-builds" description:"How many builds to process before exiting"`
finished *abool.AtomicBool
interruptSignals chan os.Signal
}
func waitForInterrupts(finished *abool.AtomicBool, abortSignal chan os.Signal, doneSignal chan int, interruptSignals chan os.Signal) {
if interruptSignals == nil {
// buffer the channel so signal.Notify does not drop a signal
interruptSignals = make(chan os.Signal, 1)
}
signal.Notify(interruptSignals, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
interrupt := <-interruptSignals
if finished != nil {
finished.Set()
}
// request stop, but wait for force exit
for interrupt == syscall.SIGQUIT {
logrus.Warningln("Requested quit, waiting for builds to finish")
interrupt = <-interruptSignals
}
logrus.Warningln("Requested exit:", interrupt)
go func() {
for {
abortSignal <- interrupt
}
}()
select {
case newSignal := <-interruptSignals:
logrus.Fatalln("forced exit:", newSignal)
case <-time.After(common.ShutdownTimeout * time.Second):
logrus.Fatalln("shutdown timed out")
case <-doneSignal:
}
}
// Things to do after a build
func (r *RunSingleCommand) postBuild() {
if r.MaxBuilds > 0 {
r.MaxBuilds--
}
r.lastBuild = time.Now()
}
func (r *RunSingleCommand) processBuild(data common.ExecutorData, abortSignal chan os.Signal) (err error) {
jobData, healthy := r.network.RequestJob(r.RunnerConfig, nil)
if !healthy {
logrus.Println("Runner is not healthy!")
select {
case <-time.After(common.NotHealthyCheckInterval * time.Second):
case <-abortSignal:
}
return
}
if jobData == nil {
select {
case <-time.After(common.CheckInterval):
case <-abortSignal:
}
return
}
config := common.NewConfig()
newBuild, err := common.NewBuild(*jobData, &r.RunnerConfig, abortSignal, data)
if err != nil {
return
}
jobCredentials := &common.JobCredentials{
ID: jobData.ID,
Token: jobData.Token,
}
trace, err := r.network.ProcessJob(r.RunnerConfig, jobCredentials)
if err != nil {
return err
}
defer trace.Fail(err, common.NoneFailure)
err = newBuild.Run(config, trace)
r.postBuild()
return
}
func (r *RunSingleCommand) checkFinishedConditions() {
if r.MaxBuilds < 1 && !r.runForever {
logrus.Println("This runner has processed its build limit, so now exiting")
r.finished.Set()
}
if r.WaitTimeout > 0 && int(time.Since(r.lastBuild).Seconds()) > r.WaitTimeout {
logrus.Println("This runner has not received a job in", r.WaitTimeout, "seconds, so now exiting")
r.finished.Set()
}
}
func (r *RunSingleCommand) Execute(c *cli.Context) {
if len(r.URL) == 0 {
logrus.Fatalln("Missing URL")
}
if len(r.Token) == 0 {
logrus.Fatalln("Missing Token")
}
if len(r.Executor) == 0 {
logrus.Fatalln("Missing Executor")
}
executorProvider := common.GetExecutor(r.Executor)
if executorProvider == nil {
logrus.Fatalln("Unknown executor:", r.Executor)
}
logrus.Println("Starting runner for", r.URL, "with token", r.ShortDescription(), "...")
r.finished = abool.New()
abortSignal := make(chan os.Signal)
doneSignal := make(chan int, 1)
r.runForever = r.MaxBuilds == 0
go waitForInterrupts(r.finished, abortSignal, doneSignal, r.interruptSignals)
r.lastBuild = time.Now()
for !r.finished.IsSet() {
data, err := executorProvider.Acquire(&r.RunnerConfig)
if err != nil {
logrus.Warningln("Executor update:", err)
}
pErr := r.processBuild(data, abortSignal)
if pErr != nil {
logrus.WithError(pErr).Error("Failed to process build")
}
r.checkFinishedConditions()
executorProvider.Release(&r.RunnerConfig, data)
}
doneSignal <- 0
}
func init() {
common.RegisterCommand2("run-single", "start single runner", &RunSingleCommand{
network: network.NewGitLabClient(),
})
}
package commands
import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/network"
)
type UnregisterCommand struct {
configOptions
common.RunnerCredentials
network common.Network
Name string `toml:"name" json:"name" short:"n" long:"name" description:"Name of the runner you wish to unregister"`
AllRunners bool `toml:"all_runners" json:"all-runners" long:"all-runners" description:"Unregister all runners"`
}
func (c *UnregisterCommand) unregisterAllRunners() (runners []*common.RunnerConfig) {
logrus.Warningln("Unregistering all runners")
for _, r := range c.config.Runners {
if !c.network.UnregisterRunner(r.RunnerCredentials) {
logrus.Errorln("Failed to unregister runner", r.Name)
// If unregister fails, leave the runner in the config
runners = append(runners, r)
}
}
return
}
func (c *UnregisterCommand) unregisterSingleRunner() (runners []*common.RunnerConfig) {
if len(c.Name) > 0 { // Unregister when given a name
runnerConfig, err := c.RunnerByName(c.Name)
if err != nil {
logrus.Fatalln(err)
}
c.RunnerCredentials = runnerConfig.RunnerCredentials
}
// Unregister using the Token and URL of the runner
if !c.network.UnregisterRunner(c.RunnerCredentials) {
logrus.Fatalln("Failed to unregister runner", c.Name)
}
for _, otherRunner := range c.config.Runners {
if otherRunner.RunnerCredentials == c.RunnerCredentials {
continue
}
runners = append(runners, otherRunner)
}
return
}
func (c *UnregisterCommand) Execute(context *cli.Context) {
userModeWarning(false)
err := c.loadConfig()
if err != nil {
logrus.Fatalln(err)
return
}
var runners []*common.RunnerConfig
if c.AllRunners {
runners = c.unregisterAllRunners()
} else {
runners = c.unregisterSingleRunner()
}
// check if anything changed
if len(c.config.Runners) == len(runners) {
return
}
c.config.Runners = runners
// save config file
err = c.saveConfig()
if err != nil {
logrus.Fatalln("Failed to update", c.ConfigFile, err)
}
logrus.Println("Updated", c.ConfigFile)
}
func init() {
common.RegisterCommand2("unregister", "unregister specific runner", &UnregisterCommand{
network: network.NewGitLabClient(),
})
}
package commands
import (
"os"
"runtime"
"github.com/sirupsen/logrus"
)
func userModeWarning(withRun bool) {
logrus.WithFields(logrus.Fields{
"GOOS": runtime.GOOS,
"uid": os.Getuid(),
}).Debugln("Checking runtime mode")
// everything is supported on windows
if runtime.GOOS == "windows" {
return
}
systemMode := os.Getuid() == 0
// We support services on Linux, Windows and Darwin
noServices :=
runtime.GOOS != "linux" &&
runtime.GOOS != "darwin"
// We don't support services installed as a user on Linux
noUserService :=
!systemMode &&
runtime.GOOS == "linux"
if systemMode {
logrus.Infoln("Running in system-mode.")
} else {
logrus.Warningln("Running in user-mode.")
}
if withRun {
if noServices {
logrus.Warningln("You need to manually start builds processing:")
logrus.Warningln("$ gitlab-runner run")
} else if noUserService {
logrus.Warningln("The user-mode requires you to manually start builds processing:")
logrus.Warningln("$ gitlab-runner run")
}
}
if !systemMode {
logrus.Warningln("Use sudo for system-mode:")
logrus.Warningln("$ sudo gitlab-runner...")
}
logrus.Infoln("")
}
package commands
import (
"errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/network"
)
type VerifyCommand struct {
configOptions
common.RunnerCredentials
network common.Network
Name string `toml:"name" json:"name" short:"n" long:"name" description:"Name of the runner you wish to verify"`
DeleteNonExisting bool `long:"delete" description:"Delete no longer existing runners?"`
}
func (c *VerifyCommand) Execute(context *cli.Context) {
userModeWarning(true)
err := c.loadConfig()
if err != nil {
logrus.Fatalln(err)
return
}
// check if there's something to verify
toVerify, okRunners, err := c.selectRunners()
if err != nil {
logrus.Fatalln(err)
return
}
// verify whether the runners exist
for _, runner := range toVerify {
if c.network.VerifyRunner(runner.RunnerCredentials) {
okRunners = append(okRunners, runner)
}
}
// check if anything changed
if len(c.config.Runners) == len(okRunners) {
return
}
if !c.DeleteNonExisting {
logrus.Fatalln("Failed to verify runners")
return
}
c.config.Runners = okRunners
// save config file
err = c.saveConfig()
if err != nil {
logrus.Fatalln("Failed to update", c.ConfigFile, err)
}
logrus.Println("Updated", c.ConfigFile)
}
func (c *VerifyCommand) selectRunners() (toVerify []*common.RunnerConfig, okRunners []*common.RunnerConfig, err error) {
var selectorPresent = c.Name != "" || c.RunnerCredentials.URL != "" || c.RunnerCredentials.Token != ""
for _, runner := range c.config.Runners {
selected := !selectorPresent || runner.Name == c.Name || runner.RunnerCredentials.SameAs(&c.RunnerCredentials)
if selected {
toVerify = append(toVerify, runner)
} else {
okRunners = append(okRunners, runner)
}
}
if selectorPresent && len(toVerify) == 0 {
err = errors.New("no runner matches the filtering parameters")
}
return
}
func init() {
common.RegisterCommand2("verify", "verify all registered runners", &VerifyCommand{
network: network.NewGitLabClient(),
})
}
package common
import (
"context"
"errors"
"fmt"
"io"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags"
"gitlab.com/gitlab-org/gitlab-runner/helpers/tls"
"gitlab.com/gitlab-org/gitlab-runner/referees"
"gitlab.com/gitlab-org/gitlab-runner/session"
"gitlab.com/gitlab-org/gitlab-runner/session/proxy"
"gitlab.com/gitlab-org/gitlab-runner/session/terminal"
)
type GitStrategy int
const (
GitClone GitStrategy = iota
GitFetch
GitNone
)
const (
gitCleanFlagsDefault = "-ffdx"
gitCleanFlagsNone = "none"
)
type SubmoduleStrategy int
const (
SubmoduleInvalid SubmoduleStrategy = iota
SubmoduleNone
SubmoduleNormal
SubmoduleRecursive
)
type BuildRuntimeState string
const (
BuildRunStatePending BuildRuntimeState = "pending"
BuildRunRuntimeRunning BuildRuntimeState = "running"
BuildRunRuntimeFinished BuildRuntimeState = "finished"
BuildRunRuntimeCanceled BuildRuntimeState = "canceled"
BuildRunRuntimeTerminated BuildRuntimeState = "terminated"
BuildRunRuntimeTimedout BuildRuntimeState = "timedout"
)
type BuildStage string
const (
BuildStagePrepareExecutor BuildStage = "prepare_executor"
BuildStagePrepare BuildStage = "prepare_script"
BuildStageGetSources BuildStage = "get_sources"
BuildStageRestoreCache BuildStage = "restore_cache"
BuildStageDownloadArtifacts BuildStage = "download_artifacts"
BuildStageUserScript BuildStage = "build_script"
BuildStageAfterScript BuildStage = "after_script"
BuildStageArchiveCache BuildStage = "archive_cache"
BuildStageUploadOnSuccessArtifacts BuildStage = "upload_artifacts_on_success"
BuildStageUploadOnFailureArtifacts BuildStage = "upload_artifacts_on_failure"
)
type Build struct {
JobResponse `yaml:",inline"`
SystemInterrupt chan os.Signal `json:"-" yaml:"-"`
RootDir string `json:"-" yaml:"-"`
BuildDir string `json:"-" yaml:"-"`
CacheDir string `json:"-" yaml:"-"`
Hostname string `json:"-" yaml:"-"`
Runner *RunnerConfig `json:"runner"`
ExecutorData ExecutorData
ExecutorFeatures FeaturesInfo `json:"-" yaml:"-"`
// Unique ID for all running builds on this runner
RunnerID int `json:"runner_id"`
// Unique ID for all running builds on this runner and this project
ProjectRunnerID int `json:"project_runner_id"`
CurrentStage BuildStage
CurrentState BuildRuntimeState
Session *session.Session
executorStageResolver func() ExecutorStage
logger BuildLogger
allVariables JobVariables
createdAt time.Time
Referees []referees.Referee
ArtifactUploader func(config JobCredentials, reader io.Reader, options ArtifactsOptions) UploadState
}
func (b *Build) Log() *logrus.Entry {
return b.Runner.Log().WithField("job", b.ID).WithField("project", b.JobInfo.ProjectID)
}
func (b *Build) ProjectUniqueName() string {
return fmt.Sprintf("runner-%s-project-%d-concurrent-%d",
b.Runner.ShortDescription(), b.JobInfo.ProjectID, b.ProjectRunnerID)
}
func (b *Build) ProjectSlug() (string, error) {
url, err := url.Parse(b.GitInfo.RepoURL)
if err != nil {
return "", err
}
if url.Host == "" {
return "", errors.New("only URI reference supported")
}
slug := url.Path
slug = strings.TrimSuffix(slug, ".git")
slug = path.Clean(slug)
if slug == "." {
return "", errors.New("invalid path")
}
if strings.Contains(slug, "..") {
return "", errors.New("it doesn't look like a valid path")
}
return slug, nil
}
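// A worked example of the slug derivation above (hypothetical URL): for
// RepoURL "https://gitlab.example.com/group/repo.git", url.Path is
// "/group/repo.git"; trimming ".git" and cleaning the path yields the slug
// "/group/repo".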
func (b *Build) ProjectUniqueDir(sharedDir bool) string {
dir, err := b.ProjectSlug()
if err != nil {
dir = fmt.Sprintf("project-%d", b.JobInfo.ProjectID)
}
// for shared dirs the path is constructed like this:
// <some-path>/runner-short-id/concurrent-id/group-name/project-name/
// e.g. <some-path>/01234567/0/group/repo/
if sharedDir {
dir = path.Join(
fmt.Sprintf("%s", b.Runner.ShortDescription()),
fmt.Sprintf("%d", b.ProjectRunnerID),
dir,
)
}
return dir
}
func (b *Build) FullProjectDir() string {
return helpers.ToSlash(b.BuildDir)
}
func (b *Build) TmpProjectDir() string {
return helpers.ToSlash(b.BuildDir) + ".tmp"
}
func (b *Build) getCustomBuildDir(rootDir, overrideKey string, customBuildDirEnabled, sharedDir bool) (string, error) {
dir := b.GetAllVariables().Get(overrideKey)
if dir == "" {
return path.Join(rootDir, b.ProjectUniqueDir(sharedDir)), nil
}
if !customBuildDirEnabled {
return "", MakeBuildError("setting %s is not allowed, enable `custom_build_dir` feature", overrideKey)
}
if !strings.HasPrefix(dir, rootDir) {
return "", MakeBuildError("the %s=%q has to be within %q",
overrideKey, dir, rootDir)
}
return dir, nil
}
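// Example of the constraint above (hypothetical values): with rootDir
// "/builds" and GIT_CLONE_PATH "/builds/group/repo", the override is
// accepted; GIT_CLONE_PATH "/tmp/elsewhere" fails the HasPrefix check and is
// rejected with a BuildError.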
func (b *Build) StartBuild(rootDir, cacheDir string, customBuildDirEnabled, sharedDir bool) error {
if rootDir == "" {
return MakeBuildError("the builds_dir is not configured")
}
if cacheDir == "" {
return MakeBuildError("the cache_dir is not configured")
}
// We set RootDir and invalidate variables
// to be able to use CI_BUILDS_DIR
b.RootDir = rootDir
b.CacheDir = path.Join(cacheDir, b.ProjectUniqueDir(false))
b.refreshAllVariables()
var err error
b.BuildDir, err = b.getCustomBuildDir(b.RootDir, "GIT_CLONE_PATH", customBuildDirEnabled, sharedDir)
if err != nil {
return err
}
// We invalidate variables to be able to use
// CI_CACHE_DIR and CI_PROJECT_DIR
b.refreshAllVariables()
return nil
}
func (b *Build) executeStage(ctx context.Context, buildStage BuildStage, executor Executor) error {
b.CurrentStage = buildStage
b.Log().WithField("build_stage", buildStage).Debug("Executing build stage")
shell := executor.Shell()
if shell == nil {
return errors.New("No shell defined")
}
script, err := GenerateShellScript(buildStage, *shell)
if err != nil {
return err
}
// Nothing to execute
if script == "" {
return nil
}
cmd := ExecutorCommand{
Context: ctx,
Script: script,
Stage: buildStage,
}
switch buildStage {
case BuildStageUserScript, BuildStageAfterScript: // use custom build environment
cmd.Predefined = false
default: // all other stages use a predefined build environment
cmd.Predefined = true
}
section := helpers.BuildSection{
Name: string(buildStage),
SkipMetrics: !b.JobResponse.Features.TraceSections,
Run: func() error { return executor.Run(cmd) },
}
return section.Execute(&b.logger)
}
func (b *Build) executeUploadArtifacts(ctx context.Context, state error, executor Executor) (err error) {
if state == nil {
return b.executeStage(ctx, BuildStageUploadOnSuccessArtifacts, executor)
}
return b.executeStage(ctx, BuildStageUploadOnFailureArtifacts, executor)
}
func (b *Build) executeScript(ctx context.Context, executor Executor) error {
// track job start and create referees
startTime := time.Now()
b.createReferees(executor)
// Prepare stage
err := b.executeStage(ctx, BuildStagePrepare, executor)
if err == nil {
err = b.attemptExecuteStage(ctx, BuildStageGetSources, executor, b.GetGetSourcesAttempts())
}
if err == nil {
err = b.attemptExecuteStage(ctx, BuildStageRestoreCache, executor, b.GetRestoreCacheAttempts())
}
if err == nil {
err = b.attemptExecuteStage(ctx, BuildStageDownloadArtifacts, executor, b.GetDownloadArtifactsAttempts())
}
if err == nil {
// Execute user build script (before_script + script)
err = b.executeStage(ctx, BuildStageUserScript, executor)
// Execute after script (after_script)
timeoutContext, timeoutCancel := context.WithTimeout(ctx, AfterScriptTimeout)
defer timeoutCancel()
b.executeStage(timeoutContext, BuildStageAfterScript, executor)
}
// Execute post script (cache store, artifacts upload)
if err == nil {
err = b.executeStage(ctx, BuildStageArchiveCache, executor)
}
artifactUploadError := b.executeUploadArtifacts(ctx, err, executor)
// track job end and execute referees
endTime := time.Now()
b.executeUploadReferees(ctx, startTime, endTime)
// Use job's error as most important
if err != nil {
return err
}
// Otherwise, use the artifact upload error
return artifactUploadError
}
func (b *Build) createReferees(executor Executor) {
b.Referees = referees.CreateReferees(executor, b.Runner.Referees, b.Log())
}
func (b *Build) executeUploadReferees(ctx context.Context, startTime time.Time, endTime time.Time) {
if b.Referees == nil || b.ArtifactUploader == nil {
b.Log().Debug("Skipping referees execution")
return
}
jobCredentials := JobCredentials{
ID: b.JobResponse.ID,
Token: b.JobResponse.Token,
URL: b.Runner.RunnerCredentials.URL,
}
// execute and upload the results of each referee
for _, referee := range b.Referees {
if referee == nil {
continue
}
reader, err := referee.Execute(ctx, startTime, endTime)
// keep moving even if a subset of the referees have failed
if err != nil {
continue
}
// referee ran successfully, upload its results to GitLab as an artifact
b.ArtifactUploader(jobCredentials, reader, ArtifactsOptions{
BaseName: referee.ArtifactBaseName(),
Type: referee.ArtifactType(),
Format: ArtifactFormat(referee.ArtifactFormat()),
})
}
}
func (b *Build) attemptExecuteStage(ctx context.Context, buildStage BuildStage, executor Executor, attempts int) (err error) {
if attempts < 1 || attempts > 10 {
return fmt.Errorf("Number of attempts out of the range [1, 10] for stage: %s", buildStage)
}
for attempt := 0; attempt < attempts; attempt++ {
if err = b.executeStage(ctx, buildStage, executor); err == nil {
return
}
}
return
}
func (b *Build) GetBuildTimeout() time.Duration {
buildTimeout := b.RunnerInfo.Timeout
if buildTimeout <= 0 {
buildTimeout = DefaultTimeout
}
return time.Duration(buildTimeout) * time.Second
}
func (b *Build) handleError(err error) error {
switch err {
case context.Canceled:
b.CurrentState = BuildRunRuntimeCanceled
return &BuildError{Inner: errors.New("canceled")}
case context.DeadlineExceeded:
b.CurrentState = BuildRunRuntimeTimedout
return &BuildError{
Inner: fmt.Errorf("execution took longer than %v seconds", b.GetBuildTimeout()),
FailureReason: JobExecutionTimeout,
}
default:
b.CurrentState = BuildRunRuntimeFinished
return err
}
}
func (b *Build) run(ctx context.Context, executor Executor) (err error) {
b.CurrentState = BuildRunRuntimeRunning
buildFinish := make(chan error, 1)
runContext, runCancel := context.WithCancel(context.Background())
defer runCancel()
if term, ok := executor.(terminal.InteractiveTerminal); b.Session != nil && ok {
b.Session.SetInteractiveTerminal(term)
}
if proxyPooler, ok := executor.(proxy.Pooler); b.Session != nil && ok {
b.Session.SetProxyPool(proxyPooler)
}
// Run build script
go func() {
buildFinish <- b.executeScript(runContext, executor)
}()
// Wait for signals: cancel, timeout, abort or finish
b.Log().Debugln("Waiting for signals...")
select {
case <-ctx.Done():
err = b.handleError(ctx.Err())
case signal := <-b.SystemInterrupt:
err = fmt.Errorf("aborted: %v", signal)
b.CurrentState = BuildRunRuntimeTerminated
case err = <-buildFinish:
b.CurrentState = BuildRunRuntimeFinished
return err
}
b.Log().WithError(err).Debugln("Waiting for build to finish...")
// Wait until we receive the notification that the build finished
runCancel()
b.waitForBuildFinish(buildFinish, WaitForBuildFinishTimeout)
return err
}
// waitForBuildFinish will wait for the build to finish or timeout, whichever
// comes first. This is to prevent issues where something in the build can't
// be killed or processed and results in the job running until the GitLab
// Runner process exits.
func (b *Build) waitForBuildFinish(buildFinish <-chan error, timeout time.Duration) {
select {
case <-buildFinish:
return
case <-time.After(timeout):
b.logger.Warningln("Timed out waiting for the build to finish")
return
}
}
func (b *Build) retryCreateExecutor(options ExecutorPrepareOptions, provider ExecutorProvider, logger BuildLogger) (executor Executor, err error) {
for tries := 0; tries < PreparationRetries; tries++ {
executor = provider.Create()
if executor == nil {
err = errors.New("failed to create executor")
return
}
b.executorStageResolver = executor.GetCurrentStage
err = executor.Prepare(options)
if err == nil {
break
}
executor.Cleanup()
executor = nil
if _, ok := err.(*BuildError); ok {
break
} else if options.Context.Err() != nil {
return nil, b.handleError(options.Context.Err())
}
logger.SoftErrorln("Preparation failed:", err)
logger.Infoln("Will be retried in", PreparationRetryInterval, "...")
time.Sleep(PreparationRetryInterval)
}
return
}
func (b *Build) waitForTerminal(ctx context.Context, timeout time.Duration) error {
if b.Session == nil || !b.Session.Connected() {
return nil
}
timeout = b.getTerminalTimeout(ctx, timeout)
b.logger.Infoln(
fmt.Sprintf(
"Terminal is connected, will time out in %s...",
timeout.Round(time.Second),
),
)
select {
case <-ctx.Done():
err := b.Session.Kill()
if err != nil {
b.Log().WithError(err).Warn("Failed to kill session")
}
return errors.New("build cancelled, killing session")
case <-time.After(timeout):
err := fmt.Errorf(
"Terminal session timed out (maximum time allowed - %s)",
timeout.Round(time.Second),
)
b.logger.Infoln(err.Error())
b.Session.TimeoutCh <- err
return err
case err := <-b.Session.DisconnectCh:
b.logger.Infoln("Terminal disconnected")
return fmt.Errorf("terminal disconnected: %w", err)
case signal := <-b.SystemInterrupt:
b.logger.Infoln("Terminal disconnected")
err := b.Session.Kill()
if err != nil {
b.Log().WithError(err).Warn("Failed to kill session")
}
return fmt.Errorf("terminal disconnected by system signal: %v", signal)
}
}
// getTerminalTimeout checks if the job timeout comes before the
// configured terminal timeout.
func (b *Build) getTerminalTimeout(ctx context.Context, timeout time.Duration) time.Duration {
expiryTime, _ := ctx.Deadline()
if expiryTime.Before(time.Now().Add(timeout)) {
timeout = time.Until(expiryTime)
}
return timeout
}
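// Worked example (hypothetical values): if the job deadline is 30s away and
// the configured terminal timeout is 2m, the session is capped at the
// remaining 30s; with a deadline 10m away, the full 2m timeout is kept.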
func (b *Build) setTraceStatus(trace JobTrace, err error) {
logger := b.logger.WithFields(logrus.Fields{
"duration": b.Duration(),
})
if err == nil {
logger.Infoln("Job succeeded")
trace.Success()
return
}
if buildError, ok := err.(*BuildError); ok {
logger.SoftErrorln("Job failed:", err)
failureReason := buildError.FailureReason
if failureReason == "" {
failureReason = ScriptFailure
}
trace.Fail(err, failureReason)
return
}
logger.Errorln("Job failed (system failure):", err)
trace.Fail(err, RunnerSystemFailure)
}
func (b *Build) CurrentExecutorStage() ExecutorStage {
if b.executorStageResolver == nil {
b.executorStageResolver = func() ExecutorStage {
return ExecutorStage("")
}
}
return b.executorStageResolver()
}
func (b *Build) Run(globalConfig *Config, trace JobTrace) (err error) {
var executor Executor
b.logger = NewBuildLogger(trace, b.Log())
b.logger.Println("Running with", AppVersion.Line())
if b.Runner != nil && b.Runner.ShortDescription() != "" {
b.logger.Println(" on", b.Runner.Name, b.Runner.ShortDescription())
}
b.CurrentState = BuildRunStatePending
defer func() {
b.setTraceStatus(trace, err)
if executor != nil {
executor.Cleanup()
}
}()
ctx, cancel := context.WithTimeout(context.Background(), b.GetBuildTimeout())
defer cancel()
trace.SetCancelFunc(cancel)
trace.SetMasked(b.GetAllVariables().Masked())
options := ExecutorPrepareOptions{
Config: b.Runner,
Build: b,
Trace: trace,
User: globalConfig.User,
Context: ctx,
}
provider := GetExecutor(b.Runner.Executor)
if provider == nil {
return errors.New("executor not found")
}
provider.GetFeatures(&b.ExecutorFeatures)
section := helpers.BuildSection{
Name: string(BuildStagePrepareExecutor),
SkipMetrics: !b.JobResponse.Features.TraceSections,
Run: func() error {
executor, err = b.retryCreateExecutor(options, provider, b.logger)
return err
},
}
err = section.Execute(&b.logger)
if err == nil {
err = b.run(ctx, executor)
if err := b.waitForTerminal(ctx, globalConfig.SessionServer.GetSessionTimeout()); err != nil {
b.Log().WithError(err).Debug("Stopped waiting for terminal")
}
}
if executor != nil {
executor.Finish(err)
}
return err
}
func (b *Build) String() string {
return helpers.ToYAML(b)
}
func (b *Build) GetDefaultVariables() JobVariables {
return JobVariables{
{Key: "CI_BUILDS_DIR", Value: filepath.FromSlash(b.RootDir), Public: true, Internal: true, File: false},
{Key: "CI_PROJECT_DIR", Value: filepath.FromSlash(b.FullProjectDir()), Public: true, Internal: true, File: false},
{Key: "CI_CONCURRENT_ID", Value: strconv.Itoa(b.RunnerID), Public: true, Internal: true, File: false},
{Key: "CI_CONCURRENT_PROJECT_ID", Value: strconv.Itoa(b.ProjectRunnerID), Public: true, Internal: true, File: false},
{Key: "CI_SERVER", Value: "yes", Public: true, Internal: true, File: false},
}
}
func (b *Build) GetDefaultFeatureFlagsVariables() JobVariables {
variables := make(JobVariables, 0)
for _, featureFlag := range featureflags.GetAll() {
variables = append(variables, JobVariable{
Key: featureFlag.Name,
Value: featureFlag.DefaultValue,
Public: true,
Internal: true,
File: false,
})
}
return variables
}
func (b *Build) GetSharedEnvVariable() JobVariable {
env := JobVariable{Value: "true", Public: true, Internal: true, File: false}
if b.IsSharedEnv() {
env.Key = "CI_SHARED_ENVIRONMENT"
} else {
env.Key = "CI_DISPOSABLE_ENVIRONMENT"
}
return env
}
func (b *Build) GetTLSVariables(caFile, certFile, keyFile string) JobVariables {
variables := JobVariables{}
if b.TLSCAChain != "" {
variables = append(variables, JobVariable{
Key: caFile,
Value: b.TLSCAChain,
Public: true,
Internal: true,
File: true,
})
}
if b.TLSAuthCert != "" && b.TLSAuthKey != "" {
variables = append(variables, JobVariable{
Key: certFile,
Value: b.TLSAuthCert,
Public: true,
Internal: true,
File: true,
})
variables = append(variables, JobVariable{
Key: keyFile,
Value: b.TLSAuthKey,
Internal: true,
File: true,
})
}
return variables
}
func (b *Build) GetCITLSVariables() JobVariables {
return b.GetTLSVariables(tls.VariableCAFile, tls.VariableCertFile, tls.VariableKeyFile)
}
func (b *Build) IsSharedEnv() bool {
return b.ExecutorFeatures.Shared
}
func (b *Build) refreshAllVariables() {
b.allVariables = nil
}
func (b *Build) GetAllVariables() JobVariables {
if b.allVariables != nil {
return b.allVariables
}
variables := make(JobVariables, 0)
variables = append(variables, b.GetDefaultFeatureFlagsVariables()...)
if b.Runner != nil {
variables = append(variables, b.Runner.GetVariables()...)
}
variables = append(variables, b.GetDefaultVariables()...)
variables = append(variables, b.GetCITLSVariables()...)
variables = append(variables, b.Variables...)
variables = append(variables, b.GetSharedEnvVariable())
variables = append(variables, AppVersion.Variables()...)
b.allVariables = variables.Expand()
return b.allVariables
}
// GetRemoteURL checks if the default clone URL is overridden by the runner
// configuration option 'CloneURL'. If it is, we use that option to construct
// the clone URL.
func (b *Build) GetRemoteURL() string {
cloneURL := strings.TrimRight(b.Runner.CloneURL, "/")
if !strings.HasPrefix(cloneURL, "http") {
return b.GitInfo.RepoURL
}
variables := b.GetAllVariables()
ciJobToken := variables.Get("CI_JOB_TOKEN")
ciProjectPath := variables.Get("CI_PROJECT_PATH")
splits := strings.SplitAfterN(cloneURL, "://", 2)
return fmt.Sprintf("%sgitlab-ci-token:%s@%s/%s.git", splits[0], ciJobToken, splits[1], ciProjectPath)
}
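// Illustrative example (values assumed, not from the original source): with
// clone_url = "https://gitlab.example.com", CI_PROJECT_PATH = "group/project"
// and CI_JOB_TOKEN = "tok", GetRemoteURL returns
// "https://gitlab-ci-token:tok@gitlab.example.com/group/project.git"; when
// clone_url is unset or not an http(s) URL, the repo URL from the job
// payload is used unchanged.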
func (b *Build) GetGitStrategy() GitStrategy {
switch b.GetAllVariables().Get("GIT_STRATEGY") {
case "clone":
return GitClone
case "fetch":
return GitFetch
case "none":
return GitNone
default:
if b.AllowGitFetch {
return GitFetch
}
return GitClone
}
}
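// Usage sketch (illustrative; b is assumed to be a *Build created from a job
// payload):
//
//	switch b.GetGitStrategy() {
//	case GitClone: // fresh clone into an empty build directory
//	case GitFetch: // reuse the existing working copy and fetch new refs
//	case GitNone:  // skip all Git operations
//	}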
func (b *Build) GetGitCheckout() bool {
if b.GetGitStrategy() == GitNone {
return false
}
strCheckout := b.GetAllVariables().Get("GIT_CHECKOUT")
if len(strCheckout) == 0 {
return true
}
checkout, err := strconv.ParseBool(strCheckout)
if err != nil {
return true
}
return checkout
}
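// Example behaviour (illustrative): GIT_CHECKOUT="false" skips checking out
// the working tree after clone/fetch; an empty value, or anything
// strconv.ParseBool rejects (e.g. "yes"), falls back to true.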
func (b *Build) GetSubmoduleStrategy() SubmoduleStrategy {
if b.GetGitStrategy() == GitNone {
return SubmoduleNone
}
switch b.GetAllVariables().Get("GIT_SUBMODULE_STRATEGY") {
case "normal":
return SubmoduleNormal
case "recursive":
return SubmoduleRecursive
case "none", "":
// Default (legacy) behavior is to not update/init submodules
return SubmoduleNone
default:
// Will cause an error in AbstractShell.writeSubmoduleUpdateCmds
return SubmoduleInvalid
}
}
func (b *Build) GetGitCleanFlags() []string {
flags := b.GetAllVariables().Get("GIT_CLEAN_FLAGS")
if flags == "" {
flags = gitCleanFlagsDefault
}
if flags == gitCleanFlagsNone {
return []string{}
}
return strings.Fields(flags)
}
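// Example (illustrative): GIT_CLEAN_FLAGS="-ffdx -e cache/" yields
// []string{"-ffdx", "-e", "cache/"}, so the job runs
// `git clean -ffdx -e cache/`; the gitCleanFlagsNone sentinel disables
// cleaning entirely by returning an empty slice.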
func (b *Build) IsDebugTraceEnabled() bool {
trace, err := strconv.ParseBool(b.GetAllVariables().Get("CI_DEBUG_TRACE"))
if err != nil {
trace = false
}
if b.Runner.DebugTraceDisabled {
if trace {
b.logger.Warningln("CI_DEBUG_TRACE usage is disabled on this Runner")
}
return false
}
return trace
}
func (b *Build) GetDockerAuthConfig() string {
return b.GetAllVariables().Get("DOCKER_AUTH_CONFIG")
}
func (b *Build) GetGetSourcesAttempts() int {
retries, err := strconv.Atoi(b.GetAllVariables().Get("GET_SOURCES_ATTEMPTS"))
if err != nil {
return DefaultGetSourcesAttempts
}
return retries
}
func (b *Build) GetDownloadArtifactsAttempts() int {
retries, err := strconv.Atoi(b.GetAllVariables().Get("ARTIFACT_DOWNLOAD_ATTEMPTS"))
if err != nil {
return DefaultArtifactDownloadAttempts
}
return retries
}
func (b *Build) GetRestoreCacheAttempts() int {
retries, err := strconv.Atoi(b.GetAllVariables().Get("RESTORE_CACHE_ATTEMPTS"))
if err != nil {
return DefaultRestoreCacheAttempts
}
return retries
}
func (b *Build) GetCacheRequestTimeout() int {
timeout, err := strconv.Atoi(b.GetAllVariables().Get("CACHE_REQUEST_TIMEOUT"))
if err != nil {
return DefaultCacheRequestTimeout
}
return timeout
}
func (b *Build) Duration() time.Duration {
return time.Since(b.createdAt)
}
func NewBuild(jobData JobResponse, runnerConfig *RunnerConfig, systemInterrupt chan os.Signal, executorData ExecutorData) (*Build, error) {
// Attempt to perform a deep copy of the RunnerConfig
runnerConfigCopy, err := runnerConfig.DeepCopy()
if err != nil {
return nil, fmt.Errorf("deep copy of runner config failed: %w", err)
}
return &Build{
JobResponse: jobData,
Runner: runnerConfigCopy,
SystemInterrupt: systemInterrupt,
ExecutorData: executorData,
createdAt: time.Now(),
}, nil
}
func (b *Build) IsFeatureFlagOn(name string) bool {
value := b.GetAllVariables().Get(name)
on, err := featureflags.IsOn(value)
if err != nil {
logrus.WithError(err).
WithField("name", name).
WithField("value", value).
Error("Error while parsing the value of feature flag")
return false
}
return on
}
func (b *Build) IsLFSSmudgeDisabled() bool {
disabled, err := strconv.ParseBool(b.GetAllVariables().Get("GIT_LFS_SKIP_SMUDGE"))
if err != nil {
return false
}
return disabled
}
package common
import (
"fmt"
"io"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
"gitlab.com/gitlab-org/gitlab-runner/helpers/url"
)
type BuildLogger struct {
log JobTrace
entry *logrus.Entry
}
func (e *BuildLogger) WithFields(fields logrus.Fields) BuildLogger {
return NewBuildLogger(e.log, e.entry.WithFields(fields))
}
func (e *BuildLogger) SendRawLog(args ...interface{}) {
if e.log != nil {
fmt.Fprint(e.log, args...)
}
}
func (e *BuildLogger) sendLog(logger func(args ...interface{}), logPrefix string, args ...interface{}) {
if e.log != nil {
logLine := url_helpers.ScrubSecrets(logPrefix + fmt.Sprintln(args...))
e.SendRawLog(logLine)
e.SendRawLog(helpers.ANSI_RESET)
if e.log.IsStdout() {
return
}
}
if len(args) == 0 {
return
}
logger(args...)
}
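// Note on sendLog above: each line is secret-scrubbed, prefixed with the
// given ANSI colour/severity marker, written to the job trace followed by an
// ANSI reset, and then mirrored to the runner's own logrus entry unless the
// trace is attached to stdout (which would print every line twice).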
func (e *BuildLogger) WriterLevel(level logrus.Level) *io.PipeWriter {
return e.entry.WriterLevel(level)
}
func (e *BuildLogger) Debugln(args ...interface{}) {
if e.entry == nil {
return
}
e.entry.Debugln(args...)
}
func (e *BuildLogger) Println(args ...interface{}) {
if e.entry == nil {
return
}
e.sendLog(e.entry.Debugln, helpers.ANSI_CLEAR, args...)
}
func (e *BuildLogger) Infoln(args ...interface{}) {
if e.entry == nil {
return
}
e.sendLog(e.entry.Println, helpers.ANSI_BOLD_GREEN, args...)
}
func (e *BuildLogger) Warningln(args ...interface{}) {
if e.entry == nil {
return
}
e.sendLog(e.entry.Warningln, helpers.ANSI_YELLOW+"WARNING: ", args...)
}
func (e *BuildLogger) SoftErrorln(args ...interface{}) {
if e.entry == nil {
return
}
e.sendLog(e.entry.Warningln, helpers.ANSI_BOLD_RED+"ERROR: ", args...)
}
func (e *BuildLogger) Errorln(args ...interface{}) {
if e.entry == nil {
return
}
e.sendLog(e.entry.Errorln, helpers.ANSI_BOLD_RED+"ERROR: ", args...)
}
func NewBuildLogger(log JobTrace, entry *logrus.Entry) BuildLogger {
return BuildLogger{
log: log,
entry: entry,
}
}
package common
import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"gitlab.com/ayufan/golang-cli-helpers"
)
var commands []cli.Command
type Commander interface {
Execute(c *cli.Context)
}
func RegisterCommand(command cli.Command) {
logrus.Debugln("Registering", command.Name, "command...")
commands = append(commands, command)
}
func RegisterCommand2(name, usage string, data Commander, flags ...cli.Flag) {
RegisterCommand(cli.Command{
Name: name,
Usage: usage,
Action: data.Execute,
Flags: append(flags, clihelpers.GetFlagsFromStruct(data)...),
})
}
func GetCommands() []cli.Command {
return commands
}
package common
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"os"
"path/filepath"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/docker/go-units"
"github.com/sirupsen/logrus"
api "k8s.io/api/core/v1"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
docker_helpers "gitlab.com/gitlab-org/gitlab-runner/helpers/docker"
"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh"
"gitlab.com/gitlab-org/gitlab-runner/helpers/timeperiod"
"gitlab.com/gitlab-org/gitlab-runner/referees"
)
type DockerPullPolicy string
type DockerSysCtls map[string]string
const (
PullPolicyAlways = "always"
PullPolicyNever = "never"
PullPolicyIfNotPresent = "if-not-present"
)
// Get returns one of the predefined values or an error if the value doesn't match any of them
func (p DockerPullPolicy) Get() (DockerPullPolicy, error) {
// Default policy is always
if p == "" {
return PullPolicyAlways, nil
}
// Verify pull policy
if p != PullPolicyNever &&
p != PullPolicyIfNotPresent &&
p != PullPolicyAlways {
return "", fmt.Errorf("unsupported docker-pull-policy: %v", p)
}
return p, nil
}
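// Illustrative usage (not part of the original source):
//
//	p, err := DockerPullPolicy("").Get()              // "always" (default), err == nil
//	p, err = DockerPullPolicy("if-not-present").Get() // "if-not-present", err == nil
//	_, err = DockerPullPolicy("sometimes").Get()      // err: unsupported docker-pull-policy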
type DockerConfig struct {
docker_helpers.DockerCredentials
Hostname string `toml:"hostname,omitempty" json:"hostname" long:"hostname" env:"DOCKER_HOSTNAME" description:"Custom container hostname"`
Image string `toml:"image" json:"image" long:"image" env:"DOCKER_IMAGE" description:"Docker image to be used"`
Runtime string `toml:"runtime,omitempty" json:"runtime" long:"runtime" env:"DOCKER_RUNTIME" description:"Docker runtime to be used"`
Memory string `toml:"memory,omitempty" json:"memory" long:"memory" env:"DOCKER_MEMORY" description:"Memory limit (format: <number>[<unit>]). Unit can be one of b, k, m, or g. Minimum is 4M."`
MemorySwap string `toml:"memory_swap,omitempty" json:"memory_swap" long:"memory-swap" env:"DOCKER_MEMORY_SWAP" description:"Total memory limit (memory + swap, format: <number>[<unit>]). Unit can be one of b, k, m, or g."`
MemoryReservation string `toml:"memory_reservation,omitempty" json:"memory_reservation" long:"memory-reservation" env:"DOCKER_MEMORY_RESERVATION" description:"Memory soft limit (format: <number>[<unit>]). Unit can be one of b, k, m, or g."`
CPUSetCPUs string `toml:"cpuset_cpus,omitempty" json:"cpuset_cpus" long:"cpuset-cpus" env:"DOCKER_CPUSET_CPUS" description:"String value containing the cgroups CpusetCpus to use"`
CPUS string `toml:"cpus,omitempty" json:"cpus" long:"cpus" env:"DOCKER_CPUS" description:"Number of CPUs"`
CPUShares int64 `toml:"cpu_shares,omitzero" json:"cpu_shares" long:"cpu-shares" env:"DOCKER_CPU_SHARES" description:"Number of CPU shares"`
DNS []string `toml:"dns,omitempty" json:"dns" long:"dns" env:"DOCKER_DNS" description:"A list of DNS servers for the container to use"`
DNSSearch []string `toml:"dns_search,omitempty" json:"dns_search" long:"dns-search" env:"DOCKER_DNS_SEARCH" description:"A list of DNS search domains"`
Privileged bool `toml:"privileged,omitzero" json:"privileged" long:"privileged" env:"DOCKER_PRIVILEGED" description:"Give extended privileges to container"`
DisableEntrypointOverwrite bool `toml:"disable_entrypoint_overwrite,omitzero" json:"disable_entrypoint_overwrite" long:"disable-entrypoint-overwrite" env:"DOCKER_DISABLE_ENTRYPOINT_OVERWRITE" description:"Disable the possibility for a container to overwrite the default image entrypoint"`
UsernsMode string `toml:"userns_mode,omitempty" json:"userns_mode" long:"userns" env:"DOCKER_USERNS_MODE" description:"User namespace to use"`
CapAdd []string `toml:"cap_add" json:"cap_add" long:"cap-add" env:"DOCKER_CAP_ADD" description:"Add Linux capabilities"`
CapDrop []string `toml:"cap_drop" json:"cap_drop" long:"cap-drop" env:"DOCKER_CAP_DROP" description:"Drop Linux capabilities"`
OomKillDisable bool `toml:"oom_kill_disable,omitzero" json:"oom_kill_disable" long:"oom-kill-disable" env:"DOCKER_OOM_KILL_DISABLE" description:"Do not kill processes in a container if an out-of-memory (OOM) error occurs"`
OomScoreAdjust int `toml:"oom_score_adjust,omitzero" json:"oom_score_adjust" long:"oom-score-adjust" env:"DOCKER_OOM_SCORE_ADJUST" description:"Adjust OOM score"`
SecurityOpt []string `toml:"security_opt" json:"security_opt" long:"security-opt" env:"DOCKER_SECURITY_OPT" description:"Security Options"`
Devices []string `toml:"devices" json:"devices" long:"devices" env:"DOCKER_DEVICES" description:"Add a host device to the container"`
DisableCache bool `toml:"disable_cache,omitzero" json:"disable_cache" long:"disable-cache" env:"DOCKER_DISABLE_CACHE" description:"Disable all container caching"`
Volumes []string `toml:"volumes,omitempty" json:"volumes" long:"volumes" env:"DOCKER_VOLUMES" description:"Bind-mount a volume and create it if it doesn't exist prior to mounting. Can be specified multiple times once per mountpoint, e.g. --docker-volumes 'test0:/test0' --docker-volumes 'test1:/test1'"`
VolumeDriver string `toml:"volume_driver,omitempty" json:"volume_driver" long:"volume-driver" env:"DOCKER_VOLUME_DRIVER" description:"Volume driver to be used"`
CacheDir string `toml:"cache_dir,omitempty" json:"cache_dir" long:"cache-dir" env:"DOCKER_CACHE_DIR" description:"Directory where to store caches"`
ExtraHosts []string `toml:"extra_hosts,omitempty" json:"extra_hosts" long:"extra-hosts" env:"DOCKER_EXTRA_HOSTS" description:"Add a custom host-to-IP mapping"`
VolumesFrom []string `toml:"volumes_from,omitempty" json:"volumes_from" long:"volumes-from" env:"DOCKER_VOLUMES_FROM" description:"A list of volumes to inherit from another container"`
NetworkMode string `toml:"network_mode,omitempty" json:"network_mode" long:"network-mode" env:"DOCKER_NETWORK_MODE" description:"Add container to a custom network"`
Links []string `toml:"links,omitempty" json:"links" long:"links" env:"DOCKER_LINKS" description:"Add link to another container"`
Services []*DockerService `toml:"services,omitempty"`
WaitForServicesTimeout int `toml:"wait_for_services_timeout,omitzero" json:"wait_for_services_timeout" long:"wait-for-services-timeout" env:"DOCKER_WAIT_FOR_SERVICES_TIMEOUT" description:"How long to wait for service startup"`
AllowedImages []string `toml:"allowed_images,omitempty" json:"allowed_images" long:"allowed-images" env:"DOCKER_ALLOWED_IMAGES" description:"Whitelist allowed images"`
AllowedServices []string `toml:"allowed_services,omitempty" json:"allowed_services" long:"allowed-services" env:"DOCKER_ALLOWED_SERVICES" description:"Whitelist allowed services"`
PullPolicy DockerPullPolicy `toml:"pull_policy,omitempty" json:"pull_policy" long:"pull-policy" env:"DOCKER_PULL_POLICY" description:"Image pull policy: never, if-not-present, always"`
ShmSize int64 `toml:"shm_size,omitempty" json:"shm_size" long:"shm-size" env:"DOCKER_SHM_SIZE" description:"Shared memory size for docker images (in bytes)"`
Tmpfs map[string]string `toml:"tmpfs,omitempty" json:"tmpfs" long:"tmpfs" env:"DOCKER_TMPFS" description:"A toml table/json object with the format key=values. When set this will mount the specified path in the key as a tmpfs volume in the main container, using the options specified as key. For the supported options, see the documentation for the unix 'mount' command"`
ServicesTmpfs map[string]string `toml:"services_tmpfs,omitempty" json:"services_tmpfs" long:"services-tmpfs" env:"DOCKER_SERVICES_TMPFS" description:"A toml table/json object with the format key=values. When set this will mount the specified path in the key as a tmpfs volume in all the service containers, using the options specified as key. For the supported options, see the documentation for the unix 'mount' command"`
SysCtls DockerSysCtls `toml:"sysctls,omitempty" json:"sysctls" long:"sysctls" env:"DOCKER_SYSCTLS" description:"Sysctl options, a toml table/json object of key=value. Value is expected to be a string."`
HelperImage string `toml:"helper_image,omitempty" json:"helper_image" long:"helper-image" env:"DOCKER_HELPER_IMAGE" description:"[ADVANCED] Override the default helper image used to clone repos and upload artifacts"`
}
type DockerMachine struct {
IdleCount int `long:"idle-nodes" env:"MACHINE_IDLE_COUNT" description:"Maximum idle machines"`
IdleTime int `toml:"IdleTime,omitzero" long:"idle-time" env:"MACHINE_IDLE_TIME" description:"Minimum time after which a node can be destroyed"`
MaxBuilds int `toml:"MaxBuilds,omitzero" long:"max-builds" env:"MACHINE_MAX_BUILDS" description:"Maximum number of builds processed by machine"`
MachineDriver string `long:"machine-driver" env:"MACHINE_DRIVER" description:"The driver to use when creating machine"`
MachineName string `long:"machine-name" env:"MACHINE_NAME" description:"The template for machine name (needs to include %s)"`
MachineOptions []string `long:"machine-options" env:"MACHINE_OPTIONS" description:"Additional machine creation options"`
OffPeakPeriods []string `long:"off-peak-periods" env:"MACHINE_OFF_PEAK_PERIODS" description:"Time periods when the scheduler is in the OffPeak mode"`
OffPeakTimezone string `long:"off-peak-timezone" env:"MACHINE_OFF_PEAK_TIMEZONE" description:"Timezone for the OffPeak periods (defaults to Local)"`
OffPeakIdleCount int `long:"off-peak-idle-count" env:"MACHINE_OFF_PEAK_IDLE_COUNT" description:"Maximum idle machines when the scheduler is in the OffPeak mode"`
OffPeakIdleTime int `long:"off-peak-idle-time" env:"MACHINE_OFF_PEAK_IDLE_TIME" description:"Minimum time after which a machine can be destroyed when the scheduler is in the OffPeak mode"`
offPeakTimePeriods *timeperiod.TimePeriod
}
type ParallelsConfig struct {
BaseName string `toml:"base_name" json:"base_name" long:"base-name" env:"PARALLELS_BASE_NAME" description:"VM name to be used"`
TemplateName string `toml:"template_name,omitempty" json:"template_name" long:"template-name" env:"PARALLELS_TEMPLATE_NAME" description:"VM template to be created"`
DisableSnapshots bool `toml:"disable_snapshots,omitzero" json:"disable_snapshots" long:"disable-snapshots" env:"PARALLELS_DISABLE_SNAPSHOTS" description:"Disable snapshotting to speed up VM creation"`
TimeServer string `toml:"time_server,omitempty" json:"time_server" long:"time-server" env:"PARALLELS_TIME_SERVER" description:"Time server to sync the guest's time from. Defaults to time.apple.com"`
}
type VirtualBoxConfig struct {
BaseName string `toml:"base_name" json:"base_name" long:"base-name" env:"VIRTUALBOX_BASE_NAME" description:"VM name to be used"`
BaseSnapshot string `toml:"base_snapshot,omitempty" json:"base_snapshot" long:"base-snapshot" env:"VIRTUALBOX_BASE_SNAPSHOT" description:"Name or UUID of a specific VM snapshot to clone"`
DisableSnapshots bool `toml:"disable_snapshots,omitzero" json:"disable_snapshots" long:"disable-snapshots" env:"VIRTUALBOX_DISABLE_SNAPSHOTS" description:"Disable snapshotting to speed up VM creation"`
}
type CustomConfig struct {
ConfigExec string `toml:"config_exec,omitempty" json:"config_exec" long:"config-exec" env:"CUSTOM_CONFIG_EXEC" description:"Executable that injects configuration values into the executor"`
ConfigArgs []string `toml:"config_args,omitempty" json:"config_args" long:"config-args" description:"Arguments for the config executable"`
ConfigExecTimeout *int `toml:"config_exec_timeout,omitempty" json:"config_exec_timeout" long:"config-exec-timeout" env:"CUSTOM_CONFIG_EXEC_TIMEOUT" description:"Timeout for the config executable (in seconds)"`
PrepareExec string `toml:"prepare_exec,omitempty" json:"prepare_exec" long:"prepare-exec" env:"CUSTOM_PREPARE_EXEC" description:"Executable that prepares executor"`
PrepareArgs []string `toml:"prepare_args,omitempty" json:"prepare_args" long:"prepare-args" description:"Arguments for the prepare executable"`
PrepareExecTimeout *int `toml:"prepare_exec_timeout,omitempty" json:"prepare_exec_timeout" long:"prepare-exec-timeout" env:"CUSTOM_PREPARE_EXEC_TIMEOUT" description:"Timeout for the prepare executable (in seconds)"`
RunExec string `toml:"run_exec" json:"run_exec" long:"run-exec" env:"CUSTOM_RUN_EXEC" description:"Executable that runs the job script in executor"`
RunArgs []string `toml:"run_args,omitempty" json:"run_args" long:"run-args" description:"Arguments for the run executable"`
CleanupExec string `toml:"cleanup_exec,omitempty" json:"cleanup_exec" long:"cleanup-exec" env:"CUSTOM_CLEANUP_EXEC" description:"Executable that cleans up after the executor run"`
CleanupArgs []string `toml:"cleanup_args,omitempty" json:"cleanup_args" long:"cleanup-args" description:"Arguments for the cleanup executable"`
CleanupExecTimeout *int `toml:"cleanup_exec_timeout,omitempty" json:"cleanup_exec_timeout" long:"cleanup-exec-timeout" env:"CUSTOM_CLEANUP_EXEC_TIMEOUT" description:"Timeout for the cleanup executable (in seconds)"`
GracefulKillTimeout *int `toml:"graceful_kill_timeout,omitempty" json:"graceful_kill_timeout" long:"graceful-kill-timeout" env:"CUSTOM_GRACEFUL_KILL_TIMEOUT" description:"Graceful timeout for scripts execution after SIGTERM is sent to the process (in seconds). This limits the time given for scripts to perform the cleanup before exiting"`
ForceKillTimeout *int `toml:"force_kill_timeout,omitempty" json:"force_kill_timeout" long:"force-kill-timeout" env:"CUSTOM_FORCE_KILL_TIMEOUT" description:"Force timeout for scripts execution (in seconds). Counted from the force-kill call; if the process is still not terminated, Runner abandons the termination attempt and logs an error"`
}
type KubernetesPullPolicy string
// Get returns one of the predefined values in Kubernetes notation or an error if the value doesn't match any of them
func (p KubernetesPullPolicy) Get() (KubernetesPullPolicy, error) {
switch {
case p == "":
return "", nil
case p == PullPolicyAlways:
return "Always", nil
case p == PullPolicyNever:
return "Never", nil
case p == PullPolicyIfNotPresent:
return "IfNotPresent", nil
}
return "", fmt.Errorf("unsupported kubernetes-pull-policy: %v", p)
}
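// Mapping implemented by the switch above: "" stays "" (defer to the cluster
// default), "always" -> "Always", "never" -> "Never", and
// "if-not-present" -> "IfNotPresent"; anything else is rejected with an error.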
type KubernetesConfig struct {
Host string `toml:"host" json:"host" long:"host" env:"KUBERNETES_HOST" description:"Optional Kubernetes master host URL (auto-discovery attempted if not specified)"`
CertFile string `toml:"cert_file,omitempty" json:"cert_file" long:"cert-file" env:"KUBERNETES_CERT_FILE" description:"Optional Kubernetes master auth certificate"`
KeyFile string `toml:"key_file,omitempty" json:"key_file" long:"key-file" env:"KUBERNETES_KEY_FILE" description:"Optional Kubernetes master auth private key"`
CAFile string `toml:"ca_file,omitempty" json:"ca_file" long:"ca-file" env:"KUBERNETES_CA_FILE" description:"Optional Kubernetes master auth ca certificate"`
BearerTokenOverwriteAllowed bool `toml:"bearer_token_overwrite_allowed" json:"bearer_token_overwrite_allowed" long:"bearer_token_overwrite_allowed" env:"KUBERNETES_BEARER_TOKEN_OVERWRITE_ALLOWED" description:"Whether builds may specify their own bearer token for pod creation"`
BearerToken string `toml:"bearer_token,omitempty" json:"bearer_token" long:"bearer_token" env:"KUBERNETES_BEARER_TOKEN" description:"Optional Kubernetes service account token used to start build pods."`
Image string `toml:"image" json:"image" long:"image" env:"KUBERNETES_IMAGE" description:"Default docker image to use for builds when none is specified"`
Namespace string `toml:"namespace" json:"namespace" long:"namespace" env:"KUBERNETES_NAMESPACE" description:"Namespace to run Kubernetes jobs in"`
NamespaceOverwriteAllowed string `toml:"namespace_overwrite_allowed" json:"namespace_overwrite_allowed" long:"namespace_overwrite_allowed" env:"KUBERNETES_NAMESPACE_OVERWRITE_ALLOWED" description:"Regex to validate 'KUBERNETES_NAMESPACE_OVERWRITE' value"`
Privileged bool `toml:"privileged,omitzero" json:"privileged" long:"privileged" env:"KUBERNETES_PRIVILEGED" description:"Run all containers with the privileged flag enabled"`
CPULimit string `toml:"cpu_limit,omitempty" json:"cpu_limit" long:"cpu-limit" env:"KUBERNETES_CPU_LIMIT" description:"The CPU allocation given to build containers"`
MemoryLimit string `toml:"memory_limit,omitempty" json:"memory_limit" long:"memory-limit" env:"KUBERNETES_MEMORY_LIMIT" description:"The amount of memory allocated to build containers"`
ServiceCPULimit string `toml:"service_cpu_limit,omitempty" json:"service_cpu_limit" long:"service-cpu-limit" env:"KUBERNETES_SERVICE_CPU_LIMIT" description:"The CPU allocation given to build service containers"`
ServiceMemoryLimit string `toml:"service_memory_limit,omitempty" json:"service_memory_limit" long:"service-memory-limit" env:"KUBERNETES_SERVICE_MEMORY_LIMIT" description:"The amount of memory allocated to build service containers"`
HelperCPULimit string `toml:"helper_cpu_limit,omitempty" json:"helper_cpu_limit" long:"helper-cpu-limit" env:"KUBERNETES_HELPER_CPU_LIMIT" description:"The CPU allocation given to build helper containers"`
HelperMemoryLimit string `toml:"helper_memory_limit,omitempty" json:"helper_memory_limit" long:"helper-memory-limit" env:"KUBERNETES_HELPER_MEMORY_LIMIT" description:"The amount of memory allocated to build helper containers"`
CPURequest string `toml:"cpu_request,omitempty" json:"cpu_request" long:"cpu-request" env:"KUBERNETES_CPU_REQUEST" description:"The CPU allocation requested for build containers"`
MemoryRequest string `toml:"memory_request,omitempty" json:"memory_request" long:"memory-request" env:"KUBERNETES_MEMORY_REQUEST" description:"The amount of memory requested from build containers"`
ServiceCPURequest string `toml:"service_cpu_request,omitempty" json:"service_cpu_request" long:"service-cpu-request" env:"KUBERNETES_SERVICE_CPU_REQUEST" description:"The CPU allocation requested for build service containers"`
ServiceMemoryRequest string `toml:"service_memory_request,omitempty" json:"service_memory_request" long:"service-memory-request" env:"KUBERNETES_SERVICE_MEMORY_REQUEST" description:"The amount of memory requested for build service containers"`
HelperCPURequest string `toml:"helper_cpu_request,omitempty" json:"helper_cpu_request" long:"helper-cpu-request" env:"KUBERNETES_HELPER_CPU_REQUEST" description:"The CPU allocation requested for build helper containers"`
HelperMemoryRequest string `toml:"helper_memory_request,omitempty" json:"helper_memory_request" long:"helper-memory-request" env:"KUBERNETES_HELPER_MEMORY_REQUEST" description:"The amount of memory requested for build helper containers"`
PullPolicy KubernetesPullPolicy `toml:"pull_policy,omitempty" json:"pull_policy" long:"pull-policy" env:"KUBERNETES_PULL_POLICY" description:"Policy for if/when to pull a container image (never, if-not-present, always). The cluster default will be used if not set"`
NodeSelector map[string]string `toml:"node_selector,omitempty" json:"node_selector" long:"node-selector" env:"KUBERNETES_NODE_SELECTOR" description:"A toml table/json object of key=value. Value is expected to be a string. When set this will create pods on k8s nodes that match all the key=value pairs."`
NodeTolerations map[string]string `toml:"node_tolerations,omitempty" json:"node_tolerations" long:"node-tolerations" env:"KUBERNETES_NODE_TOLERATIONS" description:"A toml table/json object of key=value:effect. Value and effect are expected to be strings. When set, pods will tolerate the given taints. Only one toleration is supported through environment variable configuration."`
ImagePullSecrets []string `toml:"image_pull_secrets,omitempty" json:"image_pull_secrets" long:"image-pull-secrets" env:"KUBERNETES_IMAGE_PULL_SECRETS" description:"A list of image pull secrets that are used for pulling docker image"`
HelperImage string `toml:"helper_image,omitempty" json:"helper_image" long:"helper-image" env:"KUBERNETES_HELPER_IMAGE" description:"[ADVANCED] Override the default helper image used to clone repos and upload artifacts"`
TerminationGracePeriodSeconds int64 `toml:"terminationGracePeriodSeconds,omitzero" json:"terminationGracePeriodSeconds" long:"terminationGracePeriodSeconds" env:"KUBERNETES_TERMINATIONGRACEPERIODSECONDS" description:"Duration between the processes running in the pod being sent a termination signal and the processes being forcibly halted with a kill signal"`
PollInterval int `toml:"poll_interval,omitzero" json:"poll_interval" long:"poll-interval" env:"KUBERNETES_POLL_INTERVAL" description:"How frequently, in seconds, the runner will poll the Kubernetes pod it has just created to check its status"`
PollTimeout int `toml:"poll_timeout,omitzero" json:"poll_timeout" long:"poll-timeout" env:"KUBERNETES_POLL_TIMEOUT" description:"The total amount of time, in seconds, that needs to pass before the runner will timeout attempting to connect to the pod it has just created (useful for queueing more builds than the cluster can handle at a time)"`
PodLabels map[string]string `toml:"pod_labels,omitempty" json:"pod_labels" long:"pod-labels" description:"A toml table/json object of key-value. Value is expected to be a string. When set, this will create pods with the given pod labels. Environment variables will be substituted for values here."`
ServiceAccount string `toml:"service_account,omitempty" json:"service_account" long:"service-account" env:"KUBERNETES_SERVICE_ACCOUNT" description:"Executor pods will use this Service Account to talk to kubernetes API"`
ServiceAccountOverwriteAllowed string `toml:"service_account_overwrite_allowed" json:"service_account_overwrite_allowed" long:"service_account_overwrite_allowed" env:"KUBERNETES_SERVICE_ACCOUNT_OVERWRITE_ALLOWED" description:"Regex to validate 'KUBERNETES_SERVICE_ACCOUNT' value"`
PodAnnotations map[string]string `toml:"pod_annotations,omitempty" json:"pod_annotations" long:"pod-annotations" description:"A toml table/json object of key-value. Value is expected to be a string. When set, this will create pods with the given annotations. Can be overwritten in build with KUBERNETES_POD_ANNOTATION_* variables"`
PodAnnotationsOverwriteAllowed string `toml:"pod_annotations_overwrite_allowed" json:"pod_annotations_overwrite_allowed" long:"pod_annotations_overwrite_allowed" env:"KUBERNETES_POD_ANNOTATIONS_OVERWRITE_ALLOWED" description:"Regex to validate 'KUBERNETES_POD_ANNOTATIONS_*' values"`
PodSecurityContext KubernetesPodSecurityContext `toml:"pod_security_context,omitempty" namespace:"pod-security-context" description:"A security context attached to each build pod"`
Volumes KubernetesVolumes `toml:"volumes"`
Services []Service `toml:"services,omitempty" json:"services" long:"services" description:"Add service that is started with container"`
}
type KubernetesVolumes struct {
HostPaths []KubernetesHostPath `toml:"host_path" description:"The host paths which will be mounted"`
PVCs []KubernetesPVC `toml:"pvc" description:"The persistent volume claims that will be mounted"`
ConfigMaps []KubernetesConfigMap `toml:"config_map" description:"The config maps which will be mounted as volumes"`
Secrets []KubernetesSecret `toml:"secret" description:"The secret maps which will be mounted"`
EmptyDirs []KubernetesEmptyDir `toml:"empty_dir" description:"The empty dirs which will be mounted"`
}
type KubernetesConfigMap struct {
Name string `toml:"name" json:"name" description:"The name of the volume and ConfigMap to use"`
MountPath string `toml:"mount_path" description:"Path where volume should be mounted inside of container"`
ReadOnly bool `toml:"read_only,omitempty" description:"If this volume should be mounted read only"`
Items map[string]string `toml:"items,omitempty" description:"Key-to-path mapping for keys from the config map that should be used."`
}
type KubernetesHostPath struct {
Name string `toml:"name" json:"name" description:"The name of the volume"`
MountPath string `toml:"mount_path" description:"Path where volume should be mounted inside of container"`
ReadOnly bool `toml:"read_only,omitempty" description:"If this volume should be mounted read only"`
HostPath string `toml:"host_path,omitempty" description:"Path from the host that should be mounted as a volume"`
}
type KubernetesPVC struct {
Name string `toml:"name" json:"name" description:"The name of the volume and PVC to use"`
MountPath string `toml:"mount_path" description:"Path where volume should be mounted inside of container"`
ReadOnly bool `toml:"read_only,omitempty" description:"If this volume should be mounted read only"`
}
type KubernetesSecret struct {
Name string `toml:"name" json:"name" description:"The name of the volume and Secret to use"`
MountPath string `toml:"mount_path" description:"Path where volume should be mounted inside of container"`
ReadOnly bool `toml:"read_only,omitempty" description:"If this volume should be mounted read only"`
Items map[string]string `toml:"items,omitempty" description:"Key-to-path mapping for keys from the secret that should be used."`
}
type KubernetesEmptyDir struct {
Name string `toml:"name" json:"name" description:"The name of the volume and EmptyDir to use"`
MountPath string `toml:"mount_path" description:"Path where volume should be mounted inside of container"`
Medium string `toml:"medium,omitempty" description:"Set to 'Memory' to have a tmpfs"`
}
type KubernetesPodSecurityContext struct {
FSGroup *int64 `toml:"fs_group,omitempty" long:"fs-group" env:"KUBERNETES_POD_SECURITY_CONTEXT_FS_GROUP" description:"A special supplemental group that applies to all containers in a pod"`
RunAsGroup *int64 `toml:"run_as_group,omitempty" long:"run-as-group" env:"KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_GROUP" description:"The GID to run the entrypoint of the container process"`
RunAsNonRoot *bool `toml:"run_as_non_root,omitempty" long:"run-as-non-root" env:"KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_NON_ROOT" description:"Indicates that the container must run as a non-root user"`
RunAsUser *int64 `toml:"run_as_user,omitempty" long:"run-as-user" env:"KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_USER" description:"The UID to run the entrypoint of the container process"`
SupplementalGroups []int64 `toml:"supplemental_groups,omitempty" long:"supplemental-groups" description:"A list of groups applied to the first process run in each container, in addition to the container's primary GID"`
}
type DockerService struct {
Service
Alias string `toml:"alias,omitempty" long:"alias" description:"The alias of the service"`
}
func (s *DockerService) ToImageDefinition() Image {
return Image{
Name: s.Name,
Alias: s.Alias,
}
}
// TODO: Remove in 13.0
// we should fall back to the default TOML parsing
func (s *DockerService) UnmarshalTOML(data interface{}) error {
switch v := data.(type) {
case string:
logrus.Warning("Setting runners.docker.services as array is deprecated and will be removed in 13.0. " +
"Please use the array of tables syntax instead. More info at " +
"[https://docs.gitlab.com/runner/executors/docker.html#define-image-and-services-in-configtoml].")
s.Name = v
return nil
case map[string]interface{}:
name, err := tryGetTomlValue(v, "name")
if err != nil {
return err
}
alias, err := tryGetTomlValue(v, "alias")
if err != nil {
return err
}
s.Name = name
s.Alias = alias
return nil
}
return fmt.Errorf("toml: type mismatch for config.DockerService: expected table but found %T", data)
}
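// Both TOML forms are therefore accepted (illustrative snippet):
//
//	# deprecated string form, handled by the string case above
//	services = ["mysql:5.7"]
//
//	# array-of-tables form, handled by the map case above
//	[[runners.docker.services]]
//	name = "mysql:5.7"
//	alias = "db"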
func tryGetTomlValue(data map[string]interface{}, key string) (string, error) {
value, ok := data[key]
if !ok {
return "", nil
}
switch v := value.(type) {
case string:
return v, nil
}
return "", fmt.Errorf("toml: cannot load TOML value of type %T into a Go string", value)
}
type Service struct {
Name string `toml:"name" long:"name" description:"The image path for the service"`
}
type RunnerCredentials struct {
URL string `toml:"url" json:"url" short:"u" long:"url" env:"CI_SERVER_URL" required:"true" description:"Runner URL"`
Token string `toml:"token" json:"token" short:"t" long:"token" env:"CI_SERVER_TOKEN" required:"true" description:"Runner token"`
TLSCAFile string `toml:"tls-ca-file,omitempty" json:"tls-ca-file" long:"tls-ca-file" env:"CI_SERVER_TLS_CA_FILE" description:"File containing the certificates to verify the peer when using HTTPS"`
TLSCertFile string `toml:"tls-cert-file,omitempty" json:"tls-cert-file" long:"tls-cert-file" env:"CI_SERVER_TLS_CERT_FILE" description:"File containing certificate for TLS client auth when using HTTPS"`
TLSKeyFile string `toml:"tls-key-file,omitempty" json:"tls-key-file" long:"tls-key-file" env:"CI_SERVER_TLS_KEY_FILE" description:"File containing private key for TLS client auth when using HTTPS"`
}
type CacheGCSCredentials struct {
AccessID string `toml:"AccessID,omitempty" long:"access-id" env:"CACHE_GCS_ACCESS_ID" description:"ID of GCP Service Account used to access the storage"`
PrivateKey string `toml:"PrivateKey,omitempty" long:"private-key" env:"CACHE_GCS_PRIVATE_KEY" description:"Private key used to sign GCS requests"`
}
type CacheGCSConfig struct {
CacheGCSCredentials
CredentialsFile string `toml:"CredentialsFile,omitempty" long:"credentials-file" env:"GOOGLE_APPLICATION_CREDENTIALS" description:"File with GCP credentials, containing AccessID and PrivateKey"`
BucketName string `toml:"BucketName,omitempty" long:"bucket-name" env:"CACHE_GCS_BUCKET_NAME" description:"Name of the bucket where cache will be stored"`
}
type CacheS3Config struct {
ServerAddress string `toml:"ServerAddress,omitempty" long:"server-address" env:"CACHE_S3_SERVER_ADDRESS" description:"A host:port to the used S3-compatible server"`
AccessKey string `toml:"AccessKey,omitempty" long:"access-key" env:"CACHE_S3_ACCESS_KEY" description:"S3 Access Key"`
SecretKey string `toml:"SecretKey,omitempty" long:"secret-key" env:"CACHE_S3_SECRET_KEY" description:"S3 Secret Key"`
BucketName string `toml:"BucketName,omitempty" long:"bucket-name" env:"CACHE_S3_BUCKET_NAME" description:"Name of the bucket where cache will be stored"`
BucketLocation string `toml:"BucketLocation,omitempty" long:"bucket-location" env:"CACHE_S3_BUCKET_LOCATION" description:"Name of S3 region"`
Insecure bool `toml:"Insecure,omitempty" long:"insecure" env:"CACHE_S3_INSECURE" description:"Use insecure mode (without https)"`
}
type CacheConfig struct {
Type string `toml:"Type,omitempty" long:"type" env:"CACHE_TYPE" description:"Select caching method"`
Path string `toml:"Path,omitempty" long:"path" env:"CACHE_PATH" description:"Name of the path to prepend to the cache URL"`
Shared bool `toml:"Shared,omitempty" long:"shared" env:"CACHE_SHARED" description:"Enable cache sharing between runners."`
S3 *CacheS3Config `toml:"s3,omitempty" json:"s3" namespace:"s3"`
GCS *CacheGCSConfig `toml:"gcs,omitempty" json:"gcs" namespace:"gcs"`
}
type RunnerSettings struct {
Executor string `toml:"executor" json:"executor" long:"executor" env:"RUNNER_EXECUTOR" required:"true" description:"Select executor, e.g. shell, docker, etc."`
BuildsDir string `toml:"builds_dir,omitempty" json:"builds_dir" long:"builds-dir" env:"RUNNER_BUILDS_DIR" description:"Directory where builds are stored"`
CacheDir string `toml:"cache_dir,omitempty" json:"cache_dir" long:"cache-dir" env:"RUNNER_CACHE_DIR" description:"Directory where build cache is stored"`
CloneURL string `toml:"clone_url,omitempty" json:"clone_url" long:"clone-url" env:"CLONE_URL" description:"Overwrite the default URL used to clone or fetch the git ref"`
Environment []string `toml:"environment,omitempty" json:"environment" long:"env" env:"RUNNER_ENV" description:"Custom environment variables injected to build environment"`
PreCloneScript string `toml:"pre_clone_script,omitempty" json:"pre_clone_script" long:"pre-clone-script" env:"RUNNER_PRE_CLONE_SCRIPT" description:"Runner-specific command script executed before code is pulled"`
PreBuildScript string `toml:"pre_build_script,omitempty" json:"pre_build_script" long:"pre-build-script" env:"RUNNER_PRE_BUILD_SCRIPT" description:"Runner-specific command script executed after code is pulled, just before build executes"`
PostBuildScript string `toml:"post_build_script,omitempty" json:"post_build_script" long:"post-build-script" env:"RUNNER_POST_BUILD_SCRIPT" description:"Runner-specific command script executed after code is pulled and just after build executes"`
DebugTraceDisabled bool `toml:"debug_trace_disabled,omitempty" json:"debug_trace_disabled" long:"debug-trace-disabled" env:"RUNNER_DEBUG_TRACE_DISABLED" description:"When set to true Runner will disable the possibility of using the CI_DEBUG_TRACE feature"`
Shell string `toml:"shell,omitempty" json:"shell" long:"shell" env:"RUNNER_SHELL" description:"Select bash, cmd or powershell"`
CustomBuildDir *CustomBuildDir `toml:"custom_build_dir,omitempty" json:"custom_build_dir" group:"custom build dir configuration" namespace:"custom_build_dir"`
Referees *referees.Config `toml:"referees,omitempty" json:"referees" group:"referees configuration" namespace:"referees"`
Cache *CacheConfig `toml:"cache,omitempty" json:"cache" group:"cache configuration" namespace:"cache"`
SSH *ssh.Config `toml:"ssh,omitempty" json:"ssh" group:"ssh executor" namespace:"ssh"`
Docker *DockerConfig `toml:"docker,omitempty" json:"docker" group:"docker executor" namespace:"docker"`
Parallels *ParallelsConfig `toml:"parallels,omitempty" json:"parallels" group:"parallels executor" namespace:"parallels"`
VirtualBox *VirtualBoxConfig `toml:"virtualbox,omitempty" json:"virtualbox" group:"virtualbox executor" namespace:"virtualbox"`
Machine *DockerMachine `toml:"machine,omitempty" json:"machine" group:"docker machine provider" namespace:"machine"`
Kubernetes *KubernetesConfig `toml:"kubernetes,omitempty" json:"kubernetes" group:"kubernetes executor" namespace:"kubernetes"`
Custom *CustomConfig `toml:"custom,omitempty" json:"custom" group:"custom executor" namespace:"custom"`
}
type RunnerConfig struct {
Name string `toml:"name" json:"name" short:"name" long:"description" env:"RUNNER_NAME" description:"Runner name"`
Limit int `toml:"limit,omitzero" json:"limit" long:"limit" env:"RUNNER_LIMIT" description:"Maximum number of builds processed by this runner"`
OutputLimit int `toml:"output_limit,omitzero" long:"output-limit" env:"RUNNER_OUTPUT_LIMIT" description:"Maximum build trace size in kilobytes"`
RequestConcurrency int `toml:"request_concurrency,omitzero" long:"request-concurrency" env:"RUNNER_REQUEST_CONCURRENCY" description:"Maximum concurrency for job requests"`
RunnerCredentials
RunnerSettings
}
type SessionServer struct {
ListenAddress string `toml:"listen_address,omitempty" json:"listen_address" description:"Address that the runner will communicate directly with"`
AdvertiseAddress string `toml:"advertise_address,omitempty" json:"advertise_address" description:"Address the runner will expose to the world to connect to the session server"`
SessionTimeout int `toml:"session_timeout,omitempty" json:"session_timeout" description:"How long a terminal session can be active after a build completes, in seconds"`
}
type Config struct {
ListenAddress string `toml:"listen_address,omitempty" json:"listen_address"`
SessionServer SessionServer `toml:"session_server,omitempty" json:"session_server"`
Concurrent int `toml:"concurrent" json:"concurrent"`
CheckInterval int `toml:"check_interval" json:"check_interval" description:"Define active checking interval of jobs"`
LogLevel *string `toml:"log_level" json:"log_level" description:"Define log level (one of: panic, fatal, error, warning, info, debug)"`
LogFormat *string `toml:"log_format" json:"log_format" description:"Define log format (one of: runner, text, json)"`
User string `toml:"user,omitempty" json:"user"`
Runners []*RunnerConfig `toml:"runners" json:"runners"`
SentryDSN *string `toml:"sentry_dsn"`
ModTime time.Time `toml:"-"`
Loaded bool `toml:"-"`
}
type CustomBuildDir struct {
Enabled bool `toml:"enabled,omitempty" json:"enabled" long:"enabled" env:"CUSTOM_BUILD_DIR_ENABLED" description:"Enable job specific build directories"`
}
func (c *CacheS3Config) ShouldUseIAMCredentials() bool {
return c.ServerAddress == "" || c.AccessKey == "" || c.SecretKey == ""
}
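// Illustrative consequence: leaving ServerAddress, AccessKey or SecretKey
// empty signals the S3 cache adapter to fall back to IAM instance-profile
// credentials instead of static keys (hence the "should use" naming).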
func (c *CacheConfig) GetPath() string {
return c.Path
}
func (c *CacheConfig) GetShared() bool {
return c.Shared
}
func (c *SessionServer) GetSessionTimeout() time.Duration {
if c.SessionTimeout > 0 {
return time.Duration(c.SessionTimeout) * time.Second
}
return DefaultSessionTimeout
}
func (c *DockerConfig) GetNanoCPUs() (int64, error) {
if c.CPUS == "" {
return 0, nil
}
cpu, ok := new(big.Rat).SetString(c.CPUS)
if !ok {
return 0, fmt.Errorf("failed to parse %v as a rational number", c.CPUS)
}
nano, _ := cpu.Mul(cpu, big.NewRat(1e9, 1)).Float64()
return int64(nano), nil
}
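// Example (illustrative): CPUS = "0.5" parses to the rational 1/2 and is
// scaled by 1e9, so GetNanoCPUs returns 500000000, matching Docker's
// NanoCPUs unit.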
func (c *DockerConfig) getMemoryBytes(size string, fieldName string) int64 {
if size == "" {
return 0
}
bytes, err := units.RAMInBytes(size)
if err != nil {
logrus.Fatalf("Error parsing docker %s: %s", fieldName, err)
}
return bytes
}
func (c *DockerConfig) GetMemory() int64 {
return c.getMemoryBytes(c.Memory, "memory")
}
func (c *DockerConfig) GetMemorySwap() int64 {
return c.getMemoryBytes(c.MemorySwap, "memory_swap")
}
func (c *DockerConfig) GetMemoryReservation() int64 {
return c.getMemoryBytes(c.MemoryReservation, "memory_reservation")
}
func (c *DockerConfig) GetOomKillDisable() *bool {
return &c.OomKillDisable
}
func (c *KubernetesConfig) GetPollAttempts() int {
if c.PollTimeout <= 0 {
c.PollTimeout = KubernetesPollTimeout
}
return c.PollTimeout / c.GetPollInterval()
}
func (c *KubernetesConfig) GetPollInterval() int {
if c.PollInterval <= 0 {
c.PollInterval = KubernetesPollInterval
}
return c.PollInterval
}
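// Example (assuming the package defaults KubernetesPollTimeout = 180 and
// KubernetesPollInterval = 3 seconds): GetPollAttempts returns 180/3 = 60,
// i.e. the pod status is polled up to 60 times before the runner gives up.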
func (c *KubernetesConfig) GetNodeTolerations() []api.Toleration {
var tolerations []api.Toleration
for toleration, effect := range c.NodeTolerations {
newToleration := api.Toleration{
Effect: api.TaintEffect(effect),
}
if strings.Contains(toleration, "=") {
parts := strings.Split(toleration, "=")
newToleration.Key = parts[0]
if len(parts) > 1 {
newToleration.Value = parts[1]
}
newToleration.Operator = api.TolerationOpEqual
} else {
newToleration.Key = toleration
newToleration.Operator = api.TolerationOpExists
}
tolerations = append(tolerations, newToleration)
}
return tolerations
}
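// Illustrative mapping: the entry "dedicated=ci": "NoSchedule" becomes a
// toleration with Key "dedicated", Value "ci", Operator Equal and Effect
// NoSchedule, while a bare key such as "dedicated": "NoExecute" becomes an
// Exists toleration for that key.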
func (c *KubernetesConfig) GetPodSecurityContext() *api.PodSecurityContext {
podSecurityContext := c.PodSecurityContext
if podSecurityContext.FSGroup == nil &&
podSecurityContext.RunAsGroup == nil &&
podSecurityContext.RunAsNonRoot == nil &&
podSecurityContext.RunAsUser == nil &&
len(podSecurityContext.SupplementalGroups) == 0 {
return nil
}
return &api.PodSecurityContext{
FSGroup: podSecurityContext.FSGroup,
RunAsGroup: podSecurityContext.RunAsGroup,
RunAsNonRoot: podSecurityContext.RunAsNonRoot,
RunAsUser: podSecurityContext.RunAsUser,
SupplementalGroups: podSecurityContext.SupplementalGroups,
}
}
func (c *DockerMachine) GetIdleCount() int {
if c.isOffPeak() {
return c.OffPeakIdleCount
}
return c.IdleCount
}
func (c *DockerMachine) GetIdleTime() int {
if c.isOffPeak() {
return c.OffPeakIdleTime
}
return c.IdleTime
}
func (c *DockerMachine) isOffPeak() bool {
if c.offPeakTimePeriods == nil {
c.CompileOffPeakPeriods()
}
return c.offPeakTimePeriods != nil && c.offPeakTimePeriods.InPeriod()
}
func (c *DockerMachine) CompileOffPeakPeriods() (err error) {
c.offPeakTimePeriods, err = timeperiod.TimePeriods(c.OffPeakPeriods, c.OffPeakTimezone)
if err != nil {
err = fmt.Errorf("invalid OffPeakPeriods value: %w", err)
}
return
}
func (c *RunnerCredentials) GetURL() string {
return c.URL
}
func (c *RunnerCredentials) GetTLSCAFile() string {
return c.TLSCAFile
}
func (c *RunnerCredentials) GetTLSCertFile() string {
return c.TLSCertFile
}
func (c *RunnerCredentials) GetTLSKeyFile() string {
return c.TLSKeyFile
}
func (c *RunnerCredentials) GetToken() string {
return c.Token
}
func (c *RunnerCredentials) ShortDescription() string {
return helpers.ShortenToken(c.Token)
}
func (c *RunnerCredentials) UniqueID() string {
return c.URL + c.Token
}
func (c *RunnerCredentials) Log() *logrus.Entry {
if c.ShortDescription() != "" {
return logrus.WithField("runner", c.ShortDescription())
}
return logrus.WithFields(logrus.Fields{})
}
func (c *RunnerCredentials) SameAs(other *RunnerCredentials) bool {
return c.URL == other.URL && c.Token == other.Token
}
func (c *RunnerConfig) String() string {
return fmt.Sprintf("%v url=%v token=%v executor=%v", c.Name, c.URL, c.Token, c.Executor)
}
func (c *RunnerConfig) GetRequestConcurrency() int {
if c.RequestConcurrency <= 0 {
return 1
}
return c.RequestConcurrency
}
func (c *RunnerConfig) GetVariables() JobVariables {
variables := JobVariables{
{Key: "CI_RUNNER_SHORT_TOKEN", Value: c.ShortDescription(), Public: true, Internal: true, File: false},
}
for _, environment := range c.Environment {
if variable, err := ParseVariable(environment); err == nil {
variable.Internal = true
variables = append(variables, variable)
}
}
return variables
}
// DeepCopy attempts to make a deep clone of the object
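// It does this by marshalling the config to JSON and back, so only state
// representable in JSON survives the copy; NewBuild relies on this to give
// every build its own mutable snapshot of the runner configuration.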
func (c *RunnerConfig) DeepCopy() (*RunnerConfig, error) {
var r RunnerConfig
bytes, err := json.Marshal(c)
if err != nil {
return nil, fmt.Errorf("serialization of runner config failed: %w", err)
}
err = json.Unmarshal(bytes, &r)
if err != nil {
return nil, fmt.Errorf("deserialization of runner config failed: %w", err)
}
return &r, err
}
func NewConfig() *Config {
return &Config{
Concurrent: 1,
SessionServer: SessionServer{
SessionTimeout: int(DefaultSessionTimeout.Seconds()),
},
}
}
func (c *Config) StatConfig(configFile string) error {
_, err := os.Stat(configFile)
return err
}
func (c *Config) LoadConfig(configFile string) error {
info, err := os.Stat(configFile)
// a missing config file is a soft error; any other stat failure is returned to the caller
if os.IsNotExist(err) {
return nil
} else if err != nil {
return err
}
if _, err = toml.DecodeFile(configFile, c); err != nil {
return err
}
for _, runner := range c.Runners {
if runner.Machine == nil {
continue
}
err := runner.Machine.CompileOffPeakPeriods()
if err != nil {
return err
}
}
c.ModTime = info.ModTime()
c.Loaded = true
return nil
}
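// A minimal config.toml accepted by this loader (illustrative; field names
// taken from the struct tags above):
//
//	concurrent = 1
//
//	[[runners]]
//	name = "example"
//	url = "https://gitlab.example.com/"
//	token = "TOKEN"
//	executor = "shell"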
func (c *Config) SaveConfig(configFile string) error {
var newConfig bytes.Buffer
newBuffer := bufio.NewWriter(&newConfig)
if err := toml.NewEncoder(newBuffer).Encode(c); err != nil {
logrus.Fatalf("Error encoding TOML: %s", err)
return err
}
if err := newBuffer.Flush(); err != nil {
return err
}
// create directory to store configuration
err := os.MkdirAll(filepath.Dir(configFile), 0700)
if err != nil {
return err
}
// write config file
if err := ioutil.WriteFile(configFile, newConfig.Bytes(), 0600); err != nil {
return err
}
c.Loaded = true
return nil
}
func (c *Config) GetCheckInterval() time.Duration {
if c.CheckInterval > 0 {
return time.Duration(c.CheckInterval) * time.Second
}
return CheckInterval
}
package common
import (
"context"
"errors"
"fmt"
"github.com/sirupsen/logrus"
)
type ExecutorData interface{}
type ExecutorCommand struct {
Script string
Stage BuildStage
Predefined bool
Context context.Context
}
type ExecutorStage string
const (
ExecutorStageCreated ExecutorStage = "created"
ExecutorStagePrepare ExecutorStage = "prepare"
ExecutorStageFinish ExecutorStage = "finish"
ExecutorStageCleanup ExecutorStage = "cleanup"
)
type ExecutorPrepareOptions struct {
Config *RunnerConfig
Build *Build
Trace JobTrace
User string
Context context.Context
}
type Executor interface {
Shell() *ShellScriptInfo
Prepare(options ExecutorPrepareOptions) error
Run(cmd ExecutorCommand) error
Finish(err error)
Cleanup()
GetCurrentStage() ExecutorStage
SetCurrentStage(stage ExecutorStage)
}
type ExecutorProvider interface {
CanCreate() bool
Create() Executor
Acquire(config *RunnerConfig) (ExecutorData, error)
Release(config *RunnerConfig, data ExecutorData)
GetFeatures(features *FeaturesInfo) error
GetDefaultShell() string
}
type BuildError struct {
Inner error
FailureReason JobFailureReason
}
func (b *BuildError) Error() string {
if b.Inner == nil {
return "error"
}
return b.Inner.Error()
}
func MakeBuildError(format string, args ...interface{}) error {
return &BuildError{
Inner: fmt.Errorf(format, args...),
}
}
var executors map[string]ExecutorProvider
func validateExecutorProvider(provider ExecutorProvider) error {
if provider.GetDefaultShell() == "" {
return errors.New("default shell not implemented")
}
if !provider.CanCreate() {
return errors.New("cannot create executor")
}
if err := provider.GetFeatures(&FeaturesInfo{}); err != nil {
return fmt.Errorf("cannot get features: %w", err)
}
return nil
}
func RegisterExecutor(executor string, provider ExecutorProvider) {
logrus.Debugln("Registering", executor, "executor...")
if err := validateExecutorProvider(provider); err != nil {
panic("Executor cannot be registered: " + err.Error())
}
if executors == nil {
executors = make(map[string]ExecutorProvider)
}
if _, ok := executors[executor]; ok {
panic("Executor already exist: " + executor)
}
executors[executor] = provider
}
func GetExecutor(executor string) ExecutorProvider {
if executors == nil {
return nil
}
provider := executors[executor]
return provider
}
func GetExecutors() []string {
names := []string{}
if executors != nil {
for name := range executors {
names = append(names, name)
}
}
return names
}
func GetExecutorProviders() (providers []ExecutorProvider) {
if executors != nil {
for _, executorProvider := range executors {
providers = append(providers, executorProvider)
}
}
return
}
func NewExecutor(executor string) Executor {
provider := GetExecutor(executor)
if provider != nil {
return provider.Create()
}
return nil
}
package common
import (
"context"
"fmt"
"io"
"time"
url_helpers "gitlab.com/gitlab-org/gitlab-runner/helpers/url"
)
type UpdateState int
type UploadState int
type DownloadState int
type JobState string
type JobFailureReason string
const (
Pending JobState = "pending"
Running JobState = "running"
Failed JobState = "failed"
Success JobState = "success"
)
const (
NoneFailure JobFailureReason = ""
ScriptFailure JobFailureReason = "script_failure"
RunnerSystemFailure JobFailureReason = "runner_system_failure"
JobExecutionTimeout JobFailureReason = "job_execution_timeout"
)
const (
UpdateSucceeded UpdateState = iota
UpdateNotFound
UpdateAbort
UpdateFailed
UpdateRangeMismatch
)
const (
UploadSucceeded UploadState = iota
UploadTooLarge
UploadForbidden
UploadFailed
)
const (
DownloadSucceeded DownloadState = iota
DownloadForbidden
DownloadFailed
DownloadNotFound
)
type FeaturesInfo struct {
Variables bool `json:"variables"`
Image bool `json:"image"`
Services bool `json:"services"`
Artifacts bool `json:"artifacts"`
Cache bool `json:"cache"`
Shared bool `json:"shared"`
UploadMultipleArtifacts bool `json:"upload_multiple_artifacts"`
UploadRawArtifacts bool `json:"upload_raw_artifacts"`
Session bool `json:"session"`
Terminal bool `json:"terminal"`
Refspecs bool `json:"refspecs"`
Masking bool `json:"masking"`
Proxy bool `json:"proxy"`
}
type RegisterRunnerParameters struct {
Description string `json:"description,omitempty"`
Tags string `json:"tag_list,omitempty"`
RunUntagged bool `json:"run_untagged"`
Locked bool `json:"locked"`
AccessLevel string `json:"access_level,omitempty"`
MaximumTimeout int `json:"maximum_timeout,omitempty"`
Active bool `json:"active"`
}
type RegisterRunnerRequest struct {
RegisterRunnerParameters
Info VersionInfo `json:"info,omitempty"`
Token string `json:"token,omitempty"`
}
type RegisterRunnerResponse struct {
Token string `json:"token,omitempty"`
}
type VerifyRunnerRequest struct {
Token string `json:"token,omitempty"`
}
type UnregisterRunnerRequest struct {
Token string `json:"token,omitempty"`
}
type VersionInfo struct {
Name string `json:"name,omitempty"`
Version string `json:"version,omitempty"`
Revision string `json:"revision,omitempty"`
Platform string `json:"platform,omitempty"`
Architecture string `json:"architecture,omitempty"`
Executor string `json:"executor,omitempty"`
Shell string `json:"shell,omitempty"`
Features FeaturesInfo `json:"features"`
}
type JobRequest struct {
Info VersionInfo `json:"info,omitempty"`
Token string `json:"token,omitempty"`
LastUpdate string `json:"last_update,omitempty"`
Session *SessionInfo `json:"session,omitempty"`
}
type SessionInfo struct {
URL string `json:"url,omitempty"`
Certificate string `json:"certificate,omitempty"`
Authorization string `json:"authorization,omitempty"`
}
type JobInfo struct {
Name string `json:"name"`
Stage string `json:"stage"`
ProjectID int `json:"project_id"`
ProjectName string `json:"project_name"`
}
type GitInfoRefType string
const (
RefTypeBranch GitInfoRefType = "branch"
RefTypeTag GitInfoRefType = "tag"
)
type GitInfo struct {
RepoURL string `json:"repo_url"`
Ref string `json:"ref"`
Sha string `json:"sha"`
BeforeSha string `json:"before_sha"`
RefType GitInfoRefType `json:"ref_type"`
Refspecs []string `json:"refspecs"`
Depth int `json:"depth"`
}
type RunnerInfo struct {
Timeout int `json:"timeout"`
}
type StepScript []string
type StepName string
const (
StepNameScript StepName = "script"
StepNameAfterScript StepName = "after_script"
)
type StepWhen string
const (
StepWhenOnFailure StepWhen = "on_failure"
StepWhenOnSuccess StepWhen = "on_success"
StepWhenAlways StepWhen = "always"
)
type CachePolicy string
const (
CachePolicyUndefined CachePolicy = ""
CachePolicyPullPush CachePolicy = "pull-push"
CachePolicyPull CachePolicy = "pull"
CachePolicyPush CachePolicy = "push"
)
type Step struct {
Name StepName `json:"name"`
Script StepScript `json:"script"`
Timeout int `json:"timeout"`
When StepWhen `json:"when"`
AllowFailure bool `json:"allow_failure"`
}
type Steps []Step
type Image struct {
Name string `json:"name"`
Alias string `json:"alias,omitempty"`
Command []string `json:"command,omitempty"`
Entrypoint []string `json:"entrypoint,omitempty"`
Ports []Port `json:"ports,omitempty"`
}
type Port struct {
Number int `json:"number,omitempty"`
Protocol string `json:"protocol,omitempty"`
Name string `json:"name,omitempty"`
}
type Services []Image
type ArtifactPaths []string
type ArtifactWhen string
const (
ArtifactWhenOnFailure ArtifactWhen = "on_failure"
ArtifactWhenOnSuccess ArtifactWhen = "on_success"
ArtifactWhenAlways ArtifactWhen = "always"
)
func (when ArtifactWhen) OnSuccess() bool {
return when == "" || when == ArtifactWhenOnSuccess || when == ArtifactWhenAlways
}
func (when ArtifactWhen) OnFailure() bool {
return when == ArtifactWhenOnFailure || when == ArtifactWhenAlways
}
type ArtifactFormat string
const (
ArtifactFormatDefault ArtifactFormat = ""
ArtifactFormatZip ArtifactFormat = "zip"
ArtifactFormatGzip ArtifactFormat = "gzip"
ArtifactFormatRaw ArtifactFormat = "raw"
)
type Artifact struct {
Name string `json:"name"`
Untracked bool `json:"untracked"`
Paths ArtifactPaths `json:"paths"`
When ArtifactWhen `json:"when"`
Type string `json:"artifact_type"`
Format ArtifactFormat `json:"artifact_format"`
ExpireIn string `json:"expire_in"`
}
type Artifacts []Artifact
type Cache struct {
Key string `json:"key"`
Untracked bool `json:"untracked"`
Policy CachePolicy `json:"policy"`
Paths ArtifactPaths `json:"paths"`
}
func (c Cache) CheckPolicy(wanted CachePolicy) (bool, error) {
switch c.Policy {
case CachePolicyUndefined, CachePolicyPullPush:
return true, nil
case CachePolicyPull, CachePolicyPush:
return wanted == c.Policy, nil
}
return false, fmt.Errorf("Unknown cache policy %s", c.Policy)
}
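// Illustrative sketch (not part of the original source): how a caller might
// gate the restore and archive phases of a job on the cache policy. The
// function name and log messages are hypothetical.
func exampleCachePolicyUsage(c Cache) {
if ok, err := c.CheckPolicy(CachePolicyPull); err == nil && ok {
fmt.Println("restoring cache before the script stage")
}
if ok, err := c.CheckPolicy(CachePolicyPush); err == nil && ok {
fmt.Println("archiving cache after the script stage")
}
}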
type Caches []Cache
type Credentials struct {
Type string `json:"type"`
URL string `json:"url"`
Username string `json:"username"`
Password string `json:"password"`
}
type DependencyArtifactsFile struct {
Filename string `json:"filename"`
Size int64 `json:"size"`
}
type Dependency struct {
ID int `json:"id"`
Token string `json:"token"`
Name string `json:"name"`
ArtifactsFile DependencyArtifactsFile `json:"artifacts_file"`
}
type Dependencies []Dependency
type GitlabFeatures struct {
TraceSections bool `json:"trace_sections"`
}
type JobResponse struct {
ID int `json:"id"`
Token string `json:"token"`
AllowGitFetch bool `json:"allow_git_fetch"`
JobInfo JobInfo `json:"job_info"`
GitInfo GitInfo `json:"git_info"`
RunnerInfo RunnerInfo `json:"runner_info"`
Variables JobVariables `json:"variables"`
Steps Steps `json:"steps"`
Image Image `json:"image"`
Services Services `json:"services"`
Artifacts Artifacts `json:"artifacts"`
Cache Caches `json:"cache"`
Credentials []Credentials `json:"credentials"`
Dependencies Dependencies `json:"dependencies"`
Features GitlabFeatures `json:"features"`
TLSCAChain string `json:"-"`
TLSAuthCert string `json:"-"`
TLSAuthKey string `json:"-"`
}
func (j *JobResponse) RepoCleanURL() string {
return url_helpers.CleanURL(j.GitInfo.RepoURL)
}
type UpdateJobRequest struct {
Info VersionInfo `json:"info,omitempty"`
Token string `json:"token,omitempty"`
State JobState `json:"state,omitempty"`
FailureReason JobFailureReason `json:"failure_reason,omitempty"`
}
type JobCredentials struct {
ID int `long:"id" env:"CI_JOB_ID" description:"The build ID to upload artifacts for"`
Token string `long:"token" env:"CI_JOB_TOKEN" required:"true" description:"Build token"`
URL string `long:"url" env:"CI_SERVER_URL" required:"true" description:"GitLab CI URL"`
TLSCAFile string `long:"tls-ca-file" env:"CI_SERVER_TLS_CA_FILE" description:"File containing the certificates to verify the peer when using HTTPS"`
TLSCertFile string `long:"tls-cert-file" env:"CI_SERVER_TLS_CERT_FILE" description:"File containing certificate for TLS client auth with runner when using HTTPS"`
TLSKeyFile string `long:"tls-key-file" env:"CI_SERVER_TLS_KEY_FILE" description:"File containing private key for TLS client auth with runner when using HTTPS"`
}
func (j *JobCredentials) GetURL() string {
return j.URL
}
func (j *JobCredentials) GetTLSCAFile() string {
return j.TLSCAFile
}
func (j *JobCredentials) GetTLSCertFile() string {
return j.TLSCertFile
}
func (j *JobCredentials) GetTLSKeyFile() string {
return j.TLSKeyFile
}
func (j *JobCredentials) GetToken() string {
return j.Token
}
type UpdateJobInfo struct {
ID int
State JobState
FailureReason JobFailureReason
}
type ArtifactsOptions struct {
BaseName string
ExpireIn string
Format ArtifactFormat
Type string
}
type FailuresCollector interface {
RecordFailure(reason JobFailureReason, runnerDescription string)
}
type JobTrace interface {
io.Writer
Success()
Fail(err error, failureReason JobFailureReason)
SetCancelFunc(cancelFunc context.CancelFunc)
SetFailuresCollector(fc FailuresCollector)
SetMasked(values []string)
IsStdout() bool
}
type PatchTraceResult struct {
SentOffset int
State UpdateState
NewUpdateInterval time.Duration
}
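// NewPatchTraceResult builds a PatchTraceResult, interpreting
// newUpdateInterval as a number of seconds.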
func NewPatchTraceResult(sentOffset int, state UpdateState, newUpdateInterval int) PatchTraceResult {
return PatchTraceResult{
SentOffset: sentOffset,
State: state,
NewUpdateInterval: time.Duration(newUpdateInterval) * time.Second,
}
}
type Network interface {
RegisterRunner(config RunnerCredentials, parameters RegisterRunnerParameters) *RegisterRunnerResponse
VerifyRunner(config RunnerCredentials) bool
UnregisterRunner(config RunnerCredentials) bool
RequestJob(config RunnerConfig, sessionInfo *SessionInfo) (*JobResponse, bool)
UpdateJob(config RunnerConfig, jobCredentials *JobCredentials, jobInfo UpdateJobInfo) UpdateState
PatchTrace(config RunnerConfig, jobCredentials *JobCredentials, content []byte, startOffset int) PatchTraceResult
DownloadArtifacts(config JobCredentials, artifactsFile string) DownloadState
UploadRawArtifacts(config JobCredentials, reader io.Reader, options ArtifactsOptions) UploadState
ProcessJob(config RunnerConfig, buildCredentials *JobCredentials) (JobTrace, error)
}
package common
import (
"fmt"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
)
type ShellConfiguration struct {
Environment []string
DockerCommand []string
Command string
Arguments []string
PassFile bool
Extension string
}
type ShellType int
const (
NormalShell ShellType = iota
LoginShell
)
func (s *ShellConfiguration) GetCommandWithArguments() []string {
parts := []string{s.Command}
parts = append(parts, s.Arguments...)
return parts
}
func (s *ShellConfiguration) String() string {
return helpers.ToYAML(s)
}
type ShellScriptInfo struct {
Shell string
Build *Build
Type ShellType
User string
RunnerCommand string
PreCloneScript string
PreBuildScript string
PostBuildScript string
}
type Shell interface {
GetName() string
GetFeatures(features *FeaturesInfo)
IsDefault() bool
GetConfiguration(info ShellScriptInfo) (*ShellConfiguration, error)
GenerateScript(buildStage BuildStage, info ShellScriptInfo) (string, error)
}
var shells map[string]Shell
func RegisterShell(shell Shell) {
logrus.Debugln("Registering", shell.GetName(), "shell...")
if shells == nil {
shells = make(map[string]Shell)
}
if shells[shell.GetName()] != nil {
panic("Shell already exist: " + shell.GetName())
}
shells[shell.GetName()] = shell
}
func GetShell(shell string) Shell {
if shells == nil {
return nil
}
return shells[shell]
}
func GetShells() []string {
names := []string{}
if shells != nil {
for name := range shells {
names = append(names, name)
}
}
return names
}
func GetShellConfiguration(info ShellScriptInfo) (*ShellConfiguration, error) {
shell := GetShell(info.Shell)
if shell == nil {
return nil, fmt.Errorf("shell %s not found", info.Shell)
}
return shell.GetConfiguration(info)
}
func GenerateShellScript(buildStage BuildStage, info ShellScriptInfo) (string, error) {
shell := GetShell(info.Shell)
if shell == nil {
return "", fmt.Errorf("shell %s not found", info.Shell)
}
return shell.GenerateScript(buildStage, info)
}
func GetDefaultShell() string {
if shells == nil {
panic("no shells defined")
}
for _, shell := range shells {
if shell.IsDefault() {
return shell.GetName()
}
}
panic("no default shell defined")
}
package common
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
"net/http"
"os"
"path"
"runtime"
"strings"
"time"
"github.com/tevino/abool"
)
const (
repoRemoteURL = "https://gitlab.com/gitlab-org/ci-cd/tests/gitlab-test.git"
repoRefType = RefTypeBranch
repoSHA = "91956efe32fb7bef54f378d90c9bd74c19025872"
repoBeforeSHA = "ca50079dac5293292f83a4d454922ba8db44e7a3"
repoRefName = "master"
repoLFSSHA = "2371dd05e426fca09b0d2ec5d9ed757559035e2f"
repoLFSBeforeSHA = "91956efe32fb7bef54f378d90c9bd74c19025872"
repoLFSRefName = "add-lfs-object"
repoSubmoduleLFSSHA = "d0cb7ff49b5c4fcf159e860fd6b30ef40731c435"
repoSubmoduleLFSBeforeSHA = "dcbc4f0c93cb1731eeac4e3a70a55a991838e137"
repoSubmoduleLFSRefName = "add-lfs-submodule"
FilesLFSFile1LFSsize = int64(2097152)
)
var (
gitLabComChain string
gitLabComChainFetched *abool.AtomicBool
)
func init() {
gitLabComChainFetched = abool.New()
}
func GetGitInfo(url string) GitInfo {
return GitInfo{
RepoURL: url,
Sha: repoSHA,
BeforeSha: repoBeforeSHA,
Ref: repoRefName,
RefType: repoRefType,
Refspecs: []string{"+refs/heads/*:refs/origin/heads/*", "+refs/tags/*:refs/tags/*"},
}
}
func GetLFSGitInfo(url string) GitInfo {
return GitInfo{
RepoURL: url,
Sha: repoLFSSHA,
BeforeSha: repoLFSBeforeSHA,
Ref: repoLFSRefName,
RefType: repoRefType,
Refspecs: []string{"+refs/heads/*:refs/origin/heads/*", "+refs/tags/*:refs/tags/*"},
}
}
func GetSubmoduleLFSGitInfo(url string) GitInfo {
return GitInfo{
RepoURL: url,
Sha: repoSubmoduleLFSSHA,
BeforeSha: repoSubmoduleLFSBeforeSHA,
Ref: repoSubmoduleLFSRefName,
RefType: repoRefType,
Refspecs: []string{"+refs/heads/*:refs/origin/heads/*", "+refs/tags/*:refs/tags/*"},
}
}
func GetSuccessfulBuild() (JobResponse, error) {
return GetLocalBuildResponse("echo Hello World")
}
func GetRemoteSuccessfulBuild() (JobResponse, error) {
return GetRemoteBuildResponse("echo Hello World")
}
func GetRemoteSuccessfulLFSBuild() (JobResponse, error) {
response, err := GetRemoteBuildResponse("echo Hello World")
response.GitInfo = GetLFSGitInfo(repoRemoteURL)
return response, err
}
func GetRemoteSuccessfulBuildWithAfterScript() (JobResponse, error) {
jobResponse, err := GetRemoteBuildResponse("echo Hello World")
jobResponse.Steps = append(jobResponse.Steps,
Step{
Name: StepNameAfterScript,
Script: []string{"echo Hello World"},
When: StepWhenAlways,
},
)
return jobResponse, err
}
func GetRemoteSuccessfulBuildWithDumpedVariables() (JobResponse, error) {
variableName := "test_dump"
variableValue := "test"
response, err := GetRemoteBuildResponse(
fmt.Sprintf("[[ \"${%s}\" != \"\" ]]", variableName),
fmt.Sprintf("[[ $(cat $%s) == \"%s\" ]]", variableName, variableValue),
)
if err != nil {
return JobResponse{}, err
}
dumpedVariable := JobVariable{
Key: variableName, Value: variableValue,
Internal: true, Public: true, File: true,
}
response.Variables = append(response.Variables, dumpedVariable)
return response, nil
}
func GetFailedBuild() (JobResponse, error) {
return GetLocalBuildResponse("exit 1")
}
func GetRemoteFailedBuild() (JobResponse, error) {
return GetRemoteBuildResponse("exit 1")
}
func GetLongRunningBuild() (JobResponse, error) {
return GetLocalBuildResponse("sleep 3600")
}
func GetRemoteLongRunningBuild() (JobResponse, error) {
return GetRemoteBuildResponse("sleep 3600")
}
func GetMultilineBashBuild() (JobResponse, error) {
return GetRemoteBuildResponse(`if true; then
echo 'Hello World'
fi
`)
}
func GetMultilineBashBuildPowerShell() (JobResponse, error) {
return GetRemoteBuildResponse("if (0 -eq 0) {\n\recho \"Hello World\"\n\r}")
}
func GetMultilineBashBuildCmd() (JobResponse, error) {
return GetRemoteBuildResponse(`IF 0==0 (
echo Hello World
)`)
}
func GetRemoteBrokenTLSBuild() (JobResponse, error) {
invalidCert, err := buildSnakeOilCert()
if err != nil {
return JobResponse{}, err
}
return getRemoteCustomTLSBuild(invalidCert)
}
func GetRemoteGitLabComTLSBuild() (JobResponse, error) {
cert, err := getGitLabComTLSChain()
if err != nil {
return JobResponse{}, err
}
return getRemoteCustomTLSBuild(cert)
}
func getRemoteCustomTLSBuild(chain string) (JobResponse, error) {
job, err := GetRemoteBuildResponse("echo Hello World")
if err != nil {
return JobResponse{}, err
}
job.TLSCAChain = chain
job.Variables = append(job.Variables,
JobVariable{Key: "GIT_STRATEGY", Value: "clone"},
JobVariable{Key: "GIT_SUBMODULE_STRATEGY", Value: "normal"})
return job, nil
}
func getBuildResponse(repoURL string, commands []string) JobResponse {
return JobResponse{
GitInfo: GetGitInfo(repoURL),
Steps: Steps{
Step{
Name: StepNameScript,
Script: commands,
When: StepWhenAlways,
AllowFailure: false,
},
},
}
}
func GetRemoteBuildResponse(commands ...string) (JobResponse, error) {
return getBuildResponse(repoRemoteURL, commands), nil
}
func GetLocalBuildResponse(commands ...string) (JobResponse, error) {
localRepoURL, err := getLocalRepoURL()
if err != nil {
return JobResponse{}, err
}
return getBuildResponse(localRepoURL, commands), nil
}
func getLocalRepoURL() (string, error) {
_, filename, _, _ := runtime.Caller(0)
directory := path.Dir(filename)
if strings.Contains(directory, "_test/_obj_test") {
pwd, err := os.Getwd()
if err != nil {
return "", err
}
directory = pwd
}
localRepoURL := path.Clean(directory + "/../tmp/gitlab-test/.git")
_, err := os.Stat(localRepoURL)
if err != nil {
return "", err
}
return localRepoURL, nil
}
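// buildSnakeOilCert generates a short-lived (one hour), self-signed CA
// certificate and returns it PEM-encoded; tests use it as an intentionally
// untrusted TLS chain.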
func buildSnakeOilCert() (string, error) {
priv, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
return "", err
}
notBefore := time.Now()
notAfter := notBefore.Add(time.Hour)
template := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
Organization: []string{"Snake Oil Co"},
},
NotBefore: notBefore,
NotAfter: notAfter,
IsCA: true,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil {
return "", err
}
certificate := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
return string(certificate), nil
}
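// getGitLabComTLSChain returns the PEM-encoded certificate chain presented
// by gitlab.com, fetching it once and caching the result for later calls.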
func getGitLabComTLSChain() (string, error) {
if gitLabComChainFetched.IsSet() {
return gitLabComChain, nil
}
resp, err := http.Head("https://gitlab.com/users/sign_in")
if err != nil {
return "", err
}
defer resp.Body.Close()
var buff bytes.Buffer
for _, certs := range resp.TLS.VerifiedChains {
for _, cert := range certs {
err = pem.Encode(&buff, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
if err != nil {
return "", err
}
}
}
gitLabComChain = buff.String()
gitLabComChainFetched.Set()
return gitLabComChain, nil
}
package common
import (
"context"
"io"
"os"
"sync"
)
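// Trace is a minimal JobTrace implementation that forwards build output to
// the configured Writer and reports itself as stdout-backed; masking and
// failure reporting are no-ops.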
type Trace struct {
Writer io.Writer
CancelFunc context.CancelFunc
mutex sync.Mutex
}
func (s *Trace) Write(p []byte) (n int, err error) {
s.mutex.Lock()
defer s.mutex.Unlock()
if s.Writer == nil {
return 0, os.ErrInvalid
}
return s.Writer.Write(p)
}
func (s *Trace) SetMasked(values []string) {}
func (s *Trace) Success() {}
func (s *Trace) Fail(err error, failureReason JobFailureReason) {}
func (s *Trace) SetCancelFunc(cancelFunc context.CancelFunc) {
s.CancelFunc = cancelFunc
}
func (s *Trace) SetFailuresCollector(fc FailuresCollector) {}
func (s *Trace) IsStdout() bool {
return true
}
package common
import (
"errors"
"fmt"
"strings"
"gitlab.com/gitlab-org/gitlab-runner/helpers/shell"
)
type JobVariable struct {
Key string `json:"key"`
Value string `json:"value"`
Public bool `json:"public"`
Internal bool `json:"-"`
File bool `json:"file"`
Masked bool `json:"masked"`
}
type JobVariables []JobVariable
func (b JobVariable) String() string {
return fmt.Sprintf("%s=%s", b.Key, b.Value)
}
func (b JobVariables) PublicOrInternal() (variables JobVariables) {
for _, variable := range b {
if variable.Public || variable.Internal {
variables = append(variables, variable)
}
}
return variables
}
func (b JobVariables) StringList() (variables []string) {
for _, variable := range b {
variables = append(variables, variable.String())
}
return variables
}
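// Get returns the value of the most recently defined variable with the
// given key. Shell special parameters are handled explicitly: "$" expands
// to itself and the remaining special parameters (*, #, @, !, ?, 0-9)
// expand to an empty string.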
func (b JobVariables) Get(key string) string {
switch key {
case "$":
return key
case "*", "#", "@", "!", "?", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9":
return ""
}
for i := len(b) - 1; i >= 0; i-- {
if b[i].Key == key {
return b[i].Value
}
}
return ""
}
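// Illustrative sketch (not part of the original source): Get scans the
// slice from the end, so a variable appended later overrides an earlier
// one with the same key.
func exampleVariableOverride() {
vars := JobVariables{
{Key: "CI_DEBUG", Value: "false"},
{Key: "CI_DEBUG", Value: "true"},
}
fmt.Println(vars.Get("CI_DEBUG")) // prints "true"
}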
func (b JobVariables) ExpandValue(value string) string {
return shell.LegacyExpand(value, b.Get)
}
func (b JobVariables) Expand() (variables JobVariables) {
for _, variable := range b {
variable.Value = b.ExpandValue(variable.Value)
variables = append(variables, variable)
}
return variables
}
func (b JobVariables) Masked() (masked []string) {
for _, variable := range b {
if variable.Masked {
masked = append(masked, variable.Value)
}
}
return
}
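// ParseVariable parses a single KEY=value definition into a JobVariable.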
func ParseVariable(text string) (variable JobVariable, err error) {
keyValue := strings.SplitN(text, "=", 2)
if len(keyValue) != 2 {
err = errors.New("missing =")
return
}
variable = JobVariable{
Key: keyValue[0],
Value: keyValue[1],
}
return
}
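// Illustrative sketch (not part of the original source): splitting happens
// on the first "=" only, so values may themselves contain "=", while input
// without any "=" is rejected.
func exampleParseVariable() {
v, _ := ParseVariable("EXTRA_FLAGS=--opt=1")
fmt.Println(v.Key, v.Value) // prints "EXTRA_FLAGS --opt=1"
if _, err := ParseVariable("NOT_A_DEFINITION"); err != nil {
fmt.Println(err) // prints "missing ="
}
}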
package common
import (
"fmt"
"runtime"
"github.com/prometheus/client_golang/prometheus"
"github.com/urfave/cli"
)
var NAME = "gitlab-runner"
var VERSION = "development version"
var REVISION = "HEAD"
var BRANCH = "HEAD"
var BUILT = "unknown"
var AppVersion AppVersionInfo
type AppVersionInfo struct {
Name string `json:"name"`
Version string `json:"version"`
Revision string `json:"revision"`
Branch string `json:"branch"`
GOVersion string `json:"go_version"`
BuiltAt string `json:"built_at"`
OS string `json:"os"`
Architecture string `json:"architecture"`
}
func (v *AppVersionInfo) Printer(c *cli.Context) {
fmt.Print(v.Extended())
}
func (v *AppVersionInfo) Line() string {
return fmt.Sprintf("%s %s (%s)", v.Name, v.Version, v.Revision)
}
func (v *AppVersionInfo) ShortLine() string {
return fmt.Sprintf("%s (%s)", v.Version, v.Revision)
}
func (v *AppVersionInfo) UserAgent() string {
return fmt.Sprintf("%s %s (%s; %s; %s/%s)", v.Name, v.Version, v.Branch, v.GOVersion, v.OS, v.Architecture)
}
func (v *AppVersionInfo) Variables() JobVariables {
return JobVariables{
{Key: "CI_RUNNER_VERSION", Value: v.Version, Public: true, Internal: true, File: false},
{Key: "CI_RUNNER_REVISION", Value: v.Revision, Public: true, Internal: true, File: false},
{Key: "CI_RUNNER_EXECUTABLE_ARCH", Value: fmt.Sprintf("%s/%s", v.OS, v.Architecture), Public: true, Internal: true, File: false},
}
}
func (v *AppVersionInfo) Extended() string {
version := fmt.Sprintf("Version: %s\n", v.Version)
version += fmt.Sprintf("Git revision: %s\n", v.Revision)
version += fmt.Sprintf("Git branch: %s\n", v.Branch)
version += fmt.Sprintf("GO version: %s\n", v.GOVersion)
version += fmt.Sprintf("Built: %s\n", v.BuiltAt)
version += fmt.Sprintf("OS/Arch: %s/%s\n", v.OS, v.Architecture)
return version
}
// NewMetricsCollector returns a prometheus.Collector which represents current build information.
func (v *AppVersionInfo) NewMetricsCollector() *prometheus.GaugeVec {
labels := map[string]string{
"name": v.Name,
"version": v.Version,
"revision": v.Revision,
"branch": v.Branch,
"go_version": v.GOVersion,
"built_at": v.BuiltAt,
"os": v.OS,
"architecture": v.Architecture,
}
labelNames := make([]string, 0, len(labels))
for n := range labels {
labelNames = append(labelNames, n)
}
buildInfo := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "gitlab_runner_version_info",
Help: "A metric with a constant '1' value labeled by different build stats fields.",
},
labelNames,
)
buildInfo.With(labels).Set(1)
return buildInfo
}
func init() {
AppVersion = AppVersionInfo{
Name: NAME,
Version: VERSION,
Revision: REVISION,
Branch: BRANCH,
GOVersion: runtime.Version(),
BuiltAt: BUILT,
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
}
}
package command
import (
"context"
"errors"
"fmt"
"io"
"os"
"os/exec"
"strconv"
"time"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/executors/custom/api"
"gitlab.com/gitlab-org/gitlab-runner/executors/custom/process"
)
const (
BuildFailureExitCode = 1
SystemFailureExitCode = 2
)
type CreateOptions struct {
Dir string
Env []string
Stdout io.Writer
Stderr io.Writer
Logger common.BuildLogger
GracefulKillTimeout time.Duration
ForceKillTimeout time.Duration
}
type Command interface {
Run() error
}
type command struct {
context context.Context
cmd commander
waitCh chan error
logger common.BuildLogger
gracefulKillTimeout time.Duration
forceKillTimeout time.Duration
}
func New(ctx context.Context, executable string, args []string, options CreateOptions) Command {
defaultVariables := map[string]string{
"TMPDIR": options.Dir,
api.BuildFailureExitCodeVariable: strconv.Itoa(BuildFailureExitCode),
api.SystemFailureExitCodeVariable: strconv.Itoa(SystemFailureExitCode),
}
env := os.Environ()
for key, value := range defaultVariables {
env = append(env, fmt.Sprintf("%s=%s", key, value))
}
options.Env = append(env, options.Env...)
return &command{
context: ctx,
cmd: newCmd(executable, args, options),
waitCh: make(chan error),
logger: options.Logger,
gracefulKillTimeout: options.GracefulKillTimeout,
forceKillTimeout: options.ForceKillTimeout,
}
}
func (c *command) Run() error {
err := c.cmd.Start()
if err != nil {
return fmt.Errorf("failed to start command: %w", err)
}
go c.waitForCommand()
select {
case err = <-c.waitCh:
return err
case <-c.context.Done():
return c.killAndWait()
}
}
var getExitCode = func(err *exec.ExitError) int {
return err.ExitCode()
}
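// waitForCommand waits for the command to finish and translates its exit
// code: BuildFailureExitCode becomes a common.BuildError,
// SystemFailureExitCode is passed through unchanged, and any other exit
// code reported via an *exec.ExitError becomes an ErrUnknownFailure.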
func (c *command) waitForCommand() {
err := c.cmd.Wait()
eerr, ok := err.(*exec.ExitError)
if ok {
exitCode := getExitCode(eerr)
switch {
case exitCode == BuildFailureExitCode:
err = &common.BuildError{Inner: eerr}
case exitCode != SystemFailureExitCode:
err = &ErrUnknownFailure{Inner: eerr, ExitCode: exitCode}
}
}
c.waitCh <- err
}
var newProcessKiller = process.NewKiller
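// killAndWait terminates the process gracefully and waits for it to exit,
// escalating to a forced kill after gracefulKillTimeout and giving up with
// an error after forceKillTimeout.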
func (c *command) killAndWait() error {
if c.cmd.Process() == nil {
return errors.New("process not started yet")
}
logger := c.logger.WithFields(logrus.Fields{
"PID": c.cmd.Process().Pid,
})
processKiller := newProcessKiller(logger, c.cmd.Process())
processKiller.Terminate()
select {
case err := <-c.waitCh:
return err
case <-time.After(c.gracefulKillTimeout):
processKiller.ForceKill()
select {
case err := <-c.waitCh:
return err
case <-time.After(c.forceKillTimeout):
return errors.New("failed to kill process, likely process is dormant")
}
}
}
package command
import (
"os"
"os/exec"
)
type commander interface {
Start() error
Wait() error
Process() *os.Process
}
type cmd struct {
internal *exec.Cmd
}
var newCmd = func(executable string, args []string, options CreateOptions) commander {
c := exec.Command(executable, args...)
c.Dir = options.Dir
c.Env = options.Env
c.Stdin = nil
c.Stdout = options.Stdout
c.Stderr = options.Stderr
return &cmd{internal: c}
}
func (c *cmd) Start() error {
return c.internal.Start()
}
func (c *cmd) Wait() error {
return c.internal.Wait()
}
func (c *cmd) Process() *os.Process {
return c.internal.Process
}
package command
import (
"fmt"
)
type ErrUnknownFailure struct {
Inner error
ExitCode int
}
func (e *ErrUnknownFailure) Error() string {
return fmt.Sprintf(
"unknown Custom executor executable exit code %d; executable execution terminated with: %v",
e.ExitCode,
e.Inner,
)
}
package custom
import (
"time"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
type config struct {
*common.CustomConfig
}
func (c *config) GetConfigExecTimeout() time.Duration {
return getDuration(c.ConfigExecTimeout, defaultConfigExecTimeout)
}
func (c *config) GetPrepareExecTimeout() time.Duration {
return getDuration(c.PrepareExecTimeout, defaultPrepareExecTimeout)
}
func (c *config) GetCleanupScriptTimeout() time.Duration {
return getDuration(c.CleanupExecTimeout, defaultCleanupExecTimeout)
}
func (c *config) GetGracefulKillTimeout() time.Duration {
return getDuration(c.GracefulKillTimeout, defaultGracefulKillTimeout)
}
func (c *config) GetForceKillTimeout() time.Duration {
return getDuration(c.ForceKillTimeout, defaultForceKillTimeout)
}
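// getDuration interprets source as a number of seconds, falling back to
// defaultValue when source is nil or non-positive.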
func getDuration(source *int, defaultValue time.Duration) time.Duration {
if source == nil {
return defaultValue
}
timeout := *source
if timeout <= 0 {
return defaultValue
}
return time.Duration(timeout) * time.Second
}
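// Illustrative sketch (not part of the original source): the fallback
// behaviour of getDuration for unset, zero and positive second values.
func exampleGetDuration() {
ten := 10
zero := 0
_ = getDuration(nil, defaultConfigExecTimeout)   // -> defaultConfigExecTimeout
_ = getDuration(&zero, defaultConfigExecTimeout) // -> defaultConfigExecTimeout
_ = getDuration(&ten, defaultConfigExecTimeout)  // -> 10 * time.Second
}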
package custom
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/executors"
"gitlab.com/gitlab-org/gitlab-runner/executors/custom/api"
"gitlab.com/gitlab-org/gitlab-runner/executors/custom/command"
)
const ciJobImageEnv = "CUSTOM_ENV_CI_JOB_IMAGE"
type commandOutputs struct {
stdout io.Writer
stderr io.Writer
}
type prepareCommandOpts struct {
executable string
args []string
out commandOutputs
}
type ConfigExecOutput struct {
api.ConfigExecOutput
}
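// InjectInto applies the values returned by the driver's config_exec stage
// to the executor, overriding the hostname, builds dir, cache dir and
// shared-builds-dir flag when present, and records the driver info used in
// the startup log message.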
func (c *ConfigExecOutput) InjectInto(executor *executor) {
if c.Hostname != nil {
executor.Build.Hostname = *c.Hostname
}
if c.BuildsDir != nil {
executor.Config.BuildsDir = *c.BuildsDir
}
if c.CacheDir != nil {
executor.Config.CacheDir = *c.CacheDir
}
if c.BuildsDirIsShared != nil {
executor.SharedBuildsDir = *c.BuildsDirIsShared
}
executor.driverInfo = c.Driver
}
type executor struct {
executors.AbstractExecutor
config *config
tempDir string
driverInfo *api.DriverInfo
}
func (e *executor) Prepare(options common.ExecutorPrepareOptions) error {
e.AbstractExecutor.PrepareConfiguration(options)
err := e.prepareConfig()
if err != nil {
return err
}
e.tempDir, err = ioutil.TempDir("", "custom-executor")
if err != nil {
return err
}
err = e.dynamicConfig()
if err != nil {
return err
}
e.logStartupMessage()
err = e.AbstractExecutor.PrepareBuildAndShell()
if err != nil {
return err
}
// nothing to do, as there's no prepare_script
if e.config.PrepareExec == "" {
return nil
}
ctx, cancelFunc := context.WithTimeout(e.Context, e.config.GetPrepareExecTimeout())
defer cancelFunc()
opts := prepareCommandOpts{
executable: e.config.PrepareExec,
args: e.config.PrepareArgs,
out: e.defaultCommandOutputs(),
}
return e.prepareCommand(ctx, opts).Run()
}
func (e *executor) prepareConfig() error {
if e.Config.Custom == nil {
return common.MakeBuildError("custom executor not configured")
}
e.config = &config{
CustomConfig: e.Config.Custom,
}
if e.config.RunExec == "" {
return common.MakeBuildError("custom executor is missing RunExec")
}
return nil
}
func (e *executor) dynamicConfig() error {
if e.config.ConfigExec == "" {
return nil
}
ctx, cancelFunc := context.WithTimeout(e.Context, e.config.GetConfigExecTimeout())
defer cancelFunc()
buf := bytes.NewBuffer(nil)
outputs := commandOutputs{
stdout: buf,
stderr: e.Trace,
}
opts := prepareCommandOpts{
executable: e.config.ConfigExec,
args: e.config.ConfigArgs,
out: outputs,
}
err := e.prepareCommand(ctx, opts).Run()
if err != nil {
return err
}
jsonConfig := buf.Bytes()
if len(jsonConfig) < 1 {
return nil
}
config := new(ConfigExecOutput)
err = json.Unmarshal(jsonConfig, config)
if err != nil {
return fmt.Errorf("error while parsing JSON output: %w", err)
}
config.InjectInto(e)
return nil
}
func (e *executor) logStartupMessage() {
const usageLine = "Using Custom executor"
info := e.driverInfo
if info == nil || info.Name == nil {
e.Println(fmt.Sprintf("%s...", usageLine))
return
}
if info.Version == nil {
e.Println(fmt.Sprintf("%s with driver %s...", usageLine, *info.Name))
return
}
e.Println(fmt.Sprintf("%s with driver %s %s...", usageLine, *info.Name, *info.Version))
}
func (e *executor) defaultCommandOutputs() commandOutputs {
return commandOutputs{
stdout: e.Trace,
stderr: e.Trace,
}
}
var commandFactory = command.New
func (e *executor) prepareCommand(ctx context.Context, opts prepareCommandOpts) command.Command {
cmdOpts := command.CreateOptions{
Dir: e.tempDir,
Env: make([]string, 0),
Stdout: opts.out.stdout,
Stderr: opts.out.stderr,
Logger: e.BuildLogger,
GracefulKillTimeout: e.config.GetGracefulKillTimeout(),
ForceKillTimeout: e.config.GetForceKillTimeout(),
}
for _, variable := range e.Build.GetAllVariables() {
cmdOpts.Env = append(cmdOpts.Env, fmt.Sprintf("CUSTOM_ENV_%s=%s", variable.Key, variable.Value))
}
// Since the variable is unique to the custom executor at the moment, we add
// it separately from the other build variables. If we decide to export only
// the postfix in the build, this code can be removed.
imageName := e.Build.GetAllVariables().ExpandValue(e.Build.Image.Name)
cmdOpts.Env = append(cmdOpts.Env, fmt.Sprintf("%s=%s", ciJobImageEnv, imageName))
return commandFactory(ctx, opts.executable, opts.args, cmdOpts)
}
func (e *executor) Run(cmd common.ExecutorCommand) error {
scriptDir, err := ioutil.TempDir(e.tempDir, "script")
if err != nil {
return err
}
scriptFile := filepath.Join(scriptDir, "script."+e.BuildShell.Extension)
err = ioutil.WriteFile(scriptFile, []byte(cmd.Script), 0700)
if err != nil {
return err
}
args := append(e.config.RunArgs, scriptFile, string(cmd.Stage))
opts := prepareCommandOpts{
executable: e.config.RunExec,
args: args,
out: e.defaultCommandOutputs(),
}
return e.prepareCommand(cmd.Context, opts).Run()
}
func (e *executor) Cleanup() {
e.AbstractExecutor.Cleanup()
err := e.prepareConfig()
if err != nil {
e.Warningln(err)
// at this moment we don't care about the errors
return
}
// nothing to do, as there's no cleanup_script
if e.config.CleanupExec == "" {
return
}
ctx, cancelFunc := context.WithTimeout(context.Background(), e.config.GetCleanupScriptTimeout())
defer cancelFunc()
stdoutLogger := e.BuildLogger.WithFields(logrus.Fields{"cleanup_std": "out"})
stderrLogger := e.BuildLogger.WithFields(logrus.Fields{"cleanup_std": "err"})
outputs := commandOutputs{
stdout: stdoutLogger.WriterLevel(logrus.DebugLevel),
stderr: stderrLogger.WriterLevel(logrus.WarnLevel),
}
opts := prepareCommandOpts{
executable: e.config.CleanupExec,
args: e.config.CleanupArgs,
out: outputs,
}
err = e.prepareCommand(ctx, opts).Run()
if err != nil {
e.Warningln("Cleanup script failed:", err)
}
}
func init() {
options := executors.ExecutorOptions{
DefaultCustomBuildsDirEnabled: false,
Shell: common.ShellScriptInfo{
Shell: common.GetDefaultShell(),
Type: common.NormalShell,
RunnerCommand: "gitlab-runner",
},
ShowHostname: false,
}
creator := func() common.Executor {
return &executor{
AbstractExecutor: executors.AbstractExecutor{
ExecutorOptions: options,
},
}
}
featuresUpdater := func(features *common.FeaturesInfo) {
features.Variables = true
features.Shared = true
}
common.RegisterExecutor("custom", executors.DefaultExecutorProvider{
Creator: creator,
FeaturesUpdater: featuresUpdater,
DefaultShellName: options.Shell.Shell,
})
}
// +build !windows
package custom
import (
"errors"
terminalsession "gitlab.com/gitlab-org/gitlab-runner/session/terminal"
)
func (e *executor) Connect() (terminalsession.Conn, error) {
return nil, errors.New("not yet supported")
}
package docker
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/stdcopy"
"github.com/kardianos/osext"
"github.com/mattn/go-zglob"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/helpers/container/services"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/executors"
"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes"
"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
"gitlab.com/gitlab-org/gitlab-runner/helpers/container/helperimage"
docker_helpers "gitlab.com/gitlab-org/gitlab-runner/helpers/docker"
"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags"
)
const (
DockerExecutorStagePrepare common.ExecutorStage = "docker_prepare"
DockerExecutorStageRun common.ExecutorStage = "docker_run"
DockerExecutorStageCleanup common.ExecutorStage = "docker_cleanup"
DockerExecutorStageCreatingBuildVolumes common.ExecutorStage = "docker_creating_build_volumes"
DockerExecutorStageCreatingServices common.ExecutorStage = "docker_creating_services"
DockerExecutorStageCreatingUserVolumes common.ExecutorStage = "docker_creating_user_volumes"
DockerExecutorStagePullingImage common.ExecutorStage = "docker_pulling_image"
)
const (
AuthConfigSourceNameUserVariable = "$DOCKER_AUTH_CONFIG"
AuthConfigSourceNameJobPayload = "job payload (GitLab Registry)"
)
var DockerPrebuiltImagesPaths []string
var neverRestartPolicy = container.RestartPolicy{Name: "no"}
var errVolumesManagerUndefined = errors.New("volumesManager is undefined")
type executor struct {
executors.AbstractExecutor
client docker_helpers.Client
volumeParser parser.Parser
info types.Info
temporary []string // IDs of containers that should be removed
builds []string // IDs of successfully created build containers
services []*types.Container
links []string
devices []container.DeviceMapping
helperImageInfo helperimage.Info
usedImages map[string]string
usedImagesLock sync.RWMutex
volumesManager volumes.Manager
}
func init() {
runnerFolder, err := osext.ExecutableFolder()
if err != nil {
logrus.Errorln("Docker executor: unable to detect gitlab-runner folder, prebuilt image helpers will be loaded from DockerHub.", err)
}
DockerPrebuiltImagesPaths = []string{
filepath.Join(runnerFolder, "helper-images"),
filepath.Join(runnerFolder, "out/helper-images"),
}
}
func (e *executor) getServiceVariables() []string {
return e.Build.GetAllVariables().PublicOrInternal().StringList()
}
func (e *executor) getUserAuthConfiguration(indexName string) (string, *types.AuthConfig) {
if e.Build == nil {
return "", nil
}
buf := bytes.NewBufferString(e.Build.GetDockerAuthConfig())
authConfigs, _ := docker_helpers.ReadAuthConfigsFromReader(buf)
if authConfigs == nil {
return "", nil
}
return AuthConfigSourceNameUserVariable, docker_helpers.ResolveDockerAuthConfig(indexName, authConfigs)
}
func (e *executor) getBuildAuthConfiguration(indexName string) (string, *types.AuthConfig) {
if e.Build == nil {
return "", nil
}
authConfigs := make(map[string]types.AuthConfig)
for _, credentials := range e.Build.Credentials {
if credentials.Type != "registry" {
continue
}
authConfigs[credentials.URL] = types.AuthConfig{
Username: credentials.Username,
Password: credentials.Password,
ServerAddress: credentials.URL,
}
}
return AuthConfigSourceNameJobPayload, docker_helpers.ResolveDockerAuthConfig(indexName, authConfigs)
}
func (e *executor) getHomeDirAuthConfiguration(indexName string) (string, *types.AuthConfig) {
sourceFile, authConfigs, _ := docker_helpers.ReadDockerAuthConfigsFromHomeDir(e.Shell().User)
if authConfigs == nil {
return "", nil
}
return sourceFile, docker_helpers.ResolveDockerAuthConfig(indexName, authConfigs)
}
type authConfigResolver func(indexName string) (string, *types.AuthConfig)
func (e *executor) getAuthConfig(imageName string) *types.AuthConfig {
indexName, _ := docker_helpers.SplitDockerImageName(imageName)
resolvers := []authConfigResolver{
e.getUserAuthConfiguration,
e.getHomeDirAuthConfiguration,
e.getBuildAuthConfiguration,
}
for _, resolver := range resolvers {
source, authConfig := resolver(indexName)
if authConfig != nil {
e.Println("Authenticating with credentials from", source)
e.Debugln("Using", authConfig.Username, "to connect to", authConfig.ServerAddress,
"in order to resolve", imageName, "...")
return authConfig
}
}
e.Debugln(fmt.Sprintf("No credentials found for %v", indexName))
return nil
}
func (e *executor) pullDockerImage(imageName string, ac *types.AuthConfig) (*types.ImageInspect, error) {
e.SetCurrentStage(DockerExecutorStagePullingImage)
e.Println("Pulling docker image", imageName, "...")
ref := imageName
// Add :latest to limit the download results
if !strings.ContainsAny(ref, ":@") {
ref += ":latest"
}
options := types.ImagePullOptions{}
if ac != nil {
options.RegistryAuth, _ = docker_helpers.EncodeAuthConfig(ac)
}
errorRegexp := regexp.MustCompile("(repository does not exist|not found)")
if err := e.client.ImagePullBlocking(e.Context, ref, options); err != nil {
if errorRegexp.MatchString(err.Error()) {
return nil, &common.BuildError{Inner: err}
}
return nil, err
}
image, _, err := e.client.ImageInspectWithRaw(e.Context, imageName)
return &image, err
}
func (e *executor) getDockerImage(imageName string) (image *types.ImageInspect, err error) {
pullPolicy, err := e.Config.Docker.PullPolicy.Get()
if err != nil {
return nil, err
}
authConfig := e.getAuthConfig(imageName)
e.Debugln("Looking for image", imageName, "...")
existingImage, _, err := e.client.ImageInspectWithRaw(e.Context, imageName)
// Return early if we already used that image
if err == nil && e.wasImageUsed(imageName, existingImage.ID) {
return &existingImage, nil
}
defer func() {
if err == nil {
e.markImageAsUsed(imageName, image.ID)
}
}()
// If the pull policy is "never", return whatever the inspect call returned
if pullPolicy == common.PullPolicyNever {
return &existingImage, err
}
if err == nil {
// Don't pull image that is passed by ID
if existingImage.ID == imageName {
return &existingImage, nil
}
// If not-present is specified
if pullPolicy == common.PullPolicyIfNotPresent {
e.Println("Using locally found image version due to if-not-present pull policy")
return &existingImage, err
}
}
return e.pullDockerImage(imageName, authConfig)
}
func (e *executor) expandAndGetDockerImage(imageName string, allowedImages []string) (*types.ImageInspect, error) {
imageName, err := e.expandImageName(imageName, allowedImages)
if err != nil {
return nil, err
}
image, err := e.getDockerImage(imageName)
if err != nil {
return nil, err
}
return image, nil
}
func (e *executor) loadPrebuiltImage(path, ref, tag string) (*types.ImageInspect, error) {
file, err := os.OpenFile(path, os.O_RDONLY, 0600)
if err != nil {
if os.IsNotExist(err) {
return nil, err
}
return nil, fmt.Errorf("Cannot load prebuilt image: %s: %w", path, err)
}
defer file.Close()
e.Debugln("Loading prebuilt image...")
source := types.ImageImportSource{
Source: file,
SourceName: "-",
}
options := types.ImageImportOptions{Tag: tag}
if err := e.client.ImageImportBlocking(e.Context, source, ref, options); err != nil {
return nil, fmt.Errorf("Failed to import image: %w", err)
}
image, _, err := e.client.ImageInspectWithRaw(e.Context, ref+":"+tag)
if err != nil {
e.Debugln("Inspecting imported image", ref, "failed:", err)
return nil, err
}
return &image, err
}
func (e *executor) getPrebuiltImage() (*types.ImageInspect, error) {
if imageNameFromConfig := e.Config.Docker.HelperImage; imageNameFromConfig != "" {
imageNameFromConfig = common.AppVersion.Variables().ExpandValue(imageNameFromConfig)
e.Debugln("Pull configured helper_image for predefined container instead of import bundled image", imageNameFromConfig, "...")
return e.getDockerImage(imageNameFromConfig)
}
e.Debugln(fmt.Sprintf("Looking for prebuilt image %s...", e.helperImageInfo))
image, _, err := e.client.ImageInspectWithRaw(e.Context, e.helperImageInfo.String())
if err == nil {
return &image, nil
}
// Try to load prebuilt image from local filesystem
loadedImage := e.getLocalHelperImage()
if loadedImage != nil {
return loadedImage, nil
}
// Fallback to getting image from DockerHub
e.Debugln(fmt.Sprintf("Loading image form registry: %s", e.helperImageInfo))
return e.getDockerImage(e.helperImageInfo.String())
}
func (e *executor) getLocalHelperImage() *types.ImageInspect {
if !e.helperImageInfo.IsSupportingLocalImport {
return nil
}
architecture := e.helperImageInfo.Architecture
for _, dockerPrebuiltImagesPath := range DockerPrebuiltImagesPaths {
dockerPrebuiltImageFilePath := filepath.Join(dockerPrebuiltImagesPath, "prebuilt-"+architecture+prebuiltImageExtension)
image, err := e.loadPrebuiltImage(dockerPrebuiltImageFilePath, prebuiltImageName, e.helperImageInfo.Tag)
if err != nil {
e.Debugln("Failed to load prebuilt image from:", dockerPrebuiltImageFilePath, "error:", err)
continue
}
return image
}
return nil
}
func (e *executor) getBuildImage() (*types.ImageInspect, error) {
imageName, err := e.expandImageName(e.Build.Image.Name, []string{})
if err != nil {
return nil, err
}
// Fetch image
image, err := e.getDockerImage(imageName)
if err != nil {
return nil, err
}
return image, nil
}
func (e *executor) getLabels(containerType string, otherLabels ...string) map[string]string {
labels := make(map[string]string)
labels[dockerLabelPrefix+".job.id"] = strconv.Itoa(e.Build.ID)
labels[dockerLabelPrefix+".job.sha"] = e.Build.GitInfo.Sha
labels[dockerLabelPrefix+".job.before_sha"] = e.Build.GitInfo.BeforeSha
labels[dockerLabelPrefix+".job.ref"] = e.Build.GitInfo.Ref
labels[dockerLabelPrefix+".project.id"] = strconv.Itoa(e.Build.JobInfo.ProjectID)
labels[dockerLabelPrefix+".pipeline.id"] = e.Build.GetAllVariables().Get("CI_PIPELINE_ID")
labels[dockerLabelPrefix+".runner.id"] = e.Build.Runner.ShortDescription()
labels[dockerLabelPrefix+".runner.local_id"] = strconv.Itoa(e.Build.RunnerID)
labels[dockerLabelPrefix+".type"] = containerType
for _, label := range otherLabels {
keyValue := strings.SplitN(label, "=", 2)
if len(keyValue) == 2 {
labels[dockerLabelPrefix+"."+keyValue[0]] = keyValue[1]
}
}
return labels
}
func fakeContainer(id string, names ...string) *types.Container {
return &types.Container{ID: id, Names: names}
}
func (e *executor) parseDeviceString(deviceString string) (device container.DeviceMapping, err error) {
// Split the device string PathOnHost[:PathInContainer[:CgroupPermissions]]
parts := strings.Split(deviceString, ":")
if len(parts) > 3 {
err = fmt.Errorf("Too many colons")
return
}
device.PathOnHost = parts[0]
// Optional container path
if len(parts) >= 2 {
device.PathInContainer = parts[1]
} else {
// default: device at same path in container
device.PathInContainer = device.PathOnHost
}
// Optional permissions
if len(parts) >= 3 {
device.CgroupPermissions = parts[2]
} else {
// default: rwm, just like 'docker run'
device.CgroupPermissions = "rwm"
}
return
}
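// Illustrative sketch (not part of the original source): the accepted
// device-string formats, mirroring `docker run --device`.
func exampleParseDeviceString(e *executor) {
d, _ := e.parseDeviceString("/dev/kvm")
// d.PathOnHost == "/dev/kvm", d.PathInContainer == "/dev/kvm",
// d.CgroupPermissions == "rwm"
d, _ = e.parseDeviceString("/dev/sda:/dev/xvda:r")
// d.PathOnHost == "/dev/sda", d.PathInContainer == "/dev/xvda",
// d.CgroupPermissions == "r"
_ = d
}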
func (e *executor) bindDevices() (err error) {
for _, deviceString := range e.Config.Docker.Devices {
device, err := e.parseDeviceString(deviceString)
if err != nil {
err = fmt.Errorf("Failed to parse device string %q: %w", deviceString, err)
return err
}
e.devices = append(e.devices, device)
}
return nil
}
func (e *executor) wasImageUsed(imageName, imageID string) bool {
e.usedImagesLock.RLock()
defer e.usedImagesLock.RUnlock()
if e.usedImages[imageName] == imageID {
return true
}
return false
}
func (e *executor) markImageAsUsed(imageName, imageID string) {
e.usedImagesLock.Lock()
defer e.usedImagesLock.Unlock()
if e.usedImages == nil {
e.usedImages = make(map[string]string)
}
e.usedImages[imageName] = imageID
if imageName != imageID {
e.Println("Using docker image", imageID, "for", imageName, "...")
}
}
func (e *executor) createService(serviceIndex int, service, version, image string, serviceDefinition common.Image) (*types.Container, error) {
if len(service) == 0 {
return nil, fmt.Errorf("invalid service name: %s", serviceDefinition.Name)
}
if e.volumesManager == nil {
return nil, errVolumesManagerUndefined
}
e.Println("Starting service", service+":"+version, "...")
serviceImage, err := e.getDockerImage(image)
if err != nil {
return nil, err
}
serviceSlug := strings.Replace(service, "/", "__", -1)
containerName := fmt.Sprintf("%s-%s-%d", e.Build.ProjectUniqueName(), serviceSlug, serviceIndex)
// this could potentially fail some builds if there's a name collision
e.removeContainer(e.Context, containerName)
config := &container.Config{
Image: serviceImage.ID,
Labels: e.getLabels("service", "service="+service, "service.version="+version),
Env: e.getServiceVariables(),
}
if len(serviceDefinition.Command) > 0 {
config.Cmd = serviceDefinition.Command
}
config.Entrypoint = e.overwriteEntrypoint(&serviceDefinition)
hostConfig := &container.HostConfig{
DNS: e.Config.Docker.DNS,
DNSSearch: e.Config.Docker.DNSSearch,
RestartPolicy: neverRestartPolicy,
ExtraHosts: e.Config.Docker.ExtraHosts,
Privileged: e.Config.Docker.Privileged,
NetworkMode: container.NetworkMode(e.Config.Docker.NetworkMode),
Binds: e.volumesManager.Binds(),
ShmSize: e.Config.Docker.ShmSize,
VolumesFrom: e.volumesManager.ContainerIDs(),
Tmpfs: e.Config.Docker.ServicesTmpfs,
LogConfig: container.LogConfig{
Type: "json-file",
},
}
e.Debugln("Creating service container", containerName, "...")
resp, err := e.client.ContainerCreate(e.Context, config, hostConfig, nil, containerName)
if err != nil {
return nil, err
}
e.Debugln("Starting service container", resp.ID, "...")
err = e.client.ContainerStart(e.Context, resp.ID, types.ContainerStartOptions{})
if err != nil {
e.temporary = append(e.temporary, resp.ID)
return nil, err
}
return fakeContainer(resp.ID, containerName), nil
}
func (e *executor) getServicesDefinitions() (common.Services, error) {
internalServiceImages := []string{}
serviceDefinitions := common.Services{}
for _, service := range e.Config.Docker.Services {
internalServiceImages = append(internalServiceImages, service.Name)
serviceDefinitions = append(serviceDefinitions, service.ToImageDefinition())
}
for _, service := range e.Build.Services {
serviceName := e.Build.GetAllVariables().ExpandValue(service.Name)
err := e.verifyAllowedImage(serviceName, "services", e.Config.Docker.AllowedServices, internalServiceImages)
if err != nil {
return nil, err
}
service.Name = serviceName
serviceDefinitions = append(serviceDefinitions, service)
}
return serviceDefinitions, nil
}
func (e *executor) waitForServices() {
waitForServicesTimeout := e.Config.Docker.WaitForServicesTimeout
if waitForServicesTimeout == 0 {
waitForServicesTimeout = common.DefaultWaitForServicesTimeout
}
// wait for all services to come up
if waitForServicesTimeout > 0 && len(e.services) > 0 {
e.Println("Waiting for services to be up and running...")
wg := sync.WaitGroup{}
for _, service := range e.services {
wg.Add(1)
go func(service *types.Container) {
e.waitForServiceContainer(service, time.Duration(waitForServicesTimeout)*time.Second)
wg.Done()
}(service)
}
wg.Wait()
}
}
func (e *executor) buildServiceLinks(linksMap map[string]*types.Container) (links []string) {
for linkName, linkee := range linksMap {
newContainer, err := e.client.ContainerInspect(e.Context, linkee.ID)
if err != nil {
continue
}
if newContainer.State.Running {
links = append(links, linkee.ID+":"+linkName)
}
}
return
}
func (e *executor) createFromServiceDefinition(serviceIndex int, serviceDefinition common.Image, linksMap map[string]*types.Container) (err error) {
var container *types.Container
serviceMeta := services.SplitNameAndVersion(serviceDefinition.Name)
if serviceDefinition.Alias != "" {
serviceMeta.Aliases = append(serviceMeta.Aliases, serviceDefinition.Alias)
}
for _, linkName := range serviceMeta.Aliases {
if linksMap[linkName] != nil {
e.Warningln("Service", serviceDefinition.Name, "is already created. Ignoring.")
continue
}
// Create service if not yet created
if container == nil {
container, err = e.createService(serviceIndex, serviceMeta.Service, serviceMeta.Version, serviceMeta.ImageName, serviceDefinition)
if err != nil {
return
}
e.Debugln("Created service", serviceDefinition.Name, "as", container.ID)
e.services = append(e.services, container)
e.temporary = append(e.temporary, container.ID)
}
linksMap[linkName] = container
}
return
}
func (e *executor) createServices() (err error) {
e.SetCurrentStage(DockerExecutorStageCreatingServices)
e.Debugln("Creating services...")
servicesDefinitions, err := e.getServicesDefinitions()
if err != nil {
return
}
linksMap := make(map[string]*types.Container)
for index, serviceDefinition := range servicesDefinitions {
err = e.createFromServiceDefinition(index, serviceDefinition, linksMap)
if err != nil {
return
}
}
e.waitForServices()
e.links = e.buildServiceLinks(linksMap)
return
}
func (e *executor) getValidContainers(containers []string) []string {
var newContainers []string
for _, container := range containers {
if _, err := e.client.ContainerInspect(e.Context, container); err == nil {
newContainers = append(newContainers, container)
}
}
return newContainers
}
func (e *executor) createContainer(containerType string, imageDefinition common.Image, cmd []string, allowedInternalImages []string) (*types.ContainerJSON, error) {
if e.volumesManager == nil {
return nil, errVolumesManagerUndefined
}
image, err := e.expandAndGetDockerImage(imageDefinition.Name, allowedInternalImages)
if err != nil {
return nil, err
}
hostname := e.Config.Docker.Hostname
if hostname == "" {
hostname = e.Build.ProjectUniqueName()
}
// Always create a unique but sequential name
containerIndex := len(e.builds)
containerName := e.Build.ProjectUniqueName() + "-" +
containerType + "-" + strconv.Itoa(containerIndex)
config := &container.Config{
Image: image.ID,
Hostname: hostname,
Cmd: cmd,
Labels: e.getLabels(containerType),
Tty: false,
AttachStdin: true,
AttachStdout: true,
AttachStderr: true,
OpenStdin: true,
StdinOnce: true,
Env: append(e.Build.GetAllVariables().StringList(), e.BuildShell.Environment...),
}
config.Entrypoint = e.overwriteEntrypoint(&imageDefinition)
nanoCPUs, err := e.Config.Docker.GetNanoCPUs()
if err != nil {
return nil, err
}
// By default we use the cache containers,
// but in later phases we attach to the previous build container
volumesFrom := e.volumesManager.ContainerIDs()
if len(e.builds) > 0 {
volumesFrom = []string{
e.builds[len(e.builds)-1],
}
}
hostConfig := &container.HostConfig{
Resources: container.Resources{
Memory: e.Config.Docker.GetMemory(),
MemorySwap: e.Config.Docker.GetMemorySwap(),
MemoryReservation: e.Config.Docker.GetMemoryReservation(),
CpusetCpus: e.Config.Docker.CPUSetCPUs,
CPUShares: e.Config.Docker.CPUShares,
NanoCPUs: nanoCPUs,
Devices: e.devices,
OomKillDisable: e.Config.Docker.GetOomKillDisable(),
},
DNS: e.Config.Docker.DNS,
DNSSearch: e.Config.Docker.DNSSearch,
Runtime: e.Config.Docker.Runtime,
Privileged: e.Config.Docker.Privileged,
UsernsMode: container.UsernsMode(e.Config.Docker.UsernsMode),
CapAdd: e.Config.Docker.CapAdd,
CapDrop: e.Config.Docker.CapDrop,
SecurityOpt: e.Config.Docker.SecurityOpt,
RestartPolicy: neverRestartPolicy,
ExtraHosts: e.Config.Docker.ExtraHosts,
NetworkMode: container.NetworkMode(e.Config.Docker.NetworkMode),
Links: append(e.Config.Docker.Links, e.links...),
Binds: e.volumesManager.Binds(),
OomScoreAdj: e.Config.Docker.OomScoreAdjust,
ShmSize: e.Config.Docker.ShmSize,
VolumeDriver: e.Config.Docker.VolumeDriver,
VolumesFrom: append(e.Config.Docker.VolumesFrom, volumesFrom...),
LogConfig: container.LogConfig{
Type: "json-file",
},
Tmpfs: e.Config.Docker.Tmpfs,
Sysctls: e.Config.Docker.SysCtls,
}
// this could potentially fail some builds if there's a name collision
e.removeContainer(e.Context, containerName)
e.Debugln("Creating container", containerName, "...")
resp, err := e.client.ContainerCreate(e.Context, config, hostConfig, nil, containerName)
if err != nil {
if resp.ID != "" {
e.temporary = append(e.temporary, resp.ID)
}
return nil, err
}
inspect, err := e.client.ContainerInspect(e.Context, resp.ID)
if err != nil {
e.temporary = append(e.temporary, resp.ID)
return nil, err
}
e.builds = append(e.builds, resp.ID)
e.temporary = append(e.temporary, resp.ID)
return &inspect, nil
}
func (e *executor) killContainer(id string, waitCh chan error) (err error) {
for {
e.disconnectNetwork(e.Context, id)
e.Debugln("Killing container", id, "...")
e.client.ContainerKill(e.Context, id, "SIGKILL")
// Wait for the signal that the container was killed,
// or retry after some time
select {
case err = <-waitCh:
return
case <-time.After(time.Second):
}
}
}
func (e *executor) waitForContainer(ctx context.Context, id string) error {
e.Debugln("Waiting for container", id, "...")
retries := 0
// Use active wait
for ctx.Err() == nil {
container, err := e.client.ContainerInspect(ctx, id)
if err != nil {
if docker_helpers.IsErrNotFound(err) {
return err
}
if retries > 3 {
return err
}
retries++
time.Sleep(time.Second)
continue
}
// Reset retry timer
retries = 0
if container.State.Running {
time.Sleep(time.Second)
continue
}
if container.State.ExitCode != 0 {
return &common.BuildError{
Inner: fmt.Errorf("exit code %d", container.State.ExitCode),
}
}
return nil
}
return ctx.Err()
}
func (e *executor) watchContainer(ctx context.Context, id string, input io.Reader) (err error) {
options := types.ContainerAttachOptions{
Stream: true,
Stdin: true,
Stdout: true,
Stderr: true,
}
e.Debugln("Attaching to container", id, "...")
hijacked, err := e.client.ContainerAttach(ctx, id, options)
if err != nil {
return
}
defer hijacked.Close()
e.Debugln("Starting container", id, "...")
err = e.client.ContainerStart(ctx, id, types.ContainerStartOptions{})
if err != nil {
return
}
e.Debugln("Waiting for attach to finish", id, "...")
attachCh := make(chan error, 2)
// Copy any output to the build trace
go func() {
_, err := stdcopy.StdCopy(e.Trace, e.Trace, hijacked.Reader)
if err != nil {
attachCh <- err
}
}()
// Write the input to the container and close its STDIN to get it to finish
go func() {
_, err := io.Copy(hijacked.Conn, input)
hijacked.CloseWrite()
if err != nil {
attachCh <- err
}
}()
waitCh := make(chan error, 1)
go func() {
waitCh <- e.waitForContainer(e.Context, id)
}()
select {
case <-ctx.Done():
e.killContainer(id, waitCh)
err = errors.New("Aborted")
case err = <-attachCh:
e.killContainer(id, waitCh)
e.Debugln("Container", id, "finished with", err)
case err = <-waitCh:
e.Debugln("Container", id, "finished with", err)
}
return
}
func (e *executor) removeContainer(ctx context.Context, id string) error {
e.disconnectNetwork(ctx, id)
options := types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
}
err := e.client.ContainerRemove(ctx, id, options)
e.Debugln("Removed container", id, "with", err)
return err
}
func (e *executor) disconnectNetwork(ctx context.Context, id string) error {
netList, err := e.client.NetworkList(ctx, types.NetworkListOptions{})
if err != nil {
e.Debugln("Can't get network list. ListNetworks exited with", err)
return err
}
for _, network := range netList {
for _, pluggedContainer := range network.Containers {
if id == pluggedContainer.Name {
err = e.client.NetworkDisconnect(ctx, network.ID, id, true)
if err != nil {
e.Warningln("Can't disconnect possibly zombie container", pluggedContainer.Name, "from network", network.Name, "->", err)
} else {
e.Warningln("Possibly zombie container", pluggedContainer.Name, "is disconnected from network", network.Name)
}
break
}
}
}
return err
}
func (e *executor) verifyAllowedImage(image, optionName string, allowedImages []string, internalImages []string) error {
for _, allowedImage := range allowedImages {
ok, _ := zglob.Match(allowedImage, image)
if ok {
return nil
}
}
for _, internalImage := range internalImages {
if internalImage == image {
return nil
}
}
if len(allowedImages) != 0 {
e.Println()
e.Errorln("The", image, "is not present on list of allowed", optionName)
for _, allowedImage := range allowedImages {
e.Println("-", allowedImage)
}
e.Println()
} else {
// by default, allow overriding the image name
return nil
}
e.Println("Please check runner's configuration: http://doc.gitlab.com/ci/docker/using_docker_images.html#overwrite-image-and-services")
return errors.New("invalid image")
}
func (e *executor) expandImageName(imageName string, allowedInternalImages []string) (string, error) {
if imageName != "" {
image := e.Build.GetAllVariables().ExpandValue(imageName)
allowedInternalImages = append(allowedInternalImages, e.Config.Docker.Image)
err := e.verifyAllowedImage(image, "images", e.Config.Docker.AllowedImages, allowedInternalImages)
if err != nil {
return "", err
}
return image, nil
}
if e.Config.Docker.Image == "" {
return "", errors.New("No Docker image specified to run the build in")
}
return e.Config.Docker.Image, nil
}
func (e *executor) overwriteEntrypoint(image *common.Image) []string {
if len(image.Entrypoint) > 0 {
if !e.Config.Docker.DisableEntrypointOverwrite {
return image.Entrypoint
}
e.Warningln("Entrypoint override disabled")
}
return nil
}
func (e *executor) connectDocker() error {
client, err := docker_helpers.New(e.Config.Docker.DockerCredentials, "")
if err != nil {
return err
}
e.client = client
e.info, err = client.Info(e.Context)
if err != nil {
return err
}
err = e.validateOSType()
if err != nil {
return err
}
e.helperImageInfo, err = helperimage.Get(common.REVISION, helperimage.Config{
OSType: e.info.OSType,
Architecture: e.info.Architecture,
OperatingSystem: e.info.OperatingSystem,
})
return err
}
// validateOSType checks if the ExecutorOptions metadata matches with the docker
// info response.
func (e *executor) validateOSType() error {
executorOSType := e.ExecutorOptions.Metadata[metadataOSType]
if executorOSType == "" {
return common.MakeBuildError("%s does not have any OSType specified", e.Config.Executor)
}
if executorOSType != e.info.OSType {
return common.MakeBuildError(
"executor requires OSType=%s, but Docker Engine supports only OSType=%s",
executorOSType, e.info.OSType,
)
}
return nil
}
func (e *executor) createDependencies() error {
createDependenciesStrategy := []func() error{
e.bindDevices,
e.createVolumesManager,
e.createVolumes,
e.createBuildVolume,
e.createServices,
}
if e.Build.IsFeatureFlagOn(featureflags.UseLegacyVolumesMountingOrder) {
// TODO: Remove in 13.0 https://gitlab.com/gitlab-org/gitlab-runner/issues/4180
createDependenciesStrategy = []func() error{
e.bindDevices,
e.createVolumesManager,
e.createBuildVolume,
e.createServices,
e.createVolumes,
}
}
for _, setup := range createDependenciesStrategy {
err := setup()
if err != nil {
return err
}
}
return nil
}
func (e *executor) createVolumes() error {
e.SetCurrentStage(DockerExecutorStageCreatingUserVolumes)
e.Debugln("Creating user-defined volumes...")
if e.volumesManager == nil {
return errVolumesManagerUndefined
}
for _, volume := range e.Config.Docker.Volumes {
err := e.volumesManager.Create(volume)
if err == volumes.ErrCacheVolumesDisabled {
e.Warningln(fmt.Sprintf(
"Container based cache volumes creation is disabled. Will not create volume for %q",
volume,
))
continue
}
if err != nil {
return err
}
}
return nil
}
func (e *executor) createBuildVolume() error {
e.SetCurrentStage(DockerExecutorStageCreatingBuildVolumes)
e.Debugln("Creating build volume...")
if e.volumesManager == nil {
return errVolumesManagerUndefined
}
jobsDir := e.Build.RootDir
// TODO: Remove in 13.0 https://gitlab.com/gitlab-org/gitlab-runner/issues/4180
if e.Build.IsFeatureFlagOn(featureflags.UseLegacyBuildsDirForDocker) {
// Cache Git sources:
// take the parent of the project directory, because the build scripts
// use `rm -rf`, which could otherwise remove the mounted volume
jobsDir = path.Dir(e.Build.FullProjectDir())
}
var err error
if e.Build.GetGitStrategy() == common.GitFetch {
err = e.volumesManager.Create(jobsDir)
if err == nil {
return nil
}
if err == volumes.ErrCacheVolumesDisabled {
err = e.volumesManager.CreateTemporary(jobsDir)
}
} else {
err = e.volumesManager.CreateTemporary(jobsDir)
}
if err != nil {
if _, ok := err.(*volumes.ErrVolumeAlreadyDefined); !ok {
return err
}
}
return nil
}
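// A minimal sketch of the decision above (added for clarity, not part of
// the original source): with GIT_STRATEGY=fetch the builds directory is
// backed by a persistent cache volume so that sources survive between jobs,
// falling back to a temporary volume when cache volumes are disabled; any
// other strategy (e.g. clone) always gets a temporary volume:
//
//	GitFetch -> Create(jobsDir)
//	            on ErrCacheVolumesDisabled -> CreateTemporary(jobsDir)
//	other    -> CreateTemporary(jobsDir)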
func (e *executor) Prepare(options common.ExecutorPrepareOptions) error {
e.SetCurrentStage(DockerExecutorStagePrepare)
if options.Config.Docker == nil {
return errors.New("missing docker configuration")
}
e.AbstractExecutor.PrepareConfiguration(options)
err := e.connectDocker()
if err != nil {
return err
}
err = e.prepareBuildsDir(options)
if err != nil {
return err
}
err = e.AbstractExecutor.PrepareBuildAndShell()
if err != nil {
return err
}
if e.BuildShell.PassFile {
return errors.New("docker doesn't support shells that require script file")
}
imageName, err := e.expandImageName(e.Build.Image.Name, []string{})
if err != nil {
return err
}
e.Println("Using Docker executor with image", imageName, "...")
err = e.createDependencies()
if err != nil {
return err
}
return nil
}
func (e *executor) prepareBuildsDir(options common.ExecutorPrepareOptions) error {
if e.volumeParser == nil {
return common.MakeBuildError("missing volume parser")
}
isHostMounted, err := volumes.IsHostMountedVolume(e.volumeParser, e.RootDir(), options.Config.Docker.Volumes...)
if err != nil {
return &common.BuildError{Inner: err}
}
// We need to set a proper value for e.SharedBuildsDir because it's
// required to correctly start the job, which happens inside
// e.AbstractExecutor.Prepare(). A started job is in turn required for
// the volumes manager to work, so this is done before the manager is
// even created.
if isHostMounted {
e.SharedBuildsDir = true
}
return nil
}
func (e *executor) Cleanup() {
e.SetCurrentStage(DockerExecutorStageCleanup)
var wg sync.WaitGroup
ctx, cancel := context.WithTimeout(context.Background(), dockerCleanupTimeout)
defer cancel()
remove := func(id string) {
wg.Add(1)
go func() {
e.removeContainer(ctx, id)
wg.Done()
}()
}
for _, temporaryID := range e.temporary {
remove(temporaryID)
}
if e.volumesManager != nil {
<-e.volumesManager.Cleanup(ctx)
}
wg.Wait()
if e.client != nil {
e.client.Close()
}
e.AbstractExecutor.Cleanup()
}
type serviceHealthCheckError struct {
Inner error
Logs string
}
func (e *serviceHealthCheckError) Error() string {
if e.Inner == nil {
return "serviceHealthCheckError"
}
return e.Inner.Error()
}
func (e *executor) runServiceHealthCheckContainer(service *types.Container, timeout time.Duration) error {
waitImage, err := e.getPrebuiltImage()
if err != nil {
return fmt.Errorf("getPrebuiltImage: %w", err)
}
containerName := service.Names[0] + "-wait-for-service"
cmd := []string{"gitlab-runner-helper", "health-check"}
config := &container.Config{
Cmd: cmd,
Image: waitImage.ID,
Labels: e.getLabels("wait", "wait="+service.ID),
}
hostConfig := &container.HostConfig{
RestartPolicy: neverRestartPolicy,
Links: []string{service.Names[0] + ":service"},
NetworkMode: container.NetworkMode(e.Config.Docker.NetworkMode),
LogConfig: container.LogConfig{
Type: "json-file",
},
}
e.Debugln("Waiting for service container", containerName, "to be up and running...")
resp, err := e.client.ContainerCreate(e.Context, config, hostConfig, nil, containerName)
if err != nil {
return fmt.Errorf("ContainerCreate: %w", err)
}
defer e.removeContainer(e.Context, resp.ID)
err = e.client.ContainerStart(e.Context, resp.ID, types.ContainerStartOptions{})
if err != nil {
return fmt.Errorf("ContainerStart: %w", err)
}
waitResult := make(chan error, 1)
go func() {
waitResult <- e.waitForContainer(e.Context, resp.ID)
}()
// failures here are surfaced as warnings by the caller and don't fail the build
select {
case err := <-waitResult:
if err == nil {
return nil
}
return &serviceHealthCheckError{
Inner: err,
Logs: e.readContainerLogs(resp.ID),
}
case <-time.After(timeout):
return &serviceHealthCheckError{
Inner: fmt.Errorf("service %q timeout", containerName),
Logs: e.readContainerLogs(resp.ID),
}
}
}
func (e *executor) waitForServiceContainer(service *types.Container, timeout time.Duration) error {
err := e.runServiceHealthCheckContainer(service, timeout)
if err == nil {
return nil
}
var buffer bytes.Buffer
buffer.WriteString("\n")
buffer.WriteString(helpers.ANSI_YELLOW + "*** WARNING:" + helpers.ANSI_RESET + " Service " + service.Names[0] + " probably didn't start properly.\n")
buffer.WriteString("\n")
buffer.WriteString("Health check error:\n")
buffer.WriteString(strings.TrimSpace(err.Error()))
buffer.WriteString("\n")
if healthCheckErr, ok := err.(*serviceHealthCheckError); ok {
buffer.WriteString("\n")
buffer.WriteString("Health check container logs:\n")
buffer.WriteString(healthCheckErr.Logs)
buffer.WriteString("\n")
}
buffer.WriteString("\n")
buffer.WriteString("Service container logs:\n")
buffer.WriteString(e.readContainerLogs(service.ID))
buffer.WriteString("\n")
buffer.WriteString("\n")
buffer.WriteString(helpers.ANSI_YELLOW + "*********" + helpers.ANSI_RESET + "\n")
buffer.WriteString("\n")
io.Copy(e.Trace, &buffer)
return err
}
func (e *executor) readContainerLogs(containerID string) string {
var containerBuffer bytes.Buffer
options := types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Timestamps: true,
}
hijacked, err := e.client.ContainerLogs(e.Context, containerID, options)
if err != nil {
return strings.TrimSpace(err.Error())
}
defer hijacked.Close()
stdcopy.StdCopy(&containerBuffer, &containerBuffer, hijacked)
containerLog := containerBuffer.String()
return strings.TrimSpace(containerLog)
}
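// Note (added for clarity, assuming Docker's stdcopy semantics): the
// container is created without a TTY, so the stream returned by
// ContainerLogs multiplexes stdout and stderr. stdcopy.StdCopy
// demultiplexes that stream; both destinations point at the same buffer
// here, so the two streams end up interleaved in arrival order.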
package docker
import (
"bytes"
"errors"
"fmt"
"sync"
"github.com/docker/docker/api/types"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/executors"
"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser"
)
type commandExecutor struct {
executor
buildContainer *types.ContainerJSON
lock sync.Mutex
}
func (s *commandExecutor) getBuildContainer() *types.ContainerJSON {
s.lock.Lock()
defer s.lock.Unlock()
return s.buildContainer
}
func (s *commandExecutor) Prepare(options common.ExecutorPrepareOptions) error {
err := s.executor.Prepare(options)
if err != nil {
return err
}
s.Debugln("Starting Docker command...")
if len(s.BuildShell.DockerCommand) == 0 {
return errors.New("Script is not compatible with Docker")
}
_, err = s.getPrebuiltImage()
if err != nil {
return err
}
_, err = s.getBuildImage()
if err != nil {
return err
}
return nil
}
func (s *commandExecutor) requestNewPredefinedContainer() (*types.ContainerJSON, error) {
prebuildImage, err := s.getPrebuiltImage()
if err != nil {
return nil, err
}
buildImage := common.Image{
Name: prebuildImage.ID,
}
containerJSON, err := s.createContainer("predefined", buildImage, s.helperImageInfo.Cmd, []string{prebuildImage.ID})
if err != nil {
return nil, err
}
return containerJSON, err
}
func (s *commandExecutor) requestBuildContainer() (*types.ContainerJSON, error) {
s.lock.Lock()
defer s.lock.Unlock()
if s.buildContainer == nil {
var err error
// Start the build container, which will run the actual build
s.buildContainer, err = s.createContainer("build", s.Build.Image, s.BuildShell.DockerCommand, []string{})
if err != nil {
return nil, err
}
}
return s.buildContainer, nil
}
func (s *commandExecutor) Run(cmd common.ExecutorCommand) error {
var runOn *types.ContainerJSON
var err error
if cmd.Predefined {
runOn, err = s.requestNewPredefinedContainer()
} else {
runOn, err = s.requestBuildContainer()
}
if err != nil {
return err
}
s.Debugln("Executing on", runOn.Name, "the", cmd.Script)
s.SetCurrentStage(DockerExecutorStageRun)
return s.watchContainer(cmd.Context, runOn.ID, bytes.NewBufferString(cmd.Script))
}
func (s *commandExecutor) GetMetricsSelector() string {
return fmt.Sprintf("instance=%q", s.executor.info.Name)
}
func init() {
options := executors.ExecutorOptions{
DefaultCustomBuildsDirEnabled: true,
DefaultBuildsDir: "/builds",
DefaultCacheDir: "/cache",
SharedBuildsDir: false,
Shell: common.ShellScriptInfo{
Shell: "bash",
Type: common.NormalShell,
RunnerCommand: "/usr/bin/gitlab-runner-helper",
},
ShowHostname: true,
Metadata: map[string]string{
metadataOSType: osTypeLinux,
},
}
creator := func() common.Executor {
e := &commandExecutor{
executor: executor{
AbstractExecutor: executors.AbstractExecutor{
ExecutorOptions: options,
},
volumeParser: parser.NewLinuxParser(),
},
}
e.SetCurrentStage(common.ExecutorStageCreated)
return e
}
featuresUpdater := func(features *common.FeaturesInfo) {
features.Variables = true
features.Image = true
features.Services = true
features.Session = true
features.Terminal = true
}
common.RegisterExecutor("docker", executors.DefaultExecutorProvider{
Creator: creator,
FeaturesUpdater: featuresUpdater,
DefaultShellName: options.Shell.Shell,
})
}
package docker
import (
"errors"
"github.com/docker/docker/api/types"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/executors"
"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser"
"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh"
)
type sshExecutor struct {
executor
sshCommand ssh.Client
}
func (s *sshExecutor) Prepare(options common.ExecutorPrepareOptions) error {
err := s.executor.Prepare(options)
if err != nil {
return err
}
s.Warningln("Since GitLab Runner 10.0 docker-ssh and docker-ssh+machine executors are marked as DEPRECATED and will be removed in one of the upcoming releases")
if s.Config.SSH == nil {
return errors.New("Missing SSH configuration")
}
s.Debugln("Starting SSH command...")
// Start the build container, which will run the actual build
container, err := s.createContainer("build", s.Build.Image, []string{}, []string{})
if err != nil {
return err
}
s.Debugln("Starting container", container.ID, "...")
err = s.client.ContainerStart(s.Context, container.ID, types.ContainerStartOptions{})
if err != nil {
return err
}
containerData, err := s.client.ContainerInspect(s.Context, container.ID)
if err != nil {
return err
}
// Create SSH command
s.sshCommand = ssh.Client{
Config: *s.Config.SSH,
Stdout: s.Trace,
Stderr: s.Trace,
}
s.sshCommand.Host = containerData.NetworkSettings.IPAddress
s.Debugln("Connecting to SSH server...")
err = s.sshCommand.Connect()
if err != nil {
return err
}
return nil
}
func (s *sshExecutor) Run(cmd common.ExecutorCommand) error {
s.SetCurrentStage(DockerExecutorStageRun)
err := s.sshCommand.Run(cmd.Context, ssh.Command{
Environment: s.BuildShell.Environment,
Command: s.BuildShell.GetCommandWithArguments(),
Stdin: cmd.Script,
})
if _, ok := err.(*ssh.ExitError); ok {
err = &common.BuildError{Inner: err}
}
return err
}
func (s *sshExecutor) Cleanup() {
s.sshCommand.Cleanup()
s.executor.Cleanup()
}
func init() {
options := executors.ExecutorOptions{
DefaultCustomBuildsDirEnabled: true,
DefaultBuildsDir: "builds",
DefaultCacheDir: "cache",
SharedBuildsDir: false,
Shell: common.ShellScriptInfo{
Shell: "bash",
Type: common.LoginShell,
RunnerCommand: "gitlab-runner",
},
ShowHostname: true,
Metadata: map[string]string{
metadataOSType: osTypeLinux,
},
}
creator := func() common.Executor {
e := &sshExecutor{
executor: executor{
AbstractExecutor: executors.AbstractExecutor{
ExecutorOptions: options,
},
volumeParser: parser.NewLinuxParser(),
},
}
e.SetCurrentStage(common.ExecutorStageCreated)
return e
}
featuresUpdater := func(features *common.FeaturesInfo) {
features.Variables = true
features.Image = true
features.Services = true
}
common.RegisterExecutor("docker-ssh", executors.DefaultExecutorProvider{
Creator: creator,
FeaturesUpdater: featuresUpdater,
DefaultShellName: options.Shell.Shell,
})
}
package volumes
import (
"context"
"fmt"
"sync"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
docker_helpers "gitlab.com/gitlab-org/gitlab-runner/helpers/docker"
)
type containerClient interface {
docker_helpers.Client
LabelContainer(container *container.Config, containerType string, otherLabels ...string)
WaitForContainer(id string) error
RemoveContainer(ctx context.Context, id string) error
}
type CacheContainersManager interface {
FindOrCleanExisting(containerName string, containerPath string) string
Create(containerName string, containerPath string) (string, error)
Cleanup(ctx context.Context, ids []string) chan bool
}
type cacheContainerManager struct {
ctx context.Context
logger debugLogger
containerClient containerClient
helperImage *types.ImageInspect
failedContainerIDs []string
}
func NewCacheContainerManager(ctx context.Context, logger debugLogger, cClient containerClient, helperImage *types.ImageInspect) CacheContainersManager {
return &cacheContainerManager{
ctx: ctx,
logger: logger,
containerClient: cClient,
helperImage: helperImage,
}
}
func (m *cacheContainerManager) FindOrCleanExisting(containerName string, containerPath string) string {
inspected, err := m.containerClient.ContainerInspect(m.ctx, containerName)
if err != nil {
m.logger.Debugln(fmt.Sprintf("Error while inspecting %q container: %v", containerName, err))
return ""
}
// check whether the container holds a valid cache volume; if not, remove the broken container
_, ok := inspected.Config.Volumes[containerPath]
if !ok {
m.logger.Debugln(fmt.Sprintf("Removing broken cache container for %q path", containerPath))
err = m.containerClient.RemoveContainer(m.ctx, inspected.ID)
m.logger.Debugln(fmt.Sprintf("Cache container for %q path removed with %v", containerPath, err))
return ""
}
return inspected.ID
}
func (m *cacheContainerManager) Create(containerName string, containerPath string) (string, error) {
containerID, err := m.createCacheContainer(containerName, containerPath)
if err != nil {
return "", err
}
err = m.startCacheContainer(containerID)
if err != nil {
return "", err
}
return containerID, nil
}
func (m *cacheContainerManager) createCacheContainer(containerName string, containerPath string) (string, error) {
config := &container.Config{
Image: m.helperImage.ID,
Cmd: []string{"gitlab-runner-helper", "cache-init", containerPath},
Volumes: map[string]struct{}{
containerPath: {},
},
}
m.containerClient.LabelContainer(config, "cache", "cache.dir="+containerPath)
hostConfig := &container.HostConfig{
LogConfig: container.LogConfig{
Type: "json-file",
},
}
resp, err := m.containerClient.ContainerCreate(m.ctx, config, hostConfig, nil, containerName)
if err != nil {
if resp.ID != "" {
m.failedContainerIDs = append(m.failedContainerIDs, resp.ID)
}
return "", err
}
return resp.ID, nil
}
func (m *cacheContainerManager) startCacheContainer(containerID string) error {
m.logger.Debugln(fmt.Sprintf("Starting cache container %q...", containerID))
err := m.containerClient.ContainerStart(m.ctx, containerID, types.ContainerStartOptions{})
if err != nil {
m.failedContainerIDs = append(m.failedContainerIDs, containerID)
return err
}
m.logger.Debugln(fmt.Sprintf("Waiting for cache container %q...", containerID))
err = m.containerClient.WaitForContainer(containerID)
if err != nil {
m.failedContainerIDs = append(m.failedContainerIDs, containerID)
return err
}
return nil
}
func (m *cacheContainerManager) Cleanup(ctx context.Context, ids []string) chan bool {
done := make(chan bool, 1)
ids = append(m.failedContainerIDs, ids...)
go func() {
wg := new(sync.WaitGroup)
wg.Add(len(ids))
for _, id := range ids {
m.remove(ctx, wg, id)
}
wg.Wait()
done <- true
}()
return done
}
func (m *cacheContainerManager) remove(ctx context.Context, wg *sync.WaitGroup, id string) {
go func() {
err := m.containerClient.RemoveContainer(ctx, id)
if err != nil {
m.logger.Debugln(fmt.Sprintf("Error while removing the container: %v", err))
}
wg.Done()
}()
}
package volumes
import (
"context"
"errors"
"fmt"
"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser"
)
var ErrCacheVolumesDisabled = errors.New("cache volumes feature disabled")
type Manager interface {
Create(volume string) error
CreateTemporary(containerPath string) error
Binds() []string
ContainerIDs() []string
Cleanup(ctx context.Context) chan bool
}
type ManagerConfig struct {
CacheDir string
BaseContainerPath string
UniqueName string
DisableCache bool
}
type manager struct {
config ManagerConfig
logger debugLogger
parser parser.Parser
cacheContainersManager CacheContainersManager
volumeBindings []string
cacheContainerIDs []string
tmpContainerIDs []string
managedVolumes pathList
}
func NewManager(logger debugLogger, volumeParser parser.Parser, ccManager CacheContainersManager, config ManagerConfig) Manager {
return &manager{
config: config,
logger: logger,
parser: volumeParser,
cacheContainersManager: ccManager,
volumeBindings: make([]string, 0),
cacheContainerIDs: make([]string, 0),
tmpContainerIDs: make([]string, 0),
managedVolumes: pathList{},
}
}
func (m *manager) Create(volume string) error {
if len(volume) < 1 {
return nil
}
parsedVolume, err := m.parser.ParseVolume(volume)
if err != nil {
return err
}
switch parsedVolume.Len() {
case 2:
err = m.addHostVolume(parsedVolume)
case 1:
err = m.addCacheVolume(parsedVolume)
default:
err = fmt.Errorf("unsupported volume definition %s", volume)
}
return err
}
func (m *manager) addHostVolume(volume *parser.Volume) error {
var err error
volume.Destination, err = m.getAbsoluteContainerPath(volume.Destination)
if err != nil {
return err
}
err = m.managedVolumes.Add(volume.Destination)
if err != nil {
return err
}
m.appendVolumeBind(volume)
return nil
}
func (m *manager) getAbsoluteContainerPath(dir string) (string, error) {
if m.parser.Path().IsRoot(dir) {
return "", errDirectoryIsRootPath
}
if m.parser.Path().IsAbs(dir) {
return dir, nil
}
return m.parser.Path().Join(m.config.BaseContainerPath, dir), nil
}
func (m *manager) appendVolumeBind(volume *parser.Volume) {
m.logger.Debugln(fmt.Sprintf("Using host-based %q for %q...", volume.Source, volume.Destination))
m.volumeBindings = append(m.volumeBindings, volume.Definition())
}
func (m *manager) addCacheVolume(volume *parser.Volume) error {
// DisableCache only affects the automatically created cache volumes;
// host-mounted volumes are left alone (they are shared on purpose)
if m.config.DisableCache {
m.logger.Debugln("Cache containers feature is disabled")
return ErrCacheVolumesDisabled
}
if m.config.CacheDir != "" {
return m.createHostBasedCacheVolume(volume.Destination)
}
_, err := m.createContainerBasedCacheVolume(volume.Destination)
return err
}
func (m *manager) createHostBasedCacheVolume(containerPath string) error {
var err error
containerPath, err = m.getAbsoluteContainerPath(containerPath)
if err != nil {
return err
}
err = m.managedVolumes.Add(containerPath)
if err != nil {
return err
}
hostPath := m.parser.Path().Join(m.config.CacheDir, m.config.UniqueName, hashContainerPath(containerPath))
m.appendVolumeBind(&parser.Volume{
Source: hostPath,
Destination: containerPath,
})
return nil
}
func (m *manager) createContainerBasedCacheVolume(containerPath string) (string, error) {
containerPath, err := m.getAbsoluteContainerPath(containerPath)
if err != nil {
return "", err
}
err = m.managedVolumes.Add(containerPath)
if err != nil {
return "", err
}
containerName := fmt.Sprintf("%s-cache-%s", m.config.UniqueName, hashContainerPath(containerPath))
containerID := m.cacheContainersManager.FindOrCleanExisting(containerName, containerPath)
// create new cache container for that project
if containerID == "" {
var err error
containerID, err = m.cacheContainersManager.Create(containerName, containerPath)
if err != nil {
return "", err
}
}
m.logger.Debugln(fmt.Sprintf("Using container %q as cache %q...", containerID, containerPath))
m.cacheContainerIDs = append(m.cacheContainerIDs, containerID)
return containerID, nil
}
func (m *manager) CreateTemporary(containerPath string) error {
id, err := m.createContainerBasedCacheVolume(containerPath)
if err != nil {
return err
}
m.tmpContainerIDs = append(m.tmpContainerIDs, id)
return nil
}
func (m *manager) Binds() []string {
return m.volumeBindings
}
func (m *manager) ContainerIDs() []string {
return m.cacheContainerIDs
}
func (m *manager) Cleanup(ctx context.Context) chan bool {
return m.cacheContainersManager.Cleanup(ctx, m.tmpContainerIDs)
}
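// Illustrative usage sketch (not part of the original source; the logger,
// ccManager, and paths are hypothetical, and error handling is elided):
//
//	m := NewManager(logger, parser.NewLinuxParser(), ccManager, ManagerConfig{
//		CacheDir:          "/cache",
//		BaseContainerPath: "/builds/group/project",
//		UniqueName:        "runner-abc-project-1",
//	})
//	_ = m.Create("/host/src:/src:ro") // two-part spec: host-mounted bind
//	_ = m.Create("/cache")            // one-part spec: cache volume
//	binds := m.Binds()                // passed to the container HostConfig
//	<-m.Cleanup(context.Background()) // remove temporary cache containers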
package parser
import (
"regexp"
"gitlab.com/gitlab-org/gitlab-runner/helpers/path"
)
type baseParser struct {
path path.Path
}
// The way matchesToVolumeSpecParts parses the volume mount specification and
// assigns its parts was inspired by how Docker Engine's `windowsParser` is
// built. The original sources can be found at:
//
// https://github.com/docker/engine/blob/a79fabbfe84117696a19671f4aa88b82d0f64fc1/volume/mounts/windows_parser.go
//
// The original source is licensed under the Apache License 2.0, and its
// copyright belongs to Docker, Inc.
func (p *baseParser) matchesToVolumeSpecParts(spec string, specExp *regexp.Regexp) (map[string]string, error) {
match := specExp.FindStringSubmatch(spec)
if len(match) == 0 {
return nil, NewInvalidVolumeSpecErr(spec)
}
matchgroups := make(map[string]string)
for i, name := range specExp.SubexpNames() {
matchgroups[name] = match[i]
}
parts := map[string]string{
"source": "",
"destination": "",
"mode": "",
"bindPropagation": "",
}
for group := range parts {
content, ok := matchgroups[group]
if ok {
parts[group] = content
}
}
return parts, nil
}
func (p *baseParser) Path() path.Path {
return p.path
}
package parser
import (
"fmt"
)
type InvalidVolumeSpecError struct {
spec string
}
func (e *InvalidVolumeSpecError) Error() string {
return fmt.Sprintf("invalid volume specification: %q", e.spec)
}
func NewInvalidVolumeSpecErr(spec string) error {
return &InvalidVolumeSpecError{
spec: spec,
}
}
package parser
import (
"regexp"
"gitlab.com/gitlab-org/gitlab-runner/helpers/path"
)
const (
linuxDir = `/(?:[^\\/:*?"<>|\r\n ]+/?)*`
linuxVolumeName = `[^\\/:*?"<>|\r\n]+`
linuxSource = `((?P<source>((` + linuxDir + `)|(` + linuxVolumeName + `))):)?`
linuxDestination = `(?P<destination>(?:` + linuxDir + `))`
linuxMode = `(:(?P<mode>(?i)ro|rw|z))?`
linuxBindPropagation = `((:|,)(?P<bindPropagation>(?i)shared|slave|private|rshared|rslave|rprivate))?`
)
type linuxParser struct {
baseParser
}
func NewLinuxParser() Parser {
return &linuxParser{
baseParser: baseParser{
path: path.NewUnixPath(),
},
}
}
func (p *linuxParser) ParseVolume(spec string) (*Volume, error) {
specExp := regexp.MustCompile(`^` + linuxSource + linuxDestination + linuxMode + linuxBindPropagation + `$`)
parts, err := p.matchesToVolumeSpecParts(spec, specExp)
if err != nil {
return nil, err
}
return newVolume(parts["source"], parts["destination"], parts["mode"], parts["bindPropagation"]), nil
}
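// For example (illustrative, not part of the original source), a typical
// Linux bind-mount specification is split into its named groups:
//
//	p := NewLinuxParser()
//	v, _ := p.ParseVolume("/host/dir:/container/dir:ro,rshared")
//	// v.Source == "/host/dir"
//	// v.Destination == "/container/dir"
//	// v.Mode == "ro"
//	// v.BindPropagation == "rshared"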
package parser
import (
"strings"
)
type Volume struct {
Source string
Destination string
Mode string
BindPropagation string
}
func newVolume(source string, destination string, mode string, bindPropagation string) *Volume {
return &Volume{
Source: source,
Destination: destination,
Mode: mode,
BindPropagation: bindPropagation,
}
}
func (v *Volume) Definition() string {
parts := make([]string, 0)
builder := strings.Builder{}
if v.Source != "" {
parts = append(parts, v.Source)
}
parts = append(parts, v.Destination)
if v.Mode != "" {
parts = append(parts, v.Mode)
}
builder.WriteString(strings.Join(parts, ":"))
if v.BindPropagation != "" {
separator := ":"
if v.Mode != "" {
separator = ","
}
builder.WriteString(separator)
builder.WriteString(v.BindPropagation)
}
return builder.String()
}
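// For example (illustrative, not part of the original source), the bind
// propagation is rendered after the mode with a comma, matching Docker's
// own syntax, and with a colon when no mode is present:
//
//	newVolume("/host", "/dest", "ro", "rshared").Definition() // "/host:/dest:ro,rshared"
//	newVolume("", "/dest", "", "rshared").Definition()        // "/dest:rshared"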
func (v *Volume) Len() int {
count := 0
if v.Source != "" {
count++
}
if v.Destination != "" {
count++
}
return count
}
package volumes
import (
"crypto/md5"
"errors"
"fmt"
"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser"
)
var (
errDirectoryNotAbsolute = errors.New("build directory needs to be an absolute path")
errDirectoryIsRootPath = errors.New("build directory needs to be a non-root path")
)
type debugLogger interface {
Debugln(args ...interface{})
}
func IsHostMountedVolume(volumeParser parser.Parser, dir string, volumes ...string) (bool, error) {
if !volumeParser.Path().IsAbs(dir) {
return false, errDirectoryNotAbsolute
}
if volumeParser.Path().IsRoot(dir) {
return false, errDirectoryIsRootPath
}
for _, volume := range volumes {
parsedVolume, err := volumeParser.ParseVolume(volume)
if err != nil {
return false, err
}
if parsedVolume.Len() < 2 {
continue
}
if volumeParser.Path().Contains(parsedVolume.Destination, dir) {
return true, nil
}
}
return false, nil
}
func hashContainerPath(containerPath string) string {
return fmt.Sprintf("%x", md5.Sum([]byte(containerPath)))
}
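// For example (illustrative, not part of the original source), the digest
// gives every container path a fixed-length, filesystem-safe name:
//
//	hashContainerPath("/builds") // a 32-character hex MD5 digest, used in
//	// names such as "<UniqueName>-cache-<digest>"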
type ErrVolumeAlreadyDefined struct {
containerPath string
}
func (e *ErrVolumeAlreadyDefined) Error() string {
return fmt.Sprintf("volume for container path %q is already defined", e.containerPath)
}
func NewErrVolumeAlreadyDefined(containerPath string) *ErrVolumeAlreadyDefined {
return &ErrVolumeAlreadyDefined{
containerPath: containerPath,
}
}
type pathList map[string]bool
func (m pathList) Add(containerPath string) error {
if m[containerPath] {
return NewErrVolumeAlreadyDefined(containerPath)
}
m[containerPath] = true
return nil
}
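// For example (illustrative, not part of the original source), adding the
// same container path twice yields the typed error defined above:
//
//	paths := pathList{}
//	_ = paths.Add("/builds")    // nil
//	err := paths.Add("/builds") // *ErrVolumeAlreadyDefined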
package machine
import (
"github.com/prometheus/client_golang/prometheus"
)
func (m *machineProvider) collectDetails() (data machinesData) {
m.lock.RLock()
defer m.lock.RUnlock()
for _, details := range m.details {
if !details.isDead() {
data.Add(details)
}
}
return
}
// Describe implements prometheus.Collector.
func (m *machineProvider) Describe(ch chan<- *prometheus.Desc) {
m.totalActions.Describe(ch)
m.creationHistogram.Describe(ch)
ch <- m.currentStatesDesc
}
// Collect implements prometheus.Collector.
func (m *machineProvider) Collect(ch chan<- prometheus.Metric) {
data := m.collectDetails()
ch <- prometheus.MustNewConstMetric(m.currentStatesDesc, prometheus.GaugeValue, float64(data.Acquired), "acquired")
ch <- prometheus.MustNewConstMetric(m.currentStatesDesc, prometheus.GaugeValue, float64(data.Creating), "creating")
ch <- prometheus.MustNewConstMetric(m.currentStatesDesc, prometheus.GaugeValue, float64(data.Idle), "idle")
ch <- prometheus.MustNewConstMetric(m.currentStatesDesc, prometheus.GaugeValue, float64(data.Used), "used")
ch <- prometheus.MustNewConstMetric(m.currentStatesDesc, prometheus.GaugeValue, float64(data.Removing), "removing")
ch <- prometheus.MustNewConstMetric(m.currentStatesDesc, prometheus.GaugeValue, float64(data.StuckOnRemoving), "stuck-on-removing")
m.totalActions.Collect(ch)
m.creationHistogram.Collect(ch)
}
package machine
import (
"fmt"
"os"
"time"
"github.com/sirupsen/logrus"
)
type machinesData struct {
Runner string
Acquired int
Creating int
Idle int
Used int
Removing int
StuckOnRemoving int
}
func (d *machinesData) Available() int {
return d.Acquired + d.Creating + d.Idle
}
func (d *machinesData) Total() int {
return d.Acquired + d.Creating + d.Idle + d.Used + d.Removing + d.StuckOnRemoving
}
func (d *machinesData) Add(details *machineDetails) {
switch details.State {
case machineStateIdle:
d.Idle++
case machineStateCreating:
d.Creating++
case machineStateAcquired:
d.Acquired++
case machineStateUsed:
d.Used++
case machineStateRemoving:
if details.isStuckOnRemove() {
d.StuckOnRemoving++
} else {
d.Removing++
}
}
}
func (d *machinesData) Fields() logrus.Fields {
return logrus.Fields{
"runner": d.Runner,
"used": d.Used,
"idle": d.Idle,
"total": d.Total(),
"creating": d.Creating,
"removing": d.Removing,
}
}
func (d *machinesData) writeDebugInformation() {
if logrus.GetLevel() < logrus.DebugLevel {
return
}
file, err := os.OpenFile("machines.csv", os.O_RDWR|os.O_APPEND, 0600)
if err != nil {
return
}
defer file.Close()
fmt.Fprintln(file,
"time", time.Now(),
"runner", d.Runner,
"acquired", d.Acquired,
"creating", d.Creating,
"idle", d.Idle,
"used", d.Used,
"removing", d.Removing)
}
package machine
import (
"fmt"
"io/ioutil"
"time"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
)
type machineDetails struct {
Name string
Created time.Time `yaml:"-"`
Used time.Time `yaml:"-"`
UsedCount int
State machineState
Reason string
RetryCount int
LastSeen time.Time
}
func (m *machineDetails) isPersistedOnDisk() bool {
// Machines in the creating phase might or might not be persisted on disk
// yet. This is due to the asynchronous nature of the machine creation
// process, where `docker-machine create` is what creates the relevant
// files, and it is executed with an undefined delay.
return m.State != machineStateCreating
}
func (m *machineDetails) isUsed() bool {
return m.State != machineStateIdle
}
func (m *machineDetails) isStuckOnRemove() bool {
return m.State == machineStateRemoving && m.RetryCount >= removeRetryTries
}
func (m *machineDetails) isDead() bool {
return m.State == machineStateIdle &&
time.Since(m.LastSeen) > machineDeadInterval
}
func (m *machineDetails) canBeUsed() bool {
return m.State == machineStateAcquired
}
func (m *machineDetails) match(machineFilter string) bool {
var query string
if n, _ := fmt.Sscanf(m.Name, machineFilter, &query); n != 1 {
return false
}
return true
}
func (m *machineDetails) writeDebugInformation() {
if logrus.GetLevel() < logrus.DebugLevel {
return
}
var details struct {
Details machineDetails
Time string
CreatedAgo time.Duration
}
details.Details = *m
details.Time = time.Now().String()
details.CreatedAgo = time.Since(m.Created)
data := helpers.ToYAML(&details)
ioutil.WriteFile("machines/"+details.Details.Name+".yml", []byte(data), 0600)
}
func (m *machineDetails) logger() *logrus.Entry {
return logrus.WithFields(logrus.Fields{
"name": m.Name,
"lifetime": time.Since(m.Created),
"used": time.Since(m.Used),
"usedCount": m.UsedCount,
"reason": m.Reason,
})
}
type machinesDetails map[string]*machineDetails
package machine
import (
"errors"
"time"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
_ "gitlab.com/gitlab-org/gitlab-runner/executors/docker" // Force to load docker executor
"gitlab.com/gitlab-org/gitlab-runner/referees"
)
const (
DockerMachineExecutorStageUseMachine common.ExecutorStage = "docker_machine_use_machine"
DockerMachineExecutorStageReleaseMachine common.ExecutorStage = "docker_machine_release_machine"
)
type machineExecutor struct {
provider *machineProvider
executor common.Executor
build *common.Build
data common.ExecutorData
config common.RunnerConfig
currentStage common.ExecutorStage
}
func (e *machineExecutor) log() (log *logrus.Entry) {
log = e.build.Log()
details, _ := e.build.ExecutorData.(*machineDetails)
if details == nil {
details, _ = e.data.(*machineDetails)
}
if details != nil {
log = log.WithFields(logrus.Fields{
"name": details.Name,
"usedcount": details.UsedCount,
"created": details.Created,
"now": time.Now(),
})
}
if e.config.Docker != nil {
log = log.WithField("docker", e.config.Docker.Host)
}
return
}
func (e *machineExecutor) Shell() *common.ShellScriptInfo {
if e.executor == nil {
return nil
}
return e.executor.Shell()
}
func (e *machineExecutor) Prepare(options common.ExecutorPrepareOptions) (err error) {
e.build = options.Build
if options.Config.Docker == nil {
options.Config.Docker = &common.DockerConfig{}
}
// Use the machine
e.SetCurrentStage(DockerMachineExecutorStageUseMachine)
e.config, e.data, err = e.provider.Use(options.Config, options.Build.ExecutorData)
if err != nil {
return err
}
options.Config.Docker.DockerCredentials = e.config.Docker.DockerCredentials
// TODO: Currently the docker-machine doesn't support multiple builds
e.build.ProjectRunnerID = 0
if details, _ := options.Build.ExecutorData.(*machineDetails); details != nil {
options.Build.Hostname = details.Name
} else if details, _ := e.data.(*machineDetails); details != nil {
options.Build.Hostname = details.Name
}
e.log().Infoln("Starting docker-machine build...")
// Create original executor
e.executor = e.provider.provider.Create()
if e.executor == nil {
return errors.New("failed to create an executor")
}
return e.executor.Prepare(options)
}
func (e *machineExecutor) Run(cmd common.ExecutorCommand) error {
if e.executor == nil {
return errors.New("missing executor")
}
return e.executor.Run(cmd)
}
func (e *machineExecutor) Finish(err error) {
if e.executor != nil {
e.executor.Finish(err)
}
e.log().Infoln("Finished docker-machine build:", err)
}
func (e *machineExecutor) Cleanup() {
// Clean up the executor if one was created
if e.executor != nil {
e.executor.Cleanup()
}
// Release allocated machine
if e.data != nil {
e.SetCurrentStage(DockerMachineExecutorStageReleaseMachine)
e.provider.Release(&e.config, e.data)
e.data = nil
}
}
func (e *machineExecutor) GetCurrentStage() common.ExecutorStage {
if e.executor == nil {
return common.ExecutorStage("")
}
return e.executor.GetCurrentStage()
}
func (e *machineExecutor) SetCurrentStage(stage common.ExecutorStage) {
if e.executor == nil {
e.currentStage = stage
return
}
e.executor.SetCurrentStage(stage)
}
func (e *machineExecutor) GetMetricsSelector() string {
refereed, ok := e.executor.(referees.MetricsExecutor)
if !ok {
return ""
}
return refereed.GetMetricsSelector()
}
func init() {
common.RegisterExecutor("docker+machine", newMachineProvider("docker+machine", "docker"))
common.RegisterExecutor("docker-ssh+machine", newMachineProvider("docker-ssh+machine", "docker-ssh"))
}
package machine
import (
"crypto/rand"
"fmt"
"strings"
"time"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/dns"
)
func machineFormat(runner string, template string) string {
if runner != "" {
return "runner-" + strings.ToLower(runner) + "-" + template
}
return template
}
func machineFilter(config *common.RunnerConfig) string {
return machineFormat(dns.MakeRFC1123Compatible(config.ShortDescription()), config.Machine.MachineName)
}
func matchesMachineFilter(name, filter string) bool {
var query string
if n, _ := fmt.Sscanf(name, filter, &query); n == 1 {
return true
}
return false
}
func filterMachineList(machines []string, filter string) (newMachines []string) {
newMachines = make([]string, 0, len(machines))
for _, machine := range machines {
if matchesMachineFilter(machine, filter) {
newMachines = append(newMachines, machine)
}
}
return
}
func newMachineName(config *common.RunnerConfig) string {
r := make([]byte, 4)
rand.Read(r)
t := time.Now().Unix()
return fmt.Sprintf(machineFilter(config), fmt.Sprintf("%d-%x", t, r))
}
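// For example (illustrative, not part of the original source; the token and
// MachineName are made up), with ShortDescription() == "abcdef12" and
// MachineName == "auto-scale-%s":
//
//	machineFilter(config)  // "runner-abcdef12-auto-scale-%s"
//	newMachineName(config) // e.g. "runner-abcdef12-auto-scale-1577836800-9f86d081"
//
// matchesMachineFilter later recognizes such names by running fmt.Sscanf
// with the same format string.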
package machine
import (
"errors"
"fmt"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
docker_helpers "gitlab.com/gitlab-org/gitlab-runner/helpers/docker"
)
type machineProvider struct {
name string
machine docker_helpers.Machine
details machinesDetails
lock sync.RWMutex
acquireLock sync.Mutex
// provider stores the real executor provider that is used to run the builds
provider common.ExecutorProvider
stuckRemoveLock sync.Mutex
// metrics
totalActions *prometheus.CounterVec
currentStatesDesc *prometheus.Desc
creationHistogram prometheus.Histogram
}
func (m *machineProvider) machineDetails(name string, acquire bool) *machineDetails {
m.lock.Lock()
defer m.lock.Unlock()
details, ok := m.details[name]
if !ok {
details = &machineDetails{
Name: name,
Created: time.Now(),
Used: time.Now(),
LastSeen: time.Now(),
UsedCount: 1, // mark any machine that we discover as already used
State: machineStateIdle,
}
m.details[name] = details
}
if acquire {
if details.isUsed() {
return nil
}
details.State = machineStateAcquired
}
return details
}
func (m *machineProvider) create(config *common.RunnerConfig, state machineState) (details *machineDetails, errCh chan error) {
name := newMachineName(config)
details = m.machineDetails(name, true)
details.State = machineStateCreating
details.UsedCount = 0
details.RetryCount = 0
details.LastSeen = time.Now()
errCh = make(chan error, 1)
// Create machine asynchronously
go func() {
started := time.Now()
err := m.machine.Create(config.Machine.MachineDriver, details.Name, config.Machine.MachineOptions...)
for i := 0; i < 3 && err != nil; i++ {
details.RetryCount++
logrus.WithField("name", details.Name).
WithError(err).
Warningln("Machine creation failed, trying to provision")
time.Sleep(provisionRetryInterval)
err = m.machine.Provision(details.Name)
}
if err != nil {
logrus.WithField("name", details.Name).
WithField("time", time.Since(started)).
WithError(err).
Errorln("Machine creation failed")
m.remove(details.Name, "Failed to create")
} else {
details.State = state
details.Used = time.Now()
creationTime := time.Since(started)
logrus.WithField("duration", creationTime).
WithField("name", details.Name).
WithField("now", time.Now()).
WithField("retries", details.RetryCount).
Infoln("Machine created")
m.totalActions.WithLabelValues("created").Inc()
m.creationHistogram.Observe(creationTime.Seconds())
}
errCh <- err
}()
return
}
func (m *machineProvider) findFreeMachine(skipCache bool, machines ...string) (details *machineDetails) {
// Enumerate all machines in reverse order, to always take the newest machines first
for idx := range machines {
name := machines[len(machines)-idx-1]
details := m.machineDetails(name, true)
if details == nil {
continue
}
// Check if node is running
canConnect := m.machine.CanConnect(name, skipCache)
if !canConnect {
m.remove(name, "machine is unavailable")
continue
}
return details
}
return nil
}
func (m *machineProvider) useMachine(config *common.RunnerConfig) (details *machineDetails, err error) {
machines, err := m.loadMachines(config)
if err != nil {
return
}
details = m.findFreeMachine(true, machines...)
if details == nil {
var errCh chan error
details, errCh = m.create(config, machineStateAcquired)
err = <-errCh
}
return
}
func (m *machineProvider) retryUseMachine(config *common.RunnerConfig) (details *machineDetails, err error) {
// Try to find a machine
for i := 0; i < 3; i++ {
details, err = m.useMachine(config)
if err == nil {
break
}
time.Sleep(provisionRetryInterval)
}
return
}
func (m *machineProvider) removeMachine(details *machineDetails) (err error) {
if !m.machine.Exist(details.Name) {
details.logger().
Warningln("Skipping machine removal, because it doesn't exist")
return nil
}
// This lock limits the removal of stuck machines to one machine at a time
if details.isStuckOnRemove() {
m.stuckRemoveLock.Lock()
defer m.stuckRemoveLock.Unlock()
}
details.logger().
Warningln("Stopping machine")
err = m.machine.Stop(details.Name, machineStopCommandTimeout)
if err != nil {
details.logger().
WithError(err).
Warningln("Error while stopping machine")
}
details.logger().
Warningln("Removing machine")
err = m.machine.Remove(details.Name)
if err != nil {
details.RetryCount++
time.Sleep(removeRetryInterval)
return err
}
return nil
}
func (m *machineProvider) finalizeRemoval(details *machineDetails) {
for {
err := m.removeMachine(details)
if err == nil {
break
}
}
m.lock.Lock()
defer m.lock.Unlock()
delete(m.details, details.Name)
details.logger().
WithField("now", time.Now()).
WithField("retries", details.RetryCount).
Infoln("Machine removed")
m.totalActions.WithLabelValues("removed").Inc()
}
func (m *machineProvider) remove(machineName string, reason ...interface{}) error {
m.lock.Lock()
defer m.lock.Unlock()
details := m.details[machineName]
if details == nil {
return errors.New("machine not found")
}
details.Reason = fmt.Sprint(reason...)
details.State = machineStateRemoving
details.RetryCount = 0
details.logger().
WithField("now", time.Now()).
Warningln("Requesting machine removal")
details.Used = time.Now()
details.writeDebugInformation()
go m.finalizeRemoval(details)
return nil
}
func (m *machineProvider) updateMachine(config *common.RunnerConfig, data *machinesData, details *machineDetails) error {
if details.State != machineStateIdle {
return nil
}
if config.Machine.MaxBuilds > 0 && details.UsedCount >= config.Machine.MaxBuilds {
// Limit number of builds
return errors.New("too many builds")
}
if data.Total() >= config.Limit && config.Limit > 0 {
// Limit maximum number of machines
return errors.New("too many machines")
}
if time.Since(details.Used) > time.Second*time.Duration(config.Machine.GetIdleTime()) {
if data.Idle >= config.Machine.GetIdleCount() {
// Remove machines that are over the idle time
return errors.New("too many idle machines")
}
}
return nil
}
func (m *machineProvider) updateMachines(machines []string, config *common.RunnerConfig) (data machinesData, validMachines []string) {
data.Runner = config.ShortDescription()
validMachines = make([]string, 0, len(machines))
for _, name := range machines {
details := m.machineDetails(name, false)
details.LastSeen = time.Now()
err := m.updateMachine(config, &data, details)
if err == nil {
validMachines = append(validMachines, name)
} else {
m.remove(details.Name, err)
}
data.Add(details)
}
return
}
func (m *machineProvider) createMachines(config *common.RunnerConfig, data *machinesData) {
// Create new machines and mark them as Idle
for {
if data.Available() >= config.Machine.GetIdleCount() {
// Limit maximum number of idle machines
break
}
if data.Total() >= config.Limit && config.Limit > 0 {
// Limit maximum number of machines
break
}
m.create(config, machineStateIdle)
data.Creating++
}
}
// intermediateMachineList returns a list of machines that might not yet be
// persisted on disk. These are the machines between being virtually created
// and `docker-machine create` being executed. We populate this data set to
// overcome race conditions caused by the incomplete list of machines
// returned by `docker-machine ls -q`.
func (m *machineProvider) intermediateMachineList(excludedMachines []string) []string {
var excludedSet map[string]struct{}
var intermediateMachines []string
m.lock.Lock()
defer m.lock.Unlock()
for _, details := range m.details {
if details.isPersistedOnDisk() {
continue
}
// lazily initialize the set, as most of the time we don't create new machines
if excludedSet == nil {
excludedSet = make(map[string]struct{}, len(excludedMachines))
for _, excludedMachine := range excludedMachines {
excludedSet[excludedMachine] = struct{}{}
}
}
if _, ok := excludedSet[details.Name]; ok {
continue
}
intermediateMachines = append(intermediateMachines, details.Name)
}
return intermediateMachines
}
func (m *machineProvider) loadMachines(config *common.RunnerConfig) (machines []string, err error) {
machines, err = m.machine.List()
if err != nil {
return nil, err
}
machines = append(machines, m.intermediateMachineList(machines)...)
machines = filterMachineList(machines, machineFilter(config))
return
}
func (m *machineProvider) Acquire(config *common.RunnerConfig) (data common.ExecutorData, err error) {
if config.Machine == nil || config.Machine.MachineName == "" {
err = errors.New("missing Machine options")
return
}
// Lock updating machines, because two Acquires can be run at the same time
m.acquireLock.Lock()
defer m.acquireLock.Unlock()
machines, err := m.loadMachines(config)
if err != nil {
return
}
// Update the list of currently configured machines
machinesData, validMachines := m.updateMachines(machines, config)
// Pre-create machines
m.createMachines(config, &machinesData)
logrus.WithFields(machinesData.Fields()).
WithField("runner", config.ShortDescription()).
WithField("minIdleCount", config.Machine.GetIdleCount()).
WithField("maxMachines", config.Limit).
WithField("time", time.Now()).
Debugln("Docker Machine Details")
machinesData.writeDebugInformation()
// Try to find a free machine
details := m.findFreeMachine(false, validMachines...)
if details != nil {
data = details
return
}
// If IdleCount is configured but no machine is idle, we cannot process a build
if config.Machine.GetIdleCount() != 0 && machinesData.Idle == 0 {
err = errors.New("no free machines that can process builds")
}
return
}
func (m *machineProvider) Use(config *common.RunnerConfig, data common.ExecutorData) (newConfig common.RunnerConfig, newData common.ExecutorData, err error) {
// Find a new machine
details, _ := data.(*machineDetails)
if details == nil || !details.canBeUsed() || !m.machine.CanConnect(details.Name, true) {
details, err = m.retryUseMachine(config)
if err != nil {
return
}
// Return details only if this is a new instance
newData = details
}
// Get machine credentials
dc, err := m.machine.Credentials(details.Name)
if err != nil {
if newData != nil {
m.Release(config, newData)
}
newData = nil
return
}
// Create a shallow copy of the config and store the docker credentials in it
newConfig = *config
newConfig.Docker = &common.DockerConfig{}
if config.Docker != nil {
*newConfig.Docker = *config.Docker
}
newConfig.Docker.DockerCredentials = dc
// Mark machine as used
details.State = machineStateUsed
details.Used = time.Now()
details.UsedCount++
m.totalActions.WithLabelValues("used").Inc()
return
}
func (m *machineProvider) Release(config *common.RunnerConfig, data common.ExecutorData) {
// Release machine
details, ok := data.(*machineDetails)
if ok {
// Update the last-used time when the machine is in the Used state
if details.State == machineStateUsed {
details.Used = time.Now()
}
// Remove the machine if it has reached MaxBuilds
if config != nil && config.Machine != nil &&
config.Machine.MaxBuilds > 0 && details.UsedCount >= config.Machine.MaxBuilds {
err := m.remove(details.Name, "Too many builds")
if err == nil {
return
}
}
details.State = machineStateIdle
}
}
func (m *machineProvider) CanCreate() bool {
return m.provider.CanCreate()
}
func (m *machineProvider) GetFeatures(features *common.FeaturesInfo) error {
return m.provider.GetFeatures(features)
}
func (m *machineProvider) GetDefaultShell() string {
return m.provider.GetDefaultShell()
}
func (m *machineProvider) Create() common.Executor {
return &machineExecutor{
provider: m,
}
}
func newMachineProvider(name, executor string) *machineProvider {
provider := common.GetExecutor(executor)
if provider == nil {
logrus.Panicln("Missing", executor)
}
return &machineProvider{
name: name,
details: make(machinesDetails),
machine: docker_helpers.NewMachineCommand(),
provider: provider,
totalActions: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "gitlab_runner_autoscaling_actions_total",
Help: "The total number of actions executed by the provider.",
ConstLabels: prometheus.Labels{
"executor": name,
},
},
[]string{"action"},
),
currentStatesDesc: prometheus.NewDesc(
"gitlab_runner_autoscaling_machine_states",
"The current number of machines per state in this provider.",
[]string{"state"},
prometheus.Labels{
"executor": name,
},
),
creationHistogram: prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: "gitlab_runner_autoscaling_machine_creation_duration_seconds",
Help: "Histogram of machine creation time.",
Buckets: prometheus.ExponentialBuckets(30, 1.25, 10),
ConstLabels: prometheus.Labels{
"executor": name,
},
},
),
}
}
package machine
type machineState int
const (
machineStateIdle machineState = iota
machineStateAcquired
machineStateCreating
machineStateUsed
machineStateRemoving
)
func (t machineState) String() string {
switch t {
case machineStateIdle:
return "Idle"
case machineStateAcquired:
return "Acquired"
case machineStateCreating:
return "Creating"
case machineStateUsed:
return "Used"
case machineStateRemoving:
return "Removing"
default:
return "Unknown"
}
}
func (t machineState) MarshalText() ([]byte, error) {
return []byte(t.String()), nil
}
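// Note (added for clarity, not part of the original source): MarshalText
// makes encoders that honor encoding.TextMarshaler (encoding/json, for
// instance) render the state by name rather than by number:
//
//	machineStateUsed.String() // "Used"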
package machine
import (
"errors"
"gitlab.com/gitlab-org/gitlab-runner/session/terminal"
terminalsession "gitlab.com/gitlab-org/gitlab-runner/session/terminal"
)
func (e *machineExecutor) Connect() (terminalsession.Conn, error) {
if term, ok := e.executor.(terminal.InteractiveTerminal); ok {
return term.Connect()
}
return nil, errors.New("executor does not have terminal")
}
package docker
import (
"context"
"errors"
"fmt"
"net/http"
"time"
"github.com/docker/docker/api/types"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/docker"
terminalsession "gitlab.com/gitlab-org/gitlab-runner/session/terminal"
"gitlab.com/gitlab-org/gitlab-terminal"
)
// buildContainerTerminalTimeout is the error used when the build container
// is not yet running, a terminal request is waiting for it to start, and a
// certain amount of time has been exceeded.
type buildContainerTerminalTimeout struct {
}
func (buildContainerTerminalTimeout) Error() string {
return "timeout for waiting for build container"
}
func (s *commandExecutor) watchForRunningBuildContainer(deadline time.Time) (string, error) {
for time.Since(deadline) < 0 {
buildContainer := s.getBuildContainer()
if buildContainer == nil {
time.Sleep(time.Second)
continue
}
containerID := buildContainer.ID
container, err := s.client.ContainerInspect(s.Context, containerID)
if err != nil {
return "", err
}
if container.State.Running {
return containerID, nil
}
}
return "", buildContainerTerminalTimeout{}
}
func (s *commandExecutor) Connect() (terminalsession.Conn, error) {
// Waiting for the container to start is not ideal, as it might hide a real
// issue the user is not aware of. Ideally, the runner should inform the
// user in an interactive way that the container has not started yet and
// should wait/try again. This isn't an easy task, because we can't access
// the WebSocket here; that is the responsibility of the `gitlab-terminal`
// package. There are plans to improve this; please take a look at
// https://gitlab.com/gitlab-org/gitlab-ce/issues/50384#proposal and
// https://gitlab.com/gitlab-org/gitlab-terminal/issues/4
containerID, err := s.watchForRunningBuildContainer(time.Now().Add(waitForContainerTimeout))
if err != nil {
return nil, err
}
ctx, cancelFn := context.WithCancel(s.Context)
return terminalConn{
logger: &s.BuildLogger,
ctx: ctx,
cancelFn: cancelFn,
executor: s,
client: s.client,
containerID: containerID,
shell: s.BuildShell.DockerCommand,
}, nil
}
type terminalConn struct {
logger *common.BuildLogger
ctx context.Context
cancelFn func()
executor *commandExecutor
client docker_helpers.Client
containerID string
shell []string
}
func (t terminalConn) Start(w http.ResponseWriter, r *http.Request, timeoutCh, disconnectCh chan error) {
execConfig := types.ExecConfig{
Tty: true,
AttachStdin: true,
AttachStderr: true,
AttachStdout: true,
Cmd: t.shell,
}
exec, err := t.client.ContainerExecCreate(t.ctx, t.containerID, execConfig)
if err != nil {
t.logger.Errorln("Failed to create exec container for terminal:", err)
http.Error(w, "failed to create exec for build container", http.StatusInternalServerError)
return
}
execStartCfg := types.ExecStartCheck{Tty: true}
resp, err := t.client.ContainerExecAttach(t.ctx, exec.ID, execStartCfg)
if err != nil {
t.logger.Errorln("Failed to exec attach to container for terminal:", err)
http.Error(w, "failed to attach tty to build container", http.StatusInternalServerError)
return
}
dockerTTY := newDockerTTY(&resp)
proxy := terminal.NewStreamProxy(1) // one stopper: terminal exit handler
// wait for container to exit
go func() {
t.logger.Debugln("Waiting for the terminal container:", t.containerID)
err := t.executor.waitForContainer(t.ctx, t.containerID)
t.logger.Debugln("The terminal container:", t.containerID, "finished with:", err)
stopCh := proxy.GetStopCh()
if err != nil {
stopCh <- fmt.Errorf("build container exited with %w", err)
} else {
stopCh <- errors.New("build container exited")
}
}()
terminalsession.ProxyTerminal(
timeoutCh,
disconnectCh,
proxy.StopCh,
func() {
terminal.ProxyStream(w, r, dockerTTY, proxy)
},
)
}
func (t terminalConn) Close() error {
if t.cancelFn != nil {
t.cancelFn()
}
return nil
}
package docker
import "github.com/docker/docker/api/types"
func newDockerTTY(hijackedResp *types.HijackedResponse) *dockerTTY {
return &dockerTTY{
hijackedResp: hijackedResp,
}
}
type dockerTTY struct {
hijackedResp *types.HijackedResponse
}
func (d *dockerTTY) Read(p []byte) (int, error) {
return d.hijackedResp.Reader.Read(p)
}
func (d *dockerTTY) Write(p []byte) (int, error) {
return d.hijackedResp.Conn.Write(p)
}
func (d *dockerTTY) Close() error {
d.hijackedResp.Close()
d.hijackedResp.CloseWrite()
return nil
}
package docker
import (
"context"
"github.com/docker/docker/api/types/container"
"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes"
docker_helpers "gitlab.com/gitlab-org/gitlab-runner/helpers/docker"
)
type volumesManagerAdapter struct {
docker_helpers.Client
e *executor
}
func (a *volumesManagerAdapter) LabelContainer(container *container.Config, containerType string, otherLabels ...string) {
container.Labels = a.e.getLabels(containerType, otherLabels...)
}
func (a *volumesManagerAdapter) WaitForContainer(id string) error {
return a.e.waitForContainer(a.e.Context, id)
}
func (a *volumesManagerAdapter) RemoveContainer(ctx context.Context, id string) error {
return a.e.removeContainer(ctx, id)
}
var createVolumesManager = func(e *executor) (volumes.Manager, error) {
adapter := &volumesManagerAdapter{
Client: e.client,
e: e,
}
helperImage, err := e.getPrebuiltImage()
if err != nil {
return nil, err
}
ccManager := volumes.NewCacheContainerManager(
e.Context,
&e.BuildLogger,
adapter,
helperImage,
)
config := volumes.ManagerConfig{
CacheDir: e.Config.Docker.CacheDir,
BaseContainerPath: e.Build.FullProjectDir(),
UniqueName: e.Build.ProjectUniqueName(),
DisableCache: e.Config.Docker.DisableCache,
}
volumesManager := volumes.NewManager(&e.BuildLogger, e.volumeParser, ccManager, config)
return volumesManager, nil
}
func (e *executor) createVolumesManager() error {
vm, err := createVolumesManager(e)
if err != nil {
return err
}
e.volumesManager = vm
return nil
}
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file was modified by James Munnelly (https://gitlab.com/u/munnerz)
*/
package kubernetes
import (
"fmt"
"io"
"net/url"
"github.com/sirupsen/logrus"
api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
)
// RemoteExecutor defines the interface accepted by the Exec command - provided for test stubbing
type RemoteExecutor interface {
Execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error
}
// DefaultRemoteExecutor is the standard implementation of remote command execution
type DefaultRemoteExecutor struct{}
func (*DefaultRemoteExecutor) Execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
exec, err := remotecommand.NewSPDYExecutor(config, method, url)
if err != nil {
return err
}
return exec.Stream(remotecommand.StreamOptions{
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
Tty: tty,
})
}
// ExecOptions declares the arguments accepted by the Exec command
type ExecOptions struct {
Namespace string
PodName string
ContainerName string
Stdin bool
Command []string
In io.Reader
Out io.Writer
Err io.Writer
Executor RemoteExecutor
Client *kubernetes.Clientset
Config *restclient.Config
}
// Run executes a validated remote execution against a pod.
func (p *ExecOptions) Run() error {
pod, err := p.Client.CoreV1().Pods(p.Namespace).Get(p.PodName, metav1.GetOptions{})
if err != nil {
return err
}
if pod.Status.Phase != api.PodRunning {
return fmt.Errorf("Pod '%s' (on namespace '%s') is not running and cannot execute commands; current phase is '%s'",
p.PodName, p.Namespace, pod.Status.Phase)
}
containerName := p.ContainerName
if len(containerName) == 0 {
logrus.Infof("defaulting container name to '%s'", pod.Spec.Containers[0].Name)
containerName = pod.Spec.Containers[0].Name
}
// TODO: refactor with terminal helpers from the edit utility once that is merged
var stdin io.Reader
if p.Stdin {
stdin = p.In
}
// TODO: consider abstracting into a client invocation or client helper
req := p.Client.CoreV1().RESTClient().Post().
Resource("pods").
Name(pod.Name).
Namespace(pod.Namespace).
SubResource("exec").
Param("container", containerName)
req.VersionedParams(&api.PodExecOptions{
Container: containerName,
Command: p.Command,
Stdin: stdin != nil,
Stdout: p.Out != nil,
Stderr: p.Err != nil,
}, scheme.ParameterCodec)
return p.Executor.Execute("POST", req.URL(), p.Config, stdin, p.Out, p.Err, false)
}
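// Illustrative usage sketch (not part of the original source; the pod and
// container names, kubeClient, and restConfig are hypothetical):
//
//	opts := ExecOptions{
//		Namespace:     "gitlab",
//		PodName:       "runner-pod",
//		ContainerName: "build",
//		Command:       []string{"sh", "-c", "echo hello"},
//		Out:           os.Stdout,
//		Err:           os.Stderr,
//		Executor:      &DefaultRemoteExecutor{},
//		Client:        kubeClient,
//		Config:        restConfig,
//	}
//	err := opts.Run()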
func init() {
runtime.ErrorHandlers = append(runtime.ErrorHandlers, func(err error) {
logrus.WithError(err).Error("K8S stream error")
})
runtime.PanicHandlers = append(runtime.PanicHandlers, func(r interface{}) {
logrus.Errorf("K8S stream panic: %v", r)
})
}
package kubernetes
import (
"encoding/json"
"errors"
"fmt"
"strings"
"sync"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/helpers/container/services"
"golang.org/x/net/context"
api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth" // Register all available authentication methods
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/executors"
"gitlab.com/gitlab-org/gitlab-runner/helpers/container/helperimage"
"gitlab.com/gitlab-org/gitlab-runner/helpers/dns"
"gitlab.com/gitlab-org/gitlab-runner/session/proxy"
)
const (
buildContainerName = "build"
helperContainerName = "helper"
)
var (
executorOptions = executors.ExecutorOptions{
DefaultCustomBuildsDirEnabled: true,
DefaultBuildsDir: "/builds",
DefaultCacheDir: "/cache",
SharedBuildsDir: false,
Shell: common.ShellScriptInfo{
Shell: "bash",
Type: common.NormalShell,
RunnerCommand: "/usr/bin/gitlab-runner-helper",
},
ShowHostname: true,
}
)
type kubernetesOptions struct {
Image common.Image
Services common.Services
}
type executor struct {
executors.AbstractExecutor
kubeClient *kubernetes.Clientset
pod *api.Pod
credentials *api.Secret
options *kubernetesOptions
services []api.Service
configurationOverwrites *overwrites
buildLimits api.ResourceList
serviceLimits api.ResourceList
helperLimits api.ResourceList
buildRequests api.ResourceList
serviceRequests api.ResourceList
helperRequests api.ResourceList
pullPolicy common.KubernetesPullPolicy
helperImageInfo helperimage.Info
featureChecker featureChecker
}
type serviceDeleteResponse struct {
serviceName string
err error
}
type serviceCreateResponse struct {
service *api.Service
err error
}
func (s *executor) setupResources() error {
var err error
// Limit
if s.buildLimits, err = limits(s.Config.Kubernetes.CPULimit, s.Config.Kubernetes.MemoryLimit); err != nil {
return fmt.Errorf("invalid build limits specified: %w", err)
}
if s.serviceLimits, err = limits(s.Config.Kubernetes.ServiceCPULimit, s.Config.Kubernetes.ServiceMemoryLimit); err != nil {
return fmt.Errorf("invalid service limits specified: %w", err)
}
if s.helperLimits, err = limits(s.Config.Kubernetes.HelperCPULimit, s.Config.Kubernetes.HelperMemoryLimit); err != nil {
return fmt.Errorf("invalid helper limits specified: %w", err)
}
// Requests
if s.buildRequests, err = limits(s.Config.Kubernetes.CPURequest, s.Config.Kubernetes.MemoryRequest); err != nil {
return fmt.Errorf("invalid build requests specified: %w", err)
}
if s.serviceRequests, err = limits(s.Config.Kubernetes.ServiceCPURequest, s.Config.Kubernetes.ServiceMemoryRequest); err != nil {
return fmt.Errorf("invalid service requests specified: %w", err)
}
if s.helperRequests, err = limits(s.Config.Kubernetes.HelperCPURequest, s.Config.Kubernetes.HelperMemoryRequest); err != nil {
return fmt.Errorf("invalid helper requests specified: %w", err)
}
return nil
}
func (s *executor) Prepare(options common.ExecutorPrepareOptions) (err error) {
if err = s.AbstractExecutor.Prepare(options); err != nil {
return err
}
if s.BuildShell.PassFile {
return fmt.Errorf("kubernetes doesn't support shells that require script file")
}
if err = s.setupResources(); err != nil {
return err
}
if s.pullPolicy, err = s.Config.Kubernetes.PullPolicy.Get(); err != nil {
return err
}
if err = s.prepareOverwrites(options.Build.Variables); err != nil {
return err
}
s.prepareOptions(options.Build)
if err = s.checkDefaults(); err != nil {
return err
}
if s.kubeClient, err = getKubeClient(options.Config.Kubernetes, s.configurationOverwrites); err != nil {
return fmt.Errorf("error connecting to Kubernetes: %w", err)
}
s.featureChecker = &kubeClientFeatureChecker{kubeClient: s.kubeClient}
s.Println("Using Kubernetes executor with image", s.options.Image.Name, "...")
return nil
}
func (s *executor) Run(cmd common.ExecutorCommand) error {
s.Debugln("Starting Kubernetes command...")
if s.pod == nil {
err := s.setupCredentials()
if err != nil {
return err
}
err = s.setupBuildPod()
if err != nil {
return err
}
}
containerName := buildContainerName
containerCommand := s.BuildShell.DockerCommand
if cmd.Predefined {
containerName = helperContainerName
containerCommand = s.helperImageInfo.Cmd
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s.Debugln(fmt.Sprintf(
"Starting in container %q the command %q with script: %s",
containerName,
containerCommand,
cmd.Script,
))
select {
case err := <-s.runInContainer(ctx, containerName, containerCommand, cmd.Script):
s.Debugln(fmt.Sprintf("Container %q exited with error: %v", containerName, err))
if err != nil && strings.Contains(err.Error(), "command terminated with exit code") {
return &common.BuildError{Inner: err}
}
return err
case <-cmd.Context.Done():
return fmt.Errorf("build aborted")
}
}
func (s *executor) Cleanup() {
if s.pod != nil {
err := s.kubeClient.CoreV1().Pods(s.pod.Namespace).Delete(s.pod.Name, &metav1.DeleteOptions{})
if err != nil {
s.Errorln(fmt.Sprintf("Error cleaning up pod: %s", err.Error()))
}
}
if s.credentials != nil {
err := s.kubeClient.CoreV1().Secrets(s.configurationOverwrites.namespace).Delete(s.credentials.Name, &metav1.DeleteOptions{})
if err != nil {
s.Errorln(fmt.Sprintf("Error cleaning up secrets: %s", err.Error()))
}
}
ch := make(chan serviceDeleteResponse)
var wg sync.WaitGroup
wg.Add(len(s.services))
for _, service := range s.services {
go s.deleteKubernetesService(service.ObjectMeta.Name, ch, &wg)
}
go func() {
wg.Wait()
close(ch)
}()
for res := range ch {
if res.err != nil {
s.Errorln(fmt.Sprintf("Error cleaning up the pod service %q: %v", res.serviceName, res.err))
}
}
closeKubeClient(s.kubeClient)
s.AbstractExecutor.Cleanup()
}
func (s *executor) deleteKubernetesService(serviceName string, ch chan<- serviceDeleteResponse, wg *sync.WaitGroup) {
defer wg.Done()
err := s.kubeClient.CoreV1().Services(s.configurationOverwrites.namespace).Delete(serviceName, &metav1.DeleteOptions{})
ch <- serviceDeleteResponse{serviceName: serviceName, err: err}
}
func (s *executor) buildContainer(name, image string, imageDefinition common.Image, requests, limits api.ResourceList, containerCommand ...string) api.Container {
privileged := false
containerPorts := make([]api.ContainerPort, len(imageDefinition.Ports))
proxyPorts := make([]proxy.Port, len(imageDefinition.Ports))
for i, port := range imageDefinition.Ports {
proxyPorts[i] = proxy.Port{Name: port.Name, Number: port.Number, Protocol: port.Protocol}
containerPorts[i] = api.ContainerPort{ContainerPort: int32(port.Number)}
}
if len(proxyPorts) > 0 {
serviceName := imageDefinition.Alias
if serviceName == "" {
serviceName = name
if name != buildContainerName {
serviceName = fmt.Sprintf("proxy-%s", name)
}
}
s.ProxyPool[serviceName] = s.newProxy(serviceName, proxyPorts)
}
if s.Config.Kubernetes != nil {
privileged = s.Config.Kubernetes.Privileged
}
command, args := s.getCommandAndArgs(imageDefinition, containerCommand...)
return api.Container{
Name: name,
Image: image,
ImagePullPolicy: api.PullPolicy(s.pullPolicy),
Command: command,
Args: args,
Env: buildVariables(s.Build.GetAllVariables().PublicOrInternal()),
Resources: api.ResourceRequirements{
Limits: limits,
Requests: requests,
},
Ports: containerPorts,
VolumeMounts: s.getVolumeMounts(),
SecurityContext: &api.SecurityContext{
Privileged: &privileged,
},
Stdin: true,
}
}
func (s *executor) getCommandAndArgs(imageDefinition common.Image, command ...string) ([]string, []string) {
if len(command) == 0 && len(imageDefinition.Entrypoint) > 0 {
command = imageDefinition.Entrypoint
}
var args []string
if len(imageDefinition.Command) > 0 {
args = imageDefinition.Command
}
return command, args
}
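// For illustration (hypothetical values): given an image definition with an
// entrypoint and a command, and no explicit container command,
// getCommandAndArgs maps the entrypoint to the Kubernetes container Command
// and the command to Args:
//
//	cmd, args := s.getCommandAndArgs(common.Image{
//		Entrypoint: []string{"/bin/sh", "-c"},
//		Command:    []string{"echo hi"},
//	})
//	// cmd  == []string{"/bin/sh", "-c"}
//	// args == []string{"echo hi"}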
func (s *executor) getVolumeMounts() (mounts []api.VolumeMount) {
mounts = append(mounts, api.VolumeMount{
Name: "repo",
MountPath: s.Build.RootDir,
})
for _, mount := range s.Config.Kubernetes.Volumes.HostPaths {
mounts = append(mounts, api.VolumeMount{
Name: mount.Name,
MountPath: mount.MountPath,
ReadOnly: mount.ReadOnly,
})
}
for _, mount := range s.Config.Kubernetes.Volumes.Secrets {
mounts = append(mounts, api.VolumeMount{
Name: mount.Name,
MountPath: mount.MountPath,
ReadOnly: mount.ReadOnly,
})
}
for _, mount := range s.Config.Kubernetes.Volumes.PVCs {
mounts = append(mounts, api.VolumeMount{
Name: mount.Name,
MountPath: mount.MountPath,
ReadOnly: mount.ReadOnly,
})
}
for _, mount := range s.Config.Kubernetes.Volumes.ConfigMaps {
mounts = append(mounts, api.VolumeMount{
Name: mount.Name,
MountPath: mount.MountPath,
ReadOnly: mount.ReadOnly,
})
}
for _, mount := range s.Config.Kubernetes.Volumes.EmptyDirs {
mounts = append(mounts, api.VolumeMount{
Name: mount.Name,
MountPath: mount.MountPath,
})
}
return
}
func (s *executor) getVolumes() (volumes []api.Volume) {
volumes = append(volumes, api.Volume{
Name: "repo",
VolumeSource: api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
},
})
for _, volume := range s.Config.Kubernetes.Volumes.HostPaths {
path := volume.HostPath
// Make backward compatible with syntax introduced in version 9.3.0
if path == "" {
path = volume.MountPath
}
volumes = append(volumes, api.Volume{
Name: volume.Name,
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: path,
},
},
})
}
for _, volume := range s.Config.Kubernetes.Volumes.Secrets {
items := []api.KeyToPath{}
for key, path := range volume.Items {
items = append(items, api.KeyToPath{Key: key, Path: path})
}
volumes = append(volumes, api.Volume{
Name: volume.Name,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: volume.Name,
Items: items,
},
},
})
}
for _, volume := range s.Config.Kubernetes.Volumes.PVCs {
volumes = append(volumes, api.Volume{
Name: volume.Name,
VolumeSource: api.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
ClaimName: volume.Name,
ReadOnly: volume.ReadOnly,
},
},
})
}
for _, volume := range s.Config.Kubernetes.Volumes.ConfigMaps {
items := []api.KeyToPath{}
for key, path := range volume.Items {
items = append(items, api.KeyToPath{Key: key, Path: path})
}
volumes = append(volumes, api.Volume{
Name: volume.Name,
VolumeSource: api.VolumeSource{
ConfigMap: &api.ConfigMapVolumeSource{
LocalObjectReference: api.LocalObjectReference{
Name: volume.Name,
},
Items: items,
},
},
})
}
for _, volume := range s.Config.Kubernetes.Volumes.EmptyDirs {
volumes = append(volumes, api.Volume{
Name: volume.Name,
VolumeSource: api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{
Medium: api.StorageMedium(volume.Medium),
},
},
})
}
return
}
type dockerConfigEntry struct {
Username, Password string
}
func (s *executor) projectUniqueName() string {
return dns.MakeRFC1123Compatible(s.Build.ProjectUniqueName())
}
func (s *executor) setupCredentials() error {
authConfigs := make(map[string]dockerConfigEntry)
for _, credentials := range s.Build.Credentials {
if credentials.Type != "registry" {
continue
}
authConfigs[credentials.URL] = dockerConfigEntry{
Username: credentials.Username,
Password: credentials.Password,
}
}
if len(authConfigs) == 0 {
return nil
}
dockerCfgContent, err := json.Marshal(authConfigs)
if err != nil {
return err
}
secret := api.Secret{}
secret.GenerateName = s.projectUniqueName()
secret.Namespace = s.configurationOverwrites.namespace
secret.Type = api.SecretTypeDockercfg
secret.Data = map[string][]byte{}
secret.Data[api.DockerConfigKey] = dockerCfgContent
s.credentials, err = s.kubeClient.CoreV1().Secrets(s.configurationOverwrites.namespace).Create(&secret)
if err != nil {
return err
}
return nil
}
type invalidHostAliasDNSError struct {
service common.Image
inner error
}
func (e *invalidHostAliasDNSError) Error() string {
return fmt.Sprintf(
"provided host alias %s for service %s is invalid DNS. %s",
e.service.Alias,
e.service.Name,
e.inner,
)
}
func (e *invalidHostAliasDNSError) Is(err error) bool {
_, ok := err.(*invalidHostAliasDNSError)
return ok
}
func (s *executor) prepareHostAlias() (*api.HostAlias, error) {
supportsHostAliases, err := s.featureChecker.IsHostAliasSupported()
if errors.Is(err, &badVersionError{}) {
s.Warningln("Checking for host alias support. Host aliases will be disabled.", err)
return nil, nil
} else if err != nil {
return nil, err
} else if !supportsHostAliases {
return nil, nil
}
return s.createHostAlias()
}
func (s *executor) createHostAlias() (*api.HostAlias, error) {
servicesHostAlias := api.HostAlias{IP: "127.0.0.1"}
for _, service := range s.options.Services {
// Services with ports are coming from .gitlab-webide.yml
// they are used for ports mapping and their aliases are in no way validated
// so we ignore them. Check out https://gitlab.com/gitlab-org/gitlab-runner/merge_requests/1170
// for details
if len(service.Ports) > 0 {
continue
}
serviceMeta := services.SplitNameAndVersion(service.Name)
for _, alias := range serviceMeta.Aliases {
// For backward compatibility reasons a non DNS1123 compliant alias might be generated,
// this will be removed in https://gitlab.com/gitlab-org/gitlab-runner/issues/6100
err := dns.ValidateDNS1123Subdomain(alias)
if err == nil {
servicesHostAlias.Hostnames = append(servicesHostAlias.Hostnames, alias)
}
}
if service.Alias == "" {
continue
}
err := dns.ValidateDNS1123Subdomain(service.Alias)
if err != nil {
return nil, &invalidHostAliasDNSError{service: service, inner: err}
}
servicesHostAlias.Hostnames = append(servicesHostAlias.Hostnames, service.Alias)
}
return &servicesHostAlias, nil
}
func (s *executor) setupBuildPod() error {
services := make([]api.Container, len(s.options.Services))
for i, service := range s.options.Services {
resolvedImage := s.Build.GetAllVariables().ExpandValue(service.Name)
services[i] = s.buildContainer(fmt.Sprintf("svc-%d", i), resolvedImage, service, s.serviceRequests, s.serviceLimits)
}
// We set a default label on the pod. This label is used later
// by the services to link each service to the pod
labels := map[string]string{"pod": s.projectUniqueName()}
for k, v := range s.Build.Runner.Kubernetes.PodLabels {
labels[k] = s.Build.Variables.ExpandValue(v)
}
annotations := make(map[string]string)
for key, val := range s.configurationOverwrites.podAnnotations {
annotations[key] = s.Build.Variables.ExpandValue(val)
}
var imagePullSecrets []api.LocalObjectReference
for _, imagePullSecret := range s.Config.Kubernetes.ImagePullSecrets {
imagePullSecrets = append(imagePullSecrets, api.LocalObjectReference{Name: imagePullSecret})
}
if s.credentials != nil {
imagePullSecrets = append(imagePullSecrets, api.LocalObjectReference{Name: s.credentials.Name})
}
hostAlias, err := s.prepareHostAlias()
if err != nil {
return err
}
podConfig := s.preparePodConfig(labels, annotations, services, imagePullSecrets, hostAlias)
pod, err := s.kubeClient.CoreV1().Pods(s.configurationOverwrites.namespace).Create(&podConfig)
if err != nil {
return err
}
s.pod = pod
s.services, err = s.makePodProxyServices()
if err != nil {
return err
}
return nil
}
func (s *executor) preparePodConfig(labels, annotations map[string]string, services []api.Container, imagePullSecrets []api.LocalObjectReference, hostAlias *api.HostAlias) api.Pod {
buildImage := s.Build.GetAllVariables().ExpandValue(s.options.Image.Name)
pod := api.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: s.projectUniqueName(),
Namespace: s.configurationOverwrites.namespace,
Labels: labels,
Annotations: annotations,
},
Spec: api.PodSpec{
Volumes: s.getVolumes(),
ServiceAccountName: s.configurationOverwrites.serviceAccount,
RestartPolicy: api.RestartPolicyNever,
NodeSelector: s.Config.Kubernetes.NodeSelector,
Tolerations: s.Config.Kubernetes.GetNodeTolerations(),
Containers: append([]api.Container{
// TODO use the build and helper template here
s.buildContainer(buildContainerName, buildImage, s.options.Image, s.buildRequests, s.buildLimits, s.BuildShell.DockerCommand...),
s.buildContainer(helperContainerName, s.getHelperImage(), common.Image{}, s.helperRequests, s.helperLimits, s.BuildShell.DockerCommand...),
}, services...),
TerminationGracePeriodSeconds: &s.Config.Kubernetes.TerminationGracePeriodSeconds,
ImagePullSecrets: imagePullSecrets,
SecurityContext: s.Config.Kubernetes.GetPodSecurityContext(),
},
}
if hostAlias != nil {
pod.Spec.HostAliases = []api.HostAlias{*hostAlias}
}
return pod
}
func (s *executor) getHelperImage() string {
if len(s.Config.Kubernetes.HelperImage) > 0 {
return common.AppVersion.Variables().ExpandValue(s.Config.Kubernetes.HelperImage)
}
return s.helperImageInfo.String()
}
func (s *executor) makePodProxyServices() ([]api.Service, error) {
ch := make(chan serviceCreateResponse)
var wg sync.WaitGroup
wg.Add(len(s.ProxyPool))
for serviceName, serviceProxy := range s.ProxyPool {
serviceName := dns.MakeRFC1123Compatible(serviceName)
servicePorts := make([]api.ServicePort, len(serviceProxy.Settings.Ports))
for i, port := range serviceProxy.Settings.Ports {
// When there is more than one port, Kubernetes requires a port name
portName := fmt.Sprintf("%s-%d", serviceName, port.Number)
servicePorts[i] = api.ServicePort{Port: int32(port.Number), TargetPort: intstr.FromInt(port.Number), Name: portName}
}
serviceConfig := s.prepareServiceConfig(serviceName, servicePorts)
go s.createKubernetesService(&serviceConfig, serviceProxy.Settings, ch, &wg)
}
go func() {
wg.Wait()
close(ch)
}()
var services []api.Service
for res := range ch {
if res.err != nil {
err := fmt.Errorf("error creating the proxy service %q: %w", res.service.Name, res.err)
s.Errorln(err)
return []api.Service{}, err
}
services = append(services, *res.service)
}
return services, nil
}
func (s *executor) prepareServiceConfig(name string, ports []api.ServicePort) api.Service {
return api.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: name,
Namespace: s.configurationOverwrites.namespace,
},
Spec: api.ServiceSpec{
Ports: ports,
Selector: map[string]string{"pod": s.projectUniqueName()},
Type: api.ServiceTypeClusterIP,
},
}
}
func (s *executor) createKubernetesService(service *api.Service, proxySettings *proxy.Settings, ch chan<- serviceCreateResponse, wg *sync.WaitGroup) {
defer wg.Done()
service, err := s.kubeClient.CoreV1().Services(s.pod.Namespace).Create(service)
if err == nil {
// Updating the internal service name reference and activating the proxy
proxySettings.ServiceName = service.Name
}
ch <- serviceCreateResponse{service: service, err: err}
}
func (s *executor) runInContainer(ctx context.Context, name string, command []string, script string) <-chan error {
errc := make(chan error, 1)
go func() {
defer close(errc)
status, err := waitForPodRunning(ctx, s.kubeClient, s.pod, s.Trace, s.Config.Kubernetes)
if err != nil {
errc <- err
return
}
if status != api.PodRunning {
errc <- fmt.Errorf("pod failed to enter running state: %s", status)
return
}
config, err := getKubeClientConfig(s.Config.Kubernetes, s.configurationOverwrites)
if err != nil {
errc <- err
return
}
exec := ExecOptions{
PodName: s.pod.Name,
Namespace: s.pod.Namespace,
ContainerName: name,
Command: command,
In: strings.NewReader(script),
Out: s.Trace,
Err: s.Trace,
Stdin: true,
Config: config,
Client: s.kubeClient,
Executor: &DefaultRemoteExecutor{},
}
errc <- exec.Run()
}()
return errc
}
func (s *executor) prepareOverwrites(variables common.JobVariables) error {
values, err := createOverwrites(s.Config.Kubernetes, variables, s.BuildLogger)
if err != nil {
return err
}
s.configurationOverwrites = values
return nil
}
func (s *executor) prepareOptions(build *common.Build) {
s.options = &kubernetesOptions{}
s.options.Image = build.Image
s.getServices(build)
}
func (s *executor) getServices(build *common.Build) {
for _, service := range s.Config.Kubernetes.Services {
if service.Name == "" {
continue
}
s.options.Services = append(s.options.Services, common.Image{Name: service.Name})
}
for _, service := range build.Services {
if service.Name == "" {
continue
}
s.options.Services = append(s.options.Services, service)
}
}
// checkDefaults ensures a build image is configured and that the namespace falls back to "default" when unspecified
func (s *executor) checkDefaults() error {
if s.options.Image.Name == "" {
if s.Config.Kubernetes.Image == "" {
return fmt.Errorf("no image specified and no default set in config")
}
s.options.Image = common.Image{
Name: s.Config.Kubernetes.Image,
}
}
if s.configurationOverwrites.namespace == "" {
s.Warningln("Namespace is empty, therefore assuming 'default'.")
s.configurationOverwrites.namespace = "default"
}
s.Println("Using Kubernetes namespace:", s.configurationOverwrites.namespace)
return nil
}
func createFn() common.Executor {
helperImageInfo, err := helperimage.Get(common.REVISION, helperimage.Config{
OSType: helperimage.OSTypeLinux,
Architecture: "amd64",
})
if err != nil {
logrus.WithError(err).Fatal("Failed to set up helper image for kubernetes executor")
}
return &executor{
AbstractExecutor: executors.AbstractExecutor{
ExecutorOptions: executorOptions,
},
helperImageInfo: helperImageInfo,
}
}
func featuresFn(features *common.FeaturesInfo) {
features.Variables = true
features.Image = true
features.Services = true
features.Artifacts = true
features.Cache = true
features.Session = true
features.Terminal = true
features.Proxy = true
}
func init() {
common.RegisterExecutor("kubernetes", executors.DefaultExecutorProvider{
Creator: createFn,
FeaturesUpdater: featuresFn,
DefaultShellName: executorOptions.Shell.Shell,
})
}
package kubernetes
import (
"fmt"
"strings"
"unicode"
"github.com/hashicorp/go-version"
"k8s.io/client-go/kubernetes"
)
type featureChecker interface {
IsHostAliasSupported() (bool, error)
}
type kubeClientFeatureChecker struct {
kubeClient *kubernetes.Clientset
}
// https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
var minimumHostAliasesVersionRequired, _ = version.NewVersion("1.7")
type badVersionError struct {
major string
minor string
inner error
}
func (s *badVersionError) Error() string {
return fmt.Sprintf("parsing Kubernetes version %s.%s - %s", s.major, s.minor, s.inner)
}
func (s *badVersionError) Is(err error) bool {
_, ok := err.(*badVersionError)
return ok
}
func (c *kubeClientFeatureChecker) IsHostAliasSupported() (bool, error) {
verInfo, err := c.kubeClient.ServerVersion()
if err != nil {
return false, err
}
major := cleanVersion(verInfo.Major)
minor := cleanVersion(verInfo.Minor)
ver, err := version.NewVersion(fmt.Sprintf("%s.%s", major, minor))
if err != nil {
// Use the original major and minor parts of the version so we can better see in the logs
// what came straight from kubernetes. The inner error from version.NewVersion will tell us
// what version we actually tried to parse
return false, &badVersionError{
major: verInfo.Major,
minor: verInfo.Minor,
inner: err,
}
}
supportsHostAliases := ver.GreaterThan(minimumHostAliasesVersionRequired) ||
ver.Equal(minimumHostAliasesVersionRequired)
return supportsHostAliases, nil
}
// Sometimes Kubernetes returns versions that aren't valid semver, or that are
// invalid enough that the version package can't parse them, e.g. GCP returns 1.14+
func cleanVersion(version string) string {
// Try to find the index of the first symbol that isn't a digit
// use all the digits before that symbol as the version
nonDigitIndex := strings.IndexFunc(version, func(r rune) bool {
return !unicode.IsDigit(r)
})
if nonDigitIndex == -1 {
return version
}
return version[:nonDigitIndex]
}
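// For illustration, cleanVersion keeps only the leading digits of each part:
//
//	cleanVersion("14+")  // "14"
//	cleanVersion("16")   // "16"
//	cleanVersion("1.14") // "1" (stops at the first non-digit)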
package kubernetes
import (
"fmt"
"regexp"
"strings"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
const (
// NamespaceOverwriteVariableName is the key of the JobVariable containing the user-overwritten Namespace
NamespaceOverwriteVariableName = "KUBERNETES_NAMESPACE_OVERWRITE"
// ServiceAccountOverwriteVariableName is the key of the JobVariable containing the user-overwritten ServiceAccount
ServiceAccountOverwriteVariableName = "KUBERNETES_SERVICE_ACCOUNT_OVERWRITE"
// BearerTokenOverwriteVariableValue is the key of the JobVariable containing the user-overwritten BearerToken
BearerTokenOverwriteVariableValue = "KUBERNETES_BEARER_TOKEN"
// PodAnnotationsOverwriteVariablePrefix is the prefix of all JobVariable keys containing user-overwritten PodAnnotations
PodAnnotationsOverwriteVariablePrefix = "KUBERNETES_POD_ANNOTATIONS_"
)
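// For illustration (names and values are hypothetical): with the runner's
// Kubernetes config allowing namespace overwrites via the regex "^ci-.*",
// a job variable KUBERNETES_NAMESPACE_OVERWRITE=ci-review passes the regex
// check in createOverwrites below and replaces the configured namespace.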
type overwrites struct {
namespace string
serviceAccount string
bearerToken string
podAnnotations map[string]string
}
func createOverwrites(config *common.KubernetesConfig, variables common.JobVariables, logger common.BuildLogger) (*overwrites, error) {
var err error
o := &overwrites{}
namespaceOverwrite := variables.Expand().Get(NamespaceOverwriteVariableName)
o.namespace, err = o.evaluateOverwrite("Namespace", config.Namespace, config.NamespaceOverwriteAllowed, namespaceOverwrite, logger)
if err != nil {
return nil, err
}
serviceAccountOverwrite := variables.Expand().Get(ServiceAccountOverwriteVariableName)
o.serviceAccount, err = o.evaluateOverwrite("ServiceAccount", config.ServiceAccount, config.ServiceAccountOverwriteAllowed, serviceAccountOverwrite, logger)
if err != nil {
return nil, err
}
bearerTokenOverwrite := variables.Expand().Get(BearerTokenOverwriteVariableValue)
o.bearerToken, err = o.evaluateBoolControlledOverwrite("BearerToken", config.BearerToken, config.BearerTokenOverwriteAllowed, bearerTokenOverwrite, logger)
if err != nil {
return nil, err
}
o.podAnnotations, err = o.evaluateMapOverwrite("PodAnnotations", config.PodAnnotations, config.PodAnnotationsOverwriteAllowed, variables, PodAnnotationsOverwriteVariablePrefix, logger)
if err != nil {
return nil, err
}
return o, nil
}
func (o *overwrites) evaluateBoolControlledOverwrite(fieldName, value string, canOverride bool, overwriteValue string, logger common.BuildLogger) (string, error) {
if canOverride {
return o.evaluateOverwrite(fieldName, value, ".+", overwriteValue, logger)
}
return o.evaluateOverwrite(fieldName, value, "", overwriteValue, logger)
}
func (o *overwrites) evaluateOverwrite(fieldName, value, regex, overwriteValue string, logger common.BuildLogger) (string, error) {
if regex == "" {
logger.Debugln("Regex allowing overrides for", fieldName, "is empty, disabling override.")
return value, nil
}
if overwriteValue == "" {
return value, nil
}
if err := overwriteRegexCheck(regex, overwriteValue); err != nil {
return value, err
}
logValue := overwriteValue
if fieldName == "BearerToken" {
logValue = "XXXXXXXX..."
}
logger.Println(fmt.Sprintf("%q overwritten with %q", fieldName, logValue))
return overwriteValue, nil
}
func overwriteRegexCheck(regex, value string) error {
var err error
var r *regexp.Regexp
if r, err = regexp.Compile(regex); err != nil {
return err
}
if match := r.MatchString(value); !match {
return fmt.Errorf("Provided value %q does not match regex %q", value, regex)
}
return nil
}
// splitMapOverwrite splits the provided string on the first "=" and returns (key, value, nil).
// If the argument cannot be split, an error is returned
func splitMapOverwrite(str string) (string, string, error) {
if split := strings.SplitN(str, "=", 2); len(split) > 1 {
return split[0], split[1], nil
}
return "", "", fmt.Errorf("Provided value %q is malformed, does not match k=v", str)
}
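// For illustration:
//
//	key, value, err := splitMapOverwrite("team=runner") // "team", "runner", nil
//	_, _, err = splitMapOverwrite("malformed")          // non-nil error: no "=" present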
func (o *overwrites) evaluateMapOverwrite(fieldName string, values map[string]string, regex string, variables common.JobVariables, variablesSelector string, logger common.BuildLogger) (map[string]string, error) {
if regex == "" {
logger.Debugln("Regex allowing overrides for", fieldName, "is empty, disabling override.")
return values, nil
}
finalValues := make(map[string]string)
for k, v := range values {
finalValues[k] = v
}
for _, variable := range variables {
if strings.HasPrefix(variable.Key, variablesSelector) {
if err := overwriteRegexCheck(regex, variable.Value); err != nil {
return nil, err
}
key, value, err := splitMapOverwrite(variable.Value)
if err != nil {
return nil, err
}
finalValues[key] = value
logger.Println(fmt.Sprintf("%q %q overwritten with %q", fieldName, key, value))
}
}
return finalValues, nil
}
package kubernetes
import (
"fmt"
"io"
"net/http"
"strconv"
"github.com/gorilla/websocket"
"github.com/sirupsen/logrus"
terminal "gitlab.com/gitlab-org/gitlab-terminal"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8net "k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/rest"
"gitlab.com/gitlab-org/gitlab-runner/session/proxy"
)
const runningState = "Running"
func (s *executor) Pool() proxy.Pool {
return s.ProxyPool
}
func (s *executor) newProxy(serviceName string, ports []proxy.Port) *proxy.Proxy {
return &proxy.Proxy{
Settings: proxy.NewProxySettings(serviceName, ports),
ConnectionHandler: s,
}
}
func (s *executor) ProxyRequest(w http.ResponseWriter, r *http.Request, requestedURI string, port string, settings *proxy.Settings) {
logger := logrus.WithFields(logrus.Fields{
"uri": r.RequestURI,
"method": r.Method,
"port": port,
"settings": settings,
})
portSettings, err := settings.PortByNameOrNumber(port)
if err != nil {
logger.WithError(err).Errorf("port proxy %q not found", port)
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
if !s.servicesRunning() {
logger.Errorf("services are not ready yet")
http.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)
return
}
if websocket.IsWebSocketUpgrade(r) {
proxyWSRequest(s, w, r, requestedURI, portSettings, settings, logger)
return
}
proxyHTTPRequest(s, w, r, requestedURI, portSettings, settings, logger)
}
func (s *executor) servicesRunning() bool {
pod, err := s.kubeClient.CoreV1().Pods(s.pod.Namespace).Get(s.pod.Name, metav1.GetOptions{})
if err != nil || pod.Status.Phase != runningState {
return false
}
for _, container := range pod.Status.ContainerStatuses {
if !container.Ready {
return false
}
}
return true
}
func (s *executor) serviceEndpointRequest(verb, serviceName, requestedURI string, port proxy.Port) (*rest.Request, error) {
scheme, err := port.Scheme()
if err != nil {
return nil, err
}
result := s.kubeClient.CoreV1().RESTClient().Verb(verb).
Namespace(s.pod.Namespace).
Resource("services").
SubResource("proxy").
Name(k8net.JoinSchemeNamePort(scheme, serviceName, strconv.Itoa(port.Number))).
Suffix(requestedURI)
return result, nil
}
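// For illustration (namespace, service name, and port are hypothetical), the
// request built above targets the service proxy subresource, producing a path
// roughly like:
//
//	/api/v1/namespaces/default/services/http:my-svc:80/proxy/index.html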
func proxyWSRequest(s *executor, w http.ResponseWriter, r *http.Request, requestedURI string, port proxy.Port, proxySettings *proxy.Settings, logger *logrus.Entry) {
// In order to stop calling this method and use one of the library's own,
// we should refactor "gitlab.com/gitlab-org/gitlab-terminal" to make it
// more generic and less terminal-focused, with broader terminology.
// (https://gitlab.com/gitlab-org/gitlab-runner/issues/4059)
settings, err := s.getTerminalSettings()
if err != nil {
logger.WithError(err).Errorf("service proxy: error getting WS settings")
http.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)
return
}
req, err := s.serviceEndpointRequest(r.Method, proxySettings.ServiceName, requestedURI, port)
if err != nil {
logger.WithError(err).Errorf("service proxy: error proxying WS request")
http.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)
return
}
u := req.URL()
u.Scheme = proxy.WebsocketProtocolFor(u.Scheme)
settings.Url = u.String()
serviceProxy := terminal.NewWebSocketProxy(1)
terminal.ProxyWebSocket(w, r, settings, serviceProxy)
}
func proxyHTTPRequest(s *executor, w http.ResponseWriter, r *http.Request, requestedURI string, port proxy.Port, proxy *proxy.Settings, logger *logrus.Entry) {
req, err := s.serviceEndpointRequest(r.Method, proxy.ServiceName, requestedURI, port)
if err != nil {
logger.WithError(err).Errorf("service proxy: error proxying HTTP request")
http.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)
return
}
body, err := req.Stream()
if err != nil {
message, code := handleProxyHTTPErr(err, logger)
w.WriteHeader(code)
if message != "" {
_, _ = fmt.Fprint(w, message)
}
return
}
w.WriteHeader(http.StatusOK)
_, _ = io.Copy(w, body)
}
func handleProxyHTTPErr(err error, logger *logrus.Entry) (string, int) {
statusError, ok := err.(*errors.StatusError)
if !ok {
return "", http.StatusInternalServerError
}
code := int(statusError.Status().Code)
// When the error is a 503 we don't want to give any information
// coming from Kubernetes
if code == http.StatusServiceUnavailable {
logger.Error(statusError.Status().Message)
return "", code
}
details := statusError.Status().Details
if details == nil {
return "", code
}
causes := details.Causes
if len(causes) > 0 {
return causes[0].Message, code
}
return "", code
}
package kubernetes
import (
"io/ioutil"
"net/http"
"net/url"
terminal "gitlab.com/gitlab-org/gitlab-terminal"
api "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"gitlab.com/gitlab-org/gitlab-runner/session/proxy"
terminalsession "gitlab.com/gitlab-org/gitlab-runner/session/terminal"
)
func (s *executor) Connect() (terminalsession.Conn, error) {
settings, err := s.getTerminalSettings()
if err != nil {
return nil, err
}
return terminalConn{settings: settings}, nil
}
type terminalConn struct {
settings *terminal.TerminalSettings
}
func (t terminalConn) Start(w http.ResponseWriter, r *http.Request, timeoutCh, disconnectCh chan error) {
wsProxy := terminal.NewWebSocketProxy(1) // one stopper: terminal exit handler
terminalsession.ProxyTerminal(
timeoutCh,
disconnectCh,
wsProxy.StopCh,
func() {
terminal.ProxyWebSocket(w, r, t.settings, wsProxy)
},
)
}
func (t terminalConn) Close() error {
return nil
}
func (s *executor) getTerminalSettings() (*terminal.TerminalSettings, error) {
config, err := getKubeClientConfig(s.Config.Kubernetes, s.configurationOverwrites)
if err != nil {
return nil, err
}
wsURL, err := s.getTerminalWebSocketURL(config)
if err != nil {
return nil, err
}
caCert := ""
if len(config.CAFile) > 0 {
buf, err := ioutil.ReadFile(config.CAFile)
if err != nil {
return nil, err
}
caCert = string(buf)
}
term := &terminal.TerminalSettings{
Subprotocols: []string{"channel.k8s.io"},
Url: wsURL.String(),
Header: http.Header{"Authorization": []string{"Bearer " + config.BearerToken}},
CAPem: caCert,
MaxSessionTime: 0,
}
return term, nil
}
func (s *executor) getTerminalWebSocketURL(config *restclient.Config) (*url.URL, error) {
wsURL := s.kubeClient.CoreV1().RESTClient().Post().
Namespace(s.pod.Namespace).
Resource("pods").
Name(s.pod.Name).
SubResource("exec").
VersionedParams(&api.PodExecOptions{
Stdin: true,
Stdout: true,
Stderr: true,
TTY: true,
Container: "build",
Command: []string{"sh", "-c", "bash || sh"},
}, scheme.ParameterCodec).URL()
wsURL.Scheme = proxy.WebsocketProtocolFor(wsURL.Scheme)
return wsURL, nil
}
package kubernetes
import (
"errors"
"fmt"
"io"
"net/http"
"time"
"golang.org/x/net/context"
api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
type kubeConfigProvider func() (*restclient.Config, error)
var (
// inClusterConfig parses kubernetes configuration reading in-cluster values
inClusterConfig kubeConfigProvider = restclient.InClusterConfig
// defaultKubectlConfig parses kubectl configuration and loads the default cluster
defaultKubectlConfig kubeConfigProvider = loadDefaultKubectlConfig
)
func getKubeClientConfig(config *common.KubernetesConfig, overwrites *overwrites) (kubeConfig *restclient.Config, err error) {
if len(config.Host) > 0 {
kubeConfig, err = getOutClusterClientConfig(config)
} else {
kubeConfig, err = guessClientConfig()
}
if err != nil {
return nil, err
}
// Apply overwrites
if len(overwrites.bearerToken) > 0 {
kubeConfig.BearerToken = overwrites.bearerToken
}
kubeConfig.UserAgent = common.AppVersion.UserAgent()
return kubeConfig, nil
}
func getOutClusterClientConfig(config *common.KubernetesConfig) (*restclient.Config, error) {
kubeConfig := &restclient.Config{
Host: config.Host,
BearerToken: config.BearerToken,
TLSClientConfig: restclient.TLSClientConfig{
CAFile: config.CAFile,
},
}
// certificate based auth
if len(config.CertFile) > 0 {
if len(config.KeyFile) == 0 || len(config.CAFile) == 0 {
return nil, fmt.Errorf("ca file, cert file and key file must be specified when using file based auth")
}
kubeConfig.TLSClientConfig.CertFile = config.CertFile
kubeConfig.TLSClientConfig.KeyFile = config.KeyFile
}
return kubeConfig, nil
}
func guessClientConfig() (*restclient.Config, error) {
// Try in cluster config first
if inClusterCfg, err := inClusterConfig(); err == nil {
return inClusterCfg, nil
}
// In-cluster config failed; fall back to the default kubectl config
return defaultKubectlConfig()
}
func loadDefaultKubectlConfig() (*restclient.Config, error) {
config, err := clientcmd.NewDefaultClientConfigLoadingRules().Load()
if err != nil {
return nil, err
}
return clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig()
}
func getKubeClient(config *common.KubernetesConfig, overwrites *overwrites) (*kubernetes.Clientset, error) {
restConfig, err := getKubeClientConfig(config, overwrites)
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(restConfig)
}
func closeKubeClient(client *kubernetes.Clientset) bool {
if client == nil {
return false
}
rest, ok := client.CoreV1().RESTClient().(*restclient.RESTClient)
if !ok || rest.Client == nil || rest.Client.Transport == nil {
return false
}
if transport, ok := rest.Client.Transport.(*http.Transport); ok {
transport.CloseIdleConnections()
return true
}
return false
}
func isRunning(pod *api.Pod) (bool, error) {
switch pod.Status.Phase {
case api.PodRunning:
return true, nil
case api.PodSucceeded:
return false, fmt.Errorf("pod already succeeded before it begins running")
case api.PodFailed:
return false, fmt.Errorf("pod status is failed")
default:
return false, nil
}
}
type podPhaseResponse struct {
done bool
phase api.PodPhase
err error
}
func getPodPhase(c *kubernetes.Clientset, pod *api.Pod, out io.Writer) podPhaseResponse {
pod, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return podPhaseResponse{true, api.PodUnknown, err}
}
ready, err := isRunning(pod)
if err != nil {
return podPhaseResponse{true, pod.Status.Phase, err}
}
if ready {
return podPhaseResponse{true, pod.Status.Phase, nil}
}
// check status of containers
for _, container := range pod.Status.ContainerStatuses {
if container.Ready {
continue
}
if container.State.Waiting == nil {
continue
}
switch container.State.Waiting.Reason {
case "ErrImagePull", "ImagePullBackOff":
err = fmt.Errorf("image pull failed: %s", container.State.Waiting.Message)
err = &common.BuildError{Inner: err}
return podPhaseResponse{true, api.PodUnknown, err}
}
}
fmt.Fprintf(out, "Waiting for pod %s/%s to be running, status is %s\n", pod.Namespace, pod.Name, pod.Status.Phase)
return podPhaseResponse{false, pod.Status.Phase, nil}
}
func triggerPodPhaseCheck(c *kubernetes.Clientset, pod *api.Pod, out io.Writer) <-chan podPhaseResponse {
errc := make(chan podPhaseResponse)
go func() {
defer close(errc)
errc <- getPodPhase(c, pod, out)
}()
return errc
}
// waitForPodRunning will use client c to detect when pod reaches the PodRunning
// state. It returns the final PodPhase once either PodRunning, PodSucceeded or
// PodFailed has been reached. In the case of PodRunning, it will also wait until
// all containers within the pod are also Ready.
// It returns error if the call to retrieve pod details fails or the timeout is
// reached.
// The timeout and polling values are configurable through KubernetesConfig
// parameters.
func waitForPodRunning(ctx context.Context, c *kubernetes.Clientset, pod *api.Pod, out io.Writer, config *common.KubernetesConfig) (api.PodPhase, error) {
pollInterval := config.GetPollInterval()
pollAttempts := config.GetPollAttempts()
for i := 0; i <= pollAttempts; i++ {
select {
case r := <-triggerPodPhaseCheck(c, pod, out):
if !r.done {
time.Sleep(time.Duration(pollInterval) * time.Second)
continue
}
return r.phase, r.err
case <-ctx.Done():
return api.PodUnknown, ctx.Err()
}
}
return api.PodUnknown, errors.New("timed out waiting for pod to start")
}
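// For illustration (numbers are hypothetical): with a poll interval of 3s and
// 60 poll attempts, waitForPodRunning gives up after roughly 3 minutes unless
// the context is cancelled first.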
// limits takes a string representing CPU & memory limits,
// and returns a ResourceList with appropriately scaled Quantity
// values for Kubernetes. This allows users to write "500m" for CPU,
// and "50Mi" for memory (etc.)
func limits(cpu, memory string) (api.ResourceList, error) {
var rCPU, rMem resource.Quantity
var err error
parse := func(s string) (resource.Quantity, error) {
var q resource.Quantity
if len(s) == 0 {
return q, nil
}
if q, err = resource.ParseQuantity(s); err != nil {
return q, fmt.Errorf("error parsing resource limit: %w", err)
}
return q, nil
}
if rCPU, err = parse(cpu); err != nil {
return api.ResourceList{}, err
}
if rMem, err = parse(memory); err != nil {
return api.ResourceList{}, err
}
l := make(api.ResourceList)
q := resource.Quantity{}
if rCPU != q {
l[api.ResourceCPU] = rCPU
}
if rMem != q {
l[api.ResourceMemory] = rMem
}
return l, nil
}
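// For illustration:
//
//	rl, _ := limits("500m", "50Mi")
//	// rl[api.ResourceCPU]    == resource.MustParse("500m")
//	// rl[api.ResourceMemory] == resource.MustParse("50Mi")
//	rl, _ = limits("", "")
//	// rl is empty: unset values are omitted from the list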
// buildVariables converts a common.BuildVariables into a list of
// kubernetes EnvVar objects
func buildVariables(bv common.JobVariables) []api.EnvVar {
e := make([]api.EnvVar, len(bv))
for i, b := range bv {
e[i] = api.EnvVar{
Name: b.Key,
Value: b.Value,
}
}
return e
}
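// For illustration:
//
//	env := buildVariables(common.JobVariables{{Key: "CI_JOB_ID", Value: "123"}})
//	// env == []api.EnvVar{{Name: "CI_JOB_ID", Value: "123"}}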
package parallels
import (
"errors"
"fmt"
"os/exec"
"time"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/executors"
"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh"
prl "gitlab.com/gitlab-org/gitlab-runner/helpers/parallels"
)
type executor struct {
executors.AbstractExecutor
cmd *exec.Cmd
vmName string
sshCommand ssh.Client
provisioned bool
ipAddress string
machineVerified bool
}
func (s *executor) waitForIPAddress(vmName string, seconds int) (string, error) {
var lastError error
if s.ipAddress != "" {
return s.ipAddress, nil
}
s.Debugln("Looking for MAC address...")
macAddr, err := prl.Mac(vmName)
if err != nil {
return "", err
}
s.Debugln("Requesting IP address...")
for i := 0; i < seconds; i++ {
ipAddr, err := prl.IPAddress(macAddr)
if err == nil {
s.Debugln("IP address found", ipAddr, "...")
s.ipAddress = ipAddr
return ipAddr, nil
}
lastError = err
time.Sleep(time.Second)
}
return "", lastError
}
func (s *executor) verifyMachine(vmName string) error {
if s.machineVerified {
return nil
}
ipAddr, err := s.waitForIPAddress(vmName, 120)
if err != nil {
return err
}
// Create SSH command
sshCommand := ssh.Client{
Config: *s.Config.SSH,
Stdout: s.Trace,
Stderr: s.Trace,
ConnectRetries: 30,
}
sshCommand.Host = ipAddr
s.Debugln("Connecting to SSH...")
err = sshCommand.Connect()
if err != nil {
return err
}
defer sshCommand.Cleanup()
err = sshCommand.Run(s.Context, ssh.Command{Command: []string{"exit"}})
if err != nil {
return err
}
s.machineVerified = true
return nil
}
func (s *executor) restoreFromSnapshot() error {
s.Debugln("Requesting default snapshot for VM...")
snapshot, err := prl.GetDefaultSnapshot(s.vmName)
if err != nil {
return err
}
s.Debugln("Reverting VM to snapshot", snapshot, "...")
err = prl.RevertToSnapshot(s.vmName, snapshot)
if err != nil {
return err
}
return nil
}
func (s *executor) createVM() error {
baseImage := s.Config.Parallels.BaseName
if baseImage == "" {
return errors.New("Missing Image setting from Parallels config")
}
templateName := s.Config.Parallels.TemplateName
if templateName == "" {
templateName = baseImage + "-template"
}
// Unregister the template if it is in an invalid state (e.g. its files were removed)
templateStatus, _ := prl.Status(templateName)
if templateStatus == prl.Invalid {
prl.Unregister(templateName)
}
if !prl.Exist(templateName) {
s.Debugln("Creating template from VM", baseImage, "...")
err := prl.CreateTemplate(baseImage, templateName)
if err != nil {
return err
}
}
s.Debugln("Creating runner from VM template...")
err := prl.CreateOsVM(s.vmName, templateName)
if err != nil {
return err
}
s.Debugln("Bootstraping VM...")
err = prl.Start(s.vmName)
if err != nil {
return err
}
// TODO: integration tests fail here due to:
// "Unable to open new session in this virtual machine.
// Make sure the latest version of Parallels Tools is installed in this virtual machine and it has finished booting"
s.Debugln("Waiting for VM to start...")
err = prl.TryExec(s.vmName, 120, "exit", "0")
if err != nil {
return err
}
s.Debugln("Waiting for VM to become responsive...")
err = s.verifyMachine(s.vmName)
if err != nil {
return err
}
return nil
}
func (s *executor) updateGuestTime() error {
s.Debugln("Updating VM date...")
timeServer := s.Config.Parallels.TimeServer
if timeServer == "" {
timeServer = "time.apple.com"
}
// Check whether the ntpdate command exists before trying to execute it;
// starting from Mojave, ntpdate was removed
_, err := prl.Exec(s.vmName, "which", "ntpdate")
if err != nil {
// Fallback to sntp
return prl.TryExec(s.vmName, 20, "sudo", "sntp", "-sS", timeServer)
}
return prl.TryExec(s.vmName, 20, "sudo", "ntpdate", "-u", timeServer)
}
func (s *executor) Prepare(options common.ExecutorPrepareOptions) error {
err := s.AbstractExecutor.Prepare(options)
if err != nil {
return err
}
if s.BuildShell.PassFile {
return errors.New("Parallels doesn't support shells that require script file")
}
if s.Config.SSH == nil {
return errors.New("Missing SSH configuration")
}
if s.Config.Parallels == nil {
return errors.New("Missing Parallels configuration")
}
if s.Config.Parallels.BaseName == "" {
return errors.New("Missing BaseName setting from Parallels config")
}
version, err := prl.Version()
if err != nil {
return err
}
s.Println("Using Parallels", version, "executor...")
// Unregister the VM if it is in an invalid state (e.g. its files were removed)
vmStatus, _ := prl.Status(s.vmName)
if vmStatus == prl.Invalid {
prl.Unregister(s.vmName)
}
if s.Config.Parallels.DisableSnapshots {
s.vmName = s.Config.Parallels.BaseName + "-" + s.Build.ProjectUniqueName()
if prl.Exist(s.vmName) {
s.Debugln("Deleting old VM...")
prl.Kill(s.vmName)
prl.Delete(s.vmName)
prl.Unregister(s.vmName)
}
} else {
s.vmName = fmt.Sprintf("%s-runner-%s-concurrent-%d",
s.Config.Parallels.BaseName,
s.Build.Runner.ShortDescription(),
s.Build.RunnerID)
}
if prl.Exist(s.vmName) {
s.Println("Restoring VM from snapshot...")
err := s.restoreFromSnapshot()
if err != nil {
s.Println("Previous VM failed. Deleting, because", err)
prl.Kill(s.vmName)
prl.Delete(s.vmName)
prl.Unregister(s.vmName)
}
}
if !prl.Exist(s.vmName) {
s.Println("Creating new VM...")
err := s.createVM()
if err != nil {
return err
}
if !s.Config.Parallels.DisableSnapshots {
s.Println("Creating default snapshot...")
err = prl.CreateSnapshot(s.vmName, "Started")
if err != nil {
return err
}
}
}
s.Debugln("Checking VM status...")
status, err := prl.Status(s.vmName)
if err != nil {
return err
}
// Start VM if stopped
if status == prl.Stopped || status == prl.Suspended {
s.Println("Starting VM...")
err := prl.Start(s.vmName)
if err != nil {
return err
}
}
if status != prl.Running {
s.Debugln("Waiting for VM to run...")
err = prl.WaitForStatus(s.vmName, prl.Running, 60)
if err != nil {
return err
}
}
s.Println("Waiting VM to become responsive...")
err = s.verifyMachine(s.vmName)
if err != nil {
return err
}
s.provisioned = true
// TODO: integration tests fail here due to:
// "Unable to open new session in this virtual machine.
// Make sure the latest version of Parallels Tools is installed in this virtual machine and it has finished booting"
err = s.updateGuestTime()
if err != nil {
s.Println("Could not sync with timeserver!")
return err
}
ipAddr, err := s.waitForIPAddress(s.vmName, 60)
if err != nil {
return err
}
s.Debugln("Starting SSH command...")
s.sshCommand = ssh.Client{
Config: *s.Config.SSH,
Stdout: s.Trace,
Stderr: s.Trace,
}
s.sshCommand.Host = ipAddr
s.Debugln("Connecting to SSH server...")
err = s.sshCommand.Connect()
if err != nil {
return err
}
return nil
}
func (s *executor) Run(cmd common.ExecutorCommand) error {
err := s.sshCommand.Run(cmd.Context, ssh.Command{
Environment: s.BuildShell.Environment,
Command: s.BuildShell.GetCommandWithArguments(),
Stdin: cmd.Script,
})
if _, ok := err.(*ssh.ExitError); ok {
err = &common.BuildError{Inner: err}
}
return err
}
func (s *executor) Cleanup() {
s.sshCommand.Cleanup()
if s.vmName != "" {
prl.Kill(s.vmName)
if s.Config.Parallels.DisableSnapshots || !s.provisioned {
prl.Delete(s.vmName)
}
}
s.AbstractExecutor.Cleanup()
}
func init() {
options := executors.ExecutorOptions{
DefaultCustomBuildsDirEnabled: false,
DefaultBuildsDir: "builds",
DefaultCacheDir: "cache",
SharedBuildsDir: false,
Shell: common.ShellScriptInfo{
Shell: "bash",
Type: common.LoginShell,
RunnerCommand: "gitlab-runner",
},
ShowHostname: true,
}
creator := func() common.Executor {
return &executor{
AbstractExecutor: executors.AbstractExecutor{
ExecutorOptions: options,
},
}
}
featuresUpdater := func(features *common.FeaturesInfo) {
features.Variables = true
}
common.RegisterExecutor("parallels", executors.DefaultExecutorProvider{
Creator: creator,
FeaturesUpdater: featuresUpdater,
DefaultShellName: options.Shell.Shell,
})
}
package shell
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"time"
"github.com/kardianos/osext"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/executors"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
)
type executor struct {
executors.AbstractExecutor
}
func (s *executor) Prepare(options common.ExecutorPrepareOptions) error {
if options.User != "" {
s.Shell().User = options.User
}
// Expand environment variables so the defaults can reference the current directory
wd, err := os.Getwd()
if err != nil {
return fmt.Errorf("getwd: %w", err)
}
mapping := func(key string) string {
switch key {
case "PWD":
return wd
default:
return ""
}
}
s.DefaultBuildsDir = os.Expand(s.DefaultBuildsDir, mapping)
s.DefaultCacheDir = os.Expand(s.DefaultCacheDir, mapping)
// Pass control to executor
err = s.AbstractExecutor.Prepare(options)
if err != nil {
return err
}
s.Println("Using Shell executor...")
return nil
}
func (s *executor) killAndWait(cmd *exec.Cmd, waitCh chan error) error {
for {
s.Debugln("Aborting command...")
helpers.KillProcessGroup(cmd)
select {
case <-time.After(time.Second):
case err := <-waitCh:
return err
}
}
}
func (s *executor) Run(cmd common.ExecutorCommand) error {
// Create execution command
c := exec.Command(s.BuildShell.Command, s.BuildShell.Arguments...)
if c == nil {
return errors.New("failed to generate execution command")
}
helpers.SetProcessGroup(c)
defer helpers.KillProcessGroup(c)
// Fill process environment variables
c.Env = append(os.Environ(), s.BuildShell.Environment...)
c.Stdout = s.Trace
c.Stderr = s.Trace
if s.BuildShell.PassFile {
scriptDir, err := ioutil.TempDir("", "build_script")
if err != nil {
return err
}
defer os.RemoveAll(scriptDir)
scriptFile := filepath.Join(scriptDir, "script."+s.BuildShell.Extension)
err = ioutil.WriteFile(scriptFile, []byte(cmd.Script), 0700)
if err != nil {
return err
}
c.Args = append(c.Args, scriptFile)
} else {
c.Stdin = bytes.NewBufferString(cmd.Script)
}
// Start a process
err := c.Start()
if err != nil {
return fmt.Errorf("failed to start process: %w", err)
}
// Wait for process to finish
waitCh := make(chan error)
go func() {
err := c.Wait()
if _, ok := err.(*exec.ExitError); ok {
err = &common.BuildError{Inner: err}
}
waitCh <- err
}()
// Support process abort
select {
case err = <-waitCh:
return err
case <-cmd.Context.Done():
return s.killAndWait(c, waitCh)
}
}
func init() {
// Look for self
runnerCommand, err := osext.Executable()
if err != nil {
logrus.Warningln(err)
}
options := executors.ExecutorOptions{
DefaultCustomBuildsDirEnabled: false,
DefaultBuildsDir: "$PWD/builds",
DefaultCacheDir: "$PWD/cache",
SharedBuildsDir: true,
Shell: common.ShellScriptInfo{
Shell: common.GetDefaultShell(),
Type: common.LoginShell,
RunnerCommand: runnerCommand,
},
ShowHostname: false,
}
creator := func() common.Executor {
return &executor{
AbstractExecutor: executors.AbstractExecutor{
ExecutorOptions: options,
},
}
}
featuresUpdater := func(features *common.FeaturesInfo) {
features.Variables = true
features.Shared = true
if runtime.GOOS != "windows" {
features.Session = true
features.Terminal = true
}
}
common.RegisterExecutor("shell", executors.DefaultExecutorProvider{
Creator: creator,
FeaturesUpdater: featuresUpdater,
DefaultShellName: options.Shell.Shell,
})
}
// +build !windows
package shell
import (
"errors"
"net/http"
"os"
"os/exec"
"github.com/kr/pty"
"gitlab.com/gitlab-org/gitlab-terminal"
terminalsession "gitlab.com/gitlab-org/gitlab-runner/session/terminal"
)
type terminalConn struct {
shellFd *os.File
}
func (t terminalConn) Start(w http.ResponseWriter, r *http.Request, timeoutCh, disconnectCh chan error) {
proxy := terminal.NewFileDescriptorProxy(1) // one stopper: terminal exit handler
terminalsession.ProxyTerminal(
timeoutCh,
disconnectCh,
proxy.StopCh,
func() {
terminal.ProxyFileDescriptor(w, r, t.shellFd, proxy)
},
)
}
func (t terminalConn) Close() error {
return t.shellFd.Close()
}
func (s *executor) Connect() (terminalsession.Conn, error) {
cmd := exec.Command(s.BuildShell.Command, s.BuildShell.Arguments...)
if cmd == nil {
return nil, errors.New("failed to generate shell command")
}
shellFD, err := pty.Start(cmd)
if err != nil {
return nil, err
}
session := terminalConn{shellFd: shellFD}
return session, nil
}
package ssh
import (
"errors"
"fmt"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/executors"
"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh"
)
type executor struct {
executors.AbstractExecutor
sshCommand ssh.Client
}
func (s *executor) Prepare(options common.ExecutorPrepareOptions) error {
err := s.AbstractExecutor.Prepare(options)
if err != nil {
return fmt.Errorf("AbstractExecutor.Prepare error: %w", err)
}
s.Println("Using SSH executor...")
if s.BuildShell.PassFile {
return errors.New("SSH doesn't support shells that require script file")
}
if s.Config.SSH == nil {
return errors.New("missing SSH configuration")
}
s.Debugln("Starting SSH command...")
// Create SSH command
s.sshCommand = ssh.Client{
Config: *s.Config.SSH,
Stdout: s.Trace,
Stderr: s.Trace,
}
s.Debugln("Connecting to SSH server...")
err = s.sshCommand.Connect()
if err != nil {
return fmt.Errorf("ssh command Connect() error: %w", err)
}
return nil
}
func (s *executor) Run(cmd common.ExecutorCommand) error {
err := s.sshCommand.Run(cmd.Context, ssh.Command{
Environment: s.BuildShell.Environment,
Command: s.BuildShell.GetCommandWithArguments(),
Stdin: cmd.Script,
})
if _, ok := err.(*ssh.ExitError); ok {
err = &common.BuildError{Inner: err}
}
return err
}
func (s *executor) Cleanup() {
s.sshCommand.Cleanup()
s.AbstractExecutor.Cleanup()
}
func init() {
options := executors.ExecutorOptions{
DefaultCustomBuildsDirEnabled: false,
DefaultBuildsDir: "builds",
DefaultCacheDir: "cache",
SharedBuildsDir: true,
Shell: common.ShellScriptInfo{
Shell: "bash",
Type: common.LoginShell,
RunnerCommand: "gitlab-runner",
},
ShowHostname: true,
}
creator := func() common.Executor {
return &executor{
AbstractExecutor: executors.AbstractExecutor{
ExecutorOptions: options,
},
}
}
featuresUpdater := func(features *common.FeaturesInfo) {
features.Variables = true
features.Shared = true
}
common.RegisterExecutor("ssh", executors.DefaultExecutorProvider{
Creator: creator,
FeaturesUpdater: featuresUpdater,
DefaultShellName: options.Shell.Shell,
})
}
package ssh
import (
"fmt"
"net"
"strconv"
"strings"
"github.com/tevino/abool"
cryptoSSH "golang.org/x/crypto/ssh"
)
type StubSSHServer struct {
User string
Password string
Config *cryptoSSH.ServerConfig
stop chan bool
shouldExit *abool.AtomicBool
}
func NewStubServer(user, pass string, privateKey []byte) (*StubSSHServer, error) {
server := &StubSSHServer{
User: user,
Password: pass,
Config: &cryptoSSH.ServerConfig{
PasswordCallback: func(conn cryptoSSH.ConnMetadata, password []byte) (*cryptoSSH.Permissions, error) {
if conn.User() == user && string(password) == pass {
return nil, nil
}
return nil, fmt.Errorf("wrong password for %q", conn.User())
},
},
stop: make(chan bool),
shouldExit: abool.New(),
}
key, err := cryptoSSH.ParsePrivateKey(privateKey)
if err != nil {
return nil, err
}
server.Config.AddHostKey(key)
return server, nil
}
func (s *StubSSHServer) Start() (int, error) {
listener, err := net.Listen("tcp", "127.0.0.1:")
if err != nil {
return 0, err
}
go func() {
<-s.stop
s.shouldExit.Set()
listener.Close()
}()
address := strings.SplitN(listener.Addr().String(), ":", 2)
go s.mainLoop(listener)
return strconv.Atoi(address[1])
}
func (s *StubSSHServer) Stop() {
s.stop <- true
}
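// Illustrative test usage (privateKey is a hypothetical PEM-encoded key):
//
//	server, err := NewStubServer("user", "pass", privateKey)
//	if err != nil {
//		t.Fatal(err)
//	}
//	port, err := server.Start()
//	defer server.Stop()
//	// point an ssh.Client at 127.0.0.1:<port> to exercise the stub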
func (s *StubSSHServer) mainLoop(listener net.Listener) {
for {
if s.shouldExit.IsSet() {
return
}
conn, err := listener.Accept()
if err != nil {
continue
}
if s.shouldExit.IsSet() {
return
}
// Upgrade to an SSH connection; simply accepting the handshake is enough for
// the stub's purposes, so the returned values are discarded
cryptoSSH.NewServerConn(conn, s.Config)
}
}
package virtualbox
import (
"errors"
"fmt"
"time"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/executors"
"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh"
vbox "gitlab.com/gitlab-org/gitlab-runner/helpers/virtualbox"
)
type executor struct {
executors.AbstractExecutor
vmName string
sshCommand ssh.Client
sshPort string
provisioned bool
machineVerified bool
}
func (s *executor) verifyMachine(vmName string, sshPort string) error {
if s.machineVerified {
return nil
}
// Create SSH command
sshCommand := ssh.Client{
Config: *s.Config.SSH,
Stdout: s.Trace,
Stderr: s.Trace,
ConnectRetries: 30,
}
sshCommand.Port = sshPort
sshCommand.Host = "localhost"
s.Debugln("Connecting to SSH...")
err := sshCommand.Connect()
if err != nil {
return err
}
defer sshCommand.Cleanup()
err = sshCommand.Run(s.Context, ssh.Command{Command: []string{"exit"}})
if err != nil {
return err
}
s.machineVerified = true
return nil
}
func (s *executor) restoreFromSnapshot() error {
s.Debugln("Reverting VM to current snapshot...")
err := vbox.RevertToSnapshot(s.vmName)
if err != nil {
return err
}
return nil
}
func (s *executor) determineBaseSnapshot(baseImage string) string {
var err error
baseSnapshot := s.Config.VirtualBox.BaseSnapshot
if baseSnapshot == "" {
baseSnapshot, err = vbox.GetCurrentSnapshot(baseImage)
if err != nil {
if s.Config.VirtualBox.DisableSnapshots {
s.Debugln("No snapshots found for base VM", baseImage)
return ""
}
baseSnapshot = "Base State"
}
}
if baseSnapshot != "" && !vbox.HasSnapshot(baseImage, baseSnapshot) {
if s.Config.VirtualBox.DisableSnapshots {
s.Warningln("Snapshot", baseSnapshot, "not found in base VM", baseImage)
return ""
}
s.Debugln("Creating snapshot", baseSnapshot, "from current base VM", baseImage, "state...")
err = vbox.CreateSnapshot(baseImage, baseSnapshot)
if err != nil {
s.Warningln("Failed to create snapshot", baseSnapshot, "from base VM", baseImage)
return ""
}
}
return baseSnapshot
}
// virtualbox doesn't support templates
func (s *executor) createVM(vmName string) (err error) {
baseImage := s.Config.VirtualBox.BaseName
if baseImage == "" {
return errors.New("Missing Image setting from VirtualBox configuration")
}
_, err = vbox.Status(vmName)
if err != nil {
vbox.Unregister(vmName)
}
if !vbox.Exist(vmName) {
baseSnapshot := s.determineBaseSnapshot(baseImage)
if baseSnapshot == "" {
s.Debugln("Creating testing VM from VM", baseImage, "...")
} else {
s.Debugln("Creating testing VM from VM", baseImage, "snapshot", baseSnapshot, "...")
}
err = vbox.CreateOsVM(baseImage, vmName, baseSnapshot)
if err != nil {
return err
}
}
s.Debugln("Identify SSH Port...")
s.sshPort, err = vbox.FindSSHPort(s.vmName)
if err != nil {
s.Debugln("Creating localhost ssh forwarding...")
vmSSHPort := s.Config.SSH.Port
if vmSSHPort == "" {
vmSSHPort = "22"
}
s.sshPort, err = vbox.ConfigureSSH(vmName, vmSSHPort)
if err != nil {
return err
}
}
s.Debugln("Using local", s.sshPort, "SSH port to connect to VM...")
s.Debugln("Bootstraping VM...")
err = vbox.Start(s.vmName)
if err != nil {
return err
}
s.Debugln("Waiting for VM to become responsive...")
time.Sleep(10 * time.Second)
err = s.verifyMachine(s.vmName, s.sshPort)
if err != nil {
return err
}
return nil
}
func (s *executor) Prepare(options common.ExecutorPrepareOptions) error {
err := s.AbstractExecutor.Prepare(options)
if err != nil {
return err
}
if s.BuildShell.PassFile {
return errors.New("virtualbox doesn't support shells that require script file")
}
if s.Config.SSH == nil {
return errors.New("missing SSH configuration")
}
if s.Config.VirtualBox == nil {
return errors.New("missing VirtualBox configuration")
}
if s.Config.VirtualBox.BaseName == "" {
return errors.New("missing BaseName setting from VirtualBox configuration")
}
version, err := vbox.Version()
if err != nil {
return err
}
s.Println("Using VirtualBox version", version, "executor...")
if s.Config.VirtualBox.DisableSnapshots {
s.vmName = s.Config.VirtualBox.BaseName + "-" + s.Build.ProjectUniqueName()
if vbox.Exist(s.vmName) {
s.Debugln("Deleting old VM...")
vbox.Kill(s.vmName)
vbox.Delete(s.vmName)
vbox.Unregister(s.vmName)
}
} else {
s.vmName = fmt.Sprintf("%s-runner-%s-concurrent-%d",
s.Config.VirtualBox.BaseName,
s.Build.Runner.ShortDescription(),
s.Build.RunnerID)
}
if vbox.Exist(s.vmName) {
s.Println("Restoring VM from snapshot...")
err := s.restoreFromSnapshot()
if err != nil {
s.Println("Previous VM failed. Deleting, because", err)
vbox.Kill(s.vmName)
vbox.Delete(s.vmName)
vbox.Unregister(s.vmName)
}
}
if !vbox.Exist(s.vmName) {
s.Println("Creating new VM...")
err := s.createVM(s.vmName)
if err != nil {
return err
}
if !s.Config.VirtualBox.DisableSnapshots {
s.Println("Creating default snapshot...")
err = vbox.CreateSnapshot(s.vmName, "Started")
if err != nil {
return err
}
}
}
s.Debugln("Checking VM status...")
status, err := vbox.Status(s.vmName)
if err != nil {
return err
}
if !vbox.IsStatusOnlineOrTransient(status) {
s.Println("Starting VM...")
err := vbox.Start(s.vmName)
if err != nil {
return err
}
}
if status != vbox.Running {
s.Debugln("Waiting for VM to run...")
err = vbox.WaitForStatus(s.vmName, vbox.Running, 60)
if err != nil {
return err
}
}
s.Debugln("Identify SSH Port...")
sshPort, err := vbox.FindSSHPort(s.vmName)
s.sshPort = sshPort
if err != nil {
return err
}
s.Println("Waiting VM to become responsive...")
err = s.verifyMachine(s.vmName, s.sshPort)
if err != nil {
return err
}
s.provisioned = true
s.Println("Starting SSH command...")
s.sshCommand = ssh.Client{
Config: *s.Config.SSH,
Stdout: s.Trace,
Stderr: s.Trace,
}
s.sshCommand.Port = s.sshPort
s.sshCommand.Host = "localhost"
s.Debugln("Connecting to SSH server...")
err = s.sshCommand.Connect()
if err != nil {
return err
}
return nil
}
func (s *executor) Run(cmd common.ExecutorCommand) error {
err := s.sshCommand.Run(cmd.Context, ssh.Command{
Environment: s.BuildShell.Environment,
Command: s.BuildShell.GetCommandWithArguments(),
Stdin: cmd.Script,
})
if _, ok := err.(*ssh.ExitError); ok {
err = &common.BuildError{Inner: err}
}
return err
}
func (s *executor) Cleanup() {
s.sshCommand.Cleanup()
if s.vmName != "" {
vbox.Kill(s.vmName)
if s.Config.VirtualBox.DisableSnapshots || !s.provisioned {
vbox.Delete(s.vmName)
}
}
}
func init() {
options := executors.ExecutorOptions{
DefaultCustomBuildsDirEnabled: false,
DefaultBuildsDir: "builds",
DefaultCacheDir: "cache",
SharedBuildsDir: false,
Shell: common.ShellScriptInfo{
Shell: "bash",
Type: common.LoginShell,
RunnerCommand: "gitlab-runner",
},
ShowHostname: true,
}
creator := func() common.Executor {
return &executor{
AbstractExecutor: executors.AbstractExecutor{
ExecutorOptions: options,
},
}
}
featuresUpdater := func(features *common.FeaturesInfo) {
features.Variables = true
}
common.RegisterExecutor("virtualbox", executors.DefaultExecutorProvider{
Creator: creator,
FeaturesUpdater: featuresUpdater,
DefaultShellName: options.Shell.Shell,
})
}
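// Example (illustrative sketch, not part of the original source; all values are
// placeholders): a config.toml fragment for this executor. base_name must name
// an existing VirtualBox VM.
//
//	[[runners]]
//	  executor = "virtualbox"
//	  [runners.virtualbox]
//	    base_name = "ubuntu-base"
//	    disable_snapshots = false
//	  [runners.ssh]
//	    user = "ci"
//	    password = "ci-password"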
package archives
import (
"compress/gzip"
"fmt"
"io"
"os"
"github.com/sirupsen/logrus"
)
func writeGzipFile(w io.Writer, fileName string, fileInfo os.FileInfo) error {
if !fileInfo.Mode().IsRegular() {
return fmt.Errorf("the %q is not a regular file", fileName)
}
gz := gzip.NewWriter(w)
gz.Header.Name = fileInfo.Name()
gz.Header.Comment = fileName
gz.Header.ModTime = fileInfo.ModTime()
defer gz.Close()
file, err := os.Open(fileName)
if err != nil {
return err
}
defer file.Close()
if _, err = io.Copy(gz, file); err != nil {
return err
}
// Close explicitly so a flush error is reported; the deferred Close then becomes a no-op
return gz.Close()
}
func CreateGzipArchive(w io.Writer, fileNames []string) error {
for _, fileName := range fileNames {
fi, err := os.Lstat(fileName)
if os.IsNotExist(err) {
logrus.Warningln("File ignored:", err)
continue
} else if err != nil {
return err
}
err = writeGzipFile(w, fileName, fi)
if err != nil {
return err
}
}
return nil
}
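// Example (illustrative sketch, not part of the original source; the file names
// are placeholders): streaming an archive of two files to stdout. Each file
// becomes its own concatenated gzip member, since writeGzipFile opens a fresh
// gzip stream per file.
//
//	if err := CreateGzipArchive(os.Stdout, []string{"a.txt", "b.txt"}); err != nil {
//		logrus.Fatal(err)
//	}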
package archives
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
)
func isPathAGitDirectory(path string) bool {
parts := strings.Split(filepath.Clean(path), string(filepath.Separator))
return len(parts) > 0 && parts[0] == ".git"
}
func errorIfGitDirectory(path string) *os.PathError {
if !isPathAGitDirectory(path) {
return nil
}
return &os.PathError{
Op: ".git inside of archive",
Path: path,
Err: errors.New("Trying to archive or extract .git path"),
}
}
func printGitArchiveWarning(operation string) {
logrus.Warnf("Part of .git directory is on the list of files to %s", operation)
logrus.Warn("This may introduce unexpected problems")
}
package archives
import (
"os"
"sync"
)
// When extracting an archive, the same PathError.Op may be repeated for every
// file in the archive; use pathErrorTracker to suppress repetitious log output
type pathErrorTracker struct {
lock sync.Mutex
seenOps map[string]bool
}
// check whether the error is actionable, which is to say, not nil and either
// not a PathError, or a novel PathError
func (p *pathErrorTracker) actionable(e error) bool {
pathErr, isPathErr := e.(*os.PathError)
if e == nil || isPathErr && pathErr == nil {
return false
}
if !isPathErr {
return true
}
p.lock.Lock()
defer p.lock.Unlock()
seen := p.seenOps[pathErr.Op]
p.seenOps[pathErr.Op] = true
// actionable if *not* seen before
return !seen
}
func newPathErrorTracker() *pathErrorTracker {
return &pathErrorTracker{
seenOps: make(map[string]bool),
}
}
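// Example (illustrative sketch, not part of the original source; errSome is a
// placeholder error): only the first *os.PathError with a given Op is
// actionable, repeats of the same Op are suppressed, and non-PathError errors
// are always actionable.
//
//	tracker := newPathErrorTracker()
//	pathErr := &os.PathError{Op: "chmod", Path: "a", Err: errSome}
//	tracker.actionable(pathErr) // true: first "chmod" error
//	tracker.actionable(pathErr) // false: "chmod" already seen
//	tracker.actionable(errSome) // true: not a *os.PathError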
package archives
import (
"archive/zip"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/sirupsen/logrus"
)
func createZipDirectoryEntry(archive *zip.Writer, fh *zip.FileHeader) error {
fh.Name += "/"
_, err := archive.CreateHeader(fh)
return err
}
func createZipSymlinkEntry(archive *zip.Writer, fh *zip.FileHeader) error {
fw, err := archive.CreateHeader(fh)
if err != nil {
return err
}
link, err := os.Readlink(fh.Name)
if err != nil {
return err
}
_, err = io.WriteString(fw, link)
return err
}
func createZipFileEntry(archive *zip.Writer, fh *zip.FileHeader) error {
fh.Method = zip.Deflate
fw, err := archive.CreateHeader(fh)
if err != nil {
return err
}
file, err := os.Open(fh.Name)
if err != nil {
return err
}
_, err = io.Copy(fw, file)
file.Close()
if err != nil {
return err
}
return nil
}
func createZipEntry(archive *zip.Writer, fileName string) error {
fi, err := os.Lstat(fileName)
if err != nil {
logrus.Warningln("File ignored:", err)
return nil
}
fh, err := zip.FileInfoHeader(fi)
if err != nil {
return err
}
fh.Name = fileName
fh.Extra = createZipExtra(fi)
switch fi.Mode() & os.ModeType {
case os.ModeDir:
return createZipDirectoryEntry(archive, fh)
case os.ModeSymlink:
return createZipSymlinkEntry(archive, fh)
case os.ModeNamedPipe, os.ModeSocket, os.ModeDevice:
// Ignore files of these types
logrus.Warningln("File ignored:", fileName)
return nil
default:
return createZipFileEntry(archive, fh)
}
}
func CreateZipArchive(w io.Writer, fileNames []string) error {
tracker := newPathErrorTracker()
archive := zip.NewWriter(w)
defer archive.Close()
for _, fileName := range fileNames {
if err := errorIfGitDirectory(fileName); tracker.actionable(err) {
printGitArchiveWarning("archive")
}
err := createZipEntry(archive, fileName)
if err != nil {
return err
}
}
return nil
}
func CreateZipFile(fileName string, fileNames []string) error {
// create directories to store archive
err := os.MkdirAll(filepath.Dir(fileName), 0700)
if err != nil {
return err
}
tempFile, err := ioutil.TempFile(filepath.Dir(fileName), "archive_")
if err != nil {
return err
}
defer tempFile.Close()
defer os.Remove(tempFile.Name())
logrus.Debugln("Temporary file:", tempFile.Name())
err = CreateZipArchive(tempFile, fileNames)
if err != nil {
return err
}
tempFile.Close()
err = os.Rename(tempFile.Name(), fileName)
if err != nil {
return err
}
return nil
}
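// Example (illustrative sketch, not part of the original source; the paths are
// placeholders): the archive is written to a temporary file in the target
// directory first and renamed into place only on success.
//
//	err := CreateZipFile("out/artifacts.zip", []string{"bin/app", "README.md"})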
package archives
import (
"archive/zip"
"bytes"
"encoding/binary"
"io"
"os"
"time"
)
const ZipUIDGidFieldType = 0x7875
const ZipTimestampFieldType = 0x5455
// ZipExtraField is taken from https://github.com/LuaDist/zip/blob/master/proginfo/extrafld.txt
type ZipExtraField struct {
Type uint16
Size uint16
}
type ZipUIDGidField struct {
Version uint8
UIDSize uint8
UID uint32
GIDSize uint8
Gid uint32
}
type ZipTimestampField struct {
Flags uint8
ModTime uint32
}
func createZipTimestampField(w io.Writer, fi os.FileInfo) (err error) {
tsField := ZipTimestampField{
1,
uint32(fi.ModTime().Unix()),
}
tsFieldType := ZipExtraField{
Type: ZipTimestampFieldType,
Size: uint16(binary.Size(&tsField)),
}
err = binary.Write(w, binary.LittleEndian, &tsFieldType)
if err == nil {
err = binary.Write(w, binary.LittleEndian, &tsField)
}
return
}
func processZipTimestampField(data []byte, file *zip.FileHeader) error {
if !file.Mode().IsDir() && !file.Mode().IsRegular() {
return nil
}
var tsField ZipTimestampField
err := binary.Read(bytes.NewReader(data), binary.LittleEndian, &tsField)
if err != nil {
return err
}
if (tsField.Flags & 1) == 1 {
modTime := time.Unix(int64(tsField.ModTime), 0)
acTime := time.Now()
return os.Chtimes(file.Name, acTime, modTime)
}
return nil
}
func createZipExtra(fi os.FileInfo) []byte {
var buffer bytes.Buffer
err := createZipUIDGidField(&buffer, fi)
if err == nil {
err = createZipTimestampField(&buffer, fi)
}
if err == nil {
return buffer.Bytes()
}
return nil
}
func readZipExtraField(r io.Reader) (field ZipExtraField, data []byte, err error) {
err = binary.Read(r, binary.LittleEndian, &field)
if err != nil {
return
}
data = make([]byte, field.Size)
_, err = io.ReadFull(r, data)
if err != nil {
return
}
return
}
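// Each extra-data record read by readZipExtraField is little-endian: a 2-byte
// type tag and a 2-byte payload size, followed by Size bytes of payload. For
// example, a timestamp field written by createZipTimestampField is laid out as:
//
//	| 0x5455 (uint16) | size (uint16) | flags (uint8) | modtime (uint32) |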
func processZipExtra(file *zip.FileHeader) error {
if len(file.Extra) == 0 {
return nil
}
r := bytes.NewReader(file.Extra)
for {
field, data, err := readZipExtraField(r)
if err == io.EOF {
break
} else if err != nil {
return err
}
switch field.Type {
case ZipUIDGidFieldType:
err = processZipUIDGidField(data, file)
case ZipTimestampFieldType:
err = processZipTimestampField(data, file)
}
if err != nil {
return err
}
}
return nil
}
// +build linux darwin freebsd openbsd
package archives
import (
"archive/zip"
"bytes"
"encoding/binary"
"errors"
"io"
"os"
"syscall"
)
func createZipUIDGidField(w io.Writer, fi os.FileInfo) (err error) {
stat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return
}
ugField := ZipUIDGidField{
1,
4, stat.Uid,
4, stat.Gid,
}
ugFieldType := ZipExtraField{
Type: ZipUIDGidFieldType,
Size: uint16(binary.Size(&ugField)),
}
err = binary.Write(w, binary.LittleEndian, &ugFieldType)
if err == nil {
err = binary.Write(w, binary.LittleEndian, &ugField)
}
return err
}
func processZipUIDGidField(data []byte, file *zip.FileHeader) error {
var ugField ZipUIDGidField
err := binary.Read(bytes.NewReader(data), binary.LittleEndian, &ugField)
if err != nil {
return err
}
if !(ugField.Version == 1 && ugField.UIDSize == 4 && ugField.GIDSize == 4) {
return errors.New("uid/gid data not supported")
}
return os.Lchown(file.Name, int(ugField.UID), int(ugField.Gid))
}
package archives
import (
"archive/zip"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/sirupsen/logrus"
)
func extractZipDirectoryEntry(file *zip.File) (err error) {
err = os.Mkdir(file.Name, file.Mode().Perm())
// An already-existing directory is not an error for us
if os.IsExist(err) {
err = nil
}
return
}
func extractZipSymlinkEntry(file *zip.File) (err error) {
var data []byte
in, err := file.Open()
if err != nil {
return err
}
defer in.Close()
data, err = ioutil.ReadAll(in)
if err != nil {
return err
}
// Remove any existing symlink first, otherwise creating the new one fails because the file already exists
os.Remove(file.Name)
err = os.Symlink(string(data), file.Name)
return
}
func extractZipFileEntry(file *zip.File) (err error) {
var out *os.File
in, err := file.Open()
if err != nil {
return err
}
defer in.Close()
// Remove any existing file first, so a pre-existing file or symlink doesn't interfere with creating the new one
os.Remove(file.Name)
out, err = os.OpenFile(file.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode().Perm())
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
return
}
func extractZipFile(file *zip.File) (err error) {
// Create all parents to extract the file
err = os.MkdirAll(filepath.Dir(file.Name), 0777)
if err != nil {
return err
}
switch file.Mode() & os.ModeType {
case os.ModeDir:
err = extractZipDirectoryEntry(file)
case os.ModeSymlink:
err = extractZipSymlinkEntry(file)
case os.ModeNamedPipe, os.ModeSocket, os.ModeDevice:
// Ignore files of these types
logrus.Warningf("File ignored: %q", file.Name)
default:
err = extractZipFileEntry(file)
}
return
}
func ExtractZipArchive(archive *zip.Reader) error {
tracker := newPathErrorTracker()
for _, file := range archive.File {
if err := errorIfGitDirectory(file.Name); tracker.actionable(err) {
printGitArchiveWarning("extract")
}
if err := extractZipFile(file); tracker.actionable(err) {
logrus.Warningf("%s: %s (suppressing repeats)", file.Name, err)
}
}
for _, file := range archive.File {
// Update file permissions
if err := os.Chmod(file.Name, file.Mode().Perm()); tracker.actionable(err) {
logrus.Warningf("%s: %s (suppressing repeats)", file.Name, err)
}
// Process zip metadata
if err := processZipExtra(&file.FileHeader); tracker.actionable(err) {
logrus.Warningf("%s: %s (suppressing repeats)", file.Name, err)
}
}
return nil
}
func ExtractZipFile(fileName string) error {
archive, err := zip.OpenReader(fileName)
if err != nil {
return err
}
defer archive.Close()
return ExtractZipArchive(&archive.Reader)
}
package helpers
import (
"fmt"
"time"
)
type RawLogger interface {
SendRawLog(args ...interface{})
}
type BuildSection struct {
Name string
SkipMetrics bool
Run func() error
}
const (
traceSectionStart = "section_start:%v:%s\r" + ANSI_CLEAR
traceSectionEnd = "section_end:%v:%s\r" + ANSI_CLEAR
)
func nowUnixUTC() int64 {
return time.Now().UTC().Unix()
}
func (s *BuildSection) timestamp(format string, logger RawLogger) {
if s.SkipMetrics {
return
}
sectionLine := fmt.Sprintf(format, nowUnixUTC(), s.Name)
logger.SendRawLog(sectionLine)
}
func (s *BuildSection) start(logger RawLogger) {
s.timestamp(traceSectionStart, logger)
}
func (s *BuildSection) end(logger RawLogger) {
s.timestamp(traceSectionEnd, logger)
}
func (s *BuildSection) Execute(logger RawLogger) error {
s.start(logger)
defer s.end(logger)
return s.Run()
}
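// Example (illustrative sketch, not part of the original source; logger and
// runStep are assumed to be supplied by the caller): wrapping a build step so
// that section_start/section_end markers surround its trace output.
//
//	section := &BuildSection{
//		Name: "step_script",
//		Run:  runStep,
//	}
//	err := section.Execute(logger)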
package certificate
import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"math/big"
"net"
"time"
)
const (
x509CertificatePrivateKeyBits = 2048
x509CertificateExpiryInYears = 2
x509CertificateSerialNumber = 1
x509CertificateOrganization = "GitLab Runner"
)
type X509Generator struct{}
func (c X509Generator) Generate(host string) (tls.Certificate, []byte, error) {
priv, err := rsa.GenerateKey(rand.Reader, x509CertificatePrivateKeyBits)
if err != nil {
return tls.Certificate{}, []byte{}, err
}
template := x509.Certificate{
SerialNumber: big.NewInt(x509CertificateSerialNumber),
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(x509CertificateExpiryInYears, 0, 0),
Subject: pkix.Name{
Organization: []string{x509CertificateOrganization},
},
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageDataEncipherment,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
if ip := net.ParseIP(host); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, host)
}
publicKeyBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, priv.Public(), priv)
if err != nil {
return tls.Certificate{}, []byte{}, fmt.Errorf("failed to create certificate: %w", err)
}
publicKeyPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: publicKeyBytes})
privateKeyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
parsedCertificate, err := tls.X509KeyPair(publicKeyPEM, privateKeyPEM)
if err != nil {
return tls.Certificate{}, []byte{}, err
}
return parsedCertificate, publicKeyPEM, nil
}
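// Example (illustrative sketch, not part of the original source): generating a
// self-signed certificate for a loopback listener and serving TLS with it.
//
//	gen := X509Generator{}
//	cert, publicKeyPEM, err := gen.Generate("127.0.0.1")
//	if err != nil {
//		return err
//	}
//	_ = publicKeyPEM // PEM-encoded copy, e.g. for distribution to clients
//	server := &http.Server{
//		TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
//	}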
package helperimage
import (
"fmt"
"gitlab.com/gitlab-org/gitlab-runner/helpers/docker/errors"
)
const (
OSTypeLinux = "linux"
OSTypeWindows = "windows"
name = "gitlab/gitlab-runner-helper"
headRevision = "HEAD"
latestImageRevision = "latest"
)
type Info struct {
Architecture string
Name string
Tag string
IsSupportingLocalImport bool
Cmd []string
}
func (i Info) String() string {
return fmt.Sprintf("%s:%s", i.Name, i.Tag)
}
// Config specifies details about the consumer of this package that need to be
// taken in consideration when building Container.
type Config struct {
OSType string
Architecture string
OperatingSystem string
}
type creator interface {
Create(revision string, cfg Config) (Info, error)
}
var supportedOsTypesFactories = map[string]creator{
OSTypeWindows: new(windowsInfo),
OSTypeLinux: new(linuxInfo),
}
func Get(revision string, cfg Config) (Info, error) {
factory, ok := supportedOsTypesFactories[cfg.OSType]
if !ok {
return Info{}, errors.NewErrOSNotSupported(cfg.OSType)
}
return factory.Create(imageRevision(revision), cfg)
}
func imageRevision(revision string) string {
if revision != headRevision {
return revision
}
return latestImageRevision
}
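// Example (illustrative sketch, not part of the original source): resolving the
// helper image for an amd64 Linux host at a concrete runner revision.
//
//	info, err := Get("11.10.0", Config{
//		OSType:       OSTypeLinux,
//		Architecture: "amd64",
//	})
//	// info.String() == "gitlab/gitlab-runner-helper:x86_64-11.10.0"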
package helperimage
import (
"fmt"
"runtime"
)
var bashCmd = []string{"gitlab-runner-build"}
type linuxInfo struct{}
func (l *linuxInfo) Create(revision string, cfg Config) (Info, error) {
arch := l.architecture(cfg.Architecture)
return Info{
Architecture: arch,
Name: name,
Tag: fmt.Sprintf("%s-%s", arch, revision),
IsSupportingLocalImport: true,
Cmd: bashCmd,
}, nil
}
func (l *linuxInfo) architecture(arch string) string {
switch arch {
case "armv6l", "armv7l":
return "arm"
case "aarch64":
return "arm64"
case "amd64":
return "x86_64"
}
if arch != "" {
return arch
}
switch runtime.GOARCH {
case "amd64":
return "x86_64"
default:
return runtime.GOARCH
}
}
package helperimage
import (
"errors"
"fmt"
"strings"
)
const (
windows1809 = "1809"
windows1803 = "1803"
baseImage1809 = "servercore1809"
baseImage1803 = "servercore1803"
windowsSupportedArchitecture = "x86_64"
)
var supportedOSVersions = map[string]string{
windows1803: baseImage1803,
windows1809: baseImage1809,
}
var ErrUnsupportedOSVersion = errors.New("could not determine windows version")
var powerShellCmd = []string{"PowerShell", "-NoProfile", "-NoLogo", "-InputFormat", "text", "-OutputFormat", "text", "-NonInteractive", "-ExecutionPolicy", "Bypass", "-Command", "-"}
type windowsInfo struct{}
func (w *windowsInfo) Create(revision string, cfg Config) (Info, error) {
osVersion, err := w.osVersion(cfg.OperatingSystem)
if err != nil {
return Info{}, err
}
return Info{
Architecture: windowsSupportedArchitecture,
Name: name,
Tag: fmt.Sprintf("%s-%s-%s", windowsSupportedArchitecture, revision, osVersion),
IsSupportingLocalImport: false,
Cmd: powerShellCmd,
}, nil
}
func (w *windowsInfo) osVersion(operatingSystem string) (string, error) {
for osVersion, baseImage := range supportedOSVersions {
if strings.Contains(operatingSystem, fmt.Sprintf(" %s ", osVersion)) {
return baseImage, nil
}
}
return "", ErrUnsupportedOSVersion
}
package services
import (
"regexp"
"strings"
"github.com/docker/distribution/reference"
)
type Service struct {
Service string
Version string
ImageName string
Aliases []string
}
// referenceRegexpNoPort splits the leading component of an image reference from
// an optional port and a trailing path
var referenceRegexpNoPort = regexp.MustCompile(`^(.*?)(|:[0-9]+)(|/.*)$`)
func SplitNameAndVersion(serviceDescription string) (out Service) {
out.ImageName = serviceDescription
out.Version = "latest"
if match := reference.ReferenceRegexp.FindStringSubmatch(serviceDescription); match != nil {
matchService := referenceRegexpNoPort.FindStringSubmatch(match[1])
out.Service = matchService[1] + matchService[3]
if len(match[2]) > 0 {
out.Version = match[2]
} else {
out.ImageName = match[1] + ":" + out.Version
}
} else {
return
}
alias := strings.Replace(out.Service, "/", "__", -1)
out.Aliases = append(out.Aliases, alias)
// Create alternative link name according to RFC 1123
// Where you can use only `a-zA-Z0-9-`
if alternativeName := strings.Replace(out.Service, "/", "-", -1); alias != alternativeName {
out.Aliases = append(out.Aliases, alternativeName)
}
return
}
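// Example (illustrative sketch, not part of the original source): how a service
// description is split into its parts.
//
//	svc := SplitNameAndVersion("tutum/wordpress:latest")
//	// svc.Service   == "tutum/wordpress"
//	// svc.Version   == "latest"
//	// svc.ImageName == "tutum/wordpress:latest"
//	// svc.Aliases   == []string{"tutum__wordpress", "tutum-wordpress"}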
package helpers
import (
"bufio"
"bytes"
"github.com/BurntSushi/toml"
"gopkg.in/yaml.v2"
)
func ToYAML(src interface{}) string {
data, err := yaml.Marshal(src)
if err == nil {
return string(data)
}
return ""
}
func ToTOML(src interface{}) string {
var data bytes.Buffer
buffer := bufio.NewWriter(&data)
if err := toml.NewEncoder(buffer).Encode(src); err != nil {
return ""
}
if err := buffer.Flush(); err != nil {
return ""
}
return data.String()
}
func ToConfigMap(list interface{}) (map[string]interface{}, bool) {
x, ok := list.(map[string]interface{})
if ok {
return x, ok
}
y, ok := list.(map[interface{}]interface{})
if !ok {
return nil, false
}
result := make(map[string]interface{})
for k, v := range y {
result[k.(string)] = v
}
return result, true
}
func GetMapKey(value map[string]interface{}, keys ...string) (result interface{}, ok bool) {
result = value
for _, key := range keys {
if stringMap, ok := result.(map[string]interface{}); ok {
if result, ok = stringMap[key]; ok {
continue
}
} else if interfaceMap, ok := result.(map[interface{}]interface{}); ok {
if result, ok = interfaceMap[key]; ok {
continue
}
}
return nil, false
}
return result, true
}
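// Example (illustrative sketch, not part of the original source): walking a
// nested configuration map with GetMapKey.
//
//	cfg := map[string]interface{}{
//		"docker": map[string]interface{}{"image": "alpine:3.9"},
//	}
//	image, ok := GetMapKey(cfg, "docker", "image") // "alpine:3.9", true
//	_, ok = GetMapKey(cfg, "docker", "memory")     // nil, false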
package dns
import (
"regexp"
"strings"
"k8s.io/apimachinery/pkg/util/validation"
)
const (
RFC1123NameMaximumLength = 63
RFC1123NotAllowedCharacters = "[^-a-z0-9]"
RFC1123NotAllowedStartCharacters = "^[^a-z0-9]+"
)
func MakeRFC1123Compatible(name string) string {
name = strings.ToLower(name)
nameNotAllowedChars := regexp.MustCompile(RFC1123NotAllowedCharacters)
name = nameNotAllowedChars.ReplaceAllString(name, "")
nameNotAllowedStartChars := regexp.MustCompile(RFC1123NotAllowedStartCharacters)
name = nameNotAllowedStartChars.ReplaceAllString(name, "")
if len(name) > RFC1123NameMaximumLength {
name = name[0:RFC1123NameMaximumLength]
}
return name
}
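// Example (illustrative sketch, not part of the original source): disallowed
// characters are dropped, not replaced.
//
//	MakeRFC1123Compatible("Feature/MY-Branch_123") // "featuremy-branch123"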
const emptyRFC1123SubdomainErrorMessage = "validating rfc1123 subdomain"
type RFC1123SubdomainError struct {
errs []string
}
func (d *RFC1123SubdomainError) Error() string {
if len(d.errs) == 0 {
return emptyRFC1123SubdomainErrorMessage
}
return strings.Join(d.errs, ", ")
}
func (d *RFC1123SubdomainError) Is(err error) bool {
_, ok := err.(*RFC1123SubdomainError)
return ok
}
func ValidateDNS1123Subdomain(name string) error {
errs := validation.IsDNS1123Subdomain(name)
if len(errs) == 0 {
return nil
}
return &RFC1123SubdomainError{errs: errs}
}
package docker_helpers
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"os/user"
"path"
"strings"
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/config/credentials"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/homedir"
)
// DefaultDockerRegistry is the name of the default registry index (Docker Hub)
const DefaultDockerRegistry = "docker.io"
// EncodeAuthConfig constructs a token from an AuthConfig, suitable for
// authorizing requests against the Docker API.
func EncodeAuthConfig(authConfig *types.AuthConfig) (string, error) {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(authConfig); err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(buf.Bytes()), nil
}
// SplitDockerImageName breaks a reposName into an index name and remote name
func SplitDockerImageName(reposName string) (string, string) {
nameParts := strings.SplitN(reposName, "/", 2)
var indexName, remoteName string
if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") &&
!strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
// This is a Docker Index repos (ex: samalba/hipache or ubuntu)
// 'docker.io'
indexName = DefaultDockerRegistry
remoteName = reposName
} else {
indexName = nameParts[0]
remoteName = nameParts[1]
}
if indexName == "index."+DefaultDockerRegistry {
indexName = DefaultDockerRegistry
}
return indexName, remoteName
}
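// Example (illustrative sketch, not part of the original source):
//
//	SplitDockerImageName("ubuntu")                        // ("docker.io", "ubuntu")
//	SplitDockerImageName("registry.example.com:5000/app") // ("registry.example.com:5000", "app")
//	SplitDockerImageName("index.docker.io/library/redis") // ("docker.io", "library/redis")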
var HomeDirectory = homedir.Get()
func ReadDockerAuthConfigsFromHomeDir(userName string) (string, map[string]types.AuthConfig, error) {
homeDir := HomeDirectory
if userName != "" {
u, err := user.Lookup(userName)
if err != nil {
return "", nil, err
}
homeDir = u.HomeDir
}
if homeDir == "" {
return "", nil, fmt.Errorf("Failed to get home directory")
}
configFile := path.Join(homeDir, ".docker", "config.json")
r, err := os.Open(configFile)
if err != nil {
configFile = path.Join(homeDir, ".dockercfg")
r, err = os.Open(configFile)
if err != nil && !os.IsNotExist(err) {
return "", nil, err
}
}
if r == nil {
return "", make(map[string]types.AuthConfig), nil
}
// Defer after both Open attempts so we never close a nil handle nor leak the fallback file
defer r.Close()
authConfigs, err := ReadAuthConfigsFromReader(r)
return configFile, authConfigs, err
}
func ReadAuthConfigsFromReader(r io.Reader) (map[string]types.AuthConfig, error) {
config := &configfile.ConfigFile{}
if err := config.LoadFromReader(r); err != nil {
return nil, err
}
auths := make(map[string]types.AuthConfig)
addAll(auths, config.AuthConfigs)
if config.CredentialsStore != "" {
authsFromCredentialsStore, err := readAuthConfigsFromCredentialsStore(config)
if err != nil {
return nil, err
}
addAll(auths, authsFromCredentialsStore)
}
if config.CredentialHelpers != nil {
authsFromCredentialsHelpers, err := readAuthConfigsFromCredentialsHelper(config)
if err != nil {
return nil, err
}
addAll(auths, authsFromCredentialsHelpers)
}
return auths, nil
}
func readAuthConfigsFromCredentialsStore(config *configfile.ConfigFile) (map[string]types.AuthConfig, error) {
store := credentials.NewNativeStore(config, config.CredentialsStore)
newAuths, err := store.GetAll()
if err != nil {
return nil, err
}
return newAuths, nil
}
func readAuthConfigsFromCredentialsHelper(config *configfile.ConfigFile) (map[string]types.AuthConfig, error) {
helpersAuths := make(map[string]types.AuthConfig)
for registry, helper := range config.CredentialHelpers {
store := credentials.NewNativeStore(config, helper)
newAuths, err := store.Get(registry)
if err != nil {
return nil, err
}
helpersAuths[registry] = newAuths
}
return helpersAuths, nil
}
func addAll(to, from map[string]types.AuthConfig) {
for reg, ac := range from {
to[reg] = ac
}
}
// ResolveDockerAuthConfig taken from: https://github.com/docker/docker/blob/master/registry/auth.go
func ResolveDockerAuthConfig(indexName string, configs map[string]types.AuthConfig) *types.AuthConfig {
if configs == nil {
return nil
}
convertToHostname := func(url string) string {
stripped := url
if strings.HasPrefix(url, "http://") {
stripped = strings.Replace(url, "http://", "", 1)
} else if strings.HasPrefix(url, "https://") {
stripped = strings.Replace(url, "https://", "", 1)
}
nameParts := strings.SplitN(stripped, "/", 2)
if nameParts[0] == "index."+DefaultDockerRegistry {
return DefaultDockerRegistry
}
return nameParts[0]
}
// The config file may use the legacy format; iterate over the keys, convert
// each one to a hostname, and compare it against the index name
for registry, authConfig := range configs {
if indexName == convertToHostname(registry) {
return &authConfig
}
}
// When all else fails, return an empty auth config
return nil
}
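// Example (illustrative sketch, not part of the original source): a legacy
// config key that still carries a scheme resolves for the canonical index name.
//
//	configs := map[string]types.AuthConfig{
//		"https://index.docker.io/v1/": {Username: "user"},
//	}
//	auth := ResolveDockerAuthConfig("docker.io", configs) // auth.Username == "user"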
package docker_helpers
import (
"os"
"strconv"
)
type DockerCredentials struct {
Host string `toml:"host,omitempty" json:"host" long:"host" env:"DOCKER_HOST" description:"Docker daemon address"`
CertPath string `toml:"tls_cert_path,omitempty" json:"tls_cert_path" long:"cert-path" env:"DOCKER_CERT_PATH" description:"Certificate path"`
TLSVerify bool `toml:"tls_verify,omitzero" json:"tls_verify" long:"tlsverify" env:"DOCKER_TLS_VERIFY" description:"Use TLS and verify the remote"`
}
func credentialsFromEnv() DockerCredentials {
tlsVerify, _ := strconv.ParseBool(os.Getenv("DOCKER_TLS_VERIFY"))
return DockerCredentials{
Host: os.Getenv("DOCKER_HOST"),
CertPath: os.Getenv("DOCKER_CERT_PATH"),
TLSVerify: tlsVerify,
}
}
package docker_helpers
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"time"
"github.com/docker/machine/commands/mcndirs"
"github.com/sirupsen/logrus"
)
const (
defaultDockerMachineExecutable = "docker-machine"
crashreportTokenOption = "--bugsnag-api-token"
crashreportToken = "no-report"
)
var dockerMachineExecutable = defaultDockerMachineExecutable
type logWriter struct {
log func(args ...interface{})
reader *bufio.Reader
}
func (l *logWriter) write(line string) {
line = strings.TrimRight(line, "\n")
if len(line) <= 0 {
return
}
l.log(line)
}
func (l *logWriter) watch() {
for {
line, err := l.reader.ReadString('\n')
if err == nil || err == io.EOF {
l.write(line)
if err == io.EOF {
return
}
} else {
if !strings.Contains(err.Error(), "bad file descriptor") {
logrus.WithError(err).Warn("Problem while reading command output")
}
return
}
}
}
func newLogWriter(logFunction func(args ...interface{}), reader io.Reader) {
writer := &logWriter{
log: logFunction,
reader: bufio.NewReader(reader),
}
go writer.watch()
}
func stdoutLogWriter(cmd *exec.Cmd, fields logrus.Fields) {
log := logrus.WithFields(fields)
reader, err := cmd.StdoutPipe()
if err == nil {
newLogWriter(log.Infoln, reader)
}
}
func stderrLogWriter(cmd *exec.Cmd, fields logrus.Fields) {
log := logrus.WithFields(fields)
reader, err := cmd.StderrPipe()
if err == nil {
newLogWriter(log.Errorln, reader)
}
}
type machineCommand struct {
cache map[string]machineInfo
cacheLock sync.RWMutex
}
type machineInfo struct {
expires time.Time
canConnect bool
}
func (m *machineCommand) Create(driver, name string, opts ...string) error {
args := []string{
"create",
"--driver", driver,
}
for _, opt := range opts {
args = append(args, "--"+opt)
}
args = append(args, name)
cmd := newDockerMachineCommand(args...)
fields := logrus.Fields{
"operation": "create",
"driver": driver,
"name": name,
}
stdoutLogWriter(cmd, fields)
stderrLogWriter(cmd, fields)
logrus.Debugln("Executing", cmd.Path, cmd.Args)
return cmd.Run()
}
func (m *machineCommand) Provision(name string) error {
cmd := newDockerMachineCommand("provision", name)
fields := logrus.Fields{
"operation": "provision",
"name": name,
}
stdoutLogWriter(cmd, fields)
stderrLogWriter(cmd, fields)
return cmd.Run()
}
func (m *machineCommand) Stop(name string, timeout time.Duration) error {
ctx, ctxCancelFn := context.WithTimeout(context.Background(), timeout)
defer ctxCancelFn()
cmd := newDockerMachineCommandCtx(ctx, "stop", name)
fields := logrus.Fields{
"operation": "stop",
"name": name,
}
stdoutLogWriter(cmd, fields)
stderrLogWriter(cmd, fields)
return cmd.Run()
}
func (m *machineCommand) Remove(name string) error {
cmd := newDockerMachineCommand("rm", "-y", name)
fields := logrus.Fields{
"operation": "remove",
"name": name,
}
stdoutLogWriter(cmd, fields)
stderrLogWriter(cmd, fields)
if err := cmd.Run(); err != nil {
return err
}
m.cacheLock.Lock()
delete(m.cache, name)
m.cacheLock.Unlock()
return nil
}
func (m *machineCommand) List() (hostNames []string, err error) {
dir, err := ioutil.ReadDir(mcndirs.GetMachineDir())
if err != nil {
if os.IsNotExist(err) {
return nil, nil
}
return nil, err
}
for _, file := range dir {
if file.IsDir() && !strings.HasPrefix(file.Name(), ".") {
hostNames = append(hostNames, file.Name())
}
}
return
}
func (m *machineCommand) get(args ...string) (out string, err error) {
// Execute docker-machine and capture its output
cmd := newDockerMachineCommand(args...)
data, err := cmd.Output()
if err != nil {
return
}
// Normalize the output
out = strings.TrimSpace(string(data))
if out == "" {
err = fmt.Errorf("failed to get %v", args)
}
return
}
func (m *machineCommand) IP(name string) (string, error) {
return m.get("ip", name)
}
func (m *machineCommand) URL(name string) (string, error) {
return m.get("url", name)
}
func (m *machineCommand) CertPath(name string) (string, error) {
return m.get("inspect", name, "-f", "{{.HostOptions.AuthOptions.StorePath}}")
}
func (m *machineCommand) Status(name string) (string, error) {
return m.get("status", name)
}
func (m *machineCommand) Exist(name string) bool {
configPath := filepath.Join(mcndirs.GetMachineDir(), name, "config.json")
_, err := os.Stat(configPath)
if err != nil {
return false
}
cmd := newDockerMachineCommand("inspect", name)
fields := logrus.Fields{
"operation": "exists",
"name": name,
}
stderrLogWriter(cmd, fields)
return cmd.Run() == nil
}
func (m *machineCommand) CanConnect(name string, skipCache bool) bool {
m.cacheLock.RLock()
cachedInfo, ok := m.cache[name]
m.cacheLock.RUnlock()
if ok && !skipCache && time.Now().Before(cachedInfo.expires) {
return cachedInfo.canConnect
}
canConnect := m.canConnect(name)
if !canConnect {
return false // we only cache positive hits. Machines usually do not disconnect.
}
m.cacheLock.Lock()
m.cache[name] = machineInfo{
expires: time.Now().Add(5 * time.Minute),
canConnect: true,
}
m.cacheLock.Unlock()
return true
}
func (m *machineCommand) canConnect(name string) bool {
// Execute docker-machine config, which actively asks the machine whether it is up and online
cmd := newDockerMachineCommand("config", name)
return cmd.Run() == nil
}
func (m *machineCommand) Credentials(name string) (dc DockerCredentials, err error) {
if !m.CanConnect(name, true) {
err = errors.New("Can't connect")
return
}
dc.TLSVerify = true
dc.Host, err = m.URL(name)
if err == nil {
dc.CertPath, err = m.CertPath(name)
}
return
}
func newDockerMachineCommandCtx(ctx context.Context, args ...string) *exec.Cmd {
token := os.Getenv("MACHINE_BUGSNAG_API_TOKEN")
if token == "" {
token = crashreportToken
}
commandArgs := []string{
fmt.Sprintf("%s=%s", crashreportTokenOption, token),
}
commandArgs = append(commandArgs, args...)
cmd := exec.CommandContext(ctx, dockerMachineExecutable, commandArgs...)
cmd.Env = os.Environ()
return cmd
}
func newDockerMachineCommand(args ...string) *exec.Cmd {
return newDockerMachineCommandCtx(context.Background(), args...)
}
func NewMachineCommand() Machine {
return &machineCommand{
cache: map[string]machineInfo{},
}
}
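// Example (illustrative sketch, not part of the original source; the machine
// name is a placeholder): fetching connection credentials for an existing
// docker-machine host.
//
//	machine := NewMachineCommand()
//	if machine.CanConnect("runner-machine-1", false) {
//		creds, err := machine.Credentials("runner-machine-1")
//		// creds.Host, creds.CertPath and creds.TLSVerify describe the daemon
//	}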
package docker_helpers
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"path/filepath"
"runtime"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/go-connections/tlsconfig"
"github.com/sirupsen/logrus"
)
// DefaultAPIVersion is the default API version used to create a new docker client.
const DefaultAPIVersion = "1.25"
// IsErrNotFound checks whether a returned error is due to an image or container
// not being found. Proxies the docker implementation.
func IsErrNotFound(err error) bool {
return client.IsErrNotFound(err)
}
// officialDockerClient wraps a "github.com/docker/docker/client".Client,
// giving it the methods it needs to satisfy the docker_helpers.Client interface
type officialDockerClient struct {
client *client.Client
// Close() means "close idle connections held by engine-api's transport"
Transport *http.Transport
}
func newOfficialDockerClient(c DockerCredentials, apiVersion string) (*officialDockerClient, error) {
transport, err := newHTTPTransport(c)
if err != nil {
logrus.Errorln("Error creating TLS Docker client:", err)
return nil, err
}
httpClient := &http.Client{Transport: transport}
dockerClient, err := client.NewClient(c.Host, apiVersion, httpClient, nil)
if err != nil {
transport.CloseIdleConnections()
logrus.Errorln("Error creating Docker client:", err)
return nil, err
}
return &officialDockerClient{
client: dockerClient,
Transport: transport,
}, nil
}
func wrapError(method string, err error, started time.Time) error {
if err == nil {
return nil
}
seconds := int(time.Since(started).Seconds())
if _, file, line, ok := runtime.Caller(2); ok {
return fmt.Errorf("%w (%s:%d:%ds)", err, filepath.Base(file), line, seconds)
}
return fmt.Errorf("%w (%s:%ds)", err, method, seconds)
}
func (c *officialDockerClient) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
started := time.Now()
image, data, err := c.client.ImageInspectWithRaw(ctx, imageID)
return image, data, wrapError("ImageInspectWithRaw", err, started)
}
func (c *officialDockerClient) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) {
started := time.Now()
container, err := c.client.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
return container, wrapError("ContainerCreate", err, started)
}
func (c *officialDockerClient) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error {
started := time.Now()
err := c.client.ContainerStart(ctx, containerID, options)
return wrapError("ContainerCreate", err, started)
}
func (c *officialDockerClient) ContainerKill(ctx context.Context, containerID string, signal string) error {
started := time.Now()
err := c.client.ContainerKill(ctx, containerID, signal)
return wrapError("ContainerWait", err, started)
}
func (c *officialDockerClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
started := time.Now()
data, err := c.client.ContainerInspect(ctx, containerID)
return data, wrapError("ContainerInspect", err, started)
}
func (c *officialDockerClient) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
started := time.Now()
response, err := c.client.ContainerAttach(ctx, container, options)
return response, wrapError("ContainerAttach", err, started)
}
func (c *officialDockerClient) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
started := time.Now()
err := c.client.ContainerRemove(ctx, containerID, options)
return wrapError("ContainerRemove", err, started)
}
func (c *officialDockerClient) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
started := time.Now()
rc, err := c.client.ContainerLogs(ctx, container, options)
return rc, wrapError("ContainerLogs", err, started)
}
func (c *officialDockerClient) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
started := time.Now()
resp, err := c.client.ContainerExecCreate(ctx, container, config)
return resp, wrapError("ContainerExecCreate", err, started)
}
func (c *officialDockerClient) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) {
started := time.Now()
resp, err := c.client.ContainerExecAttach(ctx, execID, config)
return resp, wrapError("ContainerExecAttach", err, started)
}
func (c *officialDockerClient) NetworkDisconnect(ctx context.Context, networkID string, containerID string, force bool) error {
started := time.Now()
err := c.client.NetworkDisconnect(ctx, networkID, containerID, force)
return wrapError("NetworkDisconnect", err, started)
}
func (c *officialDockerClient) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
started := time.Now()
networks, err := c.client.NetworkList(ctx, options)
return networks, wrapError("NetworkList", err, started)
}
func (c *officialDockerClient) Info(ctx context.Context) (types.Info, error) {
started := time.Now()
info, err := c.client.Info(ctx)
return info, wrapError("Info", err, started)
}
func (c *officialDockerClient) ImageImportBlocking(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) error {
started := time.Now()
readCloser, err := c.client.ImageImport(ctx, source, ref, options)
if err != nil {
return wrapError("ImageImport", err, started)
}
defer readCloser.Close()
// TODO: respect the context here
if _, err := io.Copy(ioutil.Discard, readCloser); err != nil {
return wrapError("io.Copy: Failed to import image", err, started)
}
return nil
}
func (c *officialDockerClient) ImagePullBlocking(ctx context.Context, ref string, options types.ImagePullOptions) error {
started := time.Now()
readCloser, err := c.client.ImagePull(ctx, ref, options)
if err != nil {
return wrapError("ImagePull", err, started)
}
defer readCloser.Close()
// TODO: respect the context here
if _, err := io.Copy(ioutil.Discard, readCloser); err != nil {
return wrapError("io.Copy: Failed to pull image", err, started)
}
return nil
}
func (c *officialDockerClient) Close() error {
c.Transport.CloseIdleConnections()
return nil
}
// New attempts to create a new Docker client of the specified version. If the
// specified version is empty, it will use the default version.
//
// If no host is given in the DockerCredentials, it will attempt to look up
// details from the environment. If that fails, it will use the default
// connection details for your platform.
func New(c DockerCredentials, apiVersion string) (Client, error) {
if c.Host == "" {
c = credentialsFromEnv()
}
// Use the default if nothing is specified by caller *or* environment
if c.Host == "" {
c.Host = client.DefaultDockerHost
}
if apiVersion == "" {
apiVersion = DefaultAPIVersion
}
return newOfficialDockerClient(c, apiVersion)
}
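// Example (illustrative sketch, not part of the original source): creating a
// client from the environment (DOCKER_HOST and friends) with the default API version.
//
//	client, err := New(DockerCredentials{}, "")
//	if err != nil {
//		return err
//	}
//	defer client.Close()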
func newHTTPTransport(c DockerCredentials) (*http.Transport, error) {
url, err := client.ParseHostURL(c.Host)
if err != nil {
return nil, err
}
tr := &http.Transport{}
if err := configureTransport(tr, url.Scheme, url.Host); err != nil {
return nil, err
}
// FIXME: is a TLS connection with InsecureSkipVerify == true ever wanted?
if c.TLSVerify {
options := tlsconfig.Options{}
if c.CertPath != "" {
options.CAFile = filepath.Join(c.CertPath, "ca.pem")
options.CertFile = filepath.Join(c.CertPath, "cert.pem")
options.KeyFile = filepath.Join(c.CertPath, "key.pem")
}
tlsConfig, err := tlsconfig.Client(options)
if err != nil {
tr.CloseIdleConnections()
return nil, err
}
tr.TLSClientConfig = tlsConfig
}
return tr, nil
}
package docker_helpers
import (
"net"
"net/http"
"time"
"github.com/docker/go-connections/sockets"
)
const defaultTimeout = 300 * time.Second
const defaultKeepAlive = 10 * time.Second
const defaultTLSHandshakeTimeout = 60 * time.Second
const defaultResponseHeaderTimeout = 120 * time.Second
const defaultExpectContinueTimeout = 120 * time.Second
const defaultIdleConnTimeout = 10 * time.Second
// configureTransport configures the specified Transport according to the
// specified proto and addr.
// If the proto is unix (using a unix socket to communicate) or npipe the
// compression is disabled.
func configureTransport(tr *http.Transport, proto, addr string) error {
err := sockets.ConfigureTransport(tr, proto, addr)
if err != nil {
return err
}
tr.TLSHandshakeTimeout = defaultTLSHandshakeTimeout
tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout
tr.ExpectContinueTimeout = defaultExpectContinueTimeout
tr.IdleConnTimeout = defaultIdleConnTimeout
// for network protocols set custom sockets with keep-alive
if proto == "tcp" || proto == "http" || proto == "https" {
dialer, err := sockets.DialerFromEnvironment(&net.Dialer{
Timeout: defaultTimeout,
KeepAlive: defaultKeepAlive,
})
if err != nil {
return err
}
tr.Dial = dialer.Dial
}
return nil
}
package helpers
import (
"fmt"
"io"
"github.com/sirupsen/logrus"
)
type fatalLogHook struct {
output io.Writer
}
func (s *fatalLogHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.FatalLevel,
}
}
func (s *fatalLogHook) Fire(e *logrus.Entry) error {
fmt.Fprint(s.output, e.Message)
panic(e)
}
func MakeFatalToPanic() func() {
logger := logrus.StandardLogger()
hooks := make(logrus.LevelHooks)
hooks.Add(&fatalLogHook{output: logger.Out})
oldHooks := logger.ReplaceHooks(hooks)
return func() {
logger.ReplaceHooks(oldHooks)
}
}
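// Example (illustrative sketch, not part of the original source): temporarily
// turning logrus.Fatal into a panic, e.g. so a test can recover from it instead
// of exiting the process; the returned function restores the previous hooks.
//
//	restore := MakeFatalToPanic()
//	defer restore()
//	// logrus.Fatal(...) now panics via the hook before os.Exit is reached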
package featureflags
import (
"strconv"
)
const (
CmdDisableDelayedErrorLevelExpansion string = "FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION"
UseLegacyBuildsDirForDocker string = "FF_USE_LEGACY_BUILDS_DIR_FOR_DOCKER"
UseLegacyVolumesMountingOrder string = "FF_USE_LEGACY_VOLUMES_MOUNTING_ORDER"
)
type FeatureFlag struct {
Name string
DefaultValue string
Deprecated bool
ToBeRemovedWith string
Description string
}
// REMEMBER to update the documentation after adding or removing a feature flag
//
// Please use `make update_feature_flags_docs` to make the update automatic and
// properly formatted. It will replace the existing table with the new one, computed
// basing on the values below
var flags = []FeatureFlag{
{
Name: CmdDisableDelayedErrorLevelExpansion,
DefaultValue: "false",
Deprecated: false,
ToBeRemovedWith: "13.0",
Description: "Disables [EnableDelayedExpansion](https://ss64.com/nt/delayedexpansion.html) for error checking for when using [Window Batch](https://docs.gitlab.com/runner/shells/#windows-batch) shell",
},
{
Name: UseLegacyBuildsDirForDocker,
DefaultValue: "false",
Deprecated: true,
ToBeRemovedWith: "13.0",
Description: "Disables the new strategy for Docker executor to cache the content of `/builds` directory instead of `/builds/group-org`",
},
{
Name: UseLegacyVolumesMountingOrder,
DefaultValue: "false",
Deprecated: true,
ToBeRemovedWith: "13.0",
Description: "Disables the new ordering of volumes mounting when `docker*` executors are being used.",
},
}
func GetAll() []FeatureFlag {
return flags
}
func IsOn(value string) (bool, error) {
if value == "" {
return false, nil
}
on, err := strconv.ParseBool(value)
if err != nil {
return false, err
}
return on, nil
}
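// Example (illustrative sketch, not part of the original source): IsOn accepts
// anything strconv.ParseBool does and treats an unset value as "off".
//
//	on, err := IsOn("true") // true, nil
//	on, err = IsOn("")      // false, nil
//	on, err = IsOn("maybe") // false, non-nil err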
package gitlab_ci_yaml_parser
import (
"encoding/json"
"fmt"
"strings"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
)
type DataBag map[string]interface{}
func (m *DataBag) Get(keys ...string) (interface{}, bool) {
return helpers.GetMapKey(*m, keys...)
}
func (m *DataBag) GetSlice(keys ...string) ([]interface{}, bool) {
slice, ok := helpers.GetMapKey(*m, keys...)
if slice != nil {
return slice.([]interface{}), ok
}
return nil, false
}
func (m *DataBag) GetStringSlice(keys ...string) (slice []string, ok bool) {
rawSlice, ok := m.GetSlice(keys...)
if !ok {
return
}
for _, rawElement := range rawSlice {
if element, ok := rawElement.(string); ok {
slice = append(slice, element)
}
}
return
}
func (m *DataBag) GetSubOptions(keys ...string) (result DataBag, ok bool) {
value, ok := helpers.GetMapKey(*m, keys...)
if ok {
result, ok = value.(map[string]interface{})
}
return
}
func (m *DataBag) GetString(keys ...string) (result string, ok bool) {
value, ok := helpers.GetMapKey(*m, keys...)
if ok {
result, ok = value.(string)
}
return
}
func (m *DataBag) Decode(result interface{}, keys ...string) error {
value, ok := m.Get(keys...)
if !ok {
return fmt.Errorf("key not found %v", strings.Join(keys, "."))
}
data, err := json.Marshal(value)
if err != nil {
return err
}
return json.Unmarshal(data, result)
}
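// Example (illustrative sketch, not part of the original source): typed lookups
// on a DataBag built from parsed YAML.
//
//	bag := DataBag{
//		"image":     "alpine:3.9",
//		"artifacts": map[string]interface{}{"paths": []interface{}{"bin/"}},
//	}
//	image, ok := bag.GetString("image")             // "alpine:3.9", true
//	paths, ok := bag.GetSlice("artifacts", "paths") // []interface{}{"bin/"}, true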
func convertMapToStringMap(in interface{}) (out interface{}, err error) {
mapString, ok := in.(map[string]interface{})
if ok {
for k, v := range mapString {
mapString[k], err = convertMapToStringMap(v)
if err != nil {
return
}
}
return mapString, nil
}
mapInterface, ok := in.(map[interface{}]interface{})
if ok {
mapString := make(map[string]interface{})
for k, v := range mapInterface {
key, ok := k.(string)
if !ok {
return nil, fmt.Errorf("failed to convert %v to string", k)
}
mapString[key], err = convertMapToStringMap(v)
if err != nil {
return
}
}
return mapString, nil
}
return in, nil
}
func (m *DataBag) Sanitize() (err error) {
n := make(DataBag)
for k, v := range *m {
n[k], err = convertMapToStringMap(v)
if err != nil {
return
}
}
*m = n
return
}
func getOptionsMap(optionKey string, primary, secondary DataBag) (value DataBag) {
value, ok := primary.GetSubOptions(optionKey)
if !ok {
value, _ = secondary.GetSubOptions(optionKey)
}
return
}
func getOptions(optionKey string, primary, secondary DataBag) (value []interface{}, ok bool) {
value, ok = primary.GetSlice(optionKey)
if !ok {
value, ok = secondary.GetSlice(optionKey)
}
return
}
package gitlab_ci_yaml_parser
import (
"errors"
"fmt"
"io/ioutil"
"strconv"
"strings"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gopkg.in/yaml.v2"
)
type GitLabCiYamlParser struct {
filename string
jobName string
config DataBag
jobConfig DataBag
}
func (c *GitLabCiYamlParser) parseFile() (err error) {
data, err := ioutil.ReadFile(c.filename)
if err != nil {
return err
}
config := make(DataBag)
err = yaml.Unmarshal(data, config)
if err != nil {
return err
}
err = config.Sanitize()
if err != nil {
return err
}
c.config = config
return
}
func (c *GitLabCiYamlParser) loadJob() (err error) {
jobConfig, ok := c.config.GetSubOptions(c.jobName)
if !ok {
return fmt.Errorf("no job named %q", c.jobName)
}
c.jobConfig = jobConfig
return
}
func (c *GitLabCiYamlParser) prepareJobInfo(job *common.JobResponse) (err error) {
job.JobInfo = common.JobInfo{
Name: c.jobName,
}
if stage, ok := c.jobConfig.GetString("stage"); ok {
job.JobInfo.Stage = stage
} else {
job.JobInfo.Stage = "test"
}
return
}
func (c *GitLabCiYamlParser) getCommands(commands interface{}) (common.StepScript, error) {
if lines, ok := commands.([]interface{}); ok {
var steps common.StepScript
for _, line := range lines {
if lineText, ok := line.(string); ok {
steps = append(steps, lineText)
} else {
return common.StepScript{}, errors.New("unsupported script")
}
}
return steps, nil
} else if text, ok := commands.(string); ok {
return common.StepScript(strings.Split(text, "\n")), nil
} else if commands != nil {
return common.StepScript{}, errors.New("unsupported script")
}
return common.StepScript{}, nil
}
func (c *GitLabCiYamlParser) prepareSteps(job *common.JobResponse) (err error) {
if c.jobConfig["script"] == nil {
err = fmt.Errorf("missing 'script' for job")
return
}
var scriptCommands, afterScriptCommands common.StepScript
// get before_script
beforeScript, err := c.getCommands(c.config["before_script"])
if err != nil {
return
}
// get job before_script
jobBeforeScript, err := c.getCommands(c.jobConfig["before_script"])
if err != nil {
return
}
if len(jobBeforeScript) < 1 {
scriptCommands = beforeScript
} else {
scriptCommands = jobBeforeScript
}
// get script
script, err := c.getCommands(c.jobConfig["script"])
if err != nil {
return
}
scriptCommands = append(scriptCommands, script...)
afterScriptCommands, err = c.getCommands(c.jobConfig["after_script"])
if err != nil {
return
}
job.Steps = common.Steps{
common.Step{
Name: common.StepNameScript,
Script: scriptCommands,
Timeout: 3600,
When: common.StepWhenOnSuccess,
AllowFailure: false,
},
common.Step{
Name: common.StepNameAfterScript,
Script: afterScriptCommands,
Timeout: 3600,
When: common.StepWhenAlways,
AllowFailure: false,
},
}
return
}
func (c *GitLabCiYamlParser) buildDefaultVariables(job *common.JobResponse) (defaultVariables common.JobVariables, err error) {
defaultVariables = common.JobVariables{
{Key: "CI", Value: "true", Public: true, Internal: true, File: false},
{Key: "GITLAB_CI", Value: "true", Public: true, Internal: true, File: false},
{Key: "CI_SERVER_NAME", Value: "GitLab CI", Public: true, Internal: true, File: false},
{Key: "CI_SERVER_VERSION", Value: "", Public: true, Internal: true, File: false},
{Key: "CI_SERVER_REVISION", Value: "", Public: true, Internal: true, File: false},
{Key: "CI_PROJECT_ID", Value: strconv.Itoa(job.JobInfo.ProjectID), Public: true, Internal: true, File: false},
{Key: "CI_JOB_ID", Value: strconv.Itoa(job.ID), Public: true, Internal: true, File: false},
{Key: "CI_JOB_NAME", Value: job.JobInfo.Name, Public: true, Internal: true, File: false},
{Key: "CI_JOB_STAGE", Value: job.JobInfo.Stage, Public: true, Internal: true, File: false},
{Key: "CI_JOB_TOKEN", Value: job.Token, Public: true, Internal: true, File: false},
{Key: "CI_REPOSITORY_URL", Value: job.GitInfo.RepoURL, Public: true, Internal: true, File: false},
{Key: "CI_COMMIT_SHA", Value: job.GitInfo.Sha, Public: true, Internal: true, File: false},
{Key: "CI_COMMIT_BEFORE_SHA", Value: job.GitInfo.BeforeSha, Public: true, Internal: true, File: false},
{Key: "CI_COMMIT_REF_NAME", Value: job.GitInfo.Ref, Public: true, Internal: true, File: false},
}
return
}
func (c *GitLabCiYamlParser) buildVariables(configVariables interface{}) (buildVariables common.JobVariables, err error) {
if variables, ok := configVariables.(map[string]interface{}); ok {
for key, value := range variables {
if valueText, ok := value.(string); ok {
buildVariables = append(buildVariables, common.JobVariable{
Key: key,
Value: valueText,
Public: true,
})
} else {
err = fmt.Errorf("invalid value for variable %q", key)
}
}
} else if configVariables != nil {
err = errors.New("unsupported variables")
}
return
}
func (c *GitLabCiYamlParser) prepareVariables(job *common.JobResponse) (err error) {
job.Variables = common.JobVariables{}
defaultVariables, err := c.buildDefaultVariables(job)
if err != nil {
return
}
job.Variables = append(job.Variables, defaultVariables...)
globalVariables, err := c.buildVariables(c.config["variables"])
if err != nil {
return
}
job.Variables = append(job.Variables, globalVariables...)
jobVariables, err := c.buildVariables(c.jobConfig["variables"])
if err != nil {
return
}
job.Variables = append(job.Variables, jobVariables...)
return
}
func (c *GitLabCiYamlParser) prepareImage(job *common.JobResponse) (err error) {
job.Image = common.Image{}
if imageName, ok := c.jobConfig.GetString("image"); ok {
job.Image.Name = imageName
return
}
if imageDefinition, ok := c.jobConfig.GetSubOptions("image"); ok {
job.Image.Name, _ = imageDefinition.GetString("name")
job.Image.Entrypoint, _ = imageDefinition.GetStringSlice("entrypoint")
return
}
if imageName, ok := c.config.GetString("image"); ok {
job.Image.Name = imageName
return
}
if imageDefinition, ok := c.config.GetSubOptions("image"); ok {
job.Image.Name, _ = imageDefinition.GetString("name")
job.Image.Entrypoint, _ = imageDefinition.GetStringSlice("entrypoint")
return
}
return
}
func parseExtendedServiceDefinitionMap(serviceDefinition map[interface{}]interface{}) (image common.Image) {
service := make(DataBag)
for key, value := range serviceDefinition {
service[key.(string)] = value
}
image.Name, _ = service.GetString("name")
image.Alias, _ = service.GetString("alias")
image.Command, _ = service.GetStringSlice("command")
image.Entrypoint, _ = service.GetStringSlice("entrypoint")
return
}
func (c *GitLabCiYamlParser) prepareServices(job *common.JobResponse) (err error) {
job.Services = common.Services{}
if servicesMap, ok := getOptions("services", c.jobConfig, c.config); ok {
for _, service := range servicesMap {
if serviceName, ok := service.(string); ok {
job.Services = append(job.Services, common.Image{
Name: serviceName,
})
continue
}
if serviceDefinition, ok := service.(map[interface{}]interface{}); ok {
job.Services = append(job.Services, parseExtendedServiceDefinitionMap(serviceDefinition))
}
}
}
return
}
func (c *GitLabCiYamlParser) prepareArtifacts(job *common.JobResponse) (err error) {
var ok bool
artifactsMap := getOptionsMap("artifacts", c.jobConfig, c.config)
artifactsPaths, _ := artifactsMap.GetSlice("paths")
paths := common.ArtifactPaths{}
for _, path := range artifactsPaths {
paths = append(paths, path.(string))
}
var artifactsName string
if artifactsName, ok = artifactsMap.GetString("name"); !ok {
artifactsName = ""
}
var artifactsUntracked interface{}
if artifactsUntracked, ok = artifactsMap.Get("untracked"); !ok {
artifactsUntracked = false
}
var artifactsWhen string
if artifactsWhen, ok = artifactsMap.GetString("when"); !ok {
artifactsWhen = string(common.ArtifactWhenOnSuccess)
}
var artifactsExpireIn string
if artifactsExpireIn, ok = artifactsMap.GetString("expire_in"); !ok {
artifactsExpireIn = ""
}
job.Artifacts = make(common.Artifacts, 1)
job.Artifacts[0] = common.Artifact{
Name: artifactsName,
Untracked: artifactsUntracked.(bool),
Paths: paths,
When: common.ArtifactWhen(artifactsWhen),
ExpireIn: artifactsExpireIn,
}
return
}
func (c *GitLabCiYamlParser) prepareCache(job *common.JobResponse) (err error) {
var ok bool
cacheMap := getOptionsMap("cache", c.jobConfig, c.config)
cachePaths, _ := cacheMap.GetSlice("paths")
paths := common.ArtifactPaths{}
for _, path := range cachePaths {
paths = append(paths, path.(string))
}
var cacheKey string
if cacheKey, ok = cacheMap.GetString("key"); !ok {
cacheKey = ""
}
var cacheUntracked interface{}
if cacheUntracked, ok = cacheMap.Get("untracked"); !ok {
cacheUntracked = false
}
job.Cache = make(common.Caches, 1)
job.Cache[0] = common.Cache{
Key: cacheKey,
Untracked: cacheUntracked.(bool),
Paths: paths,
}
return
}
func (c *GitLabCiYamlParser) ParseYaml(job *common.JobResponse) (err error) {
err = c.parseFile()
if err != nil {
return err
}
err = c.loadJob()
if err != nil {
return err
}
parsers := []struct {
method func(job *common.JobResponse) error
}{
{c.prepareJobInfo},
{c.prepareSteps},
{c.prepareVariables},
{c.prepareImage},
{c.prepareServices},
{c.prepareArtifacts},
{c.prepareCache},
}
for _, parser := range parsers {
err = parser.method(job)
if err != nil {
return err
}
}
return nil
}
func NewGitLabCiYamlParser(jobName string) *GitLabCiYamlParser {
return &GitLabCiYamlParser{
filename: ".gitlab-ci.yml",
jobName: jobName,
}
}
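// Usage sketch (illustrative addition, not from the upstream sources): parsing
// a named job from the local .gitlab-ci.yml into a JobResponse; the job name
// "compile" is an assumption.
func exampleParseJob() (*common.JobResponse, error) {
parser := NewGitLabCiYamlParser("compile")
job := &common.JobResponse{}
if err := parser.ParseYaml(job); err != nil {
return nil, err
}
return job, nil
}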
package helpers
import (
"github.com/docker/docker/pkg/homedir"
"os"
)
func GetCurrentWorkingDirectory() string {
dir, err := os.Getwd()
if err == nil {
return dir
}
return ""
}
func GetHomeDir() string {
return homedir.Get()
}
package helpers
import (
"os/exec"
"testing"
)
func SkipIntegrationTests(t *testing.T, app ...string) bool {
if testing.Short() {
t.Skip("Skipping long tests")
return true
}
if ok, err := ExecuteCommandSucceeded(app...); !ok {
t.Skip(app[0], "failed", err)
return true
}
return false
}
// ExecuteCommandSucceeded tests whether a particular command executes
// successfully. If it does not, the error produced is returned.
func ExecuteCommandSucceeded(app ...string) (bool, error) {
if len(app) > 0 {
cmd := exec.Command(app[0], app[1:]...)
err := cmd.Run()
if err != nil {
return false, err
}
}
return true, nil
}
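// Usage sketch (illustrative addition): gating an integration test on a
// working external binary; the `docker info` probe and the test name are
// assumptions.
func exampleDockerIntegration(t *testing.T) {
// Skipped under `go test -short`, or when `docker info` cannot run.
if SkipIntegrationTests(t, "docker", "info") {
return
}
// ... real integration assertions would follow here ...
}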
package path
import golang_path "path"
type unixPath struct{}
func (p *unixPath) Join(elem ...string) string {
return golang_path.Join(elem...)
}
func (p *unixPath) IsAbs(path string) bool {
path = golang_path.Clean(path)
return golang_path.IsAbs(path)
}
func (p *unixPath) IsRoot(path string) bool {
path = golang_path.Clean(path)
return golang_path.IsAbs(path) && golang_path.Dir(path) == path
}
func (p *unixPath) Contains(basePath, targetPath string) bool {
basePath = golang_path.Clean(basePath)
targetPath = golang_path.Clean(targetPath)
for {
if targetPath == basePath {
return true
}
if p.IsRoot(targetPath) || targetPath == "." {
return false
}
targetPath = golang_path.Dir(targetPath)
}
}
func NewUnixPath() Path {
return &unixPath{}
}
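// Usage sketch (illustrative addition): Contains reports whether targetPath
// lies under basePath by walking Dir() upwards until it reaches basePath, the
// root, or ".".
package path
import "fmt"
func exampleContains() {
p := NewUnixPath()
fmt.Println(p.Contains("/builds/project", "/builds/project/dir/file")) // true
fmt.Println(p.Contains("/builds/project", "/tmp/file")) // false
}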
// +build darwin dragonfly freebsd linux netbsd openbsd
package helpers
import (
"os/exec"
"syscall"
)
func SetProcessGroup(cmd *exec.Cmd) {
// Create process group
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
}
func KillProcessGroup(cmd *exec.Cmd) {
if cmd == nil {
return
}
process := cmd.Process
if process != nil {
if process.Pid > 0 {
syscall.Kill(-process.Pid, syscall.SIGKILL)
} else {
// doing normal kill
process.Kill()
}
}
}
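// Usage sketch (illustrative addition): start a command in its own process
// group so KillProcessGroup can SIGKILL it together with any children it
// spawned; the shell command is an arbitrary assumption.
func examplePgroupKill() {
cmd := exec.Command("sh", "-c", "sleep 60 & wait")
SetProcessGroup(cmd)
if err := cmd.Start(); err != nil {
return
}
KillProcessGroup(cmd) // kills -pid, i.e. the whole group
}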
package prometheus
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
var numJobFailuresDesc = prometheus.NewDesc(
"gitlab_runner_failed_jobs_total",
"Total number of failed jobs",
[]string{"runner", "failure_reason"},
nil,
)
type failurePermutation struct {
runnerDescription string
reason common.JobFailureReason
}
type FailuresCollector struct {
lock sync.RWMutex
failures map[failurePermutation]int64
}
func (fc *FailuresCollector) RecordFailure(reason common.JobFailureReason, runnerDescription string) {
failure := failurePermutation{
runnerDescription: runnerDescription,
reason: reason,
}
fc.lock.Lock()
defer fc.lock.Unlock()
fc.failures[failure]++
}
func (fc *FailuresCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- numJobFailuresDesc
}
func (fc *FailuresCollector) Collect(ch chan<- prometheus.Metric) {
fc.lock.RLock()
defer fc.lock.RUnlock()
for failure, number := range fc.failures {
ch <- prometheus.MustNewConstMetric(
numJobFailuresDesc,
prometheus.CounterValue,
float64(number),
failure.runnerDescription,
string(failure.reason),
)
}
}
func NewFailuresCollector() *FailuresCollector {
return &FailuresCollector{
failures: make(map[failurePermutation]int64),
}
}
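// Usage sketch (illustrative addition): the collector satisfies
// prometheus.Collector, so it only needs registering; the failure reason
// literal is an assumption.
func exampleFailuresCollector() {
fc := NewFailuresCollector()
prometheus.MustRegister(fc)
fc.RecordFailure(common.JobFailureReason("script_failure"), "my-runner")
}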
package prometheus
import (
"sync/atomic"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
)
var numErrorsDesc = prometheus.NewDesc("gitlab_runner_errors_total", "The number of caught errors.", []string{"level"}, nil)
type LogHook struct {
errorsNumber map[logrus.Level]*int64
}
func (lh *LogHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
}
}
func (lh *LogHook) Fire(entry *logrus.Entry) error {
atomic.AddInt64(lh.errorsNumber[entry.Level], 1)
return nil
}
func (lh *LogHook) Describe(ch chan<- *prometheus.Desc) {
ch <- numErrorsDesc
}
func (lh *LogHook) Collect(ch chan<- prometheus.Metric) {
for _, level := range lh.Levels() {
number := float64(atomic.LoadInt64(lh.errorsNumber[level]))
ch <- prometheus.MustNewConstMetric(numErrorsDesc, prometheus.CounterValue, number, level.String())
}
}
func NewLogHook() LogHook {
lh := LogHook{}
levels := lh.Levels()
lh.errorsNumber = make(map[logrus.Level]*int64, len(levels))
for _, level := range levels {
lh.errorsNumber[level] = new(int64)
}
return lh
}
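// Usage sketch (illustrative addition): NewLogHook returns a value while both
// logrus.Hook and prometheus.Collector are implemented on the pointer, so the
// hook is registered via its address.
func exampleLogHook() {
hook := NewLogHook()
logrus.AddHook(&hook) // count warning-and-above log entries
prometheus.MustRegister(&hook) // expose gitlab_runner_errors_total
logrus.Error("boom") // bumps the error-level counter
}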
package helpers
import (
"crypto/rand"
"encoding/hex"
)
func GenerateRandomUUID(length int) (string, error) {
data := make([]byte, length)
_, err := rand.Read(data)
if err != nil {
return "", err
}
return hex.EncodeToString(data), nil
}
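// Usage sketch (illustrative addition): the argument is the number of random
// bytes read, so the returned hex string is twice that long.
package helpers
import "fmt"
func exampleGenerateRandomUUID() {
s, err := GenerateRandomUUID(8)
if err != nil {
return
}
fmt.Println(len(s)) // 16
}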
package service_helpers
import (
"github.com/ayufan/golang-kardianos-service"
"github.com/sirupsen/logrus"
)
func New(i service.Interface, c *service.Config) (service.Service, error) {
s, err := service.New(i, c)
if err == service.ErrNoServiceSystemDetected {
logrus.Warningln("No service system detected. Some features may not work!")
return &SimpleService{
i: i,
c: c,
}, nil
}
return s, err
}
package service_helpers
import (
"errors"
service "github.com/ayufan/golang-kardianos-service"
"os"
"os/signal"
"syscall"
)
var (
// ErrNotSupported is returned when a specific feature is not supported.
ErrNotSupported = errors.New("Not supported")
)
type SimpleService struct {
i service.Interface
c *service.Config
}
// Run should be called shortly after the program entry point.
// After Interface.Stop has finished running, Run will stop blocking.
// After Run stops blocking, the program must exit shortly after.
func (s *SimpleService) Run() (err error) {
err = s.i.Start(s)
if err != nil {
return err
}
sigChan := make(chan os.Signal, 3)
signal.Notify(sigChan, syscall.SIGTERM, os.Interrupt)
<-sigChan
return s.i.Stop(s)
}
// Start signals to the OS service manager the given service should start.
func (s *SimpleService) Start() error {
return service.ErrNoServiceSystemDetected
}
// Stop signals to the OS service manager the given service should stop.
func (s *SimpleService) Stop() error {
return ErrNotSupported
}
// Restart signals to the OS service manager the given service should stop then start.
func (s *SimpleService) Restart() error {
return ErrNotSupported
}
// Install sets up the given service in the OS service manager. This may require
// greater rights. Will return an error if it is already installed.
func (s *SimpleService) Install() error {
return ErrNotSupported
}
// Uninstall removes the given service from the OS service manager. This may require
// greater rights. Will return an error if the service is not present.
func (s *SimpleService) Uninstall() error {
return ErrNotSupported
}
// Status returns nil if the given service is running.
// Will return an error if the service is not running or is not present.
func (s *SimpleService) Status() error {
return ErrNotSupported
}
// Logger opens and returns a system logger. If the user program is running
// interactively rather than as a service, the returned logger will write to
// os.Stderr. If errs is non-nil errors will be sent on errs as well as
// returned from Logger's functions.
func (s *SimpleService) Logger(errs chan<- error) (service.Logger, error) {
return service.ConsoleLogger, nil
}
// SystemLogger opens and returns a system logger. If errs is non-nil errors
// will be sent on errs as well as returned from Logger's functions.
func (s *SimpleService) SystemLogger(errs chan<- error) (service.Logger, error) {
return nil, ErrNotSupported
}
// String displays the name of the service. The display name if present,
// otherwise the name.
func (s *SimpleService) String() string {
return "SimpleService"
}
// TODO: Remove in 13.0
// Backported from Go v1.10.8:
// https://github.com/golang/go/blob/8623503fe54642a21854c551129d550139f3bbac/src/os/env.go
// Go v1.11 changed the behavior of os.Expand() to gobble '$' only if it
// looks like it belongs to a valid shell variable. For example,
// $VARIABLE and ${VARIABLE} would expand to VARIABLE's value, but $\VARIABLE
// would retain its '$'. This might break CI variables that depend on
// this behavior.
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// General environment variables.
package shell
// LegacyExpand replaces ${var} or $var in the string based on the mapping function.
func LegacyExpand(s string, mapping func(string) string) string {
buf := make([]byte, 0, 2*len(s))
// ${} is all ASCII, so bytes are fine for this operation.
i := 0
for j := 0; j < len(s); j++ {
if s[j] == '$' && j+1 < len(s) {
buf = append(buf, s[i:j]...)
name, w := getShellName(s[j+1:])
buf = append(buf, mapping(name)...)
j += w
i = j + 1
}
}
return string(buf) + s[i:]
}
// isShellSpecialVar reports whether the character identifies a special
// shell variable such as $*.
func isShellSpecialVar(c uint8) bool {
switch c {
case '*', '#', '$', '@', '!', '?', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
return true
}
return false
}
// isAlphaNum reports whether the byte is an ASCII letter, number, or underscore
func isAlphaNum(c uint8) bool {
return c == '_' || '0' <= c && c <= '9' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z'
}
// getShellName returns the name that begins the string and the number of bytes
// consumed to extract it. If the name is enclosed in {}, it's part of a ${}
// expansion and two more bytes are needed than the length of the name.
func getShellName(s string) (string, int) {
switch {
case s[0] == '{':
if len(s) > 2 && isShellSpecialVar(s[1]) && s[2] == '}' {
return s[1:2], 3
}
// Scan to closing brace
for i := 1; i < len(s); i++ {
if s[i] == '}' {
if i == 1 {
return "", 2 // Bad syntax; eat "${}"
}
return s[1:i], i + 1
}
}
return "", 1 // Bad syntax; eat "${"
case isShellSpecialVar(s[0]):
return s[0:1], 1
}
// Scan alphanumerics.
var i int
for i = 0; i < len(s) && isAlphaNum(s[i]); i++ {
}
return s[:i], i
}
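// Usage sketch (illustrative addition): with a mapping that brackets the
// variable name, the legacy expansion also consumes the '$' of `$\FOO`
// (calling the mapping with an empty name), where Go >= 1.11 os.Expand would
// keep that '$' in place.
package shell
import "fmt"
func exampleLegacyExpand() {
mapping := func(name string) string { return "<" + name + ">" }
fmt.Println(LegacyExpand(`$FOO ${BAR} $\FOO`, mapping))
// prints: <FOO> <BAR> <>\FOO
}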
package helpers
// https://github.com/zimbatm/direnv/blob/master/shell.go
import (
"bytes"
"encoding/hex"
"strings"
)
/*
* Escaping
*/
const (
ACK = 6
TAB = 9
LF = 10
CR = 13
US = 31
SPACE = 32
AMPERSAND = 38
SINGLE_QUOTE = 39
PLUS = 43
NINE = 57
QUESTION = 63
UPPERCASE_Z = 90
OPEN_BRACKET = 91
BACKSLASH = 92
CLOSE_BRACKET = 93
UNDERSCORE = 95
BACKTICK = 96
TILDE = 126
DEL = 127
)
// ShellEscape is taken from https://github.com/solidsnack/shell-escape/blob/master/Text/ShellEscape/Bash.hs
/*
A Bash escaped string. The strings are wrapped in @$\'...\'@ if any
bytes within them must be escaped; otherwise, they are left as is.
Newlines and other control characters are represented as ANSI escape
sequences. High bytes are represented as hex codes. Thus Bash escaped
strings will always fit on one line and never contain non-ASCII bytes.
*/
func ShellEscape(str string) string {
if str == "" {
return "''"
}
in := []byte(str)
out := bytes.NewBuffer(make([]byte, 0, len(str)*2))
i := 0
l := len(in)
escape := false
hex := func(char byte) {
escape = true
data := []byte{BACKSLASH, 'x', 0, 0}
hex.Encode(data[2:], []byte{char})
out.Write(data)
}
backslash := func(char byte) {
escape = true
out.Write([]byte{BACKSLASH, char})
}
escaped := func(str string) {
escape = true
out.WriteString(str)
}
quoted := func(char byte) {
escape = true
out.WriteByte(char)
}
literal := func(char byte) {
out.WriteByte(char)
}
for i < l {
char := in[i]
switch {
case char == TAB:
escaped(`\t`)
case char == LF:
escaped(`\n`)
case char == CR:
escaped(`\r`)
case char <= US:
hex(char)
case char <= AMPERSAND:
quoted(char)
case char == SINGLE_QUOTE:
backslash(char)
case char <= PLUS:
quoted(char)
case char <= NINE:
literal(char)
case char <= QUESTION:
quoted(char)
case char <= UPPERCASE_Z:
literal(char)
case char == OPEN_BRACKET:
quoted(char)
case char == BACKSLASH:
backslash(char)
case char <= CLOSE_BRACKET:
quoted(char)
case char == UNDERSCORE:
literal(char)
case char <= BACKTICK:
quoted(char)
case char <= TILDE:
quoted(char)
case char == DEL:
hex(char)
default:
hex(char)
}
i++
}
outStr := out.String()
if escape {
outStr = "$'" + outStr + "'"
}
return outStr
}
func ToBackslash(path string) string {
return strings.Replace(path, "/", "\\", -1)
}
func ToSlash(path string) string {
return strings.Replace(path, "\\", "/", -1)
}
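// Usage sketch (illustrative addition): anything that needs escaping is
// wrapped in $'...' with control characters turned into ANSI escapes; the
// empty string becomes ''.
package helpers
import "fmt"
func exampleShellEscape() {
fmt.Println(ShellEscape("standard string")) // $'standard string'
fmt.Println(ShellEscape("line1\nline2")) // $'line1\nline2'
fmt.Println(ShellEscape("")) // ''
}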
package helpers
func ShortenToken(token string) string {
if len(token) >= 8 {
return token[0:8]
}
return token
}
package timeperiod
import (
"time"
"github.com/gorhill/cronexpr"
)
type TimePeriod struct {
expressions []*cronexpr.Expression
location *time.Location
GetCurrentTime func() time.Time
}
func (t *TimePeriod) InPeriod() bool {
now := t.GetCurrentTime().In(t.location)
for _, expression := range t.expressions {
nextIn := expression.Next(now)
timeSince := now.Sub(nextIn)
if -time.Second <= timeSince && timeSince <= time.Second {
return true
}
}
return false
}
func TimePeriods(periods []string, timezone string) (*TimePeriod, error) {
var expressions []*cronexpr.Expression
for _, period := range periods {
expression, err := cronexpr.Parse(period)
if err != nil {
return nil, err
}
expressions = append(expressions, expression)
}
// if not set, default to system setting (the empty string would mean UTC)
if timezone == "" {
timezone = "Local"
}
location, err := time.LoadLocation(timezone)
if err != nil {
return nil, err
}
timePeriod := &TimePeriod{
expressions: expressions,
location: location,
GetCurrentTime: func() time.Time { return time.Now() },
}
return timePeriod, nil
}
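// Usage sketch (illustrative addition): with a seconds-granularity expression
// the next occurrence is always under a second away, so InPeriod reports
// true; the expression and timezone are assumptions.
package timeperiod
import "fmt"
func exampleTimePeriods() {
period, err := TimePeriods([]string{"* * * * * *"}, "UTC")
if err != nil {
return
}
fmt.Println(period.InPeriod()) // true
}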
package ca_chain
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/hex"
"encoding/pem"
"fmt"
"io"
"strings"
"github.com/sirupsen/logrus"
)
const (
pemTypeCertificate = "CERTIFICATE"
)
type pemEncoder func(out io.Writer, b *pem.Block) error
type Builder interface {
fmt.Stringer
BuildChainFromTLSConnectionState(TLS *tls.ConnectionState) error
}
func NewBuilder(logger logrus.FieldLogger) Builder {
logger = logger.
WithField("context", "certificate-chain-build")
return &defaultBuilder{
certificates: make([]*x509.Certificate, 0),
seenCertificates: make(map[string]bool),
resolver: newChainResolver(
newURLResolver(logger),
newVerifyResolver(logger),
),
encodePEM: pem.Encode,
logger: logger,
}
}
type defaultBuilder struct {
certificates []*x509.Certificate
seenCertificates map[string]bool
resolver resolver
encodePEM pemEncoder
logger logrus.FieldLogger
}
func (b *defaultBuilder) BuildChainFromTLSConnectionState(TLS *tls.ConnectionState) error {
for _, verifiedChain := range TLS.VerifiedChains {
b.logger.
WithField("chain-leaf", fmt.Sprintf("%v", verifiedChain)).
Debug("Processing chain")
err := b.fetchCertificatesFromVerifiedChain(verifiedChain)
if err != nil {
return fmt.Errorf("error while fetching certificates into the CA Chain: %w", err)
}
}
return nil
}
func (b *defaultBuilder) fetchCertificatesFromVerifiedChain(verifiedChain []*x509.Certificate) error {
var err error
if len(verifiedChain) < 1 {
return nil
}
verifiedChain, err = b.resolver.Resolve(verifiedChain)
if err != nil {
return fmt.Errorf("couldn't resolve certificates chain from the leaf certificate: %w", err)
}
for _, certificate := range verifiedChain {
b.addCertificate(certificate)
}
return nil
}
func (b *defaultBuilder) addCertificate(certificate *x509.Certificate) {
signature := hex.EncodeToString(certificate.Signature)
if b.seenCertificates[signature] {
return
}
b.seenCertificates[signature] = true
b.certificates = append(b.certificates, certificate)
}
func (b *defaultBuilder) String() string {
out := bytes.NewBuffer(nil)
for _, certificate := range b.certificates {
err := b.encodePEM(out, &pem.Block{Type: pemTypeCertificate, Bytes: certificate.Raw})
if err != nil {
b.logger.
WithError(err).
Warning("Failed to encode certificate from chain")
}
}
return strings.TrimSpace(out.String())
}
// Inspired by https://github.com/zakjan/cert-chain-resolver/blob/master/certUtil/io.go
// which is licensed on a MIT license.
//
// Shout out to Jan Žák (http://zakjan.cz) original author of `certUtil` package and other
// contributors who updated it!
package ca_chain
import (
"bytes"
"crypto/x509"
"encoding/pem"
"fmt"
"strings"
"github.com/fullsailor/pkcs7"
"github.com/sirupsen/logrus"
)
const (
pemStart = "-----BEGIN "
pemCertBlockType = "CERTIFICATE"
)
type ErrorInvalidCertificate struct {
inner error
nonCertBlockType bool
nilBlock bool
}
func (e *ErrorInvalidCertificate) Error() string {
msg := []string{"invalid certificate"}
if e.nilBlock {
msg = append(msg, "empty PEM block")
} else if e.nonCertBlockType {
msg = append(msg, "non-certificate PEM block")
} else if e.inner != nil {
msg = append(msg, e.inner.Error())
}
return strings.Join(msg, ": ")
}
func decodeCertificate(data []byte) (*x509.Certificate, error) {
if isPEM(data) {
block, _ := pem.Decode(data)
if block == nil {
return nil, &ErrorInvalidCertificate{nilBlock: true}
}
if block.Type != pemCertBlockType {
return nil, &ErrorInvalidCertificate{nonCertBlockType: true}
}
data = block.Bytes
}
cert, err := x509.ParseCertificate(data)
if err == nil {
return cert, nil
}
p, err := pkcs7.Parse(data)
if err == nil {
return p.Certificates[0], nil
}
return nil, &ErrorInvalidCertificate{inner: err}
}
func isPEM(data []byte) bool {
return bytes.HasPrefix(data, []byte(pemStart))
}
func isSelfSigned(cert *x509.Certificate) bool {
return cert.CheckSignatureFrom(cert) == nil
}
func prepareCertificateLogger(logger logrus.FieldLogger, cert *x509.Certificate) logrus.FieldLogger {
return preparePrefixedCertificateLogger(logger, cert, "")
}
func preparePrefixedCertificateLogger(logger logrus.FieldLogger, cert *x509.Certificate, prefix string) logrus.FieldLogger {
return logger.
WithFields(logrus.Fields{
fmt.Sprintf("%sSubject", prefix): cert.Subject.CommonName,
fmt.Sprintf("%sIssuer", prefix): cert.Issuer.CommonName,
fmt.Sprintf("%sSerial", prefix): cert.SerialNumber.String(),
fmt.Sprintf("%sIssuerCertURL", prefix): cert.IssuingCertificateURL,
})
}
func verifyCertificate(cert *x509.Certificate) ([][]*x509.Certificate, error) {
return cert.Verify(x509.VerifyOptions{})
}
// Inspired by https://github.com/zakjan/cert-chain-resolver/blob/master/certUtil/chain.go
// which is licensed on a MIT license.
//
// Shout out to Jan Žák (http://zakjan.cz) original author of `certUtil` package and other
// contributors who updated it!
package ca_chain
import (
"crypto/x509"
"fmt"
"github.com/sirupsen/logrus"
)
type chainResolver struct {
logger logrus.FieldLogger
urlResolver resolver
verifyResolver resolver
}
func newChainResolver(urlResolver resolver, verifyResolver resolver) resolver {
return &chainResolver{
urlResolver: urlResolver,
verifyResolver: verifyResolver,
}
}
func (r *chainResolver) Resolve(certs []*x509.Certificate) ([]*x509.Certificate, error) {
certs, err := r.urlResolver.Resolve(certs)
if err != nil {
return nil, fmt.Errorf("error while resolving certificates chain with URL: %w", err)
}
certs, err = r.verifyResolver.Resolve(certs)
if err != nil {
return nil, fmt.Errorf("error while resolving certificates chain with verification: %w", err)
}
return certs, err
}
// Inspired by https://github.com/zakjan/cert-chain-resolver/blob/master/certUtil/chain.go
// which is licensed on a MIT license.
//
// Shout out to Jan Žák (http://zakjan.cz) original author of `certUtil` package and other
// contributors who updated it!
package ca_chain
import (
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/sirupsen/logrus"
)
const defaultURLResolverLoopLimit = 15
const defaultURLResolverFetchTimeout = 15 * time.Second
type fetcher interface {
Fetch(url string) ([]byte, error)
}
type httpFetcher struct {
client *http.Client
}
func newHTTPFetcher(timeout time.Duration) *httpFetcher {
return &httpFetcher{
client: &http.Client{
Timeout: timeout,
},
}
}
func (f *httpFetcher) Fetch(url string) ([]byte, error) {
resp, err := f.client.Get(url)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return nil, err
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return data, nil
}
type decoder func(data []byte) (*x509.Certificate, error)
type urlResolver struct {
logger logrus.FieldLogger
fetcher fetcher
decoder decoder
loopLimit int
}
func newURLResolver(logger logrus.FieldLogger) resolver {
return &urlResolver{
logger: logger,
fetcher: newHTTPFetcher(defaultURLResolverFetchTimeout),
decoder: decodeCertificate,
loopLimit: defaultURLResolverLoopLimit,
}
}
func (r *urlResolver) Resolve(certs []*x509.Certificate) ([]*x509.Certificate, error) {
if len(certs) < 1 {
return nil, nil
}
loop := 0
for {
loop++
if loop >= r.loopLimit {
r.logger.Warning("urlResolver loop limit exceeded; exiting the loop")
break
}
certificate := certs[len(certs)-1]
log := prepareCertificateLogger(r.logger, certificate)
if certificate.IssuingCertificateURL == nil {
log.Debug("Certificate doesn't provide parent URL: exiting the loop")
break
}
newCert, err := r.fetchIssuerCertificate(certificate)
if err != nil {
return nil, fmt.Errorf("error while fetching issuer certificate: %w", err)
}
certs = append(certs, newCert)
if isSelfSigned(newCert) {
log.Debug("Fetched issuer certificate is a ROOT certificate so exiting the loop")
break
}
}
return certs, nil
}
func (r *urlResolver) fetchIssuerCertificate(cert *x509.Certificate) (*x509.Certificate, error) {
log := prepareCertificateLogger(r.logger, cert).
WithField("method", "fetchIssuerCertificate")
issuerURL := cert.IssuingCertificateURL[0]
data, err := r.fetcher.Fetch(issuerURL)
if err != nil {
log.
WithError(err).
WithField("issuerURL", issuerURL).
Warning("Remote certificate fetching error")
return nil, fmt.Errorf("remote fetch failure: %w", err)
}
newCert, err := r.decoder(data)
if err != nil {
log.
WithError(err).
Warning("Certificate decoding error")
return nil, fmt.Errorf("decoding failure: %w", err)
}
preparePrefixedCertificateLogger(log, newCert, "newCert").
Debug("Appending the certificate to the chain")
return newCert, nil
}
// Inspired by https://github.com/zakjan/cert-chain-resolver/blob/master/certUtil/chain.go
// which is licensed on a MIT license.
//
// Shout out to Jan Žák (http://zakjan.cz) original author of `certUtil` package and other
// contributors who updated it!
package ca_chain
import (
"crypto/x509"
"fmt"
"github.com/sirupsen/logrus"
)
type verifier func(cert *x509.Certificate) ([][]*x509.Certificate, error)
type verifyResolver struct {
logger logrus.FieldLogger
verifier verifier
}
func newVerifyResolver(logger logrus.FieldLogger) resolver {
return &verifyResolver{
logger: logger,
verifier: verifyCertificate,
}
}
func (r *verifyResolver) Resolve(certs []*x509.Certificate) ([]*x509.Certificate, error) {
if len(certs) < 1 {
return certs, nil
}
lastCert := certs[len(certs)-1]
if isSelfSigned(lastCert) {
return certs, nil
}
prepareCertificateLogger(r.logger, lastCert).
Debug("Verifying last certificate to find the final root certificate")
verifyChains, err := r.verifier(lastCert)
if err != nil {
_, ok := err.(x509.UnknownAuthorityError)
if ok {
prepareCertificateLogger(r.logger, lastCert).
WithError(err).
Warning("Last certificate signed by unknown authority; will not update the chain")
return certs, nil
}
return nil, fmt.Errorf("error while verifying last certificate from the chain: %w", err)
}
for _, cert := range verifyChains[0] {
if lastCert.Equal(cert) {
continue
}
prepareCertificateLogger(r.logger, cert).
Debug("Adding cert from verify chain to the final chain")
certs = append(certs, cert)
}
return certs, nil
}
package trace
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"sync"
"github.com/markelog/trie"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
)
const maskedText = "[MASKED]"
const defaultBytesLimit = 4 * 1024 * 1024 // 4MB
type Buffer struct {
writer io.WriteCloser
lock sync.RWMutex
logFile *os.File
logSize int
logWriter *bufio.Writer
advanceBuffer bytes.Buffer
bytesLimit int
finish chan struct{}
maskTree *trie.Trie
}
func (b *Buffer) SetMasked(values []string) {
if len(values) == 0 {
b.maskTree = nil
return
}
maskTree := trie.New()
for _, value := range values {
maskTree.Add(value, nil)
}
b.maskTree = maskTree
}
func (b *Buffer) SetLimit(size int) {
b.bytesLimit = size
}
func (b *Buffer) Size() int {
return b.logSize
}
func (b *Buffer) Reader(offset, n int) (io.ReadSeeker, error) {
b.lock.Lock()
defer b.lock.Unlock()
err := b.logWriter.Flush()
if err != nil {
return nil, err
}
return io.NewSectionReader(b.logFile, int64(offset), int64(n)), nil
}
func (b *Buffer) Bytes(offset, n int) ([]byte, error) {
reader, err := b.Reader(offset, n)
if err != nil {
return nil, err
}
return ioutil.ReadAll(reader)
}
func (b *Buffer) Write(data []byte) (n int, err error) {
return b.writer.Write(data)
}
func (b *Buffer) Finish() {
// wait for trace to finish
b.writer.Close()
<-b.finish
}
func (b *Buffer) Close() {
_ = b.logFile.Close()
_ = os.Remove(b.logFile.Name())
}
func (b *Buffer) advanceAllUnsafe() error {
n, err := b.advanceBuffer.WriteTo(b.logWriter)
b.logSize += int(n)
return err
}
func (b *Buffer) advanceAll() {
b.lock.Lock()
defer b.lock.Unlock()
b.advanceAllUnsafe()
}
// advanceLogUnsafe is assumed to be run for every character
func (b *Buffer) advanceLogUnsafe() error {
// advance all if no masking is enabled
if b.maskTree == nil {
return b.advanceAllUnsafe()
}
rest := b.advanceBuffer.String()
results := b.maskTree.Search(rest)
if len(results) == 0 {
// we can advance as no match was found
return b.advanceAllUnsafe()
}
// full match was found
if len(results) == 1 && results[0].Key == rest {
b.advanceBuffer.Reset()
b.advanceBuffer.WriteString(maskedText)
return b.advanceAllUnsafe()
}
// partial match, wait for more characters
return nil
}
func (b *Buffer) limitExceededMessage() string {
return fmt.Sprintf("\n%sJob's log exceeded limit of %v bytes.%s\n", helpers.ANSI_BOLD_RED, b.bytesLimit, helpers.ANSI_RESET)
}
func (b *Buffer) writeRune(r rune) error {
b.lock.Lock()
defer b.lock.Unlock()
// over trace limit
if b.logSize > b.bytesLimit {
return io.EOF
}
if _, err := b.advanceBuffer.WriteRune(r); err != nil {
return err
}
if err := b.advanceLogUnsafe(); err != nil {
return err
}
// under trace limit
if b.logSize <= b.bytesLimit {
return nil
}
b.advanceBuffer.Reset()
b.advanceBuffer.WriteString(b.limitExceededMessage())
return b.advanceAllUnsafe()
}
func (b *Buffer) process(pipe *io.PipeReader) {
defer pipe.Close()
reader := bufio.NewReader(pipe)
for {
r, s, err := reader.ReadRune()
if s <= 0 {
break
} else if err == nil {
b.writeRune(r)
} else {
// ignore invalid characters
continue
}
}
b.advanceAll()
close(b.finish)
}
func New() (*Buffer, error) {
logFile, err := ioutil.TempFile("", "trace")
if err != nil {
return nil, err
}
reader, writer := io.Pipe()
buffer := &Buffer{
writer: writer,
bytesLimit: defaultBytesLimit,
finish: make(chan struct{}),
logFile: logFile,
logWriter: bufio.NewWriter(logFile),
}
go buffer.process(reader)
return buffer, nil
}
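// Usage sketch (illustrative addition): write output through the buffer with
// a masked value, then read the log back once processing has finished.
func exampleBuffer() {
buffer, err := New()
if err != nil {
return
}
defer buffer.Close()
buffer.SetMasked([]string{"secret"})
_, _ = buffer.Write([]byte("token=secret\n"))
buffer.Finish() // close the writer and wait for the process goroutine
data, _ := buffer.Bytes(0, buffer.Size())
fmt.Println(string(data)) // token=[MASKED]
}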
package url_helpers
import "net/url"
func CleanURL(value string) (ret string) {
u, err := url.Parse(value)
if err != nil {
return
}
u.User = nil
u.RawQuery = ""
u.Fragment = ""
return u.String()
}
package url_helpers
import (
"regexp"
)
var scrubRegexp = regexp.MustCompile(`(?im)([\?&]((?:private|authenticity|rss)[\-_]token)|X-AMZ-Signature|X-AMZ-Credential)=[^& ]*`)
// ScrubSecrets replaces the content of any sensitive query string parameters
// in an URL with `[FILTERED]`
func ScrubSecrets(url string) string {
return scrubRegexp.ReplaceAllString(url, "$1=[FILTERED]")
}
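// Usage sketch (illustrative addition): sensitive query parameters are
// replaced before a URL is logged.
package url_helpers
import "fmt"
func exampleScrubSecrets() {
fmt.Println(ScrubSecrets("https://example.com/repo.git?private_token=abc123&page=2"))
// prints: https://example.com/repo.git?private_token=[FILTERED]&page=2
}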
package virtualbox
import (
"bytes"
"errors"
"fmt"
"net"
"os"
"os/exec"
"regexp"
"strings"
"time"
"github.com/sirupsen/logrus"
)
type StatusType string
const (
NotFound StatusType = "notfound"
PoweredOff StatusType = "poweroff"
Saved StatusType = "saved"
Teleported StatusType = "teleported"
Aborted StatusType = "aborted"
Running StatusType = "running"
Paused StatusType = "paused"
Stuck StatusType = "gurumeditation"
Teleporting StatusType = "teleporting"
LiveSnapshotting StatusType = "livesnapshotting"
Starting StatusType = "starting"
Stopping StatusType = "stopping"
Saving StatusType = "saving"
Restoring StatusType = "restoring"
TeleportingPausedVM StatusType = "teleportingpausedvm"
TeleportingIn StatusType = "teleportingin"
FaultTolerantSyncing StatusType = "faulttolerantsyncing"
DeletingSnapshotOnline StatusType = "deletingsnapshotlive"
DeletingSnapshotPaused StatusType = "deletingsnapshotlivepaused"
OnlineSnapshotting StatusType = "onlinesnapshotting"
RestoringSnapshot StatusType = "restoringsnapshot"
DeletingSnapshot StatusType = "deletingsnapshot"
SettingUp StatusType = "settingup"
Snapshotting StatusType = "snapshotting"
Unknown StatusType = "unknown"
// TODO: update as new VM states are added
)
func IsStatusOnlineOrTransient(vmStatus StatusType) bool {
switch vmStatus {
case Running,
Paused,
Stuck,
Teleporting,
LiveSnapshotting,
Starting,
Stopping,
Saving,
Restoring,
TeleportingPausedVM,
TeleportingIn,
FaultTolerantSyncing,
DeletingSnapshotOnline,
DeletingSnapshotPaused,
OnlineSnapshotting,
RestoringSnapshot,
DeletingSnapshot,
SettingUp,
Snapshotting:
return true
}
return false
}
func VboxManageOutput(exe string, args ...string) (string, error) {
var stdout, stderr bytes.Buffer
logrus.Debugf("Executing VBoxManageOutput: %#v", args)
cmd := exec.Command(exe, args...)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
stderrString := strings.TrimSpace(stderr.String())
if _, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("VBoxManageOutput error: %s", stderrString)
}
return stdout.String(), err
}
func VBoxManage(args ...string) (string, error) {
return VboxManageOutput("vboxmanage", args...)
}
func Version() (string, error) {
version, err := VBoxManage("--version")
if err != nil {
return "", err
}
return strings.TrimSpace(version), nil
}
func FindSSHPort(vmName string) (port string, err error) {
info, err := VBoxManage("showvminfo", vmName)
if err != nil {
return
}
portRe := regexp.MustCompile(`guestssh.*host port = (\d+)`)
sshPort := portRe.FindStringSubmatch(info)
if len(sshPort) >= 2 {
port = sshPort[1]
} else {
err = errors.New("failed to find guestssh port")
}
return
}
func Exist(vmName string) bool {
_, err := VBoxManage("showvminfo", vmName)
return err == nil
}
func CreateOsVM(vmName string, templateName string, templateSnapshot string) error {
args := []string{"clonevm", vmName, "--mode", "machine", "--name", templateName, "--register"}
if templateSnapshot != "" {
args = append(args, "--snapshot", templateSnapshot, "--options", "link")
}
_, err := VBoxManage(args...)
return err
}
func isPortUnassigned(testPort string, usedPorts [][]string) bool {
for _, port := range usedPorts {
if testPort == port[1] {
return false
}
}
return true
}
func getUsedVirtualBoxPorts() (usedPorts [][]string, err error) {
output, err := VBoxManage("list", "vms", "-l")
if err != nil {
return
}
allPortsRe := regexp.MustCompile(`host port = (\d+)`)
usedPorts = allPortsRe.FindAllStringSubmatch(output, -1)
return
}
func allocatePort(handler func(port string) error) (port string, err error) {
ln, err := net.Listen("tcp", ":0")
if err != nil {
logrus.Debugln("VirtualBox ConfigureSSH:", err)
return
}
defer ln.Close()
usedPorts, err := getUsedVirtualBoxPorts()
if err != nil {
logrus.Debugln("VirtualBox ConfigureSSH:", err)
return
}
addressElements := strings.Split(ln.Addr().String(), ":")
port = addressElements[len(addressElements)-1]
if isPortUnassigned(port, usedPorts) {
err = handler(port)
} else {
err = os.ErrExist
}
return
}
func ConfigureSSH(vmName string, vmSSHPort string) (port string, err error) {
for {
port, err = allocatePort(
func(port string) error {
rule := fmt.Sprintf("guestssh,tcp,127.0.0.1,%s,,%s", port, vmSSHPort)
_, err = VBoxManage("modifyvm", vmName, "--natpf1", rule)
return err
},
)
if err != os.ErrExist {
return
}
}
}
func CreateSnapshot(vmName string, snapshotName string) error {
_, err := VBoxManage("snapshot", vmName, "take", snapshotName)
return err
}
func RevertToSnapshot(vmName string) error {
_, err := VBoxManage("snapshot", vmName, "restorecurrent")
return err
}
func matchSnapshotName(snapshotName string, snapshotList string) bool {
snapshotRe := regexp.MustCompile(fmt.Sprintf(`(?m)^Snapshot(Name|UUID)[^=]*="(%s)"\r?$`, regexp.QuoteMeta(snapshotName)))
snapshot := snapshotRe.FindStringSubmatch(snapshotList)
return snapshot != nil
}
func HasSnapshot(vmName string, snapshotName string) bool {
output, err := VBoxManage("snapshot", vmName, "list", "--machinereadable")
if err != nil {
return false
}
return matchSnapshotName(snapshotName, output)
}
func matchCurrentSnapshotName(snapshotList string) []string {
snapshotRe := regexp.MustCompile(`(?m)^CurrentSnapshotName="([^"]*)"\r?$`)
return snapshotRe.FindStringSubmatch(snapshotList)
}
func GetCurrentSnapshot(vmName string) (string, error) {
output, err := VBoxManage("snapshot", vmName, "list", "--machinereadable")
if err != nil {
return "", err
}
snapshot := matchCurrentSnapshotName(output)
if snapshot == nil {
return "", errors.New("Failed to match current snapshot name")
}
return snapshot[1], nil
}
func Start(vmName string) error {
_, err := VBoxManage("startvm", vmName, "--type", "headless")
return err
}
func Kill(vmName string) error {
_, err := VBoxManage("controlvm", vmName, "poweroff")
return err
}
func Delete(vmName string) error {
_, err := VBoxManage("unregistervm", vmName, "--delete")
return err
}
func Status(vmName string) (StatusType, error) {
output, err := VBoxManage("showvminfo", vmName, "--machinereadable")
statusRe := regexp.MustCompile(`VMState="(\w+)"`)
status := statusRe.FindStringSubmatch(output)
if err != nil {
return NotFound, err
}
return StatusType(status[1]), nil
}
func WaitForStatus(vmName string, vmStatus StatusType, seconds int) error {
var status StatusType
var err error
for i := 0; i < seconds; i++ {
status, err = Status(vmName)
if err != nil {
return err
}
if status == vmStatus {
return nil
}
time.Sleep(time.Second)
}
return errors.New("VM " + vmName + " is in " + string(status) + " where it should be in " + string(vmStatus))
}
func Unregister(vmName string) error {
_, err := VBoxManage("unregistervm", vmName)
return err
}
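// Usage sketch (illustrative addition): typical lifecycle glue, assuming the
// vboxmanage binary is installed and that a VM with this hypothetical name
// exists.
func exampleLifecycle() {
vm := "runner-vm"
if !Exist(vm) {
return
}
if err := Start(vm); err != nil {
logrus.WithError(err).Error("VM start failed")
return
}
_ = WaitForStatus(vm, Running, 60) // poll once per second, up to a minute
_ = Kill(vm)
}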
package log
import (
"fmt"
"os"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
const (
FormatRunner = "runner"
FormatText = "text"
FormatJSON = "json"
)
var (
configuration = NewConfig(logrus.StandardLogger())
logFlags = []cli.Flag{
cli.BoolFlag{
Name: "debug",
Usage: "debug mode",
EnvVar: "DEBUG",
},
cli.StringFlag{
Name: "log-format",
Usage: "Choose log format (options: runner, text, json)",
EnvVar: "LOG_FORMAT",
},
cli.StringFlag{
Name: "log-level, l",
Usage: "Log level (options: debug, info, warn, error, fatal, panic)",
EnvVar: "LOG_LEVEL",
},
}
formats = map[string]logrus.Formatter{
FormatRunner: new(RunnerTextFormatter),
FormatText: new(logrus.TextFormatter),
FormatJSON: new(logrus.JSONFormatter),
}
)
func formatNames() []string {
formatNames := make([]string, 0)
for name := range formats {
formatNames = append(formatNames, name)
}
return formatNames
}
type Config struct {
logger *logrus.Logger
level logrus.Level
format logrus.Formatter
levelSetWithCli bool
formatSetWithCli bool
goroutinesDumpStopCh chan bool
}
func (l *Config) IsLevelSetWithCli() bool {
return l.levelSetWithCli
}
func (l *Config) IsFormatSetWithCli() bool {
return l.formatSetWithCli
}
func (l *Config) handleCliCtx(cliCtx *cli.Context) error {
if cliCtx.IsSet("log-level") || cliCtx.IsSet("l") {
err := l.SetLevel(cliCtx.String("log-level"))
if err != nil {
return err
}
l.levelSetWithCli = true
}
if cliCtx.Bool("debug") {
l.level = logrus.DebugLevel
l.levelSetWithCli = true
}
if cliCtx.IsSet("log-format") {
err := l.SetFormat(cliCtx.String("log-format"))
if err != nil {
return err
}
l.formatSetWithCli = true
}
l.ReloadConfiguration()
return nil
}
func (l *Config) SetLevel(levelString string) error {
level, err := logrus.ParseLevel(levelString)
if err != nil {
return fmt.Errorf("failed to parse log level: %w", err)
}
l.level = level
return nil
}
func (l *Config) SetFormat(format string) error {
formatter, ok := formats[format]
if !ok {
return fmt.Errorf("unknown log format %q, expected one of: %v", l.format, formatNames())
}
l.format = formatter
return nil
}
func (l *Config) ReloadConfiguration() {
l.logger.SetFormatter(l.format)
l.logger.SetLevel(l.level)
if l.level == logrus.DebugLevel {
l.enableGoroutinesDump()
} else {
l.disableGoroutinesDump()
}
}
func (l *Config) enableGoroutinesDump() {
if l.goroutinesDumpStopCh != nil {
return
}
l.goroutinesDumpStopCh = make(chan bool)
watchForGoroutinesDump(l.logger, l.goroutinesDumpStopCh)
}
func (l *Config) disableGoroutinesDump() {
if l.goroutinesDumpStopCh == nil {
return
}
close(l.goroutinesDumpStopCh)
l.goroutinesDumpStopCh = nil
}
func NewConfig(logger *logrus.Logger) *Config {
return &Config{
logger: logger,
level: logrus.InfoLevel,
format: new(RunnerTextFormatter),
}
}
func Configuration() *Config {
return configuration
}
func ConfigureLogging(app *cli.App) {
app.Flags = append(app.Flags, logFlags...)
appBefore := app.Before
app.Before = func(cliCtx *cli.Context) error {
Configuration().logger.SetOutput(os.Stderr)
err := Configuration().handleCliCtx(cliCtx)
if err != nil {
logrus.WithError(err).Fatal("Error while setting up logging configuration")
}
if appBefore != nil {
return appBefore(cliCtx)
}
return nil
}
}
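// Usage sketch (illustrative addition): wiring the logging flags and Before
// handler into a urfave/cli application; the app name is an assumption.
func exampleConfigureLogging() {
app := cli.NewApp()
app.Name = "example"
ConfigureLogging(app)
// app.Run(os.Args) would now parse --debug/--log-level/--log-format and
// reconfigure the global logrus logger before any other Before hook runs.
}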
// +build darwin dragonfly freebsd linux netbsd openbsd
package log
import (
"os"
"os/signal"
"runtime"
"syscall"
"github.com/sirupsen/logrus"
)
func watchForGoroutinesDump(logger *logrus.Logger, stopCh chan bool) (chan bool, chan bool) {
dumpedCh := make(chan bool)
finishedCh := make(chan bool)
dumpStacksCh := make(chan os.Signal, 1)
// On USR1 dump stacks of all go routines
signal.Notify(dumpStacksCh, syscall.SIGUSR1)
go func() {
for {
select {
case <-dumpStacksCh:
buf := make([]byte, 1<<20)
n := runtime.Stack(buf, true)
logger.Printf("=== received SIGUSR1 ===\n*** goroutine dump...\n%s\n*** end\n", buf[:n])
nonBlockingSend(dumpedCh, true)
case <-stopCh:
close(finishedCh)
return
}
}
}()
return dumpedCh, finishedCh
}
func nonBlockingSend(ch chan bool, value bool) {
select {
case ch <- value:
default:
}
}
package log
import (
"bytes"
"fmt"
"sort"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
)
type RunnerTextFormatter struct {
// Force disabling colors.
DisableColors bool
// The fields are sorted by default for a consistent output. For applications
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
}
func (f *RunnerTextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
b := new(bytes.Buffer)
f.printColored(b, entry)
b.WriteByte('\n')
return b.Bytes(), nil
}
func (f *RunnerTextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry) {
levelColor, resetColor, levelPrefix := f.getColorsAndPrefix(entry)
indentLength := 50 - len(levelPrefix)
fmt.Fprintf(b, "%s%s%-*s%s ", levelColor, levelPrefix, indentLength, entry.Message, resetColor)
for _, k := range f.prepareKeys(entry) {
v := entry.Data[k]
fmt.Fprintf(b, " %s%s%s=%v", levelColor, k, resetColor, v)
}
}
func (f *RunnerTextFormatter) getColorsAndPrefix(entry *logrus.Entry) (string, string, string) {
definitions := map[logrus.Level]struct {
color string
prefix string
}{
logrus.DebugLevel: {
color: helpers.ANSI_BOLD_WHITE,
},
logrus.WarnLevel: {
color: helpers.ANSI_YELLOW,
prefix: "WARNING: ",
},
logrus.ErrorLevel: {
color: helpers.ANSI_BOLD_RED,
prefix: "ERROR: ",
},
logrus.FatalLevel: {
color: helpers.ANSI_BOLD_RED,
prefix: "FATAL: ",
},
logrus.PanicLevel: {
color: helpers.ANSI_BOLD_RED,
prefix: "PANIC: ",
},
}
color := ""
prefix := ""
definition, ok := definitions[entry.Level]
if ok {
if definition.color != "" {
color = definition.color
}
if definition.prefix != "" {
prefix = definition.prefix
}
}
if f.DisableColors {
return "", "", prefix
}
return color, helpers.ANSI_RESET, prefix
}
func (f *RunnerTextFormatter) prepareKeys(entry *logrus.Entry) []string {
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
if !f.DisableSorting {
sort.Strings(keys)
}
return keys
}
func SetRunnerFormatter() {
logrus.SetFormatter(new(RunnerTextFormatter))
}
package log
import (
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/helpers/url"
)
type SecretsCleanupHook struct{}
func (s *SecretsCleanupHook) Levels() []logrus.Level {
return logrus.AllLevels
}
func (s *SecretsCleanupHook) Fire(entry *logrus.Entry) error {
entry.Message = url_helpers.ScrubSecrets(entry.Message)
return nil
}
func AddSecretsCleanupLogHook(logger *logrus.Logger) {
if logger == nil {
logger = logrus.StandardLogger()
}
logger.AddHook(new(SecretsCleanupHook))
}
package log
import (
"github.com/ayufan/golang-kardianos-service"
"github.com/sirupsen/logrus"
)
type systemLogger interface {
service.Logger
}
type systemService interface {
service.Service
}
type SystemServiceLogHook struct {
systemLogger
Level logrus.Level
}
func (s *SystemServiceLogHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
}
}
func (s *SystemServiceLogHook) Fire(entry *logrus.Entry) error {
if entry.Level > s.Level {
return nil
}
msg, err := entry.String()
if err != nil {
return err
}
switch entry.Level {
case logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel:
s.Error(msg)
case logrus.WarnLevel:
s.Warning(msg)
case logrus.InfoLevel:
s.Info(msg)
}
return nil
}
func SetSystemLogger(logrusLogger *logrus.Logger, svc systemService) {
logger, err := svc.SystemLogger(nil)
if err == nil {
hook := new(SystemServiceLogHook)
hook.systemLogger = logger
hook.Level = logrus.GetLevel()
logrusLogger.AddHook(hook)
} else {
logrusLogger.WithError(err).Error("Error while setting up the system logger")
}
}
package test
import (
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
)
// NewHook creates a new global hook that can be used in tests and removes it
// when the returned function is invoked.
//
// This shouldn't be used when you are writing a new package/structure. Instead,
// pass the logger to that struct and add the hook to that struct only; try to
// avoid the global logger. This has multiple benefits, for example having that
// struct with specific logger settings that don't affect the logger in another
// part of the application. For example:
//
// type MyNewStruct struct {
// logger logrus.FieldLogger
// }
//
// The more hooks we add to the tests, the more memory we leak.
func NewHook() (*test.Hook, func()) {
// Copy all the previous hooks so we revert back to that state.
oldHooks := logrus.LevelHooks{}
for level, hooks := range logrus.StandardLogger().Hooks {
oldHooks[level] = hooks
}
newHook := test.NewGlobal()
return newHook, func() {
logrus.StandardLogger().ReplaceHooks(oldHooks)
}
}
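// Usage sketch (illustrative addition): capture global log entries in a test
// and restore the previous hooks afterwards.
package test
import (
"fmt"
"github.com/sirupsen/logrus"
)
func exampleNewHook() {
hook, cleanup := NewHook()
defer cleanup()
logrus.Error("something failed")
fmt.Println(hook.LastEntry().Message) // something failed
}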
package network
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/jpillora/backoff"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/tls/ca_chain"
)
type requestCredentials interface {
GetURL() string
GetToken() string
GetTLSCAFile() string
GetTLSCertFile() string
GetTLSKeyFile() string
}
var (
dialer = net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}
backOffDelayMin = 100 * time.Millisecond
backOffDelayMax = 60 * time.Second
backOffDelayFactor = 2.0
backOffDelayJitter = true
)
type client struct {
http.Client
url *url.URL
caFile string
certFile string
keyFile string
caData []byte
skipVerify bool
updateTime time.Time
lastUpdate string
requestBackOffs map[string]*backoff.Backoff
lock sync.Mutex
requester requester
}
type ResponseTLSData struct {
CAChain string
CertFile string
KeyFile string
}
func (n *client) getLastUpdate() string {
return n.lastUpdate
}
func (n *client) setLastUpdate(headers http.Header) {
if lu := headers.Get("X-GitLab-Last-Update"); len(lu) > 0 {
n.lastUpdate = lu
}
}
func (n *client) ensureTLSConfig() {
// certificate got modified
if stat, err := os.Stat(n.caFile); err == nil && n.updateTime.Before(stat.ModTime()) {
n.Transport = nil
}
// client certificate got modified
if stat, err := os.Stat(n.certFile); err == nil && n.updateTime.Before(stat.ModTime()) {
n.Transport = nil
}
// client private key got modified
if stat, err := os.Stat(n.keyFile); err == nil && n.updateTime.Before(stat.ModTime()) {
n.Transport = nil
}
// create or update transport
if n.Transport == nil {
n.updateTime = time.Now()
n.createTransport()
}
}
func (n *client) addTLSCA(tlsConfig *tls.Config) {
// load TLS CA certificate
if file := n.caFile; file != "" && !n.skipVerify {
logrus.Debugln("Trying to load", file, "...")
data, err := ioutil.ReadFile(file)
if err == nil {
pool, err := x509.SystemCertPool()
if err != nil {
logrus.Warningln("Failed to load system CertPool:", err)
}
if pool == nil {
pool = x509.NewCertPool()
}
if pool.AppendCertsFromPEM(data) {
tlsConfig.RootCAs = pool
n.caData = data
} else {
logrus.Errorln("Failed to parse PEM in", n.caFile)
}
} else {
if !os.IsNotExist(err) {
logrus.Errorln("Failed to load", n.caFile, err)
}
}
}
}
func (n *client) addTLSAuth(tlsConfig *tls.Config) {
// load TLS client keypair
if cert, key := n.certFile, n.keyFile; cert != "" && key != "" {
logrus.Debugln("Trying to load", cert, "and", key, "pair...")
certificate, err := tls.LoadX509KeyPair(cert, key)
if err == nil {
tlsConfig.Certificates = []tls.Certificate{certificate}
tlsConfig.BuildNameToCertificate()
} else {
if !os.IsNotExist(err) {
logrus.Errorln("Failed to load", cert, key, err)
}
}
}
}
func (n *client) createTransport() {
// create reference TLS config
tlsConfig := tls.Config{
MinVersion: tls.VersionTLS10,
InsecureSkipVerify: n.skipVerify,
}
n.addTLSCA(&tlsConfig)
n.addTLSAuth(&tlsConfig)
// create transport
n.Transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: func(network, addr string) (net.Conn, error) {
logrus.Debugln("Dialing:", network, addr, "...")
return dialer.Dial(network, addr)
},
TLSClientConfig: &tlsConfig,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
ResponseHeaderTimeout: 10 * time.Minute,
}
n.Timeout = common.DefaultNetworkClientTimeout
}
func (n *client) ensureBackoff(method, uri string) *backoff.Backoff {
n.lock.Lock()
defer n.lock.Unlock()
key := fmt.Sprintf("%s_%s", method, uri)
if n.requestBackOffs[key] == nil {
n.requestBackOffs[key] = &backoff.Backoff{
Min: backOffDelayMin,
Max: backOffDelayMax,
Factor: backOffDelayFactor,
Jitter: backOffDelayJitter,
}
}
return n.requestBackOffs[key]
}
func (n *client) backoffRequired(res *http.Response) bool {
return res.StatusCode >= 400 && res.StatusCode < 600
}
func (n *client) checkBackoffRequest(req *http.Request, res *http.Response) {
backoffDelay := n.ensureBackoff(req.Method, req.RequestURI)
if n.backoffRequired(res) {
time.Sleep(backoffDelay.Duration())
} else {
backoffDelay.Reset()
}
}
func (n *client) do(uri, method string, request io.Reader, requestType string, headers http.Header) (res *http.Response, err error) {
url, err := n.url.Parse(uri)
if err != nil {
return
}
req, err := http.NewRequest(method, url.String(), request)
if err != nil {
err = fmt.Errorf("failed to create NewRequest: %w", err)
return
}
if headers != nil {
req.Header = headers
}
if request != nil {
req.Header.Set("Content-Type", requestType)
req.Header.Set("User-Agent", common.AppVersion.UserAgent())
}
n.ensureTLSConfig()
res, err = n.requester.Do(req)
if err != nil {
return
}
n.checkBackoffRequest(req, res)
return
}
func (n *client) doJSON(uri, method string, statusCode int, request interface{}, response interface{}) (int, string, *http.Response) {
var body io.Reader
if request != nil {
requestBody, err := json.Marshal(request)
if err != nil {
return -1, fmt.Sprintf("failed to marshal project object: %v", err), nil
}
body = bytes.NewReader(requestBody)
}
headers := make(http.Header)
if response != nil {
headers.Set("Accept", "application/json")
}
res, err := n.do(uri, method, body, "application/json", headers)
if err != nil {
return -1, err.Error(), nil
}
defer res.Body.Close()
defer io.Copy(ioutil.Discard, res.Body)
if res.StatusCode == statusCode {
if response != nil {
isApplicationJSON, err := isResponseApplicationJSON(res)
if !isApplicationJSON {
return -1, err.Error(), nil
}
d := json.NewDecoder(res.Body)
err = d.Decode(response)
if err != nil {
return -1, fmt.Sprintf("Error decoding json payload %v", err), nil
}
}
}
n.setLastUpdate(res.Header)
return res.StatusCode, res.Status, res
}
func (n *client) getResponseTLSData(TLS *tls.ConnectionState) (ResponseTLSData, error) {
TLSData := ResponseTLSData{
CertFile: n.certFile,
KeyFile: n.keyFile,
}
caChain, err := n.buildCAChain(TLS)
if err != nil {
return TLSData, fmt.Errorf("couldn't build CA Chain: %w", err)
}
TLSData.CAChain = caChain
return TLSData, nil
}
func (n *client) buildCAChain(tls *tls.ConnectionState) (string, error) {
if len(n.caData) != 0 {
return string(n.caData), nil
}
if tls == nil {
return "", nil
}
builder := ca_chain.NewBuilder(logrus.StandardLogger())
err := builder.BuildChainFromTLSConnectionState(tls)
if err != nil {
return "", fmt.Errorf("error while fetching certificates from TLS ConnectionState: %w", err)
}
return builder.String(), nil
}
func isResponseApplicationJSON(res *http.Response) (result bool, err error) {
contentType := res.Header.Get("Content-Type")
mimetype, _, err := mime.ParseMediaType(contentType)
if err != nil {
return false, fmt.Errorf("Content-Type parsing error: %w", err)
}
if mimetype != "application/json" {
return false, fmt.Errorf("Server should return application/json. Got: %v", contentType)
}
return true, nil
}
func fixCIURL(url string) string {
url = strings.TrimRight(url, "/")
if strings.HasSuffix(url, "/ci") {
url = strings.TrimSuffix(url, "/ci")
}
return url
}
func (n *client) findCertificate(certificate *string, base string, name string) {
if *certificate != "" {
return
}
path := filepath.Join(base, name)
if _, err := os.Stat(path); err == nil {
*certificate = path
}
}
func newClient(requestCredentials requestCredentials) (c *client, err error) {
url, err := url.Parse(fixCIURL(requestCredentials.GetURL()) + "/api/v4/")
if err != nil {
return
}
if url.Scheme != "http" && url.Scheme != "https" {
err = errors.New("only http or https scheme supported")
return
}
c = &client{
url: url,
caFile: requestCredentials.GetTLSCAFile(),
certFile: requestCredentials.GetTLSCertFile(),
keyFile: requestCredentials.GetTLSKeyFile(),
requestBackOffs: make(map[string]*backoff.Backoff),
}
c.requester = newRateLimitRequester(&c.Client)
host := strings.Split(url.Host, ":")[0]
if CertificateDirectory != "" {
c.findCertificate(&c.caFile, CertificateDirectory, host+".crt")
c.findCertificate(&c.certFile, CertificateDirectory, host+".auth.crt")
c.findCertificate(&c.keyFile, CertificateDirectory, host+".auth.key")
}
return
}
package network
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"os"
"runtime"
"strconv"
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
)
const clientError = -100
var apiRequestStatuses = prometheus.NewDesc(
"gitlab_runner_api_request_statuses_total",
"The total number of api requests, partitioned by runner, endpoint and status.",
[]string{"runner", "endpoint", "status"},
nil,
)
type APIEndpoint string
const (
APIEndpointRequestJob APIEndpoint = "request_job"
APIEndpointUpdateJob APIEndpoint = "update_job"
APIEndpointPatchTrace APIEndpoint = "patch_trace"
)
type apiRequestStatusPermutation struct {
runnerID string
endpoint APIEndpoint
status int
}
type APIRequestStatusesMap struct {
internal map[apiRequestStatusPermutation]int
lock sync.RWMutex
}
func (arspm *APIRequestStatusesMap) Append(runnerID string, endpoint APIEndpoint, status int) {
arspm.lock.Lock()
defer arspm.lock.Unlock()
permutation := apiRequestStatusPermutation{runnerID: runnerID, endpoint: endpoint, status: status}
if _, ok := arspm.internal[permutation]; !ok {
arspm.internal[permutation] = 0
}
arspm.internal[permutation]++
}
// Describe implements prometheus.Collector.
func (arspm *APIRequestStatusesMap) Describe(ch chan<- *prometheus.Desc) {
ch <- apiRequestStatuses
}
// Collect implements prometheus.Collector.
func (arspm *APIRequestStatusesMap) Collect(ch chan<- prometheus.Metric) {
arspm.lock.RLock()
defer arspm.lock.RUnlock()
for permutation, count := range arspm.internal {
ch <- prometheus.MustNewConstMetric(
apiRequestStatuses,
prometheus.CounterValue,
float64(count),
permutation.runnerID,
string(permutation.endpoint),
strconv.Itoa(permutation.status),
)
}
}
func NewAPIRequestStatusesMap() *APIRequestStatusesMap {
return &APIRequestStatusesMap{
internal: make(map[apiRequestStatusPermutation]int),
}
}
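// Illustrative usage sketch (not part of the original source): since
// APIRequestStatusesMap implements prometheus.Collector, it can be registered
// with a Prometheus registry directly; the runner ID below is a placeholder.
//
//	statuses := NewAPIRequestStatusesMap()
//	prometheus.MustRegister(statuses)
//	statuses.Append("0a1b2c3d", APIEndpointRequestJob, http.StatusCreated)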
type GitLabClient struct {
clients map[string]*client
lock sync.Mutex
requestsStatusesMap *APIRequestStatusesMap
}
func (n *GitLabClient) getClient(credentials requestCredentials) (c *client, err error) {
n.lock.Lock()
defer n.lock.Unlock()
if n.clients == nil {
n.clients = make(map[string]*client)
}
key := fmt.Sprintf("%s_%s_%s_%s", credentials.GetURL(), credentials.GetToken(), credentials.GetTLSCAFile(), credentials.GetTLSCertFile())
c = n.clients[key]
if c == nil {
c, err = newClient(credentials)
if err != nil {
return
}
n.clients[key] = c
}
return
}
func (n *GitLabClient) getLastUpdate(credentials requestCredentials) (lu string) {
cli, err := n.getClient(credentials)
if err != nil {
return ""
}
return cli.getLastUpdate()
}
func (n *GitLabClient) getRunnerVersion(config common.RunnerConfig) common.VersionInfo {
info := common.VersionInfo{
Name: common.NAME,
Version: common.VERSION,
Revision: common.REVISION,
Platform: runtime.GOOS,
Architecture: runtime.GOARCH,
Executor: config.Executor,
Shell: config.Shell,
}
if executor := common.GetExecutor(config.Executor); executor != nil {
executor.GetFeatures(&info.Features)
if info.Shell == "" {
info.Shell = executor.GetDefaultShell()
}
}
if shell := common.GetShell(info.Shell); shell != nil {
shell.GetFeatures(&info.Features)
}
return info
}
func (n *GitLabClient) doRaw(credentials requestCredentials, method, uri string, request io.Reader, requestType string, headers http.Header) (res *http.Response, err error) {
c, err := n.getClient(credentials)
if err != nil {
return nil, err
}
return c.do(uri, method, request, requestType, headers)
}
func (n *GitLabClient) doJSON(credentials requestCredentials, method, uri string, statusCode int, request interface{}, response interface{}) (int, string, *http.Response) {
c, err := n.getClient(credentials)
if err != nil {
return clientError, err.Error(), nil
}
return c.doJSON(uri, method, statusCode, request, response)
}
func (n *GitLabClient) getResponseTLSData(credentials requestCredentials, response *http.Response) (ResponseTLSData, error) {
c, err := n.getClient(credentials)
if err != nil {
return ResponseTLSData{}, fmt.Errorf("couldn't get client: %w", err)
}
return c.getResponseTLSData(response.TLS)
}
func (n *GitLabClient) RegisterRunner(runner common.RunnerCredentials, parameters common.RegisterRunnerParameters) *common.RegisterRunnerResponse {
// TODO: pass executor
request := common.RegisterRunnerRequest{
RegisterRunnerParameters: parameters,
Token: runner.Token,
Info: n.getRunnerVersion(common.RunnerConfig{}),
}
var response common.RegisterRunnerResponse
result, statusText, _ := n.doJSON(&runner, "POST", "runners", http.StatusCreated, &request, &response)
switch result {
case http.StatusCreated:
runner.Log().Println("Registering runner...", "succeeded")
return &response
case http.StatusForbidden:
runner.Log().Errorln("Registering runner...", "forbidden (check registration token)")
return nil
case clientError:
runner.Log().WithField("status", statusText).Errorln("Registering runner...", "error")
return nil
default:
runner.Log().WithField("status", statusText).Errorln("Registering runner...", "failed")
return nil
}
}
func (n *GitLabClient) VerifyRunner(runner common.RunnerCredentials) bool {
request := common.VerifyRunnerRequest{
Token: runner.Token,
}
result, statusText, _ := n.doJSON(&runner, "POST", "runners/verify", http.StatusOK, &request, nil)
switch result {
case http.StatusOK:
// this is expected, due to the fact that we ask about a non-existing job
runner.Log().Println("Verifying runner...", "is alive")
return true
case http.StatusForbidden:
runner.Log().Errorln("Verifying runner...", "is removed")
return false
case clientError:
runner.Log().WithField("status", statusText).Errorln("Verifying runner...", "error")
return true
default:
runner.Log().WithField("status", statusText).Errorln("Verifying runner...", "failed")
return true
}
}
func (n *GitLabClient) UnregisterRunner(runner common.RunnerCredentials) bool {
request := common.UnregisterRunnerRequest{
Token: runner.Token,
}
result, statusText, _ := n.doJSON(&runner, "DELETE", "runners", http.StatusNoContent, &request, nil)
const baseLogText = "Unregistering runner from GitLab"
switch result {
case http.StatusNoContent:
runner.Log().Println(baseLogText, "succeeded")
return true
case http.StatusForbidden:
runner.Log().Errorln(baseLogText, "forbidden")
return false
case clientError:
runner.Log().WithField("status", statusText).Errorln(baseLogText, "error")
return false
default:
runner.Log().WithField("status", statusText).Errorln(baseLogText, "failed")
return false
}
}
func addTLSData(response *common.JobResponse, tlsData ResponseTLSData) {
if tlsData.CAChain != "" {
response.TLSCAChain = tlsData.CAChain
}
if tlsData.CertFile != "" && tlsData.KeyFile != "" {
data, err := ioutil.ReadFile(tlsData.CertFile)
if err == nil {
response.TLSAuthCert = string(data)
}
data, err = ioutil.ReadFile(tlsData.KeyFile)
if err == nil {
response.TLSAuthKey = string(data)
}
}
}
func (n *GitLabClient) RequestJob(config common.RunnerConfig, sessionInfo *common.SessionInfo) (*common.JobResponse, bool) {
request := common.JobRequest{
Info: n.getRunnerVersion(config),
Token: config.Token,
LastUpdate: n.getLastUpdate(&config.RunnerCredentials),
Session: sessionInfo,
}
var response common.JobResponse
result, statusText, httpResponse := n.doJSON(&config.RunnerCredentials, "POST", "jobs/request", http.StatusCreated, &request, &response)
n.requestsStatusesMap.Append(config.RunnerCredentials.ShortDescription(), APIEndpointRequestJob, result)
switch result {
case http.StatusCreated:
config.Log().WithFields(logrus.Fields{
"job": response.ID,
"repo_url": response.RepoCleanURL(),
}).Println("Checking for jobs...", "received")
tlsData, err := n.getResponseTLSData(&config.RunnerCredentials, httpResponse)
if err != nil {
config.Log().
WithError(err).Errorln("Error on fetching TLS Data from API response...", "error")
}
addTLSData(&response, tlsData)
return &response, true
case http.StatusForbidden:
config.Log().Errorln("Checking for jobs...", "forbidden")
return nil, false
case http.StatusNoContent:
config.Log().Debugln("Checking for jobs...", "nothing")
return nil, true
case clientError:
config.Log().WithField("status", statusText).Errorln("Checking for jobs...", "error")
return nil, false
default:
config.Log().WithField("status", statusText).Warningln("Checking for jobs...", "failed")
return nil, true
}
}
func (n *GitLabClient) UpdateJob(config common.RunnerConfig, jobCredentials *common.JobCredentials, jobInfo common.UpdateJobInfo) common.UpdateState {
request := common.UpdateJobRequest{
Info: n.getRunnerVersion(config),
Token: jobCredentials.Token,
State: jobInfo.State,
FailureReason: jobInfo.FailureReason,
}
result, statusText, response := n.doJSON(&config.RunnerCredentials, "PUT", fmt.Sprintf("jobs/%d", jobInfo.ID), http.StatusOK, &request, nil)
n.requestsStatusesMap.Append(config.RunnerCredentials.ShortDescription(), APIEndpointUpdateJob, result)
remoteJobStateResponse := NewRemoteJobStateResponse(response)
log := config.Log().WithFields(logrus.Fields{
"code": result,
"job": jobInfo.ID,
"job-status": remoteJobStateResponse.RemoteState,
})
switch {
case remoteJobStateResponse.IsAborted():
log.Warningln("Submitting job to coordinator...", "aborted")
return common.UpdateAbort
case result == http.StatusOK:
log.Debugln("Submitting job to coordinator...", "ok")
return common.UpdateSucceeded
case result == http.StatusNotFound:
log.Warningln("Submitting job to coordinator...", "aborted")
return common.UpdateAbort
case result == http.StatusForbidden:
log.WithField("status", statusText).Errorln("Submitting job to coordinator...", "forbidden")
return common.UpdateAbort
case result == clientError:
log.WithField("status", statusText).Errorln("Submitting job to coordinator...", "error")
return common.UpdateAbort
default:
log.WithField("status", statusText).Warningln("Submitting job to coordinator...", "failed")
return common.UpdateFailed
}
}
func (n *GitLabClient) PatchTrace(config common.RunnerConfig, jobCredentials *common.JobCredentials, content []byte, startOffset int) common.PatchTraceResult {
id := jobCredentials.ID
baseLog := config.Log().WithField("job", id)
if len(content) == 0 {
baseLog.Debugln("Appending trace to coordinator...", "skipped due to empty patch")
return common.NewPatchTraceResult(startOffset, common.UpdateSucceeded, 0)
}
endOffset := startOffset + len(content)
contentRange := fmt.Sprintf("%d-%d", startOffset, endOffset-1)
headers := make(http.Header)
headers.Set("Content-Range", contentRange)
headers.Set("JOB-TOKEN", jobCredentials.Token)
uri := fmt.Sprintf("jobs/%d/trace", id)
request := bytes.NewReader(content)
response, err := n.doRaw(&config.RunnerCredentials, "PATCH", uri, request, "text/plain", headers)
if err != nil {
config.Log().Errorln("Appending trace to coordinator...", "error", err.Error())
return common.NewPatchTraceResult(startOffset, common.UpdateFailed, 0)
}
n.requestsStatusesMap.Append(config.RunnerCredentials.ShortDescription(), APIEndpointPatchTrace, response.StatusCode)
defer response.Body.Close()
defer io.Copy(ioutil.Discard, response.Body)
tracePatchResponse := NewTracePatchResponse(response, baseLog)
log := baseLog.WithFields(logrus.Fields{
"sent-log": contentRange,
"job-log": tracePatchResponse.RemoteRange,
"job-status": tracePatchResponse.RemoteState,
"code": response.StatusCode,
"status": response.Status,
"update-interval": tracePatchResponse.RemoteTraceUpdateInterval,
})
result := common.PatchTraceResult{
SentOffset: startOffset,
NewUpdateInterval: tracePatchResponse.RemoteTraceUpdateInterval,
}
switch {
case tracePatchResponse.IsAborted():
log.Warningln("Appending trace to coordinator...", "aborted")
result.State = common.UpdateAbort
return result
case response.StatusCode == http.StatusAccepted:
log.Debugln("Appending trace to coordinator...", "ok")
result.SentOffset = endOffset
result.State = common.UpdateSucceeded
return result
case response.StatusCode == http.StatusNotFound:
log.Warningln("Appending trace to coordinator...", "not-found")
result.State = common.UpdateNotFound
return result
case response.StatusCode == http.StatusRequestedRangeNotSatisfiable:
log.Warningln("Appending trace to coordinator...", "range mismatch")
result.SentOffset = tracePatchResponse.NewOffset()
result.State = common.UpdateRangeMismatch
return result
case response.StatusCode == clientError:
log.Errorln("Appending trace to coordinator...", "error")
result.State = common.UpdateAbort
return result
default:
log.Warningln("Appending trace to coordinator...", "failed")
result.State = common.UpdateFailed
return result
}
}
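// Worked example (illustrative, not part of the original source): for
// startOffset = 100 and a 50-byte patch, endOffset is 150 and the request is
// sent with "Content-Range: 100-149". On 202 Accepted the next patch resumes
// from SentOffset = 150; on 416 Range Not Satisfiable the offset is instead
// reset to the server-reported range via NewOffset().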
func (n *GitLabClient) createArtifactsForm(mpw *multipart.Writer, reader io.Reader, baseName string) error {
wr, err := mpw.CreateFormFile("file", baseName)
if err != nil {
return err
}
_, err = io.Copy(wr, reader)
if err != nil {
return err
}
return nil
}
func uploadRawArtifactsQuery(options common.ArtifactsOptions) url.Values {
q := url.Values{}
if options.ExpireIn != "" {
q.Set("expire_in", options.ExpireIn)
}
if options.Format != "" {
q.Set("artifact_format", string(options.Format))
}
if options.Type != "" {
q.Set("artifact_type", options.Type)
}
return q
}
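// Example (illustrative values, not part of the original source): for options
// with ExpireIn = "1 week", Format = "zip" and Type = "archive", the query
// encodes (keys sorted) to:
//
//	artifact_format=zip&artifact_type=archive&expire_in=1+week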
func (n *GitLabClient) UploadRawArtifacts(config common.JobCredentials, reader io.Reader, options common.ArtifactsOptions) common.UploadState {
pr, pw := io.Pipe()
defer pr.Close()
mpw := multipart.NewWriter(pw)
go func() {
defer pw.Close()
defer mpw.Close()
err := n.createArtifactsForm(mpw, reader, options.BaseName)
if err != nil {
pw.CloseWithError(err)
}
}()
query := uploadRawArtifactsQuery(options)
headers := make(http.Header)
headers.Set("JOB-TOKEN", config.Token)
res, err := n.doRaw(&config, "POST", fmt.Sprintf("jobs/%d/artifacts?%s", config.ID, query.Encode()), pr, mpw.FormDataContentType(), headers)
log := logrus.WithFields(logrus.Fields{
"id": config.ID,
"token": helpers.ShortenToken(config.Token),
})
if res != nil {
log = log.WithField("responseStatus", res.Status)
}
if err != nil {
log.WithError(err).Errorln("Uploading artifacts to coordinator...", "error")
return common.UploadFailed
}
defer res.Body.Close()
defer io.Copy(ioutil.Discard, res.Body)
switch res.StatusCode {
case http.StatusCreated:
log.Println("Uploading artifacts to coordinator...", "ok")
return common.UploadSucceeded
case http.StatusForbidden:
log.WithField("status", res.Status).Errorln("Uploading artifacts to coordinator...", "forbidden")
return common.UploadForbidden
case http.StatusRequestEntityTooLarge:
log.WithField("status", res.Status).Errorln("Uploading artifacts to coordinator...", "too large archive")
return common.UploadTooLarge
default:
log.WithField("status", res.Status).Warningln("Uploading artifacts to coordinator...", "failed")
return common.UploadFailed
}
}
func (n *GitLabClient) DownloadArtifacts(config common.JobCredentials, artifactsFile string) common.DownloadState {
headers := make(http.Header)
headers.Set("JOB-TOKEN", config.Token)
res, err := n.doRaw(&config, "GET", fmt.Sprintf("jobs/%d/artifacts", config.ID), nil, "", headers)
log := logrus.WithFields(logrus.Fields{
"id": config.ID,
"token": helpers.ShortenToken(config.Token),
})
if res != nil {
log = log.WithField("responseStatus", res.Status)
}
if err != nil {
log.Errorln("Downloading artifacts from coordinator...", "error", err.Error())
return common.DownloadFailed
}
defer res.Body.Close()
defer io.Copy(ioutil.Discard, res.Body)
switch res.StatusCode {
case http.StatusOK:
file, err := os.Create(artifactsFile)
if err != nil {
// no file handle was created, so there is nothing to clean up
log.WithError(err).Errorln("Downloading artifacts from coordinator...", "error")
return common.DownloadFailed
}
defer file.Close()
_, err = io.Copy(file, res.Body)
if err != nil {
// close and remove the partially written file so a corrupt archive is not left behind
file.Close()
os.Remove(file.Name())
log.WithError(err).Errorln("Downloading artifacts from coordinator...", "error")
return common.DownloadFailed
}
log.Println("Downloading artifacts from coordinator...", "ok")
return common.DownloadSucceeded
case http.StatusForbidden:
log.WithField("status", res.Status).Errorln("Downloading artifacts from coordinator...", "forbidden")
return common.DownloadForbidden
case http.StatusNotFound:
log.Errorln("Downloading artifacts from coordinator...", "not found")
return common.DownloadNotFound
default:
log.WithField("status", res.Status).Warningln("Downloading artifacts from coordinator...", "failed")
return common.DownloadFailed
}
}
func (n *GitLabClient) ProcessJob(config common.RunnerConfig, jobCredentials *common.JobCredentials) (common.JobTrace, error) {
trace, err := newJobTrace(n, config, jobCredentials)
if err != nil {
return nil, err
}
trace.start()
return trace, nil
}
func NewGitLabClientWithRequestStatusesMap(rsMap *APIRequestStatusesMap) *GitLabClient {
return &GitLabClient{
requestsStatusesMap: rsMap,
}
}
func NewGitLabClient() *GitLabClient {
return NewGitLabClientWithRequestStatusesMap(NewAPIRequestStatusesMap())
}
package network
import (
"net/http"
"strconv"
"strings"
"time"
"github.com/sirupsen/logrus"
)
const (
rangeHeader = "Range"
traceUpdateIntervalHeader = "X-GitLab-Trace-Update-Interval"
)
type TracePatchResponse struct {
*RemoteJobStateResponse
RemoteRange string
RemoteTraceUpdateInterval time.Duration
}
func (p *TracePatchResponse) NewOffset() int {
remoteRangeParts := strings.Split(p.RemoteRange, "-")
if len(remoteRangeParts) == 2 {
newOffset, _ := strconv.Atoi(remoteRangeParts[1])
return newOffset
}
return 0
}
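// Example (illustrative, not part of the original source): a RemoteRange of
// "0-99" yields a new offset of 99, while a missing or malformed header
// yields 0:
//
//	(&TracePatchResponse{RemoteRange: "0-99"}).NewOffset() // == 99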
func NewTracePatchResponse(response *http.Response, logger logrus.FieldLogger) *TracePatchResponse {
if response == nil {
return new(TracePatchResponse)
}
updateIntervalRaw := response.Header.Get(traceUpdateIntervalHeader)
remoteTraceUpdateInterval, err := strconv.Atoi(updateIntervalRaw)
if err != nil {
remoteTraceUpdateInterval = emptyRemoteTraceUpdateInterval
logger.WithError(err).
WithField("header-value", updateIntervalRaw).
Warningf("Failed to parse %q header", traceUpdateIntervalHeader)
}
return &TracePatchResponse{
RemoteJobStateResponse: NewRemoteJobStateResponse(response),
RemoteRange: response.Header.Get(rangeHeader),
RemoteTraceUpdateInterval: time.Duration(remoteTraceUpdateInterval) * time.Second,
}
}
package network
import (
"errors"
"fmt"
"net/http"
"time"
"github.com/sirupsen/logrus"
)
// NOTE: The functionality of the rate limiting below as well as the constant values
// are documented in `docs/configuration/rate_limiting.md`
const (
// RateLimit-ResetTime: Wed, 21 Oct 2015 07:28:00 GMT
rateLimitResetTimeHeader = "RateLimit-ResetTime"
// The fallback is used if the reset header's value is present but cannot be parsed
defaultRateLimitFallbackDelay = time.Minute
defaultRateLimitRetriesCount = 5
)
var (
errRateLimitGaveUp = errors.New("gave up due to rate limit")
)
type rateLimitRequester struct {
client requester
fallbackDelay time.Duration
retriesCount int
}
func newRateLimitRequester(client requester) *rateLimitRequester {
return &rateLimitRequester{
client: client,
fallbackDelay: defaultRateLimitFallbackDelay,
retriesCount: defaultRateLimitRetriesCount,
}
}
func (r *rateLimitRequester) Do(req *http.Request) (*http.Response, error) {
logger := logrus.
WithFields(logrus.Fields{
"context": "ratelimit-requester-gitlab-request",
"url": req.URL.String(),
"method": req.Method,
})
// The worst-case total wait is the reverse proxy's configured timeout multiplied by retriesCount
for i := 0; i < r.retriesCount; i++ {
res, rateLimitDuration, err := r.do(req, logger)
if rateLimitDuration == nil {
return res, err
}
// this rate-limited response is never returned to the caller, so close its
// body before retrying to avoid leaking the connection
res.Body.Close()
logger.
WithField("duration", *rateLimitDuration).
Infoln("Sleeping due to rate limit")
// In some rare cases, e.g. when the network is slow or the machine hosting
// the runner is resource-constrained, the reset time may already be in the
// past by the time we read the header. That's OK, since Sleep returns
// immediately for non-positive durations.
time.Sleep(*rateLimitDuration)
}
return nil, errRateLimitGaveUp
}
// do returns a non-nil duration when the response was rate limited; the
// caller should sleep for that duration and retry. When the duration is nil,
// the response and error are final and should be returned as-is.
func (r *rateLimitRequester) do(req *http.Request, logger *logrus.Entry) (*http.Response, *time.Duration, error) {
res, err := r.client.Do(req)
if err != nil {
return nil, nil, fmt.Errorf("couldn't execute %s against %s: %w", req.Method, req.URL, err)
}
// The request went through and we got a non-rate-limited response
if res.StatusCode != http.StatusTooManyRequests {
return res, nil, nil
}
rateLimitResetTimeValue := res.Header.Get(rateLimitResetTimeHeader)
if rateLimitResetTimeValue == "" {
// if we get a 429 but the rate limit reset header is missing, we just return
// the response, since we can't know how long to wait for the limit to reset
return res, nil, nil
}
resetTime, err := time.Parse(time.RFC1123, rateLimitResetTimeValue)
if err != nil {
// If we can't parse the rate limit reset header there's something wrong with it
// we shouldn't fail, to avoid a case where a misconfiguration in the reverse proxy can cause
// all runners to stop working. Wait for the configured fallback instead
logger.
WithError(err).
WithFields(logrus.Fields{
"header": rateLimitResetTimeHeader,
"headerValue": rateLimitResetTimeValue,
}).
Warnln("Couldn't parse rate limit header, falling back")
return res, &r.fallbackDelay, nil
}
resetDuration := time.Until(resetTime)
return res, &resetDuration, nil
}
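// Worked example (illustrative, not part of the original source): a 429
// response carrying
//
//	RateLimit-ResetTime: Wed, 21 Oct 2015 07:28:00 GMT
//
// is parsed with time.Parse(time.RFC1123, ...) and the requester sleeps until
// that instant before retrying. An unparsable value falls back to
// fallbackDelay (one minute by default), and after retriesCount attempts Do
// gives up with errRateLimitGaveUp.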
package network
import (
"net/http"
)
const (
remoteStateHeader = "Job-Status"
statusCanceled = "canceled"
statusFailed = "failed"
)
type RemoteJobStateResponse struct {
StatusCode int
RemoteState string
}
func (r *RemoteJobStateResponse) IsAborted() bool {
if r.RemoteState == statusCanceled || r.RemoteState == statusFailed {
return true
}
if r.StatusCode == http.StatusForbidden {
return true
}
return false
}
func NewRemoteJobStateResponse(response *http.Response) *RemoteJobStateResponse {
if response == nil {
return &RemoteJobStateResponse{}
}
return &RemoteJobStateResponse{
StatusCode: response.StatusCode,
RemoteState: response.Header.Get(remoteStateHeader),
}
}
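// Example (illustrative, not part of the original source): a response with a
// "Job-Status: canceled" header, a "Job-Status: failed" header, or an HTTP
// 403 status makes IsAborted() return true, which the update and trace-patch
// paths translate into common.UpdateAbort.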
package network
import (
"context"
"sync"
"time"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/trace"
)
const (
emptyRemoteTraceUpdateInterval = 0
)
type clientJobTrace struct {
client common.Network
config common.RunnerConfig
jobCredentials *common.JobCredentials
id int
cancelFunc context.CancelFunc
buffer *trace.Buffer
lock sync.RWMutex
state common.JobState
failureReason common.JobFailureReason
finished chan bool
sentTrace int
sentTime time.Time
updateInterval time.Duration
forceSendInterval time.Duration
finishRetryInterval time.Duration
maxTracePatchSize int
failuresCollector common.FailuresCollector
}
func (c *clientJobTrace) Success() {
c.Fail(nil, common.NoneFailure)
}
func (c *clientJobTrace) Fail(err error, failureReason common.JobFailureReason) {
c.lock.Lock()
if c.state != common.Running {
c.lock.Unlock()
return
}
if err == nil {
c.state = common.Success
} else {
c.setFailure(failureReason)
}
c.lock.Unlock()
c.finish()
}
func (c *clientJobTrace) Write(data []byte) (n int, err error) {
return c.buffer.Write(data)
}
func (c *clientJobTrace) SetMasked(masked []string) {
c.buffer.SetMasked(masked)
}
func (c *clientJobTrace) SetCancelFunc(cancelFunc context.CancelFunc) {
c.cancelFunc = cancelFunc
}
func (c *clientJobTrace) SetFailuresCollector(fc common.FailuresCollector) {
c.failuresCollector = fc
}
func (c *clientJobTrace) IsStdout() bool {
return false
}
func (c *clientJobTrace) setFailure(reason common.JobFailureReason) {
c.state = common.Failed
c.failureReason = reason
if c.failuresCollector != nil {
c.failuresCollector.RecordFailure(reason, c.config.ShortDescription())
}
}
func (c *clientJobTrace) start() {
c.finished = make(chan bool)
c.state = common.Running
c.setupLogLimit()
go c.watch()
}
func (c *clientJobTrace) finalTraceUpdate() {
for c.anyTraceToSend() {
switch c.sendPatch() {
case common.UpdateSucceeded:
// keep sending until we succeed
continue
case common.UpdateAbort:
return
case common.UpdateNotFound:
return
case common.UpdateRangeMismatch:
time.Sleep(c.finishRetryInterval)
case common.UpdateFailed:
time.Sleep(c.finishRetryInterval)
}
}
}
func (c *clientJobTrace) finalStatusUpdate() {
for {
switch c.sendUpdate() {
case common.UpdateSucceeded:
return
case common.UpdateAbort:
return
case common.UpdateNotFound:
return
case common.UpdateRangeMismatch:
return
case common.UpdateFailed:
time.Sleep(c.finishRetryInterval)
}
}
}
func (c *clientJobTrace) finish() {
c.buffer.Finish()
c.finished <- true
c.finalTraceUpdate()
c.finalStatusUpdate()
c.buffer.Close()
}
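// Flow sketch (illustrative, not part of the original source): finish() first
// marks the buffer complete and stops the watch() loop, then drains any
// remaining trace content via finalTraceUpdate (sleeping finishRetryInterval
// between retries on range mismatches and transient failures), then reports
// the terminal job state via finalStatusUpdate, and only closes the buffer
// once both have settled.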
func (c *clientJobTrace) incrementalUpdate() common.UpdateState {
state := c.sendPatch()
if state != common.UpdateSucceeded {
return state
}
return c.touchJob()
}
func (c *clientJobTrace) anyTraceToSend() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.buffer.Size() != c.sentTrace
}
func (c *clientJobTrace) sendPatch() common.UpdateState {
c.lock.RLock()
content, err := c.buffer.Bytes(c.sentTrace, c.maxTracePatchSize)
sentTrace := c.sentTrace
c.lock.RUnlock()
if err != nil {
return common.UpdateFailed
}
if len(content) == 0 {
return common.UpdateSucceeded
}
result := c.client.PatchTrace(c.config, c.jobCredentials, content, sentTrace)
c.setUpdateInterval(result.NewUpdateInterval)
if result.State == common.UpdateSucceeded || result.State == common.UpdateRangeMismatch {
c.lock.Lock()
c.sentTime = time.Now()
c.sentTrace = result.SentOffset
c.lock.Unlock()
}
return result.State
}
func (c *clientJobTrace) setUpdateInterval(newUpdateInterval time.Duration) {
if newUpdateInterval <= time.Duration(emptyRemoteTraceUpdateInterval) {
return
}
c.lock.Lock()
defer c.lock.Unlock()
c.updateInterval = newUpdateInterval
}
// touchJob tells the coordinator that the job is still running.
func (c *clientJobTrace) touchJob() common.UpdateState {
c.lock.RLock()
shouldRefresh := time.Since(c.sentTime) > c.forceSendInterval
c.lock.RUnlock()
if !shouldRefresh {
return common.UpdateSucceeded
}
jobInfo := common.UpdateJobInfo{
ID: c.id,
State: common.Running,
}
status := c.client.UpdateJob(c.config, c.jobCredentials, jobInfo)
if status == common.UpdateSucceeded {
c.lock.Lock()
c.sentTime = time.Now()
c.lock.Unlock()
}
return status
}
func (c *clientJobTrace) sendUpdate() common.UpdateState {
c.lock.RLock()
state := c.state
c.lock.RUnlock()
jobInfo := common.UpdateJobInfo{
ID: c.id,
State: state,
FailureReason: c.failureReason,
}
status := c.client.UpdateJob(c.config, c.jobCredentials, jobInfo)
if status == common.UpdateSucceeded {
c.lock.Lock()
c.sentTime = time.Now()
c.lock.Unlock()
}
return status
}
func (c *clientJobTrace) abort() bool {
if c.cancelFunc != nil {
c.cancelFunc()
c.cancelFunc = nil
return true
}
return false
}
func (c *clientJobTrace) watch() {
for {
select {
case <-time.After(c.getUpdateInterval()):
state := c.incrementalUpdate()
if state == common.UpdateAbort && c.abort() {
<-c.finished
return
}
case <-c.finished:
return
}
}
}
func (c *clientJobTrace) getUpdateInterval() time.Duration {
c.lock.RLock()
defer c.lock.RUnlock()
return c.updateInterval
}
func (c *clientJobTrace) setupLogLimit() {
bytesLimit := c.config.OutputLimit * 1024 // convert to bytes
if bytesLimit == 0 {
bytesLimit = common.DefaultTraceOutputLimit
}
c.buffer.SetLimit(bytesLimit)
}
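// Example (illustrative, not part of the original source): OutputLimit is
// expressed in kilobytes, so a configured OutputLimit of 4096 caps the trace
// at 4096*1024 bytes (4 MiB); an unset (zero) limit selects
// common.DefaultTraceOutputLimit.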
func newJobTrace(client common.Network, config common.RunnerConfig, jobCredentials *common.JobCredentials) (*clientJobTrace, error) {
buffer, err := trace.New()
if err != nil {
return nil, err
}
return &clientJobTrace{
client: client,
config: config,
buffer: buffer,
jobCredentials: jobCredentials,
id: jobCredentials.ID,
maxTracePatchSize: common.DefaultTracePatchLimit,
updateInterval: common.DefaultTraceUpdateInterval,
forceSendInterval: common.TraceForceSendInterval,
finishRetryInterval: common.TraceFinishRetryInterval,
}, nil
}
package referees
import (
"bytes"
"context"
"encoding/json"
"fmt"
"reflect"
"strings"
"time"
"github.com/prometheus/client_golang/api"
prometheusV1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
"github.com/sirupsen/logrus"
)
type MetricsReferee struct {
prometheusAPI prometheusV1.API
queries []string
queryInterval time.Duration
selector string
logger logrus.FieldLogger
}
type MetricsRefereeConfig struct {
PrometheusAddress string `toml:"prometheus_address,omitempty" json:"prometheus_address" description:"A host:port to a prometheus metrics server"`
QueryInterval int `toml:"query_interval,omitempty" json:"query_interval" description:"Query interval (in seconds)"`
Queries []string `toml:"queries" json:"queries" description:"A list of metrics to query (in PromQL)"`
}
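// Illustrative configuration sketch (hypothetical values, and assuming the
// referee block lives under the runner entry in config.toml; not part of the
// original source). Each query is given as "name:query", and the {selector}
// and {interval} placeholders are expanded in queryMetrics below:
//
//	[runners.referees.metrics]
//	  prometheus_address = "http://localhost:9090"
//	  query_interval = 10
//	  queries = ["cpu:rate(container_cpu_usage_seconds_total{{selector}}[{interval}])"]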
type MetricsExecutor interface {
GetMetricsSelector() string
}
func (mr *MetricsReferee) ArtifactBaseName() string {
return "metrics_referee.json"
}
func (mr *MetricsReferee) ArtifactType() string {
return "metrics_referee"
}
func (mr *MetricsReferee) ArtifactFormat() string {
return "gzip"
}
func (mr *MetricsReferee) Execute(
ctx context.Context,
startTime time.Time,
endTime time.Time,
) (*bytes.Reader, error) {
// specify the range used for the PromQL query
queryRange := prometheusV1.Range{
Start: startTime.UTC(),
End: endTime.UTC(),
Step: mr.queryInterval,
}
metrics := make(map[string][]model.SamplePair)
// pull each configured metric from Prometheus with a range query
for _, metricQuery := range mr.queries {
// break the query definition into name:query; SplitN keeps any colons that
// appear inside the PromQL expression itself as part of the query
components := strings.SplitN(metricQuery, ":", 2)
if len(components) != 2 {
err := fmt.Errorf("%q not in name:query format in metric queries", metricQuery)
mr.logger.WithError(err).Error("Failed to parse metrics query")
return nil, err
}
name := components[0]
query := components[1]
result := mr.queryMetrics(ctx, query, queryRange)
if result == nil {
continue
}
metrics[name] = result
}
// convert the collected metrics sample pairs to JSON
output, err := json.Marshal(metrics)
if err != nil {
return nil, err
}
return bytes.NewReader(output), nil
}
func (mr *MetricsReferee) queryMetrics(ctx context.Context, query string, queryRange prometheusV1.Range) []model.SamplePair {
interval := fmt.Sprintf("%.0fs", mr.queryInterval.Seconds())
query = strings.Replace(query, "{selector}", mr.selector, -1)
query = strings.Replace(query, "{interval}", interval, -1)
queryLogger := mr.logger.WithFields(logrus.Fields{
"query": query,
"start": queryRange.Start,
"end": queryRange.End,
})
queryLogger.Debug("Sending request to Prometheus API")
// execute query over range
result, _, err := mr.prometheusAPI.QueryRange(ctx, query, queryRange)
if err != nil {
queryLogger.WithError(err).Error("Failed to range query Prometheus")
return nil
}
if result == nil {
queryLogger.Error("Received nil range query result")
return nil
}
// ensure matrix result
matrix, ok := result.(model.Matrix)
if !ok {
queryLogger.WithField("result-type", reflect.TypeOf(result)).Info("Failed to type assert result into model.Matrix")
return nil
}
// no results for range query
if matrix.Len() == 0 {
return nil
}
// save first result set values at metric
return matrix[0].Values
}
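// Worked example (illustrative, not part of the original source): with a
// selector of instance="runner-abc" and a 10-second query interval, the query
//
//	rate(container_cpu_usage_seconds_total{{selector}}[{interval}])
//
// expands to
//
//	rate(container_cpu_usage_seconds_total{instance="runner-abc"}[10s])
//
// before being sent to the Prometheus range-query API.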
func newMetricsReferee(executor interface{}, config *Config, log logrus.FieldLogger) Referee {
logger := log.WithField("referee", "metrics")
if config.Metrics == nil {
return nil
}
// check whether the executor supports metrics refereeing
refereed, ok := executor.(MetricsExecutor)
if !ok {
logger.Info("executor not supported")
return nil
}
// create prometheus client from server address in config
clientConfig := api.Config{Address: config.Metrics.PrometheusAddress}
prometheusClient, err := api.NewClient(clientConfig)
if err != nil {
logger.WithError(err).Error("failed to create prometheus client")
return nil
}
prometheusAPI := prometheusV1.NewAPI(prometheusClient)
return &MetricsReferee{
prometheusAPI: prometheusAPI,
queryInterval: time.Duration(config.Metrics.QueryInterval) * time.Second,
queries: config.Metrics.Queries,
selector: refereed.GetMetricsSelector(),
logger: logger,
}
}
package referees
import (
"bytes"
"context"
"time"
"github.com/sirupsen/logrus"
)
type Referee interface {
Execute(
ctx context.Context,
startTime time.Time,
endTime time.Time,
) (*bytes.Reader, error)
ArtifactBaseName() string
ArtifactType() string
ArtifactFormat() string
}
type refereeFactory func(executor interface{}, config *Config, log logrus.FieldLogger) Referee
type Config struct {
Metrics *MetricsRefereeConfig `toml:"metrics,omitempty" json:"metrics" namespace:"metrics"`
}
var refereeFactories = []refereeFactory{
newMetricsReferee,
}
func CreateReferees(executor interface{}, config *Config, log logrus.FieldLogger) []Referee {
if config == nil {
log.Debug("No referees configured")
return nil
}
var referees []Referee
for _, factory := range refereeFactories {
referee := factory(executor, config, log)
if referee != nil {
referees = append(referees, referee)
}
}
return referees
}
package proxy
import (
"errors"
"net/http"
"strconv"
)
type Pool map[string]*Proxy
type Pooler interface {
Pool() Pool
}
type Proxy struct {
Settings *Settings
ConnectionHandler Requester
}
type Settings struct {
ServiceName string
Ports []Port
}
type Port struct {
Number int
Protocol string
Name string
}
type Requester interface {
ProxyRequest(w http.ResponseWriter, r *http.Request, requestedURI, port string, settings *Settings)
}
func NewPool() Pool {
return Pool{}
}
func NewProxySettings(serviceName string, ports []Port) *Settings {
return &Settings{
ServiceName: serviceName,
Ports: ports,
}
}
// PortByNameOrNumber accepts either a port number or a port name.
// It first tries to convert the given value to an integer, then searches
// for a port whose number matches that value or whose name matches the
// value verbatim.
func (p *Settings) PortByNameOrNumber(portNameOrNumber string) (Port, error) {
intPort, _ := strconv.Atoi(portNameOrNumber)
for _, port := range p.Ports {
if port.Number == intPort || port.Name == portNameOrNumber {
return port, nil
}
}
return Port{}, errors.New("invalid port")
}
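// Example (illustrative, not part of the original source): given Settings
// with Ports = []Port{{Number: 80, Protocol: "http", Name: "web"}}, both
// PortByNameOrNumber("80") and PortByNameOrNumber("web") return that port;
// any other value returns the "invalid port" error.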
func (p *Port) Scheme() (string, error) {
if p.Protocol == "http" || p.Protocol == "https" {
return p.Protocol, nil
}
return "", errors.New("invalid port scheme")
}
// WebsocketProtocolFor returns the proper Websocket protocol
// based on the HTTP protocol
func WebsocketProtocolFor(httpProtocol string) string {
if httpProtocol == "https" {
return "wss"
}
return "ws"
}
package session
import (
"crypto/tls"
"errors"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/helpers/certificate"
)
type sessionFinderFn func(url string) *Session
type Server struct {
config ServerConfig
log *logrus.Entry
tlsListener net.Listener
sessionFinder sessionFinderFn
httpServer *http.Server
CertificatePublicKey []byte
AdvertiseAddress string
}
type ServerConfig struct {
AdvertiseAddress string
ListenAddress string
ShutdownTimeout time.Duration
}
func NewServer(config ServerConfig, logger *logrus.Entry, certGen certificate.Generator, sessionFinder sessionFinderFn) (*Server, error) {
if logger == nil {
logger = logrus.NewEntry(logrus.StandardLogger())
}
server := Server{
config: config,
log: logger,
sessionFinder: sessionFinder,
httpServer: &http.Server{},
}
host, err := server.getPublicHost()
if err != nil {
return nil, err
}
cert, publicKey, err := certGen.Generate(host)
if err != nil {
return nil, err
}
tlsConfig := tls.Config{
Certificates: []tls.Certificate{cert},
}
// We separate out the listener creation here so that we can return an error
// if the provided address is invalid or there is some other listener error.
listener, err := net.Listen("tcp", server.config.ListenAddress)
if err != nil {
return nil, err
}
server.tlsListener = tls.NewListener(listener, &tlsConfig)
err = server.setAdvertiseAddress()
if err != nil {
return nil, err
}
server.CertificatePublicKey = publicKey
server.httpServer.Handler = http.HandlerFunc(server.handleSessionRequest)
return &server, nil
}
func (s *Server) getPublicHost() (string, error) {
for _, address := range []string{s.config.AdvertiseAddress, s.config.ListenAddress} {
if address == "" {
continue
}
host, _, err := net.SplitHostPort(address)
if err != nil {
s.log.
WithField("address", address).
WithError(err).
Warn("Failed to parse session address")
}
if host == "" {
continue
}
return host, nil
}
return "", errors.New("no valid address provided")
}
func (s *Server) setAdvertiseAddress() error {
s.AdvertiseAddress = s.config.AdvertiseAddress
if s.config.AdvertiseAddress == "" {
s.AdvertiseAddress = s.config.ListenAddress
}
if strings.HasPrefix(s.AdvertiseAddress, "https://") ||
strings.HasPrefix(s.AdvertiseAddress, "http://") {
return errors.New("url not valid, scheme defined")
}
s.AdvertiseAddress = "https://" + s.AdvertiseAddress
_, err := url.ParseRequestURI(s.AdvertiseAddress)
return err
}
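// Example (illustrative, not part of the original source): with no
// AdvertiseAddress configured and ListenAddress = "0.0.0.0:8093", the server
// advertises "https://0.0.0.0:8093"; an address that already carries an
// http:// or https:// scheme is rejected.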
func (s *Server) handleSessionRequest(w http.ResponseWriter, r *http.Request) {
logger := s.log.WithField("uri", r.RequestURI)
logger.Debug("Processing session request")
session := s.sessionFinder(r.RequestURI)
if session == nil || session.Mux() == nil {
logger.Error("Mux handler not found")
http.NotFound(w, r)
return
}
session.Mux().ServeHTTP(w, r)
}
func (s *Server) Start() error {
if s.httpServer == nil {
return errors.New("http server not set")
}
err := s.httpServer.Serve(s.tlsListener)
// ErrServerClosed is a legitimate error that should not cause failure
if err == http.ErrServerClosed {
return nil
}
return err
}
func (s *Server) Close() {
if s.httpServer != nil {
s.httpServer.Close()
}
}
package session
import (
"net/http"
"reflect"
"sync"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
"gitlab.com/gitlab-org/gitlab-runner/session/proxy"
"gitlab.com/gitlab-org/gitlab-runner/session/terminal"
)
type connectionInUseError struct{}
func (connectionInUseError) Error() string {
return "Connection already in use"
}
type Session struct {
Endpoint string
Token string
mux *mux.Router
interactiveTerminal terminal.InteractiveTerminal
terminalConn terminal.Conn
proxyPool proxy.Pool
// DisconnectCh signals when the client disconnects from the terminal.
DisconnectCh chan error
// TimeoutCh signals when the terminal session times out.
TimeoutCh chan error
log *logrus.Entry
lock sync.Mutex
}
func NewSession(logger *logrus.Entry) (*Session, error) {
endpoint, token, err := generateEndpoint()
if err != nil {
return nil, err
}
if logger == nil {
logger = logrus.NewEntry(logrus.StandardLogger())
}
logger = logger.WithField("uri", endpoint)
sess := &Session{
Endpoint: endpoint,
Token: token,
DisconnectCh: make(chan error),
TimeoutCh: make(chan error),
log: logger,
}
sess.setMux()
return sess, nil
}
func generateEndpoint() (string, string, error) {
sessionUUID, err := helpers.GenerateRandomUUID(32)
if err != nil {
return "", "", err
}
token, err := generateToken()
if err != nil {
return "", "", err
}
return "/session/" + sessionUUID, token, nil
}
func generateToken() (string, error) {
token, err := helpers.GenerateRandomUUID(32)
if err != nil {
return "", err
}
return token, nil
}
func (s *Session) withAuthorization(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
logger := s.log.WithField("uri", r.RequestURI)
logger.Debug("Endpoint session request")
if s.Token != r.Header.Get("Authorization") {
logger.Error("Authorization header is not valid")
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
return
}
next.ServeHTTP(w, r)
})
}
func (s *Session) setMux() {
s.lock.Lock()
defer s.lock.Unlock()
s.mux = mux.NewRouter()
s.mux.Handle(s.Endpoint+"/proxy/{resource}/{port}/{requestedUri:.*}", s.withAuthorization(http.HandlerFunc(s.proxyHandler)))
s.mux.Handle(s.Endpoint+"/exec", s.withAuthorization(http.HandlerFunc(s.execHandler)))
}
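// Illustrative sketch (not part of the original source): for an endpoint of
// "/session/<uuid>", the router serves
//
//	/session/<uuid>/proxy/{resource}/{port}/{requestedUri:.*}
//	/session/<uuid>/exec
//
// and both routes require the session token to be presented verbatim in the
// Authorization header.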
func (s *Session) proxyHandler(w http.ResponseWriter, r *http.Request) {
logger := s.log.WithField("uri", r.RequestURI)
logger.Debug("Proxy session request")
params := mux.Vars(r)
serviceName := params["resource"]
serviceProxy := s.proxyPool[serviceName]
if serviceProxy == nil {
logger.Warn("Proxy not found")
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
if serviceProxy.ConnectionHandler == nil {
logger.Warn("Proxy connection handler is not defined")
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
serviceProxy.ConnectionHandler.ProxyRequest(w, r, params["requestedUri"], params["port"], serviceProxy.Settings)
}
func (s *Session) execHandler(w http.ResponseWriter, r *http.Request) {
logger := s.log.WithField("uri", r.RequestURI)
logger.Debug("Exec terminal session request")
if !s.terminalAvailable() {
logger.Error("Interactive terminal not set")
http.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)
return
}
if !websocket.IsWebSocketUpgrade(r) {
logger.Error("Request is not a web socket connection")
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
return
}
terminalConn, err := s.newTerminalConn()
if _, ok := err.(connectionInUseError); ok {
logger.Warn("Terminal already connected, revoking connection")
http.Error(w, http.StatusText(http.StatusLocked), http.StatusLocked)
return
}
if err != nil {
logger.WithError(err).Error("Failed to connect to terminal")
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
defer s.closeTerminalConn(terminalConn)
logger.Debugln("Starting terminal session")
terminalConn.Start(w, r, s.TimeoutCh, s.DisconnectCh)
}
func (s *Session) terminalAvailable() bool {
s.lock.Lock()
defer s.lock.Unlock()
return s.interactiveTerminal != nil
}
func (s *Session) newTerminalConn() (terminal.Conn, error) {
s.lock.Lock()
defer s.lock.Unlock()
if s.terminalConn != nil {
return nil, connectionInUseError{}
}
conn, err := s.interactiveTerminal.Connect()
if err != nil {
return nil, err
}
s.terminalConn = conn
return conn, nil
}
func (s *Session) closeTerminalConn(conn terminal.Conn) {
s.lock.Lock()
defer s.lock.Unlock()
err := conn.Close()
if err != nil {
s.log.WithError(err).Warn("Failed to close terminal connection")
}
if reflect.ValueOf(s.terminalConn) == reflect.ValueOf(conn) {
s.log.Warningln("Closed active terminal connection")
s.terminalConn = nil
}
}
func (s *Session) SetInteractiveTerminal(interactiveTerminal terminal.InteractiveTerminal) {
s.lock.Lock()
defer s.lock.Unlock()
s.interactiveTerminal = interactiveTerminal
}
func (s *Session) SetProxyPool(pooler proxy.Pooler) {
s.lock.Lock()
defer s.lock.Unlock()
s.proxyPool = pooler.Pool()
}
func (s *Session) Mux() *mux.Router {
return s.mux
}
func (s *Session) Connected() bool {
s.lock.Lock()
defer s.lock.Unlock()
return s.terminalConn != nil
}
func (s *Session) Kill() error {
s.lock.Lock()
defer s.lock.Unlock()
if s.terminalConn == nil {
return nil
}
err := s.terminalConn.Close()
s.terminalConn = nil
return err
}
package shells
import (
"errors"
"fmt"
"net/url"
"path"
"path/filepath"
"strconv"
"strings"
"gitlab.com/gitlab-org/gitlab-runner/cache"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers/tls"
)
type AbstractShell struct {
}
func (b *AbstractShell) GetFeatures(features *common.FeaturesInfo) {
features.Artifacts = true
features.UploadMultipleArtifacts = true
features.UploadRawArtifacts = true
features.Cache = true
features.Refspecs = true
features.Masking = true
}
func (b *AbstractShell) writeCdBuildDir(w ShellWriter, info common.ShellScriptInfo) {
w.Cd(info.Build.FullProjectDir())
}
func (b *AbstractShell) cacheFile(build *common.Build, userKey string) (key, file string) {
if build.CacheDir == "" {
return
}
// Deduce cache key
key = path.Join(build.JobInfo.Name, build.GitInfo.Ref)
if userKey != "" {
key = build.GetAllVariables().ExpandValue(userKey)
}
// Ignore cache without the key
if key == "" {
return
}
file = path.Join(build.CacheDir, key, "cache.zip")
file, err := filepath.Rel(build.BuildDir, file)
if err != nil {
return "", ""
}
return
}
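// Worked example (illustrative values, not part of the original source, and
// assuming a Unix-style path layout): with CacheDir = "/cache",
// BuildDir = "/builds/group/project", job name "test" and ref "master", the
// default key is "test/master" and the returned file is the cache archive
// path relative to the build directory:
//
//	key  == "test/master"
//	file == "../../../cache/test/master/cache.zip"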
func (b *AbstractShell) guardRunnerCommand(w ShellWriter, runnerCommand string, action string, f func()) {
if runnerCommand == "" {
w.Warning("%s is not supported by this executor.", action)
return
}
w.IfCmd(runnerCommand, "--version")
f()
w.Else()
w.Warning("Missing %s. %s is disabled.", runnerCommand, action)
w.EndIf()
}
func (b *AbstractShell) cacheExtractor(w ShellWriter, info common.ShellScriptInfo) error {
for _, cacheOptions := range info.Build.Cache {
// Create list of files to extract
archiverArgs := []string{}
for _, path := range cacheOptions.Paths {
archiverArgs = append(archiverArgs, "--path", path)
}
if cacheOptions.Untracked {
archiverArgs = append(archiverArgs, "--untracked")
}
// Skip restoring cache if no cache is defined
if len(archiverArgs) < 1 {
continue
}
// Skip extraction if the deduced cache key is empty
cacheKey, cacheFile := b.cacheFile(info.Build, cacheOptions.Key)
if cacheKey == "" {
w.Notice("Skipping cache extraction due to empty cache key")
continue
}
if ok, err := cacheOptions.CheckPolicy(common.CachePolicyPull); err != nil {
return fmt.Errorf("%w for %s", err, cacheKey)
} else if !ok {
w.Notice("Not downloading cache %s due to policy", cacheKey)
continue
}
args := []string{
"cache-extractor",
"--file", cacheFile,
"--timeout", strconv.Itoa(info.Build.GetCacheRequestTimeout()),
}
// Generate cache download address
if url := cache.GetCacheDownloadURL(info.Build, cacheKey); url != nil {
args = append(args, "--url", url.String())
}
// Execute cache-extractor command. Failure is not fatal.
b.guardRunnerCommand(w, info.RunnerCommand, "Extracting cache", func() {
w.Notice("Checking cache for %s...", cacheKey)
w.IfCmdWithOutput(info.RunnerCommand, args...)
w.Notice("Successfully extracted cache")
w.Else()
w.Warning("Failed to extract cache")
w.EndIf()
})
}
return nil
}
func (b *AbstractShell) downloadArtifacts(w ShellWriter, job common.Dependency, info common.ShellScriptInfo) {
args := []string{
"artifacts-downloader",
"--url",
info.Build.Runner.URL,
"--token",
job.Token,
"--id",
strconv.Itoa(job.ID),
}
w.Notice("Downloading artifacts for %s (%d)...", job.Name, job.ID)
w.Command(info.RunnerCommand, args...)
}
func (b *AbstractShell) jobArtifacts(info common.ShellScriptInfo) (otherJobs []common.Dependency) {
for _, otherJob := range info.Build.Dependencies {
if otherJob.ArtifactsFile.Filename == "" {
continue
}
otherJobs = append(otherJobs, otherJob)
}
return
}
func (b *AbstractShell) downloadAllArtifacts(w ShellWriter, info common.ShellScriptInfo) {
otherJobs := b.jobArtifacts(info)
if len(otherJobs) == 0 {
return
}
b.guardRunnerCommand(w, info.RunnerCommand, "Artifacts downloading", func() {
for _, otherJob := range otherJobs {
b.downloadArtifacts(w, otherJob, info)
}
})
}
func (b *AbstractShell) writePrepareScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
return nil
}
func (b *AbstractShell) writeGetSourcesScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
b.writeExports(w, info)
if !info.Build.IsSharedEnv() {
b.writeGitSSLConfig(w, info.Build, []string{"--global"})
}
if info.PreCloneScript != "" && info.Build.GetGitStrategy() != common.GitNone {
b.writeCommands(w, info.PreCloneScript)
}
if err := b.writeCloneFetchCmds(w, info); err != nil {
return err
}
return b.writeSubmoduleUpdateCmds(w, info)
}
func (b *AbstractShell) writeExports(w ShellWriter, info common.ShellScriptInfo) {
for _, variable := range info.Build.GetAllVariables() {
w.Variable(variable)
}
}
func (b *AbstractShell) writeGitSSLConfig(w ShellWriter, build *common.Build, where []string) {
repoURL, err := url.Parse(build.Runner.URL)
if err != nil {
w.Warning("git SSL config: Can't parse repository URL. %s", err)
return
}
repoURL.Path = ""
host := repoURL.String()
variables := build.GetCITLSVariables()
args := append([]string{"config"}, where...)
for variable, config := range map[string]string{
tls.VariableCAFile: "sslCAInfo",
tls.VariableCertFile: "sslCert",
tls.VariableKeyFile: "sslKey",
} {
if variables.Get(variable) == "" {
continue
}
key := fmt.Sprintf("http.%s.%s", host, config)
w.Command("git", append(args, key, w.EnvVariableKey(variable))...)
}
}
func (b *AbstractShell) writeCloneFetchCmds(w ShellWriter, info common.ShellScriptInfo) error {
build := info.Build
// If LFS smudging was disabled by the user (by setting the GIT_LFS_SKIP_SMUDGE
// variable when defining the job), we skip this step.
//
// Otherwise we disable smudging here to protect against memory allocation
// failures.
//
// Please read https://gitlab.com/gitlab-org/gitlab-runner/issues/3366 and
// https://github.com/git-lfs/git-lfs/issues/3524 for context.
if !build.IsLFSSmudgeDisabled() {
w.Variable(common.JobVariable{Key: "GIT_LFS_SKIP_SMUDGE", Value: "1"})
}
err := b.handleGetSourcesStrategy(w, build)
if err != nil {
return err
}
if build.GetGitCheckout() {
b.writeCheckoutCmd(w, build)
// If LFS smudging was disabled by the user (by setting the GIT_LFS_SKIP_SMUDGE
// variable when defining the job), we skip this step.
//
// Otherwise, because we've disabled LFS smudging above, we now need to
// manually call `git lfs pull` to fetch and check out any LFS objects that may
// be present in the repository.
//
// Repositories without LFS objects (and without any LFS metadata) will not be
// affected by this command.
//
// Please read https://gitlab.com/gitlab-org/gitlab-runner/issues/3366 and
// https://github.com/git-lfs/git-lfs/issues/3524 for context.
if !build.IsLFSSmudgeDisabled() {
w.IfCmd("git", "lfs", "version")
w.Command("git", "lfs", "pull")
w.EmptyLine()
w.EndIf()
}
} else {
w.Notice("Skipping Git checkout")
}
return nil
}
func (b *AbstractShell) handleGetSourcesStrategy(w ShellWriter, build *common.Build) error {
projectDir := build.FullProjectDir()
gitDir := path.Join(build.FullProjectDir(), ".git")
switch build.GetGitStrategy() {
case common.GitFetch:
b.writeRefspecFetchCmd(w, build, projectDir, gitDir)
case common.GitClone:
w.RmDir(projectDir)
b.writeRefspecFetchCmd(w, build, projectDir, gitDir)
case common.GitNone:
w.Notice("Skipping Git repository setup")
w.MkDir(projectDir)
default:
return errors.New("unknown GIT_STRATEGY")
}
return nil
}
func (b *AbstractShell) writeRefspecFetchCmd(w ShellWriter, build *common.Build, projectDir string, gitDir string) {
depth := build.GitInfo.Depth
if depth > 0 {
w.Notice("Fetching changes with git depth set to %d...", depth)
} else {
w.Notice("Fetching changes...")
}
// initialize a git template directory used to seed the new repository's config
templateDir := w.MkTmpDir("git-template")
templateFile := path.Join(templateDir, "config")
w.Command("git", "config", "-f", templateFile, "fetch.recurseSubmodules", "false")
if build.IsSharedEnv() {
b.writeGitSSLConfig(w, build, []string{"-f", templateFile})
}
w.Command("git", "init", projectDir, "--template", templateDir)
w.Cd(projectDir)
b.writeGitCleanup(w, build)
// Add the `origin` git remote, or update it if it already exists
w.IfCmd("git", "remote", "add", "origin", build.GetRemoteURL())
w.Notice("Created fresh repository.")
w.Else()
w.Command("git", "remote", "set-url", "origin", build.GetRemoteURL())
w.EndIf()
fetchArgs := []string{"fetch", "origin", "--prune"}
fetchArgs = append(fetchArgs, build.GitInfo.Refspecs...)
if depth > 0 {
fetchArgs = append(fetchArgs, "--depth", strconv.Itoa(depth))
}
w.Command("git", fetchArgs...)
}
func (b *AbstractShell) writeGitCleanup(w ShellWriter, build *common.Build) {
// Remove .git/{index,shallow,HEAD}.lock files from .git, which can make the fetch command fail.
// These files can be left behind if a previous build was terminated during a git operation
w.RmFile(".git/index.lock")
w.RmFile(".git/shallow.lock")
w.RmFile(".git/HEAD.lock")
w.RmFile(".git/hooks/post-checkout")
}
func (b *AbstractShell) writeCheckoutCmd(w ShellWriter, build *common.Build) {
w.Notice("Checking out %s as %s...", build.GitInfo.Sha[0:8], build.GitInfo.Ref)
w.Command("git", "checkout", "-f", "-q", build.GitInfo.Sha)
cleanFlags := build.GetGitCleanFlags()
if len(cleanFlags) > 0 {
cleanArgs := append([]string{"clean"}, cleanFlags...)
w.Command("git", cleanArgs...)
}
}
func (b *AbstractShell) writeSubmoduleUpdateCmds(w ShellWriter, info common.ShellScriptInfo) (err error) {
build := info.Build
switch build.GetSubmoduleStrategy() {
case common.SubmoduleNormal:
b.writeSubmoduleUpdateCmd(w, build, false)
case common.SubmoduleRecursive:
b.writeSubmoduleUpdateCmd(w, build, true)
case common.SubmoduleNone:
w.Notice("Skipping Git submodules setup")
default:
return errors.New("unknown GIT_SUBMODULE_STRATEGY")
}
return nil
}
func (b *AbstractShell) writeSubmoduleUpdateCmd(w ShellWriter, build *common.Build, recursive bool) {
if recursive {
w.Notice("Updating/initializing submodules recursively...")
} else {
w.Notice("Updating/initializing submodules...")
}
// Sync .git/config to .gitmodules in case URL changes (e.g. new build token)
args := []string{"submodule", "sync"}
if recursive {
args = append(args, "--recursive")
}
w.Command("git", args...)
// Update / initialize submodules
updateArgs := []string{"submodule", "update", "--init"}
foreachArgs := []string{"submodule", "foreach"}
if recursive {
updateArgs = append(updateArgs, "--recursive")
foreachArgs = append(foreachArgs, "--recursive")
}
// Clean changed files in submodules
// "git submodule update --force" option not supported in Git 1.7.1 (shipped with CentOS 6)
w.Command("git", append(foreachArgs, "git clean -ffxd")...)
w.Command("git", append(foreachArgs, "git reset --hard")...)
w.Command("git", updateArgs...)
if !build.IsLFSSmudgeDisabled() {
w.IfCmd("git", "lfs", "version")
w.Command("git", append(foreachArgs, "git lfs pull")...)
w.EndIf()
}
}
func (b *AbstractShell) writeRestoreCacheScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
b.writeExports(w, info)
b.writeCdBuildDir(w, info)
// Try to restore from the main cache, and if that is not found, from the cache for master
return b.cacheExtractor(w, info)
}
func (b *AbstractShell) writeDownloadArtifactsScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
b.writeExports(w, info)
b.writeCdBuildDir(w, info)
// Process all artifacts
b.downloadAllArtifacts(w, info)
return nil
}
// Write the given string of commands using the provided ShellWriter object.
func (b *AbstractShell) writeCommands(w ShellWriter, commands ...string) {
for _, command := range commands {
command = strings.TrimSpace(command)
if command != "" {
lines := strings.SplitN(command, "\n", 2)
if len(lines) > 1 {
// TODO: this should be collapsible once we introduce that in GitLab
w.Notice("$ %s # collapsed multi-line command", lines[0])
} else {
w.Notice("$ %s", lines[0])
}
} else {
w.EmptyLine()
}
w.Line(command)
w.CheckForErrors()
}
}
func (b *AbstractShell) writeUserScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
var scriptStep *common.Step
for _, step := range info.Build.Steps {
if step.Name == common.StepNameScript {
step := step // copy so we don't retain a pointer to the loop variable
scriptStep = &step
break
}
}
if scriptStep == nil {
return nil
}
b.writeExports(w, info)
b.writeCdBuildDir(w, info)
if info.PreBuildScript != "" {
b.writeCommands(w, info.PreBuildScript)
}
b.writeCommands(w, scriptStep.Script...)
if info.PostBuildScript != "" {
b.writeCommands(w, info.PostBuildScript)
}
return nil
}
func (b *AbstractShell) cacheArchiver(w ShellWriter, info common.ShellScriptInfo) error {
for _, cacheOptions := range info.Build.Cache {
// Skip archiving if the deduced cache key is empty
cacheKey, cacheFile := b.cacheFile(info.Build, cacheOptions.Key)
if cacheKey == "" {
w.Notice("Skipping cache archiving due to empty cache key")
continue
}
if ok, err := cacheOptions.CheckPolicy(common.CachePolicyPush); err != nil {
return fmt.Errorf("%w for %s", err, cacheKey)
} else if !ok {
w.Notice("Not uploading cache %s due to policy", cacheKey)
continue
}
args := []string{
"cache-archiver",
"--file", cacheFile,
"--timeout", strconv.Itoa(info.Build.GetCacheRequestTimeout()),
}
// Create list of files to archive
archiverArgs := []string{}
for _, path := range cacheOptions.Paths {
archiverArgs = append(archiverArgs, "--path", path)
}
if cacheOptions.Untracked {
archiverArgs = append(archiverArgs, "--untracked")
}
if len(archiverArgs) < 1 {
// Skip creating archive
continue
}
args = append(args, archiverArgs...)
// Generate cache upload address
if url := cache.GetCacheUploadURL(info.Build, cacheKey); url != nil {
args = append(args, "--url", url.String())
}
// Execute cache-archiver command. Failure is not fatal.
b.guardRunnerCommand(w, info.RunnerCommand, "Creating cache", func() {
w.Notice("Creating cache %s...", cacheKey)
w.IfCmdWithOutput(info.RunnerCommand, args...)
w.Notice("Created cache")
w.Else()
w.Warning("Failed to create cache")
w.EndIf()
})
}
return nil
}
func (b *AbstractShell) writeUploadArtifact(w ShellWriter, info common.ShellScriptInfo, artifact common.Artifact) {
args := []string{
"artifacts-uploader",
"--url",
info.Build.Runner.URL,
"--token",
info.Build.Token,
"--id",
strconv.Itoa(info.Build.ID),
}
// Create list of files to archive
archiverArgs := []string{}
for _, path := range artifact.Paths {
archiverArgs = append(archiverArgs, "--path", path)
}
if artifact.Untracked {
archiverArgs = append(archiverArgs, "--untracked")
}
if len(archiverArgs) < 1 {
// Skip creating archive
return
}
args = append(args, archiverArgs...)
if artifact.Name != "" {
args = append(args, "--name", artifact.Name)
}
if artifact.ExpireIn != "" {
args = append(args, "--expire-in", artifact.ExpireIn)
}
if artifact.Format != "" {
args = append(args, "--artifact-format", string(artifact.Format))
}
if artifact.Type != "" {
args = append(args, "--artifact-type", artifact.Type)
}
b.guardRunnerCommand(w, info.RunnerCommand, "Uploading artifacts", func() {
w.Notice("Uploading artifacts...")
w.Command(info.RunnerCommand, args...)
})
}
func (b *AbstractShell) writeUploadArtifacts(w ShellWriter, info common.ShellScriptInfo, onSuccess bool) {
if info.Build.Runner.URL == "" {
return
}
b.writeExports(w, info)
b.writeCdBuildDir(w, info)
for _, artifact := range info.Build.Artifacts {
if onSuccess {
if !artifact.When.OnSuccess() {
continue
}
} else {
if !artifact.When.OnFailure() {
continue
}
}
b.writeUploadArtifact(w, info, artifact)
}
}
func (b *AbstractShell) writeAfterScript(w ShellWriter, info common.ShellScriptInfo) error {
var afterScriptStep *common.Step
for _, step := range info.Build.Steps {
if step.Name == common.StepNameAfterScript {
step := step // copy so we don't retain a pointer to the loop variable
afterScriptStep = &step
break
}
}
if afterScriptStep == nil {
return nil
}
if len(afterScriptStep.Script) == 0 {
return nil
}
b.writeExports(w, info)
b.writeCdBuildDir(w, info)
w.Notice("Running after script...")
b.writeCommands(w, afterScriptStep.Script...)
return nil
}
func (b *AbstractShell) writeArchiveCacheScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
b.writeExports(w, info)
b.writeCdBuildDir(w, info)
// Find cached files and archive them
return b.cacheArchiver(w, info)
}
func (b *AbstractShell) writeUploadArtifactsOnSuccessScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
b.writeUploadArtifacts(w, info, true)
return
}
func (b *AbstractShell) writeUploadArtifactsOnFailureScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
b.writeUploadArtifacts(w, info, false)
return
}
func (b *AbstractShell) writeScript(w ShellWriter, buildStage common.BuildStage, info common.ShellScriptInfo) error {
methods := map[common.BuildStage]func(ShellWriter, common.ShellScriptInfo) error{
common.BuildStagePrepare: b.writePrepareScript,
common.BuildStageGetSources: b.writeGetSourcesScript,
common.BuildStageRestoreCache: b.writeRestoreCacheScript,
common.BuildStageDownloadArtifacts: b.writeDownloadArtifactsScript,
common.BuildStageUserScript: b.writeUserScript,
common.BuildStageAfterScript: b.writeAfterScript,
common.BuildStageArchiveCache: b.writeArchiveCacheScript,
common.BuildStageUploadOnSuccessArtifacts: b.writeUploadArtifactsOnSuccessScript,
common.BuildStageUploadOnFailureArtifacts: b.writeUploadArtifactsOnFailureScript,
}
fn := methods[buildStage]
if fn == nil {
return errors.New("unsupported script type: " + string(buildStage))
}
return fn(w, info)
}
package shells
import (
"bufio"
"bytes"
"fmt"
"io"
"path"
"runtime"
"strconv"
"strings"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
)
// bashDetectShell probes well-known locations for bash, falling back to sh;
// the $@ placeholder is replaced by GetConfiguration with the shell
// arguments (e.g. --login).
const bashDetectShell = `if [ -x /usr/local/bin/bash ]; then
exec /usr/local/bin/bash $@
elif [ -x /usr/bin/bash ]; then
exec /usr/bin/bash $@
elif [ -x /bin/bash ]; then
exec /bin/bash $@
elif [ -x /usr/local/bin/sh ]; then
exec /usr/local/bin/sh $@
elif [ -x /usr/bin/sh ]; then
exec /usr/bin/sh $@
elif [ -x /bin/sh ]; then
exec /bin/sh $@
elif [ -x /busybox/sh ]; then
exec /busybox/sh $@
else
echo shell not found
exit 1
fi
`
type BashShell struct {
AbstractShell
Shell string
}
type BashWriter struct {
bytes.Buffer
TemporaryPath string
Shell string
indent int
}
func (b *BashWriter) GetTemporaryPath() string {
return b.TemporaryPath
}
func (b *BashWriter) Line(text string) {
b.WriteString(strings.Repeat(" ", b.indent) + text + "\n")
}
func (b *BashWriter) CheckForErrors() {
// No-op: the generated script runs under `set -eo pipefail` (see Finish),
// so failing commands abort execution without explicit per-command checks.
}
func (b *BashWriter) Indent() {
b.indent++
}
func (b *BashWriter) Unindent() {
b.indent--
}
func (b *BashWriter) Command(command string, arguments ...string) {
b.Line(b.buildCommand(command, arguments...))
}
func (b *BashWriter) buildCommand(command string, arguments ...string) string {
list := []string{
helpers.ShellEscape(command),
}
for _, argument := range arguments {
list = append(list, strconv.Quote(argument))
}
return strings.Join(list, " ")
}
func (b *BashWriter) TmpFile(name string) string {
return b.Absolute(path.Join(b.TemporaryPath, name))
}
func (b *BashWriter) EnvVariableKey(name string) string {
return fmt.Sprintf("$%s", name)
}
func (b *BashWriter) Variable(variable common.JobVariable) {
if variable.File {
variableFile := b.TmpFile(variable.Key)
b.Line(fmt.Sprintf("mkdir -p %q", helpers.ToSlash(b.TemporaryPath)))
b.Line(fmt.Sprintf("echo -n %s > %q", helpers.ShellEscape(variable.Value), variableFile))
b.Line(fmt.Sprintf("export %s=%q", helpers.ShellEscape(variable.Key), variableFile))
} else {
b.Line(fmt.Sprintf("export %s=%s", helpers.ShellEscape(variable.Key), helpers.ShellEscape(variable.Value)))
}
}
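// For illustration: a plain variable {Key: "CI_DEBUG", Value: "false"} emits
// a single export (modulo the exact quoting helpers.ShellEscape picks):
//
//   export CI_DEBUG=false
//
// whereas a file variable materializes the value on disk and exports the
// path instead (temporary path invented for the example):
//
//   mkdir -p "/builds/project.tmp"
//   echo -n false > "/builds/project.tmp/CI_DEBUG"
//   export CI_DEBUG="/builds/project.tmp/CI_DEBUG"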
func (b *BashWriter) IfDirectory(path string) {
b.Line(fmt.Sprintf("if [[ -d %q ]]; then", path))
b.Indent()
}
func (b *BashWriter) IfFile(path string) {
b.Line(fmt.Sprintf("if [[ -e %q ]]; then", path))
b.Indent()
}
func (b *BashWriter) IfCmd(cmd string, arguments ...string) {
cmdline := b.buildCommand(cmd, arguments...)
b.Line(fmt.Sprintf("if %s >/dev/null 2>/dev/null; then", cmdline))
b.Indent()
}
func (b *BashWriter) IfCmdWithOutput(cmd string, arguments ...string) {
cmdline := b.buildCommand(cmd, arguments...)
b.Line(fmt.Sprintf("if %s; then", cmdline))
b.Indent()
}
func (b *BashWriter) Else() {
b.Unindent()
b.Line("else")
b.Indent()
}
func (b *BashWriter) EndIf() {
b.Unindent()
b.Line("fi")
}
func (b *BashWriter) Cd(path string) {
b.Command("cd", path)
}
func (b *BashWriter) MkDir(path string) {
b.Command("mkdir", "-p", path)
}
func (b *BashWriter) MkTmpDir(name string) string {
path := path.Join(b.TemporaryPath, name)
b.MkDir(path)
return path
}
func (b *BashWriter) RmDir(path string) {
b.Command("rm", "-r", "-f", path)
}
func (b *BashWriter) RmFile(path string) {
b.Command("rm", "-f", path)
}
func (b *BashWriter) Absolute(dir string) string {
if path.IsAbs(dir) {
return dir
}
return path.Join("$PWD", dir)
}
func (b *BashWriter) Print(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_RESET + fmt.Sprintf(format, arguments...)
b.Line("echo " + helpers.ShellEscape(coloredText))
}
func (b *BashWriter) Notice(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_BOLD_GREEN + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET
b.Line("echo " + helpers.ShellEscape(coloredText))
}
func (b *BashWriter) Warning(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_YELLOW + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET
b.Line("echo " + helpers.ShellEscape(coloredText))
}
func (b *BashWriter) Error(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_BOLD_RED + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET
b.Line("echo " + helpers.ShellEscape(coloredText))
}
func (b *BashWriter) EmptyLine() {
b.Line("echo")
}
func (b *BashWriter) Finish(trace bool) string {
var buffer bytes.Buffer
w := bufio.NewWriter(&buffer)
if b.Shell != "" {
io.WriteString(w, "#!/usr/bin/env "+b.Shell+"\n\n")
}
if trace {
io.WriteString(w, "set -o xtrace\n")
}
io.WriteString(w, "set -eo pipefail\n")
io.WriteString(w, "set +o noclobber\n")
io.WriteString(w, ": | eval "+helpers.ShellEscape(b.String())+"\n")
io.WriteString(w, "exit 0\n")
w.Flush()
return buffer.String()
}
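// For illustration, with Shell == "bash" and tracing disabled Finish wraps
// the accumulated body into a standalone script of the form
//
//   #!/usr/bin/env bash
//
//   set -eo pipefail
//   set +o noclobber
//   : | eval '<escaped script body>'
//   exit 0
//
// (the exact quoting of the body depends on helpers.ShellEscape).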
func (b *BashShell) GetName() string {
return b.Shell
}
func (b *BashShell) GetConfiguration(info common.ShellScriptInfo) (script *common.ShellConfiguration, err error) {
var detectScript string
var shellCommand string
if info.Type == common.LoginShell {
detectScript = strings.Replace(bashDetectShell, "$@", "--login", -1)
shellCommand = b.Shell + " --login"
} else {
detectScript = strings.Replace(bashDetectShell, "$@", "", -1)
shellCommand = b.Shell
}
script = &common.ShellConfiguration{}
script.DockerCommand = []string{"sh", "-c", detectScript}
// Run the shell through su when a specific user is configured
if info.User != "" {
script.Command = "su"
if runtime.GOOS == "linux" {
script.Arguments = append(script.Arguments, "-s", "/bin/"+b.Shell)
}
script.Arguments = append(script.Arguments, info.User)
script.Arguments = append(script.Arguments, "-c", shellCommand)
} else {
script.Command = b.Shell
if info.Type == common.LoginShell {
script.Arguments = append(script.Arguments, "--login")
}
}
return
}
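// For illustration, on Linux with info.User == "ci" and a login shell the
// configuration above amounts to executing
//
//   su -s /bin/bash ci -c "bash --login"
//
// while without a user it runs `bash --login` directly.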
func (b *BashShell) GenerateScript(buildStage common.BuildStage, info common.ShellScriptInfo) (script string, err error) {
w := &BashWriter{
TemporaryPath: info.Build.TmpProjectDir(),
Shell: b.Shell,
}
if buildStage == common.BuildStagePrepare {
if len(info.Build.Hostname) != 0 {
w.Line("echo " + strconv.Quote("Running on $(hostname) via "+info.Build.Hostname+"..."))
} else {
w.Line("echo " + strconv.Quote("Running on $(hostname)..."))
}
}
err = b.writeScript(w, buildStage, info)
script = w.Finish(info.Build.IsDebugTraceEnabled())
return
}
func (b *BashShell) IsDefault() bool {
return runtime.GOOS != "windows" && b.Shell == "bash"
}
func init() {
common.RegisterShell(&BashShell{Shell: "sh"})
common.RegisterShell(&BashShell{Shell: "bash"})
}
package shells
import (
"bufio"
"bytes"
"fmt"
"io"
"path"
"path/filepath"
"runtime"
"strings"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags"
)
type CmdShell struct {
AbstractShell
}
type CmdWriter struct {
bytes.Buffer
TemporaryPath string
indent int
disableDelayedErrorLevelExpansion bool
}
func batchQuote(text string) string {
return "\"" + batchEscapeInsideQuotedString(text) + "\""
}
func batchEscapeInsideQuotedString(text string) string {
// taken from: http://www.robvanderwoude.com/escapechars.php
text = strings.Replace(text, "^", "^^", -1)
text = strings.Replace(text, "!", "^^!", -1)
text = strings.Replace(text, "&", "^&", -1)
text = strings.Replace(text, "<", "^<", -1)
text = strings.Replace(text, ">", "^>", -1)
text = strings.Replace(text, "|", "^|", -1)
text = strings.Replace(text, "\r", "", -1)
text = strings.Replace(text, "\n", "!nl!", -1)
return text
}
func batchEscapeVariable(text string) string {
text = strings.Replace(text, "%", "%%", -1)
text = batchEscape(text)
return text
}
// batchEscape escapes text used outside a double-quoted string (e.g. echoed
// text), where parentheses are special as well
func batchEscape(text string) string {
text = batchEscapeInsideQuotedString(text)
text = strings.Replace(text, "(", "^(", -1)
text = strings.Replace(text, ")", "^)", -1)
return text
}
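// For illustration, batchEscapeVariable("100% done (really!)") returns
//
//   100%% done ^(really^^!^)
//
// percent signs are doubled first, then the quoted-string escapes and the
// parenthesis escapes are layered on top.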
func (b *CmdShell) GetName() string {
return "cmd"
}
func (b *CmdWriter) GetTemporaryPath() string {
return b.TemporaryPath
}
func (b *CmdWriter) Line(text string) {
b.WriteString(strings.Repeat(" ", b.indent) + text + "\r\n")
}
func (b *CmdWriter) CheckForErrors() {
b.checkErrorLevel()
}
func (b *CmdWriter) Indent() {
b.indent++
}
func (b *CmdWriter) Unindent() {
b.indent--
}
func (b *CmdWriter) checkErrorLevel() {
errCheck := "IF !errorlevel! NEQ 0 exit /b !errorlevel!"
b.Line(b.updateErrLevelCheck(errCheck))
b.Line("")
}
func (b *CmdWriter) updateErrLevelCheck(errCheck string) string {
if b.disableDelayedErrorLevelExpansion {
return strings.Replace(errCheck, "!", "%", -1)
}
return errCheck
}
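// For illustration, checkErrorLevel normally emits
//
//   IF !errorlevel! NEQ 0 exit /b !errorlevel!
//
// and falls back to the %-based form
//
//   IF %errorlevel% NEQ 0 exit /b %errorlevel%
//
// when delayed error level expansion is disabled via the feature flag.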
func (b *CmdWriter) Command(command string, arguments ...string) {
b.Line(b.buildCommand(command, arguments...))
b.checkErrorLevel()
}
func (b *CmdWriter) buildCommand(command string, arguments ...string) string {
list := []string{
batchQuote(command),
}
for _, argument := range arguments {
list = append(list, batchQuote(argument))
}
return strings.Join(list, " ")
}
func (b *CmdWriter) TmpFile(name string) string {
filePath := b.Absolute(path.Join(b.TemporaryPath, name))
return helpers.ToBackslash(filePath)
}
func (b *CmdWriter) EnvVariableKey(name string) string {
return fmt.Sprintf("%%%s%%", name)
}
func (b *CmdWriter) Variable(variable common.JobVariable) {
if variable.File {
variableFile := b.TmpFile(variable.Key)
b.Line(fmt.Sprintf("md %q 2>NUL 1>NUL", batchEscape(helpers.ToBackslash(b.TemporaryPath))))
b.Line(fmt.Sprintf("echo %s > %s", batchEscapeVariable(variable.Value), batchEscape(variableFile)))
b.Line("SET " + batchEscapeVariable(variable.Key) + "=" + batchEscape(variableFile))
} else {
b.Line("SET " + batchEscapeVariable(variable.Key) + "=" + batchEscapeVariable(variable.Value))
}
}
func (b *CmdWriter) IfDirectory(path string) {
b.Line("IF EXIST " + batchQuote(helpers.ToBackslash(path)) + " (")
b.Indent()
}
func (b *CmdWriter) IfFile(path string) {
b.Line("IF EXIST " + batchQuote(helpers.ToBackslash(path)) + " (")
b.Indent()
}
func (b *CmdWriter) IfCmd(cmd string, arguments ...string) {
cmdline := b.buildCommand(cmd, arguments...)
b.Line(fmt.Sprintf("%s 2>NUL 1>NUL", cmdline))
errCheck := "IF !errorlevel! EQU 0 ("
b.Line(b.updateErrLevelCheck(errCheck))
b.Indent()
}
func (b *CmdWriter) IfCmdWithOutput(cmd string, arguments ...string) {
cmdline := b.buildCommand(cmd, arguments...)
b.Line(cmdline)
errCheck := "IF !errorlevel! EQU 0 ("
b.Line(b.updateErrLevelCheck(errCheck))
b.Indent()
}
func (b *CmdWriter) Else() {
b.Unindent()
b.Line(") ELSE (")
b.Indent()
}
func (b *CmdWriter) EndIf() {
b.Unindent()
b.Line(")")
}
func (b *CmdWriter) Cd(path string) {
b.Line("cd /D " + batchQuote(helpers.ToBackslash(path)))
b.checkErrorLevel()
}
func (b *CmdWriter) MkDir(path string) {
args := batchQuote(helpers.ToBackslash(path)) + " 2>NUL 1>NUL"
b.Line("dir " + args + " || md " + args)
}
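// For illustration, MkDir("C:\\builds\\tmp") emits
//
//   dir "C:\builds\tmp" 2>NUL 1>NUL || md "C:\builds\tmp" 2>NUL 1>NUL
//
// probing with `dir` first so `md` only runs when the directory is missing.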
func (b *CmdWriter) MkTmpDir(name string) string {
path := helpers.ToBackslash(path.Join(b.TemporaryPath, name))
b.MkDir(path)
return path
}
func (b *CmdWriter) RmDir(path string) {
b.Line("rd /s /q " + batchQuote(helpers.ToBackslash(path)) + " 2>NUL 1>NUL")
}
func (b *CmdWriter) RmFile(path string) {
b.Line("del /f /q " + batchQuote(helpers.ToBackslash(path)) + " 2>NUL 1>NUL")
}
func (b *CmdWriter) Print(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_RESET + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET
b.Line("echo " + batchEscapeVariable(coloredText))
}
func (b *CmdWriter) Notice(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_BOLD_GREEN + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET
b.Line("echo " + batchEscapeVariable(coloredText))
}
func (b *CmdWriter) Warning(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_YELLOW + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET
b.Line("echo " + batchEscapeVariable(coloredText))
}
func (b *CmdWriter) Error(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_BOLD_RED + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET
b.Line("echo " + batchEscapeVariable(coloredText))
}
func (b *CmdWriter) EmptyLine() {
b.Line("echo.")
}
func (b *CmdWriter) Absolute(dir string) string {
if filepath.IsAbs(dir) {
return dir
}
return filepath.Join("%CD%", dir)
}
func (b *CmdWriter) Finish(trace bool) string {
var buffer bytes.Buffer
w := bufio.NewWriter(&buffer)
if trace {
io.WriteString(w, "@echo on\r\n")
} else {
io.WriteString(w, "@echo off\r\n")
}
io.WriteString(w, "setlocal enableextensions\r\n")
io.WriteString(w, "setlocal enableDelayedExpansion\r\n")
io.WriteString(w, "set nl=^\r\n\r\n\r\n")
io.WriteString(w, b.String())
w.Flush()
return buffer.String()
}
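// For illustration, with tracing disabled the generated prologue is
//
//   @echo off
//   setlocal enableextensions
//   setlocal enableDelayedExpansion
//   set nl=^
//
//
// followed by the accumulated body; the trailing `set nl=^` plus the blank
// lines defines %nl% as a literal newline, which is what
// batchEscapeInsideQuotedString substitutes back via the !nl! placeholder.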
func (b *CmdShell) GetConfiguration(info common.ShellScriptInfo) (script *common.ShellConfiguration, err error) {
script = &common.ShellConfiguration{
Command: "cmd",
Arguments: []string{"/C"},
PassFile: true,
Extension: "cmd",
}
return
}
func (b *CmdShell) GenerateScript(buildStage common.BuildStage, info common.ShellScriptInfo) (script string, err error) {
w := &CmdWriter{
TemporaryPath: info.Build.TmpProjectDir(),
disableDelayedErrorLevelExpansion: info.Build.IsFeatureFlagOn(featureflags.CmdDisableDelayedErrorLevelExpansion),
}
if buildStage == common.BuildStagePrepare {
if len(info.Build.Hostname) != 0 {
w.Line("echo Running on %COMPUTERNAME% via " + batchEscape(info.Build.Hostname) + "...")
} else {
w.Line("echo Running on %COMPUTERNAME%...")
}
w.Warning("DEPRECATION: CMD shell is deprecated and will be removed in 13.0: https://gitlab.com/gitlab-org/gitlab-runner/issues/4163")
}
err = b.writeScript(w, buildStage, info)
script = w.Finish(info.Build.IsDebugTraceEnabled())
return
}
func (b *CmdShell) IsDefault() bool {
// TODO: Remove in 13.0 - Make PowerShell default shell for Windows.
return runtime.GOOS == "windows"
}
func init() {
common.RegisterShell(&CmdShell{})
}
package shells
import (
"bufio"
"bytes"
"fmt"
"io"
"path"
"path/filepath"
"strings"
"gitlab.com/gitlab-org/gitlab-runner/common"
"gitlab.com/gitlab-org/gitlab-runner/helpers"
)
const dockerWindowsExecutor = "docker-windows"
type PowerShell struct {
AbstractShell
}
type PsWriter struct {
bytes.Buffer
TemporaryPath string
indent int
}
func psQuote(text string) string {
// taken from: http://www.robvanderwoude.com/escapechars.php
text = strings.Replace(text, "`", "``", -1)
// text = strings.Replace(text, "\0", "`0", -1)
text = strings.Replace(text, "\a", "`a", -1)
text = strings.Replace(text, "\b", "`b", -1)
text = strings.Replace(text, "\f", "^f", -1)
text = strings.Replace(text, "\r", "`r", -1)
text = strings.Replace(text, "\n", "`n", -1)
text = strings.Replace(text, "\t", "^t", -1)
text = strings.Replace(text, "\v", "^v", -1)
text = strings.Replace(text, "#", "`#", -1)
text = strings.Replace(text, "'", "`'", -1)
text = strings.Replace(text, "\"", "`\"", -1)
return "\"" + text + "\""
}
func psQuoteVariable(text string) string {
text = psQuote(text)
text = strings.Replace(text, "$", "`$", -1)
return text
}
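// For illustration, psQuoteVariable(`say "hi" $HOME`) returns
//
//   "say `"hi`" `$HOME"
//
// so the value survives PowerShell's double-quoted parsing with variable
// expansion suppressed.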
func (b *PsWriter) GetTemporaryPath() string {
return b.TemporaryPath
}
func (b *PsWriter) Line(text string) {
b.WriteString(strings.Repeat(" ", b.indent) + text + "\r\n")
}
func (b *PsWriter) CheckForErrors() {
b.checkErrorLevel()
}
func (b *PsWriter) Indent() {
b.indent++
}
func (b *PsWriter) Unindent() {
b.indent--
}
func (b *PsWriter) checkErrorLevel() {
// Exit with the native exit code when the previous command failed; fall
// back to 1 for failures that leave $LASTEXITCODE unset (e.g. cmdlets).
b.Line("if(!$?) { Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }")
b.Line("")
}
func (b *PsWriter) Command(command string, arguments ...string) {
b.Line(b.buildCommand(command, arguments...))
b.checkErrorLevel()
}
func (b *PsWriter) buildCommand(command string, arguments ...string) string {
list := []string{
psQuote(command),
}
for _, argument := range arguments {
list = append(list, psQuote(argument))
}
return "& " + strings.Join(list, " ")
}
func (b *PsWriter) TmpFile(name string) string {
filePath := b.Absolute(path.Join(b.TemporaryPath, name))
return helpers.ToBackslash(filePath)
}
func (b *PsWriter) EnvVariableKey(name string) string {
return fmt.Sprintf("$%s", name)
}
func (b *PsWriter) Variable(variable common.JobVariable) {
if variable.File {
variableFile := b.TmpFile(variable.Key)
b.Line(fmt.Sprintf("New-Item -ItemType directory -Force -Path %s | out-null", psQuote(helpers.ToBackslash(b.TemporaryPath))))
b.Line(fmt.Sprintf("Set-Content %s -Value %s -Encoding UTF8 -Force", psQuote(variableFile), psQuoteVariable(variable.Value)))
b.Line("$" + variable.Key + "=" + psQuote(variableFile))
} else {
b.Line("$" + variable.Key + "=" + psQuoteVariable(variable.Value))
}
b.Line("$env:" + variable.Key + "=$" + variable.Key)
}
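// For illustration, a plain variable {Key: "GOPATH", Value: `C:\go`} emits
//
//   $GOPATH="C:\go"
//   $env:GOPATH=$GOPATH
//
// assigning a regular PowerShell variable first and mirroring it into the
// process environment so child processes see it as well.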
func (b *PsWriter) IfDirectory(path string) {
b.Line("if(Test-Path " + psQuote(helpers.ToBackslash(path)) + " -PathType Container) {")
b.Indent()
}
func (b *PsWriter) IfFile(path string) {
b.Line("if(Test-Path " + psQuote(helpers.ToBackslash(path)) + " -PathType Leaf) {")
b.Indent()
}
func (b *PsWriter) IfCmd(cmd string, arguments ...string) {
b.ifInTryCatch(b.buildCommand(cmd, arguments...) + " 2>$null")
}
func (b *PsWriter) IfCmdWithOutput(cmd string, arguments ...string) {
b.ifInTryCatch(b.buildCommand(cmd, arguments...))
}
func (b *PsWriter) ifInTryCatch(cmd string) {
b.Line("Set-Variable -Name cmdErr -Value $false")
b.Line("Try {")
b.Indent()
b.Line(cmd)
b.Line("if(!$?) { throw &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }")
b.Unindent()
b.Line("} Catch {")
b.Indent()
b.Line("Set-Variable -Name cmdErr -Value $true")
b.Unindent()
b.Line("}")
b.Line("if(!$cmdErr) {")
b.Indent()
}
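// For illustration, IfCmd("docker", "info") expands to roughly
//
//   Set-Variable -Name cmdErr -Value $false
//   Try {
//     & "docker" "info" 2>$null
//     if(!$?) { throw &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }
//   } Catch {
//     Set-Variable -Name cmdErr -Value $true
//   }
//   if(!$cmdErr) {
//
// leaving the block open for the Else/EndIf calls that follow.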
func (b *PsWriter) Else() {
b.Unindent()
b.Line("} else {")
b.Indent()
}
func (b *PsWriter) EndIf() {
b.Unindent()
b.Line("}")
}
func (b *PsWriter) Cd(path string) {
b.Line("cd " + psQuote(helpers.ToBackslash(path)))
b.checkErrorLevel()
}
func (b *PsWriter) MkDir(path string) {
b.Line(fmt.Sprintf("New-Item -ItemType directory -Force -Path %s | out-null", psQuote(helpers.ToBackslash(path))))
}
func (b *PsWriter) MkTmpDir(name string) string {
path := helpers.ToBackslash(path.Join(b.TemporaryPath, name))
b.MkDir(path)
return path
}
func (b *PsWriter) RmDir(path string) {
path = psQuote(helpers.ToBackslash(path))
b.Line("if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path " + path + " -PathType Container) ) {")
b.Indent()
b.Line("Remove-Item2 -Force -Recurse " + path)
b.Unindent()
b.Line("} elseif(Test-Path " + path + ") {")
b.Indent()
b.Line("Remove-Item -Force -Recurse " + path)
b.Unindent()
b.Line("}")
b.Line("")
}
func (b *PsWriter) RmFile(path string) {
path = psQuote(helpers.ToBackslash(path))
b.Line("if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path " + path + " -PathType Leaf) ) {")
b.Indent()
b.Line("Remove-Item2 -Force " + path)
b.Unindent()
b.Line("} elseif(Test-Path " + path + ") {")
b.Indent()
b.Line("Remove-Item -Force " + path)
b.Unindent()
b.Line("}")
b.Line("")
}
func (b *PsWriter) Print(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_RESET + fmt.Sprintf(format, arguments...)
b.Line("echo " + psQuoteVariable(coloredText))
}
func (b *PsWriter) Notice(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_BOLD_GREEN + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET
b.Line("echo " + psQuoteVariable(coloredText))
}
func (b *PsWriter) Warning(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_YELLOW + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET
b.Line("echo " + psQuoteVariable(coloredText))
}
func (b *PsWriter) Error(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_BOLD_RED + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET
b.Line("echo " + psQuoteVariable(coloredText))
}
func (b *PsWriter) EmptyLine() {
b.Line("echo \"\"")
}
func (b *PsWriter) Absolute(dir string) string {
if filepath.IsAbs(dir) {
return dir
}
b.Line("$CurrentDirectory = (Resolve-Path .\\).Path")
return filepath.Join("$CurrentDirectory", dir)
}
func (b *PsWriter) Finish(trace bool) string {
var buffer bytes.Buffer
w := bufio.NewWriter(&buffer)
// Write a UTF-8 BOM so Windows PowerShell decodes the script as UTF-8.
io.WriteString(w, "\xef\xbb\xbf")
if trace {
io.WriteString(w, "Set-PSDebug -Trace 2\r\n")
}
// add an empty line to close any open code block when the script is piped to stdin
b.Line("")
io.WriteString(w, b.String())
w.Flush()
return buffer.String()
}
func (b *PowerShell) GetName() string {
return "powershell"
}
func (b *PowerShell) GetConfiguration(info common.ShellScriptInfo) (script *common.ShellConfiguration, err error) {
script = &common.ShellConfiguration{
Command: "powershell",
Arguments: []string{"-noprofile", "-noninteractive", "-executionpolicy", "Bypass", "-command"},
PassFile: info.Build.Runner.Executor != dockerWindowsExecutor,
Extension: "ps1",
DockerCommand: []string{"PowerShell", "-NoProfile", "-NoLogo", "-InputFormat", "text", "-OutputFormat", "text", "-NonInteractive", "-ExecutionPolicy", "Bypass", "-Command", "-"},
}
return
}
func (b *PowerShell) GenerateScript(buildStage common.BuildStage, info common.ShellScriptInfo) (script string, err error) {
w := &PsWriter{
TemporaryPath: info.Build.TmpProjectDir(),
}
if buildStage == common.BuildStagePrepare {
if len(info.Build.Hostname) != 0 {
w.Line("echo \"Running on $env:computername via " + psQuoteVariable(info.Build.Hostname) + "...\"")
} else {
w.Line("echo \"Running on $env:computername...\"")
}
}
err = b.writeScript(w, buildStage, info)
// Emit the BOM and trace preamble only when script content was generated.
if w.Buffer.Len() > 0 {
script = w.Finish(info.Build.IsDebugTraceEnabled())
}
return
}
func (b *PowerShell) IsDefault() bool {
return false
}
func init() {
common.RegisterShell(&PowerShell{})
}