forked from barak/tarpoon
Add glide.yaml and vendor deps
This commit is contained in:
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion
200 vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/server.go (generated, vendored, Normal file)
@@ -0,0 +1,200 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package app implements a Server object for running the scheduler.
package app

import (
    "fmt"
    "io/ioutil"
    "net"
    "net/http"
    "net/http/pprof"
    "os"
    goruntime "runtime"
    "strconv"

    "k8s.io/kubernetes/pkg/api/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
    "k8s.io/kubernetes/pkg/client/leaderelection"
    "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
    "k8s.io/kubernetes/pkg/client/record"
    "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
    "k8s.io/kubernetes/pkg/healthz"
    "k8s.io/kubernetes/pkg/runtime"
    "k8s.io/kubernetes/pkg/util/configz"
    "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options"
    "k8s.io/kubernetes/plugin/pkg/scheduler"
    _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
    schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
    latestschedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest"
    "k8s.io/kubernetes/plugin/pkg/scheduler/factory"

    "github.com/golang/glog"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
)

// NewSchedulerCommand creates a *cobra.Command object with default parameters
func NewSchedulerCommand() *cobra.Command {
    s := options.NewSchedulerServer()
    s.AddFlags(pflag.CommandLine)
    cmd := &cobra.Command{
        Use: "kube-scheduler",
        Long: `The Kubernetes scheduler is a policy-rich, topology-aware,
workload-specific function that significantly impacts availability, performance,
and capacity. The scheduler needs to take into account individual and collective
resource requirements, quality of service requirements, hardware/software/policy
constraints, affinity and anti-affinity specifications, data locality, inter-workload
interference, deadlines, and so on. Workload-specific requirements will be exposed
through the API as necessary.`,
        Run: func(cmd *cobra.Command, args []string) {
        },
    }

    return cmd
}

// Run runs the specified SchedulerServer. This should never exit.
func Run(s *options.SchedulerServer) error {
    if c, err := configz.New("componentconfig"); err == nil {
        c.Set(s.KubeSchedulerConfiguration)
    } else {
        glog.Errorf("unable to register configz: %s", err)
    }
    kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
    if err != nil {
        glog.Errorf("unable to build config from flags: %v", err)
        return err
    }

    kubeconfig.ContentType = s.ContentType
    // Override kubeconfig qps/burst settings from flags
    kubeconfig.QPS = s.KubeAPIQPS
    kubeconfig.Burst = int(s.KubeAPIBurst)
    leaderElectionClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election"))
    if err != nil {
        glog.Fatalf("Invalid API configuration: %v", err)
    }

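    // Expose healthz, optional pprof endpoints, configz, and Prometheus
    // metrics over HTTP in a background goroutine.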
    go func() {
        mux := http.NewServeMux()
        healthz.InstallHandler(mux)
        if s.EnableProfiling {
            mux.HandleFunc("/debug/pprof/", pprof.Index)
            mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
            mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
            if s.EnableContentionProfiling {
                goruntime.SetBlockProfileRate(1)
            }
        }
        configz.InstallHandler(mux)
        mux.Handle("/metrics", prometheus.Handler())

        server := &http.Server{
            Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
            Handler: mux,
        }
        glog.Fatal(server.ListenAndServe())
    }()

    configFactory := factory.NewConfigFactory(leaderElectionClient, s.SchedulerName, s.HardPodAffinitySymmetricWeight, s.FailureDomains)
    config, err := createConfig(s, configFactory)
    if err != nil {
        glog.Fatalf("Failed to create scheduler configuration: %v", err)
    }

    eventBroadcaster := record.NewBroadcaster()
    config.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: s.SchedulerName})
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: leaderElectionClient.Core().Events("")})

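    // sched.Run only kicks off the scheduling loop, so run blocks forever
    // afterwards to keep the scheduler alive.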
    sched := scheduler.New(config)

    run := func(_ <-chan struct{}) {
        sched.Run()
        select {}
    }

    if !s.LeaderElection.LeaderElect {
        run(nil)
        glog.Fatal("this statement is unreachable")
        panic("unreachable")
    }

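    // Leader election is enabled; this instance competes for the lock under
    // its hostname as identity.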
    id, err := os.Hostname()
    if err != nil {
        glog.Errorf("unable to get hostname: %v", err)
        return err
    }

    // TODO: enable other lock types
    rl := resourcelock.EndpointsLock{
        EndpointsMeta: v1.ObjectMeta{
            Namespace: "kube-system",
            Name:      "kube-scheduler",
        },
        Client: leaderElectionClient,
        LockConfig: resourcelock.ResourceLockConfig{
            Identity:      id,
            EventRecorder: config.Recorder,
        },
    }

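    // RunOrDie blocks: it campaigns for the lock, calls run once elected, and
    // exits the process via OnStoppedLeading if leadership is lost.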
    leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
        Lock:          &rl,
        LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
        RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
        RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: run,
            OnStoppedLeading: func() {
                glog.Fatalf("lost master")
            },
        },
    })

    glog.Fatal("this statement is unreachable")
    panic("unreachable")
}

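// createConfig builds the scheduler's Config from the policy file when one
// exists at s.PolicyConfigFile, and otherwise from the named algorithm provider.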
func createConfig(s *options.SchedulerServer, configFactory *factory.ConfigFactory) (*scheduler.Config, error) {
    if _, err := os.Stat(s.PolicyConfigFile); err == nil {
        var (
            policy     schedulerapi.Policy
            configData []byte
        )
        configData, err := ioutil.ReadFile(s.PolicyConfigFile)
        if err != nil {
            return nil, fmt.Errorf("unable to read policy config: %v", err)
        }
        if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
            return nil, fmt.Errorf("invalid configuration: %v", err)
        }
        return configFactory.CreateFromConfig(policy)
    }

    // if the config file isn't provided, use the specified (or default) provider
    return configFactory.CreateFromProvider(s.AlgorithmProvider)
}