forked from barak/tarpoon

Add glide.yaml and vendor deps

Dalton Hubble 2016-12-03 22:43:32 -08:00
parent db918f12ad
commit 5b3d5e81bd
18880 changed files with 5166045 additions and 1 deletion
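The glide.yaml added by this commit is not shown in this portion of the diff. As a rough illustration only, a glide manifest that vendors Kubernetes (and the transitive dependencies visible in the BUILD file below, such as glog and heketi) might look like the following; the top-level package path and the pinned versions are assumptions, not the actual file contents:

package: github.com/dghubble/tarpoon   # hypothetical import path for this fork
import:
- package: k8s.io/kubernetes           # vendored under vendor/k8s.io/kubernetes
  version: ~1.5.0                      # the release_1_5 client paths suggest a 1.5.x pin
- package: github.com/golang/glog
- package: github.com/heketi/heketi

Running glide install against such a manifest populates vendor/, which accounts for the very large number of files added in this commit.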

55
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD generated vendored Normal file

@@ -0,0 +1,55 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"glusterfs.go",
"glusterfs_util.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/errors:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/heketi/heketi/client/api/go-client",
"//vendor:github.com/heketi/heketi/pkg/glusterfs/api",
],
)
go_test(
name = "go_default_test",
srcs = ["glusterfs_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/release_1_5/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/types:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/testing:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
],
)

2
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/OWNERS generated vendored Normal file

@@ -0,0 +1,2 @@
assignees:
- rootfs

19
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/doc.go generated vendored Normal file

@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package glusterfs contains the internal representation of glusterfs
// volumes.
package glusterfs // import "k8s.io/kubernetes/pkg/volume/glusterfs"

vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go generated vendored Normal file
File diff suppressed because it is too large

373
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_test.go generated vendored Normal file

@@ -0,0 +1,373 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"fmt"
"os"
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/glusterfs" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), v1.ReadWriteOnce) || !contains(plug.GetAccessModes(), v1.ReadOnlyMany) || !contains(plug.GetAccessModes(), v1.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
}
}
func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
func doTestPlugin(t *testing.T, spec *volume.Spec) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
ep := &v1.Endpoints{ObjectMeta: v1.ObjectMeta{Name: "foo"}, Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}}}}
var fcmd exec.FakeCmd
fcmd = exec.FakeCmd{
CombinedOutputScript: []exec.FakeCombinedOutputAction{
// mount
func() ([]byte, error) {
return []byte{}, nil
},
},
}
fake := exec.FakeExec{
CommandScript: []exec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
},
}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, &mount.FakeMounter{}, &fake)
volumePath := mounter.GetPath()
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Error("Got a nil Mounter")
}
path := mounter.GetPath()
expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~glusterfs/vol1", tmpDir)
if path != expectedPath {
t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
unmounter, err := plug.(*glusterfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Error("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
}
func TestPluginVolume(t *testing.T) {
vol := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolume(t *testing.T) {
vol := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "vol1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
},
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
ep := &v1.Endpoints{
ObjectMeta: v1.ObjectMeta{
Namespace: "nsA",
Name: "ep",
},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}},
}},
}
client := fake.NewSimpleClientset(pv, claim, ep)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func TestParseClassParameters(t *testing.T) {
secret := v1.Secret{
Type: "kubernetes.io/glusterfs",
Data: map[string][]byte{
"data": []byte("mypassword"),
},
}
tests := []struct {
name string
parameters map[string]string
secret *v1.Secret
expectError bool
expectConfig *provisioningConfig
}{
{
"password",
map[string]string{
"resturl": "https://localhost:8080",
"restuser": "admin",
"restuserkey": "password",
},
nil, // secret
false, // expect error
&provisioningConfig{
url: "https://localhost:8080",
user: "admin",
userKey: "password",
secretValue: "password",
},
},
{
"secret",
map[string]string{
"resturl": "https://localhost:8080",
"restuser": "admin",
"secretname": "mysecret",
"secretnamespace": "default",
},
&secret,
false, // expect error
&provisioningConfig{
url: "https://localhost:8080",
user: "admin",
secretName: "mysecret",
secretNamespace: "default",
secretValue: "mypassword",
},
},
{
"no authentication",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
},
&secret,
false, // expect error
&provisioningConfig{
url: "https://localhost:8080",
},
},
{
"missing secret",
map[string]string{
"resturl": "https://localhost:8080",
"secretname": "mysecret",
"secretnamespace": "default",
},
nil, // secret
true, // expect error
nil,
},
{
"secret with no namespace",
map[string]string{
"resturl": "https://localhost:8080",
"secretname": "mysecret",
},
&secret,
true, // expect error
nil,
},
{
"missing url",
map[string]string{
"restuser": "admin",
"restuserkey": "password",
},
nil, // secret
true, // expect error
nil,
},
{
"unknown parameter",
map[string]string{
"unknown": "yes",
"resturl": "https://localhost:8080",
"restuser": "admin",
"restuserkey": "password",
},
nil, // secret
true, // expect error
nil,
},
}
for _, test := range tests {
client := &fake.Clientset{}
client.AddReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
if test.secret != nil {
return true, test.secret, nil
}
return true, nil, fmt.Errorf("Test %s did not set a secret", test.name)
})
cfg, err := parseClassParameters(test.parameters, client)
if err != nil && !test.expectError {
t.Errorf("Test %s got unexpected error %v", test.name, err)
}
if err == nil && test.expectError {
t.Errorf("test %s expected error and got none", test.name)
}
if test.expectConfig != nil {
if !reflect.DeepEqual(cfg, test.expectConfig) {
t.Errorf("Test %s returned unexpected data, expected: %+v, got: %+v", test.name, test.expectConfig, cfg)
}
}
}
}

71
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_util.go generated vendored Normal file

@@ -0,0 +1,71 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"bufio"
"fmt"
"os"
"github.com/golang/glog"
)
// readGlusterLog will take the last 2 lines of the log file
// on failure of gluster SetUp and return those so kubelet can
// properly expose them
// return nil on any failure
func readGlusterLog(path string, podName string) error {
var line1 string
var line2 string
linecount := 0
glog.Infof("glusterfs: failure, now attempting to read the gluster log for pod %s", podName)
// Check and make sure path exists
if len(path) == 0 {
return fmt.Errorf("glusterfs: log file does not exist for pod: %s", podName)
}
// open the log file
file, err := os.Open(path)
if err != nil {
return fmt.Errorf("glusterfs: could not open log file for pod: %s", podName)
}
defer file.Close()
// read in and scan the file using scanner
// from stdlib
fscan := bufio.NewScanner(file)
// rather than guessing on bytes or using Seek
// going to scan entire file and take the last two lines
// generally the file should be small since it is pod specific
for fscan.Scan() {
if linecount > 0 {
line1 = line2
}
line2 = "\n" + fscan.Text()
linecount++
}
if linecount > 0 {
return fmt.Errorf("%v", line1+line2+"\n")
}
return nil
}