Remove non-documentation lint

Because of the extensive nature of the changes, tested all three
non-memstore backends - passed.
kortschak 2014-08-28 11:51:39 +09:30
parent 6614466d23
commit 484bf145a8
35 changed files with 277 additions and 284 deletions
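
The hunks below are mechanical lint fixes rather than behavioural changes: error strings are lower-cased and lose trailing punctuation, snake_case locals become MixedCaps, and the Id suffix becomes the initialism ID. A minimal standalone sketch of those conventions (identifiers are illustrative, not taken from the Cayley sources):

```go
package main

import (
	"errors"
	"fmt"
)

// Error strings start lower case and carry no trailing punctuation,
// because they are usually embedded in longer messages.
var errNotExist = errors.New("quad does not exist")

// iterator shows the initialism rule: checkID, not checkId.
type iterator struct {
	checkID []byte
}

func main() {
	oldSize := 42 // MixedCaps, not snake_case such as old_size
	it := iterator{checkID: []byte("n1")}
	fmt.Println(oldSize, string(it.checkID), errNotExist)
}
```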

@@ -103,7 +103,7 @@ func (it *AllIterator) Next() bool {
} else {
k, _ := cur.Seek(last)
if !bytes.Equal(k, last) {
- return fmt.Errorf("Couldn't pick up after", k)
+ return fmt.Errorf("could not pick up after", k)
}
}
for i < bufferSize {
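
One thing this hunk leaves as-is: the rewritten line still passes k to fmt.Errorf without a matching formatting directive, which go vet's printf check reports. A hedged sketch of a vet-clean form, assuming k is the []byte key returned by cur.Seek:

```go
package main

import (
	"bytes"
	"fmt"
)

// resume is a stand-in, not Cayley's API: it gives the key an explicit %q
// directive instead of leaving it as a dangling argument to Errorf.
func resume(k, last []byte) error {
	if !bytes.Equal(k, last) {
		return fmt.Errorf("could not pick up after %q", k)
	}
	return nil
}

func main() {
	fmt.Println(resume([]byte("osp1"), []byte("osp2")))
}
```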

@@ -32,7 +32,7 @@ import (
var (
boltType graph.Type
bufferSize = 50
- errNotExist = errors.New("Quad does not exist")
+ errNotExist = errors.New("quad does not exist")
)
func init() {
@@ -43,7 +43,7 @@ type Iterator struct {
uid uint64
tags graph.Tagger
bucket []byte
- checkId []byte
+ checkID []byte
dir quad.Direction
qs *QuadStore
result *Token
@@ -56,7 +56,7 @@ type Iterator struct {
func NewIterator(bucket []byte, d quad.Direction, value graph.Value, qs *QuadStore) *Iterator {
tok := value.(*Token)
if !bytes.Equal(tok.bucket, nodeBucket) {
- glog.Error("Creating an iterator from a non-node value.")
+ glog.Error("creating an iterator from a non-node value")
return &Iterator{done: true}
}
@@ -68,8 +68,8 @@ func NewIterator(bucket []byte, d quad.Direction, value graph.Value, qs *QuadSto
size: qs.SizeOf(value),
}
- it.checkId = make([]byte, len(tok.key))
- copy(it.checkId, tok.key)
+ it.checkID = make([]byte, len(tok.key))
+ copy(it.checkID, tok.key)
return &it
}
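
The make/copy pair in this hunk is a defensive copy of the token's key. The same discipline matters anywhere Bolt is read: byte slices returned inside a transaction are only valid until it closes, so values kept afterwards must be duplicated. A small sketch of that rule (bucket and key names are made up, not Cayley's):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/boltdb/bolt"
)

// copyValue is a stand-in, not Cayley's API: anything that outlives the View
// call is copied, the same defensive copy NewIterator makes for tok.key.
func copyValue(db *bolt.DB, bucket, key []byte) ([]byte, error) {
	var out []byte
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucket)
		if b == nil {
			return nil
		}
		if v := b.Get(key); v != nil {
			out = make([]byte, len(v))
			copy(out, v)
		}
		return nil
	})
	return out, err
}

func main() {
	path := filepath.Join(os.TempDir(), "copy_example.db")
	db, err := bolt.Open(path, 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(path)
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("nodes"))
		if err != nil {
			return err
		}
		return b.Put([]byte("k"), []byte("v"))
	})
	if err != nil {
		log.Fatal(err)
	}

	v, err := copyValue(db, []byte("nodes"), []byte("k"))
	fmt.Println(string(v), err)
}
```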
@@ -101,7 +101,7 @@ func (it *Iterator) TagResults(dst map[string]graph.Value) {
}
func (it *Iterator) Clone() graph.Iterator {
- out := NewIterator(it.bucket, it.dir, &Token{nodeBucket, it.checkId}, it.qs)
+ out := NewIterator(it.bucket, it.dir, &Token{nodeBucket, it.checkID}, it.qs)
out.Tagger().CopyFrom(it)
return out
}
@@ -134,8 +134,8 @@ func (it *Iterator) Next() bool {
b := tx.Bucket(it.bucket)
cur := b.Cursor()
if last == nil {
- k, _ := cur.Seek(it.checkId)
- if bytes.HasPrefix(k, it.checkId) {
+ k, _ := cur.Seek(it.checkID)
+ if bytes.HasPrefix(k, it.checkID) {
var out []byte
out = make([]byte, len(k))
copy(out, k)
@@ -148,12 +148,12 @@ func (it *Iterator) Next() bool {
} else {
k, _ := cur.Seek(last)
if !bytes.Equal(k, last) {
- return fmt.Errorf("Couldn't pick up after", k)
+ return fmt.Errorf("could not pick up after", k)
}
}
for i < bufferSize {
k, v := cur.Next()
- if k == nil || !bytes.HasPrefix(k, it.checkId) {
+ if k == nil || !bytes.HasPrefix(k, it.checkID) {
it.buffer = append(it.buffer, nil)
break
}
@@ -170,7 +170,7 @@ func (it *Iterator) Next() bool {
})
if err != nil {
if err != errNotExist {
- glog.Error("Error nexting in database: ", err)
+ glog.Errorf("Error nexting in database: %v", err)
}
it.done = true
return false
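
This hunk swaps glog.Error for glog.Errorf with an explicit %v directive; it also shows errNotExist being used as a sentinel, so an expected miss inside the transaction is told apart from a genuine failure before anything is logged. A standalone sketch of the sentinel comparison (names are illustrative, not Cayley's):

```go
package main

import (
	"errors"
	"fmt"
)

// errNotExist marks an expected condition; callers compare against it to
// decide whether a non-nil error is worth logging.
var errNotExist = errors.New("quad does not exist")

func lookup(key string) error {
	if key == "missing" {
		return errNotExist
	}
	return nil
}

func main() {
	if err := lookup("missing"); err != nil {
		if err != errNotExist {
			fmt.Printf("Error nexting in database: %v\n", err)
		}
		fmt.Println("done:", err)
	}
}
```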
@@ -272,7 +272,7 @@ func (it *Iterator) Contains(v graph.Value) bool {
return false
}
offset := PositionOf(val, it.dir, it.qs)
- if bytes.HasPrefix(val.key[offset:], it.checkId) {
+ if bytes.HasPrefix(val.key[offset:], it.checkID) {
// You may ask, why don't we check to see if it's a valid (not deleted) quad
// again?
//
@@ -299,7 +299,7 @@ func (it *Iterator) DebugString(indent int) string {
it.tags.Tags(),
it.dir,
it.size,
- it.qs.NameOf(&Token{it.bucket, it.checkId}),
+ it.qs.NameOf(&Token{it.bucket, it.checkID}),
)
}

@@ -101,20 +101,20 @@ func (qs *QuadStore) createBuckets() error {
for _, index := range [][4]quad.Direction{spo, osp, pos, cps} {
_, err = tx.CreateBucket(bucketFor(index))
if err != nil {
- return fmt.Errorf("Couldn't create bucket: %s", err)
+ return fmt.Errorf("could not create bucket: %s", err)
}
}
_, err = tx.CreateBucket(logBucket)
if err != nil {
- return fmt.Errorf("Couldn't create bucket: %s", err)
+ return fmt.Errorf("could not create bucket: %s", err)
}
_, err = tx.CreateBucket(nodeBucket)
if err != nil {
- return fmt.Errorf("Couldn't create bucket: %s", err)
+ return fmt.Errorf("could not create bucket: %s", err)
}
_, err = tx.CreateBucket(metaBucket)
if err != nil {
- return fmt.Errorf("Couldn't create bucket: %s", err)
+ return fmt.Errorf("could not create bucket: %s", err)
}
return nil
})
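
All four branches above report the same lower-cased message. Not part of this commit, but as a possible follow-up, the repeated blocks could be driven by a single loop over the bucket names; a sketch of that shape against the Bolt API (bucket names here are placeholders):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/boltdb/bolt"
)

// createAll is a sketch that is not part of this commit: it folds repeated
// CreateBucket blocks into one loop over bucket names and returns the same
// lower-cased error message the diff above introduces.
func createAll(tx *bolt.Tx, names ...[]byte) error {
	for _, name := range names {
		if _, err := tx.CreateBucket(name); err != nil {
			return fmt.Errorf("could not create bucket: %s", err)
		}
	}
	return nil
}

func main() {
	path := filepath.Join(os.TempDir(), "buckets_example.db")
	db, err := bolt.Open(path, 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(path)
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		return createAll(tx, []byte("log"), []byte("nodes"), []byte("meta"))
	})
	fmt.Println("buckets created:", err == nil)
}
```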
@@ -183,13 +183,13 @@ var (
)
func (qs *QuadStore) ApplyDeltas(deltas []graph.Delta) error {
- old_size := qs.size
- old_horizon := qs.horizon
+ oldSize := qs.size
+ oldHorizon := qs.horizon
err := qs.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(logBucket)
b.FillPercent = localFillPercent
resizeMap := make(map[string]int64)
- size_change := int64(0)
+ sizeChange := int64(0)
for _, d := range deltas {
bytes, err := json.Marshal(d)
if err != nil {
@@ -215,7 +215,7 @@ func (qs *QuadStore) ApplyDeltas(deltas []graph.Delta) error {
if d.Quad.Label != "" {
resizeMap[d.Quad.Label] += delta
}
- size_change += delta
+ sizeChange += delta
qs.horizon = d.ID
}
for k, v := range resizeMap {
@@ -226,14 +226,14 @@ func (qs *QuadStore) ApplyDeltas(deltas []graph.Delta) error {
}
}
}
- qs.size += size_change
+ qs.size += sizeChange
return qs.WriteHorizonAndSize(tx)
})
if err != nil {
glog.Error("Couldn't write to DB for Delta set. Error: ", err)
- qs.horizon = old_horizon
- qs.size = old_size
+ qs.horizon = oldHorizon
+ qs.size = oldSize
return err
}
return nil
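
The tail of ApplyDeltas shows why oldSize and oldHorizon exist: the cached counters are snapshotted before the Bolt update and restored if it fails, so the in-memory values cannot drift from what was actually committed. A reduced sketch of that rollback idiom with stand-in types (not Cayley's):

```go
package main

import (
	"errors"
	"fmt"
)

// store keeps cached counters that mirror on-disk state.
type store struct {
	size    int64
	horizon int64
}

// apply snapshots the counters, lets update mutate them, and rolls the
// snapshot back if the update reports failure.
func (s *store) apply(update func() error) error {
	oldSize, oldHorizon := s.size, s.horizon
	if err := update(); err != nil {
		s.size, s.horizon = oldSize, oldHorizon
		return err
	}
	return nil
}

func main() {
	s := &store{size: 10, horizon: 3}
	err := s.apply(func() error {
		s.size += 5
		s.horizon++
		return errors.New("simulated transaction failure")
	})
	fmt.Println(err, s.size, s.horizon) // counters restored to 10 and 3
}
```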