Remove non-documentation lint

Because of the extensive nature of the changes, all three non-memstore
backends were tested; all passed.
kortschak 2014-08-28 11:51:39 +09:30
parent 6614466d23
commit 484bf145a8
35 changed files with 277 additions and 284 deletions


@ -67,10 +67,10 @@ var (
timeout = flag.Duration("timeout", 30*time.Second, "Elapsed time until an individual query times out.") timeout = flag.Duration("timeout", 30*time.Second, "Elapsed time until an individual query times out.")
) )
// Filled in by `go build ldflags="-X main.VERSION `ver`"`. // Filled in by `go build ldflags="-X main.Version `ver`"`.
var ( var (
BUILD_DATE string BuildDate string
VERSION string Version string
) )
func usage() { func usage() {
@ -158,8 +158,8 @@ func main() {
flag.Parse() flag.Parse()
var buildString string var buildString string
if VERSION != "" { if Version != "" {
buildString = fmt.Sprint("Cayley ", VERSION, " built ", BUILD_DATE) buildString = fmt.Sprint("Cayley ", Version, " built ", BuildDate)
glog.Infoln(buildString) glog.Infoln(buildString)
} }
@ -178,7 +178,7 @@ func main() {
) )
switch cmd { switch cmd {
case "version": case "version":
if VERSION != "" { if Version != "" {
fmt.Println(buildString) fmt.Println(buildString)
} else { } else {
fmt.Println("Cayley snapshot") fmt.Println("Cayley snapshot")


@ -489,8 +489,8 @@ func checkQueries(t *testing.T) {
timedOut bool timedOut bool
) )
for r := range c { for r := range c {
ses.BuildJson(r) ses.BuildJSON(r)
j, err := ses.GetJson() j, err := ses.GetJSON()
if j == nil && err == nil { if j == nil && err == nil {
continue continue
} }


@ -103,7 +103,7 @@ func (it *AllIterator) Next() bool {
} else { } else {
k, _ := cur.Seek(last) k, _ := cur.Seek(last)
if !bytes.Equal(k, last) { if !bytes.Equal(k, last) {
return fmt.Errorf("Couldn't pick up after", k) return fmt.Errorf("could not pick up after", k)
} }
} }
for i < bufferSize { for i < bufferSize {
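Besides the lowercased message, go vet separately flags this call because it passes `k` without a formatting directive; the diff keeps that part as-is. A hedged sketch of a fully vet-clean form, with an illustrative helper name standing in for the cursor-seek check:

```go
package main

import (
	"bytes"
	"fmt"
)

// resume is an illustrative stand-in for the cursor-seek check in the hunk.
func resume(k, last []byte) error {
	if !bytes.Equal(k, last) {
		// Lowercase, unpunctuated message with an explicit %q verb for k.
		return fmt.Errorf("could not pick up after %q", k)
	}
	return nil
}

func main() {
	fmt.Println(resume([]byte("a"), []byte("b")))
}
```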


@ -32,7 +32,7 @@ import (
var ( var (
boltType graph.Type boltType graph.Type
bufferSize = 50 bufferSize = 50
errNotExist = errors.New("Quad does not exist") errNotExist = errors.New("quad does not exist")
) )
func init() { func init() {
@ -43,7 +43,7 @@ type Iterator struct {
uid uint64 uid uint64
tags graph.Tagger tags graph.Tagger
bucket []byte bucket []byte
checkId []byte checkID []byte
dir quad.Direction dir quad.Direction
qs *QuadStore qs *QuadStore
result *Token result *Token
@ -56,7 +56,7 @@ type Iterator struct {
func NewIterator(bucket []byte, d quad.Direction, value graph.Value, qs *QuadStore) *Iterator { func NewIterator(bucket []byte, d quad.Direction, value graph.Value, qs *QuadStore) *Iterator {
tok := value.(*Token) tok := value.(*Token)
if !bytes.Equal(tok.bucket, nodeBucket) { if !bytes.Equal(tok.bucket, nodeBucket) {
glog.Error("Creating an iterator from a non-node value.") glog.Error("creating an iterator from a non-node value")
return &Iterator{done: true} return &Iterator{done: true}
} }
@ -68,8 +68,8 @@ func NewIterator(bucket []byte, d quad.Direction, value graph.Value, qs *QuadSto
size: qs.SizeOf(value), size: qs.SizeOf(value),
} }
it.checkId = make([]byte, len(tok.key)) it.checkID = make([]byte, len(tok.key))
copy(it.checkId, tok.key) copy(it.checkID, tok.key)
return &it return &it
} }
@ -101,7 +101,7 @@ func (it *Iterator) TagResults(dst map[string]graph.Value) {
} }
func (it *Iterator) Clone() graph.Iterator { func (it *Iterator) Clone() graph.Iterator {
out := NewIterator(it.bucket, it.dir, &Token{nodeBucket, it.checkId}, it.qs) out := NewIterator(it.bucket, it.dir, &Token{nodeBucket, it.checkID}, it.qs)
out.Tagger().CopyFrom(it) out.Tagger().CopyFrom(it)
return out return out
} }
@ -134,8 +134,8 @@ func (it *Iterator) Next() bool {
b := tx.Bucket(it.bucket) b := tx.Bucket(it.bucket)
cur := b.Cursor() cur := b.Cursor()
if last == nil { if last == nil {
k, _ := cur.Seek(it.checkId) k, _ := cur.Seek(it.checkID)
if bytes.HasPrefix(k, it.checkId) { if bytes.HasPrefix(k, it.checkID) {
var out []byte var out []byte
out = make([]byte, len(k)) out = make([]byte, len(k))
copy(out, k) copy(out, k)
@ -148,12 +148,12 @@ func (it *Iterator) Next() bool {
} else { } else {
k, _ := cur.Seek(last) k, _ := cur.Seek(last)
if !bytes.Equal(k, last) { if !bytes.Equal(k, last) {
return fmt.Errorf("Couldn't pick up after", k) return fmt.Errorf("could not pick up after", k)
} }
} }
for i < bufferSize { for i < bufferSize {
k, v := cur.Next() k, v := cur.Next()
if k == nil || !bytes.HasPrefix(k, it.checkId) { if k == nil || !bytes.HasPrefix(k, it.checkID) {
it.buffer = append(it.buffer, nil) it.buffer = append(it.buffer, nil)
break break
} }
@ -170,7 +170,7 @@ func (it *Iterator) Next() bool {
}) })
if err != nil { if err != nil {
if err != errNotExist { if err != errNotExist {
glog.Error("Error nexting in database: ", err) glog.Errorf("Error nexting in database: %v", err)
} }
it.done = true it.done = true
return false return false
@ -272,7 +272,7 @@ func (it *Iterator) Contains(v graph.Value) bool {
return false return false
} }
offset := PositionOf(val, it.dir, it.qs) offset := PositionOf(val, it.dir, it.qs)
if bytes.HasPrefix(val.key[offset:], it.checkId) { if bytes.HasPrefix(val.key[offset:], it.checkID) {
// You may ask, why don't we check to see if it's a valid (not deleted) quad // You may ask, why don't we check to see if it's a valid (not deleted) quad
// again? // again?
// //
@ -299,7 +299,7 @@ func (it *Iterator) DebugString(indent int) string {
it.tags.Tags(), it.tags.Tags(),
it.dir, it.dir,
it.size, it.size,
it.qs.NameOf(&Token{it.bucket, it.checkId}), it.qs.NameOf(&Token{it.bucket, it.checkID}),
) )
} }


@ -101,20 +101,20 @@ func (qs *QuadStore) createBuckets() error {
for _, index := range [][4]quad.Direction{spo, osp, pos, cps} { for _, index := range [][4]quad.Direction{spo, osp, pos, cps} {
_, err = tx.CreateBucket(bucketFor(index)) _, err = tx.CreateBucket(bucketFor(index))
if err != nil { if err != nil {
return fmt.Errorf("Couldn't create bucket: %s", err) return fmt.Errorf("could not create bucket: %s", err)
} }
} }
_, err = tx.CreateBucket(logBucket) _, err = tx.CreateBucket(logBucket)
if err != nil { if err != nil {
return fmt.Errorf("Couldn't create bucket: %s", err) return fmt.Errorf("could not create bucket: %s", err)
} }
_, err = tx.CreateBucket(nodeBucket) _, err = tx.CreateBucket(nodeBucket)
if err != nil { if err != nil {
return fmt.Errorf("Couldn't create bucket: %s", err) return fmt.Errorf("could not create bucket: %s", err)
} }
_, err = tx.CreateBucket(metaBucket) _, err = tx.CreateBucket(metaBucket)
if err != nil { if err != nil {
return fmt.Errorf("Couldn't create bucket: %s", err) return fmt.Errorf("could not create bucket: %s", err)
} }
return nil return nil
}) })
@ -183,13 +183,13 @@ var (
) )
func (qs *QuadStore) ApplyDeltas(deltas []graph.Delta) error { func (qs *QuadStore) ApplyDeltas(deltas []graph.Delta) error {
old_size := qs.size oldSize := qs.size
old_horizon := qs.horizon oldHorizon := qs.horizon
err := qs.db.Update(func(tx *bolt.Tx) error { err := qs.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(logBucket) b := tx.Bucket(logBucket)
b.FillPercent = localFillPercent b.FillPercent = localFillPercent
resizeMap := make(map[string]int64) resizeMap := make(map[string]int64)
size_change := int64(0) sizeChange := int64(0)
for _, d := range deltas { for _, d := range deltas {
bytes, err := json.Marshal(d) bytes, err := json.Marshal(d)
if err != nil { if err != nil {
@ -215,7 +215,7 @@ func (qs *QuadStore) ApplyDeltas(deltas []graph.Delta) error {
if d.Quad.Label != "" { if d.Quad.Label != "" {
resizeMap[d.Quad.Label] += delta resizeMap[d.Quad.Label] += delta
} }
size_change += delta sizeChange += delta
qs.horizon = d.ID qs.horizon = d.ID
} }
for k, v := range resizeMap { for k, v := range resizeMap {
@ -226,14 +226,14 @@ func (qs *QuadStore) ApplyDeltas(deltas []graph.Delta) error {
} }
} }
} }
qs.size += size_change qs.size += sizeChange
return qs.WriteHorizonAndSize(tx) return qs.WriteHorizonAndSize(tx)
}) })
if err != nil { if err != nil {
glog.Error("Couldn't write to DB for Delta set. Error: ", err) glog.Error("Couldn't write to DB for Delta set. Error: ", err)
qs.horizon = old_horizon qs.horizon = oldHorizon
qs.size = old_size qs.size = oldSize
return err return err
} }
return nil return nil


@ -258,7 +258,7 @@ func (t Type) String() string {
type StatsContainer struct { type StatsContainer struct {
IteratorStats IteratorStats
Kind string Kind string
Uid uint64 UID uint64
SubIts []StatsContainer SubIts []StatsContainer
} }
@ -266,7 +266,7 @@ func DumpStats(it Iterator) StatsContainer {
var out StatsContainer var out StatsContainer
out.IteratorStats = it.Stats() out.IteratorStats = it.Stats()
out.Kind = it.Type().String() out.Kind = it.Type().String()
out.Uid = it.UID() out.UID = it.UID()
for _, sub := range it.SubIterators() { for _, sub := range it.SubIterators() {
out.SubIts = append(out.SubIts, DumpStats(sub)) out.SubIts = append(out.SubIts, DumpStats(sub))
} }
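Most renames in this commit follow the Go initialism convention: acronyms keep one case throughout an identifier, so Uid becomes UID, checkId becomes checkID, and Json becomes JSON. A trimmed sketch of the struct above (the embedded IteratorStats and the SubIts field are omitted):

```go
package main

import "fmt"

// StatsContainer, trimmed to the renamed field; the real type also embeds
// IteratorStats and carries a SubIts slice.
type StatsContainer struct {
	Kind string
	UID  uint64 // golint: Uid -> UID
}

func main() {
	fmt.Printf("%+v\n", StatsContainer{Kind: "and", UID: 42})
}
```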


@ -110,7 +110,7 @@ func closeIteratorList(its []graph.Iterator, except graph.Iterator) {
// Find if there is a single subiterator which is a valid replacement for this // Find if there is a single subiterator which is a valid replacement for this
// And. // And.
func (_ *And) optimizeReplacement(its []graph.Iterator) graph.Iterator { func (*And) optimizeReplacement(its []graph.Iterator) graph.Iterator {
// If we were created with no SubIterators, we're as good as Null. // If we were created with no SubIterators, we're as good as Null.
if len(its) == 0 { if len(its) == 0 {
return &Null{} return &Null{}


@ -20,7 +20,7 @@ import (
) )
type Node struct { type Node struct {
Id int `json:"id"` ID int `json:"id"`
Tags []string `json:"tags,omitempty"` Tags []string `json:"tags,omitempty"`
Values []string `json:"values,omitempty"` Values []string `json:"values,omitempty"`
IsLinkNode bool `json:"is_link_node"` IsLinkNode bool `json:"is_link_node"`
@ -38,15 +38,15 @@ type queryShape struct {
nodes []Node nodes []Node
links []Link links []Link
qs graph.QuadStore qs graph.QuadStore
nodeId int nodeID int
hasaIds []int hasaIDs []int
hasaDirs []quad.Direction hasaDirs []quad.Direction
} }
func OutputQueryShapeForIterator(it graph.Iterator, qs graph.QuadStore, outputMap map[string]interface{}) { func OutputQueryShapeForIterator(it graph.Iterator, qs graph.QuadStore, outputMap map[string]interface{}) {
s := &queryShape{ s := &queryShape{
qs: qs, qs: qs,
nodeId: 1, nodeID: 1,
} }
node := s.MakeNode(it.Clone()) node := s.MakeNode(it.Clone())
@ -64,16 +64,16 @@ func (s *queryShape) AddLink(l *Link) {
} }
func (s *queryShape) LastHasa() (int, quad.Direction) { func (s *queryShape) LastHasa() (int, quad.Direction) {
return s.hasaIds[len(s.hasaIds)-1], s.hasaDirs[len(s.hasaDirs)-1] return s.hasaIDs[len(s.hasaIDs)-1], s.hasaDirs[len(s.hasaDirs)-1]
} }
func (s *queryShape) PushHasa(i int, d quad.Direction) { func (s *queryShape) PushHasa(i int, d quad.Direction) {
s.hasaIds = append(s.hasaIds, i) s.hasaIDs = append(s.hasaIDs, i)
s.hasaDirs = append(s.hasaDirs, d) s.hasaDirs = append(s.hasaDirs, d)
} }
func (s *queryShape) RemoveHasa() { func (s *queryShape) RemoveHasa() {
s.hasaIds = s.hasaIds[:len(s.hasaIds)-1] s.hasaIDs = s.hasaIDs[:len(s.hasaIDs)-1]
s.hasaDirs = s.hasaDirs[:len(s.hasaDirs)-1] s.hasaDirs = s.hasaDirs[:len(s.hasaDirs)-1]
} }
@ -88,16 +88,16 @@ func (s *queryShape) StealNode(left *Node, right *Node) {
left.IsFixed = left.IsFixed || right.IsFixed left.IsFixed = left.IsFixed || right.IsFixed
for i, link := range s.links { for i, link := range s.links {
rewrite := false rewrite := false
if link.LinkNode == right.Id { if link.LinkNode == right.ID {
link.LinkNode = left.Id link.LinkNode = left.ID
rewrite = true rewrite = true
} }
if link.Source == right.Id { if link.Source == right.ID {
link.Source = left.Id link.Source = left.ID
rewrite = true rewrite = true
} }
if link.Target == right.Id { if link.Target == right.ID {
link.Target = left.Id link.Target = left.ID
rewrite = true rewrite = true
} }
if rewrite { if rewrite {
@ -107,24 +107,24 @@ func (s *queryShape) StealNode(left *Node, right *Node) {
} }
func (s *queryShape) MakeNode(it graph.Iterator) *Node { func (s *queryShape) MakeNode(it graph.Iterator) *Node {
n := Node{Id: s.nodeId} n := Node{ID: s.nodeID}
for _, tag := range it.Tagger().Tags() { for _, tag := range it.Tagger().Tags() {
n.Tags = append(n.Tags, tag) n.Tags = append(n.Tags, tag)
} }
for k, _ := range it.Tagger().Fixed() { for k := range it.Tagger().Fixed() {
n.Tags = append(n.Tags, k) n.Tags = append(n.Tags, k)
} }
switch it.Type() { switch it.Type() {
case graph.And: case graph.And:
for _, sub := range it.SubIterators() { for _, sub := range it.SubIterators() {
s.nodeId++ s.nodeID++
newNode := s.MakeNode(sub) newNode := s.MakeNode(sub)
if sub.Type() != graph.Or { if sub.Type() != graph.Or {
s.StealNode(&n, newNode) s.StealNode(&n, newNode)
} else { } else {
s.AddNode(newNode) s.AddNode(newNode)
s.AddLink(&Link{n.Id, newNode.Id, 0, 0}) s.AddLink(&Link{n.ID, newNode.ID, 0, 0})
} }
} }
case graph.Fixed: case graph.Fixed:
@ -134,35 +134,35 @@ func (s *queryShape) MakeNode(it graph.Iterator) *Node {
} }
case graph.HasA: case graph.HasA:
hasa := it.(*HasA) hasa := it.(*HasA)
s.PushHasa(n.Id, hasa.dir) s.PushHasa(n.ID, hasa.dir)
s.nodeId++ s.nodeID++
newNode := s.MakeNode(hasa.primaryIt) newNode := s.MakeNode(hasa.primaryIt)
s.AddNode(newNode) s.AddNode(newNode)
s.RemoveHasa() s.RemoveHasa()
case graph.Or: case graph.Or:
for _, sub := range it.SubIterators() { for _, sub := range it.SubIterators() {
s.nodeId++ s.nodeID++
newNode := s.MakeNode(sub) newNode := s.MakeNode(sub)
if sub.Type() == graph.Or { if sub.Type() == graph.Or {
s.StealNode(&n, newNode) s.StealNode(&n, newNode)
} else { } else {
s.AddNode(newNode) s.AddNode(newNode)
s.AddLink(&Link{n.Id, newNode.Id, 0, 0}) s.AddLink(&Link{n.ID, newNode.ID, 0, 0})
} }
} }
case graph.LinksTo: case graph.LinksTo:
n.IsLinkNode = true n.IsLinkNode = true
lto := it.(*LinksTo) lto := it.(*LinksTo)
s.nodeId++ s.nodeID++
newNode := s.MakeNode(lto.primaryIt) newNode := s.MakeNode(lto.primaryIt)
hasaID, hasaDir := s.LastHasa() hasaID, hasaDir := s.LastHasa()
if (hasaDir == quad.Subject && lto.dir == quad.Object) || if (hasaDir == quad.Subject && lto.dir == quad.Object) ||
(hasaDir == quad.Object && lto.dir == quad.Subject) { (hasaDir == quad.Object && lto.dir == quad.Subject) {
s.AddNode(newNode) s.AddNode(newNode)
if hasaDir == quad.Subject { if hasaDir == quad.Subject {
s.AddLink(&Link{hasaID, newNode.Id, 0, n.Id}) s.AddLink(&Link{hasaID, newNode.ID, 0, n.ID})
} else { } else {
s.AddLink(&Link{newNode.Id, hasaID, 0, n.Id}) s.AddLink(&Link{newNode.ID, hasaID, 0, n.ID})
} }
} else if lto.primaryIt.Type() == graph.Fixed { } else if lto.primaryIt.Type() == graph.Fixed {
s.StealNode(&n, newNode) s.StealNode(&n, newNode)


@ -77,14 +77,14 @@ func TestQueryShape(t *testing.T) {
// Link should be correctly typed. // Link should be correctly typed.
nodes = shape["nodes"].([]Node) nodes = shape["nodes"].([]Node)
link := shape["links"].([]Link)[0] link := shape["links"].([]Link)[0]
if link.Source != nodes[2].Id { if link.Source != nodes[2].ID {
t.Errorf("Failed to get correct link source, got:%v expect:%v", link.Source, nodes[2].Id) t.Errorf("Failed to get correct link source, got:%v expect:%v", link.Source, nodes[2].ID)
} }
if link.Target != nodes[0].Id { if link.Target != nodes[0].ID {
t.Errorf("Failed to get correct link target, got:%v expect:%v", link.Target, nodes[0].Id) t.Errorf("Failed to get correct link target, got:%v expect:%v", link.Target, nodes[0].ID)
} }
if link.LinkNode != nodes[1].Id { if link.LinkNode != nodes[1].ID {
t.Errorf("Failed to get correct link node, got:%v expect:%v", link.LinkNode, nodes[1].Id) t.Errorf("Failed to get correct link node, got:%v expect:%v", link.LinkNode, nodes[1].ID)
} }
if link.Pred != 0 { if link.Pred != 0 {
t.Errorf("Failed to get correct number of predecessors:%v expect:0", link.Pred) t.Errorf("Failed to get correct number of predecessors:%v expect:0", link.Pred)


@ -38,10 +38,10 @@ import (
type Operator int type Operator int
const ( const (
kCompareLT Operator = iota compareLT Operator = iota
kCompareLTE compareLTE
kCompareGT compareGT
kCompareGTE compareGTE
// Why no Equals? Because that's usually an AndIterator. // Why no Equals? Because that's usually an AndIterator.
) )
@ -99,13 +99,13 @@ func (it *Comparison) Close() {
func RunIntOp(a int64, op Operator, b int64) bool { func RunIntOp(a int64, op Operator, b int64) bool {
switch op { switch op {
case kCompareLT: case compareLT:
return a < b return a < b
case kCompareLTE: case compareLTE:
return a <= b return a <= b
case kCompareGT: case compareGT:
return a > b return a > b
case kCompareGTE: case compareGTE:
return a >= b return a >= b
default: default:
log.Fatal("Unknown operator type") log.Fatal("Unknown operator type")


@ -40,25 +40,25 @@ var comparisonTests = []struct {
{ {
message: "successful int64 less than comparison", message: "successful int64 less than comparison",
operand: int64(3), operand: int64(3),
operator: kCompareLT, operator: compareLT,
expect: []string{"0", "1", "2"}, expect: []string{"0", "1", "2"},
}, },
{ {
message: "empty int64 less than comparison", message: "empty int64 less than comparison",
operand: int64(0), operand: int64(0),
operator: kCompareLT, operator: compareLT,
expect: nil, expect: nil,
}, },
{ {
message: "successful int64 greater than comparison", message: "successful int64 greater than comparison",
operand: int64(2), operand: int64(2),
operator: kCompareGT, operator: compareGT,
expect: []string{"3", "4"}, expect: []string{"3", "4"},
}, },
{ {
message: "successful int64 greater than or equal comparison", message: "successful int64 greater than or equal comparison",
operand: int64(2), operand: int64(2),
operator: kCompareGTE, operator: compareGTE,
expect: []string{"2", "3", "4"}, expect: []string{"2", "3", "4"},
}, },
} }
@ -86,25 +86,25 @@ var vciContainsTests = []struct {
}{ }{
{ {
message: "1 is less than 2", message: "1 is less than 2",
operator: kCompareGTE, operator: compareGTE,
check: 1, check: 1,
expect: false, expect: false,
}, },
{ {
message: "2 is greater than or equal to 2", message: "2 is greater than or equal to 2",
operator: kCompareGTE, operator: compareGTE,
check: 2, check: 2,
expect: true, expect: true,
}, },
{ {
message: "3 is greater than or equal to 2", message: "3 is greater than or equal to 2",
operator: kCompareGTE, operator: compareGTE,
check: 3, check: 3,
expect: true, expect: true,
}, },
{ {
message: "5 is absent from iterator", message: "5 is absent from iterator",
operator: kCompareGTE, operator: compareGTE,
check: 5, check: 5,
expect: false, expect: false,
}, },


@ -33,7 +33,7 @@ type Iterator struct {
uid uint64 uid uint64
tags graph.Tagger tags graph.Tagger
nextPrefix []byte nextPrefix []byte
checkId []byte checkID []byte
dir quad.Direction dir quad.Direction
open bool open bool
iter ldbit.Iterator iter ldbit.Iterator
@ -56,7 +56,7 @@ func NewIterator(prefix string, d quad.Direction, value graph.Value, qs *QuadSto
it := Iterator{ it := Iterator{
uid: iterator.NextUID(), uid: iterator.NextUID(),
nextPrefix: p, nextPrefix: p,
checkId: vb, checkID: vb,
dir: d, dir: d,
originalPrefix: prefix, originalPrefix: prefix,
ro: opts, ro: opts,
@ -106,7 +106,7 @@ func (it *Iterator) TagResults(dst map[string]graph.Value) {
} }
func (it *Iterator) Clone() graph.Iterator { func (it *Iterator) Clone() graph.Iterator {
out := NewIterator(it.originalPrefix, it.dir, Token(it.checkId), it.qs) out := NewIterator(it.originalPrefix, it.dir, Token(it.checkID), it.qs)
out.tags.CopyFrom(it) out.tags.CopyFrom(it)
return out return out
} }
@ -231,7 +231,7 @@ func (it *Iterator) Contains(v graph.Value) bool {
return false return false
} }
offset := PositionOf(val[0:2], it.dir, it.qs) offset := PositionOf(val[0:2], it.dir, it.qs)
if bytes.HasPrefix(val[offset:], it.checkId[1:]) { if bytes.HasPrefix(val[offset:], it.checkID[1:]) {
// You may ask, why don't we check to see if it's a valid (not deleted) quad // You may ask, why don't we check to see if it's a valid (not deleted) quad
// again? // again?
// //
@ -247,7 +247,7 @@ func (it *Iterator) Contains(v graph.Value) bool {
} }
func (it *Iterator) Size() (int64, bool) { func (it *Iterator) Size() (int64, bool) {
return it.qs.SizeOf(Token(it.checkId)), true return it.qs.SizeOf(Token(it.checkID)), true
} }
func (it *Iterator) DebugString(indent int) string { func (it *Iterator) DebugString(indent int) string {
@ -259,7 +259,7 @@ func (it *Iterator) DebugString(indent int) string {
it.tags.Tags(), it.tags.Tags(),
it.dir, it.dir,
size, size,
it.qs.NameOf(Token(it.checkId)), it.qs.NameOf(Token(it.checkID)),
) )
} }


@ -72,7 +72,7 @@ func createNewLevelDB(path string, _ graph.Options) error {
opts := &opt.Options{} opts := &opt.Options{}
db, err := leveldb.OpenFile(path, opts) db, err := leveldb.OpenFile(path, opts)
if err != nil { if err != nil {
glog.Errorf("Error: couldn't create database: %v", err) glog.Errorf("Error: could not create database: %v", err)
return err return err
} }
defer db.Close() defer db.Close()
@ -89,27 +89,27 @@ func newQuadStore(path string, options graph.Options) (graph.QuadStore, error) {
var qs QuadStore var qs QuadStore
var err error var err error
qs.path = path qs.path = path
cache_size := DefaultCacheSize cacheSize := DefaultCacheSize
if val, ok := options.IntKey("cache_size_mb"); ok { if val, ok := options.IntKey("cache_size_mb"); ok {
cache_size = val cacheSize = val
} }
qs.dbOpts = &opt.Options{ qs.dbOpts = &opt.Options{
BlockCache: cache.NewLRUCache(cache_size * opt.MiB), BlockCache: cache.NewLRUCache(cacheSize * opt.MiB),
} }
qs.dbOpts.ErrorIfMissing = true qs.dbOpts.ErrorIfMissing = true
write_buffer_mb := DefaultWriteBufferSize writeBufferSize := DefaultWriteBufferSize
if val, ok := options.IntKey("write_buffer_mb"); ok { if val, ok := options.IntKey("writeBufferSize"); ok {
write_buffer_mb = val writeBufferSize = val
} }
qs.dbOpts.WriteBuffer = write_buffer_mb * opt.MiB qs.dbOpts.WriteBuffer = writeBufferSize * opt.MiB
qs.writeopts = &opt.WriteOptions{ qs.writeopts = &opt.WriteOptions{
Sync: false, Sync: false,
} }
qs.readopts = &opt.ReadOptions{} qs.readopts = &opt.ReadOptions{}
db, err := leveldb.OpenFile(qs.path, qs.dbOpts) db, err := leveldb.OpenFile(qs.path, qs.dbOpts)
if err != nil { if err != nil {
glog.Errorln("Error, couldn't open! ", err) glog.Errorln("Error, could not open! ", err)
return nil, err return nil, err
} }
qs.db = db qs.db = db
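The LevelDB store's snake_case locals (cache_size, write_buffer_mb, and size_change later in the file) become camelCase. Note that this hunk also changes the option key passed to IntKey from "write_buffer_mb" to "writeBufferSize", so the externally visible configuration name changes along with the Go identifier; a rename that keeps the key stable would look like the sketch below, where the options type is a hypothetical stand-in for graph.Options:

```go
package main

import "fmt"

// options is a hypothetical stand-in for graph.Options, which backs IntKey in
// the real store.
type options map[string]interface{}

func (o options) IntKey(k string) (int, bool) {
	v, ok := o[k].(int)
	return v, ok
}

const DefaultWriteBufferSize = 20 // MiB; illustrative, the real default lives elsewhere

func writeBufferMiB(opts options) int {
	writeBufferSize := DefaultWriteBufferSize // camelCase local, as in the hunk
	// The user-facing option key keeps its original spelling.
	if val, ok := opts.IntKey("write_buffer_mb"); ok {
		writeBufferSize = val
	}
	return writeBufferSize
}

func main() {
	fmt.Println(writeBufferMiB(options{"write_buffer_mb": 64}))
}
```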
@ -139,13 +139,6 @@ func (qs *QuadStore) Horizon() int64 {
return qs.horizon return qs.horizon
} }
func (qa *QuadStore) createDeltaKeyFor(d graph.Delta) []byte {
key := make([]byte, 0, 19)
key = append(key, 'd')
key = append(key, []byte(fmt.Sprintf("%018x", d.ID))...)
return key
}
func hashOf(s string) []byte { func hashOf(s string) []byte {
h := hashPool.Get().(hash.Hash) h := hashPool.Get().(hash.Hash)
h.Reset() h.Reset()
@ -190,13 +183,13 @@ var (
func (qs *QuadStore) ApplyDeltas(deltas []graph.Delta) error { func (qs *QuadStore) ApplyDeltas(deltas []graph.Delta) error {
batch := &leveldb.Batch{} batch := &leveldb.Batch{}
resizeMap := make(map[string]int64) resizeMap := make(map[string]int64)
size_change := int64(0) sizeChange := int64(0)
for _, d := range deltas { for _, d := range deltas {
bytes, err := json.Marshal(d) bytes, err := json.Marshal(d)
if err != nil { if err != nil {
return err return err
} }
batch.Put(qs.createDeltaKeyFor(d), bytes) batch.Put(keyFor(d), bytes)
err = qs.buildQuadWrite(batch, d.Quad, d.ID, d.Action == graph.Add) err = qs.buildQuadWrite(batch, d.Quad, d.ID, d.Action == graph.Add)
if err != nil { if err != nil {
return err return err
@ -211,7 +204,7 @@ func (qs *QuadStore) ApplyDeltas(deltas []graph.Delta) error {
if d.Quad.Label != "" { if d.Quad.Label != "" {
resizeMap[d.Quad.Label] += delta resizeMap[d.Quad.Label] += delta
} }
size_change += delta sizeChange += delta
qs.horizon = d.ID qs.horizon = d.ID
} }
for k, v := range resizeMap { for k, v := range resizeMap {
@ -224,18 +217,25 @@ func (qs *QuadStore) ApplyDeltas(deltas []graph.Delta) error {
} }
err := qs.db.Write(batch, qs.writeopts) err := qs.db.Write(batch, qs.writeopts)
if err != nil { if err != nil {
glog.Error("Couldn't write to DB for quadset.") glog.Error("could not write to DB for quadset.")
return err return err
} }
qs.size += size_change qs.size += sizeChange
return nil return nil
} }
func keyFor(d graph.Delta) []byte {
key := make([]byte, 0, 19)
key = append(key, 'd')
key = append(key, []byte(fmt.Sprintf("%018x", d.ID))...)
return key
}
func (qs *QuadStore) buildQuadWrite(batch *leveldb.Batch, q quad.Quad, id int64, isAdd bool) error { func (qs *QuadStore) buildQuadWrite(batch *leveldb.Batch, q quad.Quad, id int64, isAdd bool) error {
var entry IndexEntry var entry IndexEntry
data, err := qs.db.Get(qs.createKeyFor(spo, q), qs.readopts) data, err := qs.db.Get(qs.createKeyFor(spo, q), qs.readopts)
if err != nil && err != leveldb.ErrNotFound { if err != nil && err != leveldb.ErrNotFound {
glog.Error("Couldn't access DB to prepare index: ", err) glog.Error("could not access DB to prepare index: ", err)
return err return err
} }
if err == nil { if err == nil {
@ -251,12 +251,12 @@ func (qs *QuadStore) buildQuadWrite(batch *leveldb.Batch, q quad.Quad, id int64,
if isAdd && len(entry.History)%2 == 0 { if isAdd && len(entry.History)%2 == 0 {
glog.Error("Entry History is out of sync for", entry) glog.Error("Entry History is out of sync for", entry)
return errors.New("Odd index history") return errors.New("odd index history")
} }
bytes, err := json.Marshal(entry) bytes, err := json.Marshal(entry)
if err != nil { if err != nil {
glog.Errorf("Couldn't write to buffer for entry %#v: %s", entry, err) glog.Errorf("could not write to buffer for entry %#v: %s", entry, err)
return err return err
} }
batch.Put(qs.createKeyFor(spo, q), bytes) batch.Put(qs.createKeyFor(spo, q), bytes)
@ -288,7 +288,7 @@ func (qs *QuadStore) UpdateValueKeyBy(name string, amount int64, batch *leveldb.
if b != nil && err != leveldb.ErrNotFound { if b != nil && err != leveldb.ErrNotFound {
err = json.Unmarshal(b, value) err = json.Unmarshal(b, value)
if err != nil { if err != nil {
glog.Errorf("Error: couldn't reconstruct value: %v", err) glog.Errorf("Error: could not reconstruct value: %v", err)
return err return err
} }
value.Size += amount value.Size += amount
@ -302,7 +302,7 @@ func (qs *QuadStore) UpdateValueKeyBy(name string, amount int64, batch *leveldb.
// Repackage and rewrite. // Repackage and rewrite.
bytes, err := json.Marshal(&value) bytes, err := json.Marshal(&value)
if err != nil { if err != nil {
glog.Errorf("Couldn't write to buffer for value %s: %s", name, err) glog.Errorf("could not write to buffer for value %s: %s", name, err)
return err return err
} }
if batch == nil { if batch == nil {
@ -319,20 +319,20 @@ func (qs *QuadStore) Close() {
if err == nil { if err == nil {
werr := qs.db.Put([]byte("__size"), buf.Bytes(), qs.writeopts) werr := qs.db.Put([]byte("__size"), buf.Bytes(), qs.writeopts)
if werr != nil { if werr != nil {
glog.Error("Couldn't write size before closing!") glog.Error("could not write size before closing!")
} }
} else { } else {
glog.Errorf("Couldn't convert size before closing!") glog.Errorf("could not convert size before closing!")
} }
buf.Reset() buf.Reset()
err = binary.Write(buf, binary.LittleEndian, qs.horizon) err = binary.Write(buf, binary.LittleEndian, qs.horizon)
if err == nil { if err == nil {
werr := qs.db.Put([]byte("__horizon"), buf.Bytes(), qs.writeopts) werr := qs.db.Put([]byte("__horizon"), buf.Bytes(), qs.writeopts)
if werr != nil { if werr != nil {
glog.Error("Couldn't write horizon before closing!") glog.Error("could not write horizon before closing!")
} }
} else { } else {
glog.Errorf("Couldn't convert horizon before closing!") glog.Errorf("could not convert horizon before closing!")
} }
qs.db.Close() qs.db.Close()
qs.open = false qs.open = false
@ -342,7 +342,7 @@ func (qs *QuadStore) Quad(k graph.Value) quad.Quad {
var q quad.Quad var q quad.Quad
b, err := qs.db.Get(k.(Token), qs.readopts) b, err := qs.db.Get(k.(Token), qs.readopts)
if err != nil && err != leveldb.ErrNotFound { if err != nil && err != leveldb.ErrNotFound {
glog.Error("Error: couldn't get quad from DB.") glog.Error("Error: could not get quad from DB.")
return quad.Quad{} return quad.Quad{}
} }
if err == leveldb.ErrNotFound { if err == leveldb.ErrNotFound {
@ -351,7 +351,7 @@ func (qs *QuadStore) Quad(k graph.Value) quad.Quad {
} }
err = json.Unmarshal(b, &q) err = json.Unmarshal(b, &q)
if err != nil { if err != nil {
glog.Error("Error: couldn't reconstruct quad.") glog.Error("Error: could not reconstruct quad.")
return quad.Quad{} return quad.Quad{}
} }
return q return q
@ -361,20 +361,20 @@ func (qs *QuadStore) ValueOf(s string) graph.Value {
return Token(qs.createValueKeyFor(s)) return Token(qs.createValueKeyFor(s))
} }
func (qs *QuadStore) valueData(value_key []byte) ValueData { func (qs *QuadStore) valueData(key []byte) ValueData {
var out ValueData var out ValueData
if glog.V(3) { if glog.V(3) {
glog.V(3).Infof("%s %v", string(value_key[0]), value_key) glog.V(3).Infof("%c %v", key[0], key)
} }
b, err := qs.db.Get(value_key, qs.readopts) b, err := qs.db.Get(key, qs.readopts)
if err != nil && err != leveldb.ErrNotFound { if err != nil && err != leveldb.ErrNotFound {
glog.Errorln("Error: couldn't get value from DB") glog.Errorln("Error: could not get value from DB")
return out return out
} }
if b != nil && err != leveldb.ErrNotFound { if b != nil && err != leveldb.ErrNotFound {
err = json.Unmarshal(b, &out) err = json.Unmarshal(b, &out)
if err != nil { if err != nil {
glog.Errorln("Error: couldn't reconstruct value") glog.Errorln("Error: could not reconstruct value")
return ValueData{} return ValueData{}
} }
} }
@ -400,7 +400,7 @@ func (qs *QuadStore) getInt64ForKey(key string, empty int64) (int64, error) {
var out int64 var out int64
b, err := qs.db.Get([]byte(key), qs.readopts) b, err := qs.db.Get([]byte(key), qs.readopts)
if err != nil && err != leveldb.ErrNotFound { if err != nil && err != leveldb.ErrNotFound {
glog.Errorln("Couldn't read " + key + ": " + err.Error()) glog.Errorln("could not read " + key + ": " + err.Error())
return 0, err return 0, err
} }
if err == leveldb.ErrNotFound { if err == leveldb.ErrNotFound {
@ -410,7 +410,7 @@ func (qs *QuadStore) getInt64ForKey(key string, empty int64) (int64, error) {
buf := bytes.NewBuffer(b) buf := bytes.NewBuffer(b)
err = binary.Read(buf, binary.LittleEndian, &out) err = binary.Read(buf, binary.LittleEndian, &out)
if err != nil { if err != nil {
glog.Errorln("Error: couldn't parse", key) glog.Errorln("Error: could not parse", key)
return 0, err return 0, err
} }
return out, nil return out, nil
@ -471,9 +471,8 @@ func (qs *QuadStore) QuadDirection(val graph.Value, d quad.Direction) graph.Valu
offset := PositionOf(v[0:2], d, qs) offset := PositionOf(v[0:2], d, qs)
if offset != -1 { if offset != -1 {
return Token(append([]byte("z"), v[offset:offset+hashSize]...)) return Token(append([]byte("z"), v[offset:offset+hashSize]...))
} else {
return Token(qs.Quad(val).Get(d))
} }
return Token(qs.Quad(val).Get(d))
} }
func compareBytes(a, b graph.Value) bool { func compareBytes(a, b graph.Value) bool {


@ -31,7 +31,7 @@ type (
func newNodesAllIterator(qs *QuadStore) *nodesAllIterator { func newNodesAllIterator(qs *QuadStore) *nodesAllIterator {
var out nodesAllIterator var out nodesAllIterator
out.Int64 = *iterator.NewInt64(1, qs.idCounter-1) out.Int64 = *iterator.NewInt64(1, qs.nextID-1)
out.qs = qs out.qs = qs
return &out return &out
} }
@ -45,7 +45,7 @@ func (it *nodesAllIterator) Next() bool {
if !it.Int64.Next() { if !it.Int64.Next() {
return false return false
} }
_, ok := it.qs.revIdMap[it.Int64.Result().(int64)] _, ok := it.qs.revIDMap[it.Int64.Result().(int64)]
if !ok { if !ok {
return it.Next() return it.Next()
} }
@ -54,7 +54,7 @@ func (it *nodesAllIterator) Next() bool {
func newQuadsAllIterator(qs *QuadStore) *quadsAllIterator { func newQuadsAllIterator(qs *QuadStore) *quadsAllIterator {
var out quadsAllIterator var out quadsAllIterator
out.Int64 = *iterator.NewInt64(1, qs.quadIdCounter-1) out.Int64 = *iterator.NewInt64(1, qs.nextQuadID-1)
out.qs = qs out.qs = qs
return &out return &out
} }


@ -70,27 +70,27 @@ type LogEntry struct {
} }
type QuadStore struct { type QuadStore struct {
idCounter int64 nextID int64
quadIdCounter int64 nextQuadID int64
idMap map[string]int64 idMap map[string]int64
revIdMap map[int64]string revIDMap map[int64]string
log []LogEntry log []LogEntry
size int64 size int64
index QuadDirectionIndex index QuadDirectionIndex
// vip_index map[string]map[int64]map[string]map[int64]*b.Tree // vip_index map[string]map[int64]map[string]map[int64]*b.Tree
} }
func newQuadStore() *QuadStore { func newQuadStore() *QuadStore {
return &QuadStore{ return &QuadStore{
idMap: make(map[string]int64), idMap: make(map[string]int64),
revIdMap: make(map[int64]string), revIDMap: make(map[int64]string),
// Sentinel null entry so indices start at 1 // Sentinel null entry so indices start at 1
log: make([]LogEntry, 1, 200), log: make([]LogEntry, 1, 200),
index: NewQuadDirectionIndex(), index: NewQuadDirectionIndex(),
idCounter: 1, nextID: 1,
quadIdCounter: 1, nextQuadID: 1,
} }
} }
@ -148,10 +148,10 @@ func (qs *QuadStore) AddDelta(d graph.Delta) error {
if _, exists := qs.indexOf(d.Quad); exists { if _, exists := qs.indexOf(d.Quad); exists {
return graph.ErrQuadExists return graph.ErrQuadExists
} }
qid := qs.quadIdCounter qid := qs.nextQuadID
qs.log = append(qs.log, LogEntry{Delta: d}) qs.log = append(qs.log, LogEntry{Delta: d})
qs.size++ qs.size++
qs.quadIdCounter++ qs.nextQuadID++
for dir := quad.Subject; dir <= quad.Label; dir++ { for dir := quad.Subject; dir <= quad.Label; dir++ {
sid := d.Quad.Get(dir) sid := d.Quad.Get(dir)
@ -159,9 +159,9 @@ func (qs *QuadStore) AddDelta(d graph.Delta) error {
continue continue
} }
if _, ok := qs.idMap[sid]; !ok { if _, ok := qs.idMap[sid]; !ok {
qs.idMap[sid] = qs.idCounter qs.idMap[sid] = qs.nextID
qs.revIdMap[qs.idCounter] = sid qs.revIDMap[qs.nextID] = sid
qs.idCounter++ qs.nextID++
} }
} }
@ -184,11 +184,11 @@ func (qs *QuadStore) RemoveDelta(d graph.Delta) error {
return graph.ErrQuadNotExist return graph.ErrQuadNotExist
} }
quadID := qs.quadIdCounter quadID := qs.nextQuadID
qs.log = append(qs.log, LogEntry{Delta: d}) qs.log = append(qs.log, LogEntry{Delta: d})
qs.log[prevQuadID].DeletedBy = quadID qs.log[prevQuadID].DeletedBy = quadID
qs.size-- qs.size--
qs.quadIdCounter++ qs.nextQuadID++
return nil return nil
} }
@ -227,7 +227,7 @@ func (qs *QuadStore) ValueOf(name string) graph.Value {
} }
func (qs *QuadStore) NameOf(id graph.Value) string { func (qs *QuadStore) NameOf(id graph.Value) string {
return qs.revIdMap[id.(int64)] return qs.revIDMap[id.(int64)]
} }
func (qs *QuadStore) QuadsAllIterator() graph.Iterator { func (qs *QuadStore) QuadsAllIterator() graph.Iterator {


@ -164,11 +164,11 @@ func TestLinksToOptimization(t *testing.T) {
} }
v := newIt.(*Iterator) v := newIt.(*Iterator)
v_clone := v.Clone() vClone := v.Clone()
if v_clone.DebugString(0) != v.DebugString(0) { if vClone.DebugString(0) != v.DebugString(0) {
t.Fatal("Wrong iterator. Got ", v_clone.DebugString(0)) t.Fatal("Wrong iterator. Got ", vClone.DebugString(0))
} }
vt := v_clone.Tagger() vt := vClone.Tagger()
if len(vt.Tags()) < 1 || vt.Tags()[0] != "foo" { if len(vt.Tags()) < 1 || vt.Tags()[0] != "foo" {
t.Fatal("Tag on LinksTo did not persist") t.Fatal("Tag on LinksTo did not persist")
} }


@ -130,7 +130,7 @@ func (it *Iterator) Clone() graph.Iterator {
func (it *Iterator) Next() bool { func (it *Iterator) Next() bool {
var result struct { var result struct {
Id string `bson:"_id"` ID string `bson:"_id"`
Added []int64 `bson:"Added"` Added []int64 `bson:"Added"`
Deleted []int64 `bson:"Deleted"` Deleted []int64 `bson:"Deleted"`
} }
@ -145,7 +145,7 @@ func (it *Iterator) Next() bool {
if it.collection == "quads" && len(result.Added) <= len(result.Deleted) { if it.collection == "quads" && len(result.Added) <= len(result.Deleted) {
return it.Next() return it.Next()
} }
it.result = result.Id it.result = result.ID
return true return true
} }


@ -18,45 +18,48 @@ import (
"container/list" "container/list"
) )
type IDLru struct { // TODO(kortschak) Reimplement without container/list.
// cache implements an LRU cache.
type cache struct {
cache map[string]*list.Element cache map[string]*list.Element
priority *list.List priority *list.List
maxSize int maxSize int
} }
type KV struct { type kv struct {
key string key string
value string value string
} }
func NewIDLru(size int) *IDLru { func newCache(size int) *cache {
var lru IDLru var lru cache
lru.maxSize = size lru.maxSize = size
lru.priority = list.New() lru.priority = list.New()
lru.cache = make(map[string]*list.Element) lru.cache = make(map[string]*list.Element)
return &lru return &lru
} }
func (lru *IDLru) Put(key string, value string) { func (lru *cache) Put(key string, value string) {
if _, ok := lru.Get(key); ok { if _, ok := lru.Get(key); ok {
return return
} }
if len(lru.cache) == lru.maxSize { if len(lru.cache) == lru.maxSize {
lru.removeOldest() lru.removeOldest()
} }
lru.priority.PushFront(KV{key: key, value: value}) lru.priority.PushFront(kv{key: key, value: value})
lru.cache[key] = lru.priority.Front() lru.cache[key] = lru.priority.Front()
} }
func (lru *IDLru) Get(key string) (string, bool) { func (lru *cache) Get(key string) (string, bool) {
if element, ok := lru.cache[key]; ok { if element, ok := lru.cache[key]; ok {
lru.priority.MoveToFront(element) lru.priority.MoveToFront(element)
return element.Value.(KV).value, true return element.Value.(kv).value, true
} }
return "", false return "", false
} }
func (lru *IDLru) removeOldest() { func (lru *cache) removeOldest() {
last := lru.priority.Remove(lru.priority.Back()) last := lru.priority.Remove(lru.priority.Back())
delete(lru.cache, last.(KV).key) delete(lru.cache, last.(kv).key)
} }


@ -45,7 +45,7 @@ var (
type QuadStore struct { type QuadStore struct {
session *mgo.Session session *mgo.Session
db *mgo.Database db *mgo.Database
idCache *IDLru ids *cache
} }
func createNewMongoGraph(addr string, options graph.Options) error { func createNewMongoGraph(addr string, options graph.Options) error {
@ -97,11 +97,11 @@ func newQuadStore(addr string, options graph.Options) (graph.QuadStore, error) {
} }
qs.db = conn.DB(dbName) qs.db = conn.DB(dbName)
qs.session = conn qs.session = conn
qs.idCache = NewIDLru(1 << 16) qs.ids = newCache(1 << 16)
return &qs, nil return &qs, nil
} }
func (qs *QuadStore) getIdForQuad(t quad.Quad) string { func (qs *QuadStore) getIDForQuad(t quad.Quad) string {
id := hashOf(t.Subject) id := hashOf(t.Subject)
id += hashOf(t.Predicate) id += hashOf(t.Predicate)
id += hashOf(t.Object) id += hashOf(t.Object)
@ -121,7 +121,7 @@ func hashOf(s string) string {
} }
type MongoNode struct { type MongoNode struct {
Id string `bson:"_id"` ID string `bson:"_id"`
Name string `bson:"Name"` Name string `bson:"Name"`
Size int `bson:"Size"` Size int `bson:"Size"`
} }
@ -133,11 +133,11 @@ type MongoLogEntry struct {
Timestamp int64 Timestamp int64
} }
func (qs *QuadStore) updateNodeBy(node_name string, inc int) error { func (qs *QuadStore) updateNodeBy(name string, inc int) error {
node := qs.ValueOf(node_name) node := qs.ValueOf(name)
doc := bson.M{ doc := bson.M{
"_id": node.(string), "_id": node.(string),
"Name": node_name, "Name": name,
} }
upsert := bson.M{ upsert := bson.M{
"$setOnInsert": doc, "$setOnInsert": doc,
@ -166,7 +166,7 @@ func (qs *QuadStore) updateQuad(q quad.Quad, id int64, proc graph.Procedure) err
setname: id, setname: id,
}, },
} }
_, err := qs.db.C("quads").UpsertId(qs.getIdForQuad(q), upsert) _, err := qs.db.C("quads").UpsertId(qs.getIDForQuad(q), upsert)
if err != nil { if err != nil {
glog.Errorf("Error: %v", err) glog.Errorf("Error: %v", err)
} }
@ -202,7 +202,7 @@ func (qs *QuadStore) updateLog(d graph.Delta) error {
entry := MongoLogEntry{ entry := MongoLogEntry{
LogID: d.ID, LogID: d.ID,
Action: action, Action: action,
Key: qs.getIdForQuad(d.Quad), Key: qs.getIDForQuad(d.Quad),
Timestamp: d.Timestamp.UnixNano(), Timestamp: d.Timestamp.UnixNano(),
} }
err := qs.db.C("log").Insert(entry) err := qs.db.C("log").Insert(entry)
@ -217,7 +217,7 @@ func (qs *QuadStore) ApplyDeltas(in []graph.Delta) error {
ids := make(map[string]int) ids := make(map[string]int)
// Pre-check the existence condition. // Pre-check the existence condition.
for _, d := range in { for _, d := range in {
key := qs.getIdForQuad(d.Quad) key := qs.getIDForQuad(d.Quad)
switch d.Action { switch d.Action {
case graph.Add: case graph.Add:
if qs.checkValid(key) { if qs.checkValid(key) {
@ -292,7 +292,7 @@ func (qs *QuadStore) ValueOf(s string) graph.Value {
} }
func (qs *QuadStore) NameOf(v graph.Value) string { func (qs *QuadStore) NameOf(v graph.Value) string {
val, ok := qs.idCache.Get(v.(string)) val, ok := qs.ids.Get(v.(string))
if ok { if ok {
return val return val
} }
@ -301,7 +301,7 @@ func (qs *QuadStore) NameOf(v graph.Value) string {
if err != nil { if err != nil {
glog.Errorf("Error: Couldn't retrieve node %s %v", v, err) glog.Errorf("Error: Couldn't retrieve node %s %v", v, err)
} }
qs.idCache.Put(v.(string), node.Name) qs.ids.Put(v.(string), node.Name)
return node.Name return node.Name
} }


@ -54,8 +54,8 @@ func (h *Handle) Close() {
} }
var ( var (
ErrQuadExists = errors.New("Quad exists") ErrQuadExists = errors.New("quad exists")
ErrQuadNotExist = errors.New("Quad doesn't exist") ErrQuadNotExist = errors.New("quad does not exist")
) )
type QuadWriter interface { type QuadWriter interface {
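golint also asks for error strings that start lowercase and carry no trailing punctuation, so they compose cleanly when a caller wraps them. A short sketch using the two variables above:

```go
package main

import (
	"errors"
	"fmt"
)

var (
	ErrQuadExists   = errors.New("quad exists")
	ErrQuadNotExist = errors.New("quad does not exist")
)

func main() {
	// Lowercase messages read naturally inside a larger error string.
	fmt.Println(fmt.Errorf("applying delta: %v", ErrQuadExists))
}
```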


@ -47,9 +47,8 @@ func findAssetsPath() string {
if *assetsPath != "" { if *assetsPath != "" {
if hasAssets(*assetsPath) { if hasAssets(*assetsPath) {
return *assetsPath return *assetsPath
} else {
glog.Fatalln("Cannot find assets at", *assetsPath, ".")
} }
glog.Fatalln("Cannot find assets at", *assetsPath, ".")
} }
if hasAssets(".") { if hasAssets(".") {
@ -61,7 +60,7 @@ func findAssetsPath() string {
return gopathPath return gopathPath
} }
glog.Fatalln("Cannot find assets in any of the default search paths. Please run in the same directory, in a Go workspace, or set --assets .") glog.Fatalln("Cannot find assets in any of the default search paths. Please run in the same directory, in a Go workspace, or set --assets .")
return "" panic("cannot reach")
} }
func LogRequest(handler ResponseHandler) httprouter.Handle { func LogRequest(handler ResponseHandler) httprouter.Handle {
@ -81,11 +80,7 @@ func LogRequest(handler ResponseHandler) httprouter.Handle {
} }
} }
func FormatJson400(w http.ResponseWriter, err interface{}) int { func jsonResponse(w http.ResponseWriter, code int, err interface{}) int {
return FormatJsonError(w, 400, err)
}
func FormatJsonError(w http.ResponseWriter, code int, err interface{}) int {
http.Error(w, fmt.Sprintf("{\"error\" : \"%s\"}", err), code) http.Error(w, fmt.Sprintf("{\"error\" : \"%s\"}", err), code)
return code return code
} }
@ -105,12 +100,12 @@ func (h *TemplateRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques
} }
} }
type Api struct { type API struct {
config *config.Config config *config.Config
handle *graph.Handle handle *graph.Handle
} }
func (api *Api) ApiV1(r *httprouter.Router) { func (api *API) APIv1(r *httprouter.Router) {
r.POST("/api/v1/query/:query_lang", LogRequest(api.ServeV1Query)) r.POST("/api/v1/query/:query_lang", LogRequest(api.ServeV1Query))
r.POST("/api/v1/shape/:query_lang", LogRequest(api.ServeV1Shape)) r.POST("/api/v1/shape/:query_lang", LogRequest(api.ServeV1Shape))
r.POST("/api/v1/write", LogRequest(api.ServeV1Write)) r.POST("/api/v1/write", LogRequest(api.ServeV1Write))
@ -129,8 +124,8 @@ func SetupRoutes(handle *graph.Handle, cfg *config.Config) {
templates.ParseGlob(fmt.Sprint(assets, "/templates/*.html")) templates.ParseGlob(fmt.Sprint(assets, "/templates/*.html"))
root := &TemplateRequestHandler{templates: templates} root := &TemplateRequestHandler{templates: templates}
docs := &DocRequestHandler{assets: assets} docs := &DocRequestHandler{assets: assets}
api := &Api{config: cfg, handle: handle} api := &API{config: cfg, handle: handle}
api.ApiV1(r) api.APIv1(r)
//m.Use(martini.Static("static", martini.StaticOptions{Prefix: "/static", SkipLogging: true})) //m.Use(martini.Static("static", martini.StaticOptions{Prefix: "/static", SkipLogging: true}))
//r.Handler("GET", "/static", http.StripPrefix("/static", http.FileServer(http.Dir("static/")))) //r.Handler("GET", "/static", http.StripPrefix("/static", http.FileServer(http.Dir("static/"))))


@ -56,13 +56,13 @@ var parseTests = []struct {
{"subject": "foo", "predicate": "bar"} {"subject": "foo", "predicate": "bar"}
]`, ]`,
expect: nil, expect: nil,
err: fmt.Errorf("Invalid quad at index %d. %v", 0, quad.Quad{"foo", "bar", "", ""}), err: fmt.Errorf("invalid quad at index %d. %v", 0, quad.Quad{"foo", "bar", "", ""}),
}, },
} }
func TestParseJSON(t *testing.T) { func TestParseJSON(t *testing.T) {
for _, test := range parseTests { for _, test := range parseTests {
got, err := ParseJsonToQuadList([]byte(test.input)) got, err := ParseJSONToQuadList([]byte(test.input))
if fmt.Sprint(err) != fmt.Sprint(test.err) { if fmt.Sprint(err) != fmt.Sprint(test.err) {
t.Errorf("Failed to %v with unexpected error, got:%v expected %v", test.message, err, test.err) t.Errorf("Failed to %v with unexpected error, got:%v expected %v", test.message, err, test.err)
} }


@ -47,18 +47,18 @@ func WrapResult(result interface{}) ([]byte, error) {
return json.MarshalIndent(wrap, "", " ") return json.MarshalIndent(wrap, "", " ")
} }
func RunJsonQuery(query string, ses query.HttpSession) (interface{}, error) { func Run(q string, ses query.HTTP) (interface{}, error) {
c := make(chan interface{}, 5) c := make(chan interface{}, 5)
go ses.ExecInput(query, c, 100) go ses.ExecInput(q, c, 100)
for res := range c { for res := range c {
ses.BuildJson(res) ses.BuildJSON(res)
} }
return ses.GetJson() return ses.GetJSON()
} }
func GetQueryShape(query string, ses query.HttpSession) ([]byte, error) { func GetQueryShape(q string, ses query.HTTP) ([]byte, error) {
c := make(chan map[string]interface{}, 5) c := make(chan map[string]interface{}, 5)
go ses.GetQuery(query, c) go ses.GetQuery(q, c)
var data map[string]interface{} var data map[string]interface{}
for res := range c { for res := range c {
data = res data = res
@ -67,19 +67,19 @@ func GetQueryShape(query string, ses query.HttpSession) ([]byte, error) {
} }
// TODO(barakmich): Turn this into proper middleware. // TODO(barakmich): Turn this into proper middleware.
func (api *Api) ServeV1Query(w http.ResponseWriter, r *http.Request, params httprouter.Params) int { func (api *API) ServeV1Query(w http.ResponseWriter, r *http.Request, params httprouter.Params) int {
var ses query.HttpSession var ses query.HTTP
switch params.ByName("query_lang") { switch params.ByName("query_lang") {
case "gremlin": case "gremlin":
ses = gremlin.NewSession(api.handle.QuadStore, api.config.Timeout, false) ses = gremlin.NewSession(api.handle.QuadStore, api.config.Timeout, false)
case "mql": case "mql":
ses = mql.NewSession(api.handle.QuadStore) ses = mql.NewSession(api.handle.QuadStore)
default: default:
return FormatJson400(w, "Need a query language.") return jsonResponse(w, 400, "Need a query language.")
} }
bodyBytes, err := ioutil.ReadAll(r.Body) bodyBytes, err := ioutil.ReadAll(r.Body)
if err != nil { if err != nil {
return FormatJson400(w, err) return jsonResponse(w, 400, err)
} }
code := string(bodyBytes) code := string(bodyBytes)
result, err := ses.InputParses(code) result, err := ses.InputParses(code)
@ -88,7 +88,7 @@ func (api *Api) ServeV1Query(w http.ResponseWriter, r *http.Request, params http
var output interface{} var output interface{}
var bytes []byte var bytes []byte
var err error var err error
output, err = RunJsonQuery(code, ses) output, err = Run(code, ses)
if err != nil { if err != nil {
bytes, err = WrapErrResult(err) bytes, err = WrapErrResult(err)
http.Error(w, string(bytes), 400) http.Error(w, string(bytes), 400)
@ -98,33 +98,33 @@ func (api *Api) ServeV1Query(w http.ResponseWriter, r *http.Request, params http
bytes, err = WrapResult(output) bytes, err = WrapResult(output)
if err != nil { if err != nil {
ses = nil ses = nil
return FormatJson400(w, err) return jsonResponse(w, 400, err)
} }
fmt.Fprint(w, string(bytes)) fmt.Fprint(w, string(bytes))
ses = nil ses = nil
return 200 return 200
case query.ParseFail: case query.ParseFail:
ses = nil ses = nil
return FormatJson400(w, err) return jsonResponse(w, 400, err)
default: default:
ses = nil ses = nil
return FormatJsonError(w, 500, "Incomplete data?") return jsonResponse(w, 500, "Incomplete data?")
} }
} }
func (api *Api) ServeV1Shape(w http.ResponseWriter, r *http.Request, params httprouter.Params) int { func (api *API) ServeV1Shape(w http.ResponseWriter, r *http.Request, params httprouter.Params) int {
var ses query.HttpSession var ses query.HTTP
switch params.ByName("query_lang") { switch params.ByName("query_lang") {
case "gremlin": case "gremlin":
ses = gremlin.NewSession(api.handle.QuadStore, api.config.Timeout, false) ses = gremlin.NewSession(api.handle.QuadStore, api.config.Timeout, false)
case "mql": case "mql":
ses = mql.NewSession(api.handle.QuadStore) ses = mql.NewSession(api.handle.QuadStore)
default: default:
return FormatJson400(w, "Need a query language.") return jsonResponse(w, 400, "Need a query language.")
} }
bodyBytes, err := ioutil.ReadAll(r.Body) bodyBytes, err := ioutil.ReadAll(r.Body)
if err != nil { if err != nil {
return FormatJson400(w, err) return jsonResponse(w, 400, err)
} }
code := string(bodyBytes) code := string(bodyBytes)
result, err := ses.InputParses(code) result, err := ses.InputParses(code)
@ -134,13 +134,13 @@ func (api *Api) ServeV1Shape(w http.ResponseWriter, r *http.Request, params http
var err error var err error
output, err = GetQueryShape(code, ses) output, err = GetQueryShape(code, ses)
if err != nil { if err != nil {
return FormatJson400(w, err) return jsonResponse(w, 400, err)
} }
fmt.Fprint(w, string(output)) fmt.Fprint(w, string(output))
return 200 return 200
case query.ParseFail: case query.ParseFail:
return FormatJson400(w, err) return jsonResponse(w, 400, err)
default: default:
return FormatJsonError(w, 500, "Incomplete data?") return jsonResponse(w, 500, "Incomplete data?")
} }
} }


@ -29,7 +29,7 @@ import (
"github.com/google/cayley/quad/cquads" "github.com/google/cayley/quad/cquads"
) )
func ParseJsonToQuadList(jsonBody []byte) ([]quad.Quad, error) { func ParseJSONToQuadList(jsonBody []byte) ([]quad.Quad, error) {
var quads []quad.Quad var quads []quad.Quad
err := json.Unmarshal(jsonBody, &quads) err := json.Unmarshal(jsonBody, &quads)
if err != nil { if err != nil {
@ -37,38 +37,38 @@ func ParseJsonToQuadList(jsonBody []byte) ([]quad.Quad, error) {
} }
for i, q := range quads { for i, q := range quads {
if !q.IsValid() { if !q.IsValid() {
return nil, fmt.Errorf("Invalid quad at index %d. %s", i, q) return nil, fmt.Errorf("invalid quad at index %d. %s", i, q)
} }
} }
return quads, nil return quads, nil
} }
func (api *Api) ServeV1Write(w http.ResponseWriter, r *http.Request, _ httprouter.Params) int { func (api *API) ServeV1Write(w http.ResponseWriter, r *http.Request, _ httprouter.Params) int {
if api.config.ReadOnly { if api.config.ReadOnly {
return FormatJson400(w, "Database is read-only.") return jsonResponse(w, 400, "Database is read-only.")
} }
bodyBytes, err := ioutil.ReadAll(r.Body) bodyBytes, err := ioutil.ReadAll(r.Body)
if err != nil { if err != nil {
return FormatJson400(w, err) return jsonResponse(w, 400, err)
} }
quads, err := ParseJsonToQuadList(bodyBytes) quads, err := ParseJSONToQuadList(bodyBytes)
if err != nil { if err != nil {
return FormatJson400(w, err) return jsonResponse(w, 400, err)
} }
api.handle.QuadWriter.AddQuadSet(quads) api.handle.QuadWriter.AddQuadSet(quads)
fmt.Fprintf(w, "{\"result\": \"Successfully wrote %d quads.\"}", len(quads)) fmt.Fprintf(w, "{\"result\": \"Successfully wrote %d quads.\"}", len(quads))
return 200 return 200
} }
func (api *Api) ServeV1WriteNQuad(w http.ResponseWriter, r *http.Request, params httprouter.Params) int { func (api *API) ServeV1WriteNQuad(w http.ResponseWriter, r *http.Request, params httprouter.Params) int {
if api.config.ReadOnly { if api.config.ReadOnly {
return FormatJson400(w, "Database is read-only.") return jsonResponse(w, 400, "Database is read-only.")
} }
formFile, _, err := r.FormFile("NQuadFile") formFile, _, err := r.FormFile("NQuadFile")
if err != nil { if err != nil {
glog.Errorln(err) glog.Errorln(err)
return FormatJsonError(w, 500, "Couldn't read file: "+err.Error()) return jsonResponse(w, 500, "Couldn't read file: "+err.Error())
} }
defer formFile.Close() defer formFile.Close()
@ -108,17 +108,17 @@ func (api *Api) ServeV1WriteNQuad(w http.ResponseWriter, r *http.Request, params
return 200 return 200
} }
func (api *Api) ServeV1Delete(w http.ResponseWriter, r *http.Request, params httprouter.Params) int { func (api *API) ServeV1Delete(w http.ResponseWriter, r *http.Request, params httprouter.Params) int {
if api.config.ReadOnly { if api.config.ReadOnly {
return FormatJson400(w, "Database is read-only.") return jsonResponse(w, 400, "Database is read-only.")
} }
bodyBytes, err := ioutil.ReadAll(r.Body) bodyBytes, err := ioutil.ReadAll(r.Body)
if err != nil { if err != nil {
return FormatJson400(w, err) return jsonResponse(w, 400, err)
} }
quads, err := ParseJsonToQuadList(bodyBytes) quads, err := ParseJSONToQuadList(bodyBytes)
if err != nil { if err != nil {
return FormatJson400(w, err) return jsonResponse(w, 400, err)
} }
count := 0 count := 0
for _, q := range quads { for _, q := range quads {


@ -130,9 +130,8 @@ func (q Quad) NQuad() string {
if q.Label == "" { if q.Label == "" {
//TODO(barakmich): Proper escaping. //TODO(barakmich): Proper escaping.
return fmt.Sprintf("%s %s %s .", q.Subject, q.Predicate, q.Object) return fmt.Sprintf("%s %s %s .", q.Subject, q.Predicate, q.Object)
} else {
return fmt.Sprintf("%s %s %s %s .", q.Subject, q.Predicate, q.Object, q.Label)
} }
return fmt.Sprintf("%s %s %s %s .", q.Subject, q.Predicate, q.Object, q.Label)
} }
type Unmarshaler interface { type Unmarshaler interface {
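Several files here apply the "drop else after return" simplification (findAssetsPath, QuadDirection, toValueFunc, send, and NQuad above). A self-contained version of the NQuad case, with the Quad type trimmed to its four string fields:

```go
package main

import "fmt"

type Quad struct {
	Subject, Predicate, Object, Label string
}

// NQuad mirrors the simplified method: once the if block returns, no else is
// needed for the remaining case.
func (q Quad) NQuad() string {
	if q.Label == "" {
		return fmt.Sprintf("%s %s %s .", q.Subject, q.Predicate, q.Object)
	}
	return fmt.Sprintf("%s %s %s %s .", q.Subject, q.Predicate, q.Object, q.Label)
}

func main() {
	fmt.Println(Quad{Subject: "alice", Predicate: "follows", Object: "bob"}.NQuad())
}
```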


@ -140,7 +140,7 @@ func buildInOutIterator(obj *otto.Object, qs graph.QuadStore, base graph.Iterato
} }
func buildIteratorTreeHelper(obj *otto.Object, qs graph.QuadStore, base graph.Iterator) graph.Iterator { func buildIteratorTreeHelper(obj *otto.Object, qs graph.QuadStore, base graph.Iterator) graph.Iterator {
var it graph.Iterator = base it := base
// TODO: Better error handling // TODO: Better error handling
var subIt graph.Iterator var subIt graph.Iterator


@ -112,9 +112,8 @@ func (wk *worker) toValueFunc(env *otto.Otto, obj *otto.Object, withTags bool) f
if err != nil { if err != nil {
glog.Error(err) glog.Error(err)
return otto.NullValue() return otto.NullValue()
} else {
return val
} }
return val
} }
} }
@ -260,9 +259,8 @@ func (wk *worker) send(r *Result) bool {
wk.count++ wk.count++
if wk.limit >= 0 && wk.limit == wk.count { if wk.limit >= 0 && wk.limit == wk.count {
return false return false
} else {
return true
} }
return true
} }
return false return false
} }


@ -21,6 +21,7 @@ import (
"time" "time"
"github.com/robertkrimen/otto" "github.com/robertkrimen/otto"
// Provide underscore JS library.
_ "github.com/robertkrimen/otto/underscore" _ "github.com/robertkrimen/otto/underscore"
"github.com/google/cayley/graph" "github.com/google/cayley/graph"
@ -176,7 +177,7 @@ func (s *Session) ToText(result interface{}) string {
tags := data.actualResults tags := data.actualResults
tagKeys := make([]string, len(tags)) tagKeys := make([]string, len(tags))
i := 0 i := 0
for k, _ := range tags { for k := range tags {
tagKeys[i] = k tagKeys[i] = k
i++ i++
} }
@ -203,7 +204,7 @@ func (s *Session) ToText(result interface{}) string {
} }
// Web stuff // Web stuff
func (s *Session) BuildJson(result interface{}) { func (s *Session) BuildJSON(result interface{}) {
data := result.(*Result) data := result.(*Result)
if !data.metaresult { if !data.metaresult {
if data.val == nil { if data.val == nil {
@ -211,7 +212,7 @@ func (s *Session) BuildJson(result interface{}) {
tags := data.actualResults tags := data.actualResults
tagKeys := make([]string, len(tags)) tagKeys := make([]string, len(tags))
i := 0 i := 0
for k, _ := range tags { for k := range tags {
tagKeys[i] = k tagKeys[i] = k
i++ i++
} }
@ -232,8 +233,8 @@ func (s *Session) BuildJson(result interface{}) {
} }
} }
func (s *Session) GetJson() ([]interface{}, error) { func (s *Session) GetJSON() ([]interface{}, error) {
defer s.ClearJson() defer s.ClearJSON()
if s.err != nil { if s.err != nil {
return nil, s.err return nil, s.err
} }
@ -245,6 +246,6 @@ func (s *Session) GetJson() ([]interface{}, error) {
} }
} }
func (s *Session) ClearJson() { func (s *Session) ClearJSON() {
s.dataOutput = nil s.dataOutput = nil
} }
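The session hunks above rename the Json methods to JSON, following Go's initialism convention, and drop the unused blank identifier when ranging over the tag map. A small self-contained example of that key-collection pattern (generic names, not the session's actual fields):

    // Generic example of ranging over a map's keys without the blank value.
    package example

    import "sort"

    func sortedKeys(tags map[string]int) []string {
        keys := make([]string, 0, len(tags))
        for k := range tags { // no ", _" needed when the value is unused
            keys = append(keys, k)
        }
        sort.Strings(keys)
        return keys
    }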


@ -47,7 +47,7 @@ func (q *Query) BuildIteratorTree(query interface{}) {
var isOptional bool var isOptional bool
q.it, isOptional, q.err = q.buildIteratorTreeInternal(query, NewPath()) q.it, isOptional, q.err = q.buildIteratorTreeInternal(query, NewPath())
if isOptional { if isOptional {
q.err = errors.New("Optional iterator at the top level?") q.err = errors.New("optional iterator at the top level")
} }
} }
@ -84,7 +84,7 @@ func (q *Query) buildIteratorTreeInternal(query interface{}, path Path) (it grap
} else if len(t) == 1 { } else if len(t) == 1 {
it, optional, err = q.buildIteratorTreeInternal(t[0], path) it, optional, err = q.buildIteratorTreeInternal(t[0], path)
} else { } else {
err = errors.New(fmt.Sprintf("Multiple fields at location root%s", path.DisplayString())) err = fmt.Errorf("multiple fields at location root %s", path.DisplayString())
} }
case map[string]interface{}: case map[string]interface{}:
// for JSON objects // for JSON objects
@ -166,13 +166,13 @@ func (q *Query) buildIteratorTreeMapInternal(query map[string]interface{}, path
return it, nil return it, nil
} }
type ResultPathSlice []ResultPath type byRecordLength []ResultPath
func (p ResultPathSlice) Len() int { func (p byRecordLength) Len() int {
return len(p) return len(p)
} }
func (p ResultPathSlice) Less(i, j int) bool { func (p byRecordLength) Less(i, j int) bool {
iLen := len(strings.Split(string(p[i]), "\x30")) iLen := len(strings.Split(string(p[i]), "\x30"))
jLen := len(strings.Split(string(p[j]), "\x30")) jLen := len(strings.Split(string(p[j]), "\x30"))
if iLen < jLen { if iLen < jLen {
@ -186,6 +186,6 @@ func (p ResultPathSlice) Less(i, j int) bool {
return false return false
} }
func (p ResultPathSlice) Swap(i, j int) { func (p byRecordLength) Swap(i, j int) {
p[i], p[j] = p[j], p[i] p[i], p[j] = p[j], p[i]
} }


@ -34,13 +34,11 @@ func (q *Query) treeifyResult(tags map[string]graph.Value) map[ResultPath]string
resultPaths[k.ToResultPathFromMap(results)] = v resultPaths[k.ToResultPathFromMap(results)] = v
} }
var paths ResultPathSlice paths := make([]ResultPath, 0, len(resultPaths))
for path, _ := range resultPaths { for path := range resultPaths {
paths = append(paths, path) paths = append(paths, path)
} }
sort.Sort(paths) sort.Sort(byRecordLength(paths))
// Build Structure // Build Structure
for _, path := range paths { for _, path := range paths {


@ -172,9 +172,9 @@ func runQuery(g []quad.Quad, query string) interface{} {
c := make(chan interface{}, 5) c := make(chan interface{}, 5)
go s.ExecInput(query, c, -1) go s.ExecInput(query, c, -1)
for result := range c { for result := range c {
s.BuildJson(result) s.BuildJSON(result)
} }
result, _ := s.GetJson() result, _ := s.GetJSON()
return result return result
} }


@ -21,8 +21,10 @@ import (
"github.com/google/cayley/graph" "github.com/google/cayley/graph"
) )
type Path string type (
type ResultPath string Path string
ResultPath string
)
type Query struct { type Query struct {
ses *Session ses *Session


@ -38,29 +38,28 @@ func NewSession(qs graph.QuadStore) *Session {
return &m return &m
} }
func (m *Session) ToggleDebug() { func (s *Session) ToggleDebug() {
m.debug = !m.debug s.debug = !s.debug
} }
func (m *Session) GetQuery(input string, output_struct chan map[string]interface{}) { func (s *Session) GetQuery(input string, out chan map[string]interface{}) {
defer close(output_struct) defer close(out)
var mqlQuery interface{} var mqlQuery interface{}
err := json.Unmarshal([]byte(input), &mqlQuery) err := json.Unmarshal([]byte(input), &mqlQuery)
if err != nil { if err != nil {
return return
} }
m.currentQuery = NewQuery(m) s.currentQuery = NewQuery(s)
m.currentQuery.BuildIteratorTree(mqlQuery) s.currentQuery.BuildIteratorTree(mqlQuery)
output := make(map[string]interface{}) output := make(map[string]interface{})
iterator.OutputQueryShapeForIterator(m.currentQuery.it, m.qs, output) iterator.OutputQueryShapeForIterator(s.currentQuery.it, s.qs, output)
nodes := output["nodes"].([]iterator.Node) nodes := make([]iterator.Node, 0)
new_nodes := make([]iterator.Node, 0) for _, n := range output["nodes"].([]iterator.Node) {
for _, n := range nodes {
n.Tags = nil n.Tags = nil
new_nodes = append(new_nodes, n) nodes = append(nodes, n)
} }
output["nodes"] = new_nodes output["nodes"] = nodes
output_struct <- output out <- output
} }
func (s *Session) InputParses(input string) (query.ParseResult, error) { func (s *Session) InputParses(input string) (query.ParseResult, error) {
@ -109,7 +108,7 @@ func (s *Session) ToText(result interface{}) string {
r, _ := json.MarshalIndent(s.currentQuery.results, "", " ") r, _ := json.MarshalIndent(s.currentQuery.results, "", " ")
fmt.Println(string(r)) fmt.Println(string(r))
i := 0 i := 0
for k, _ := range tags { for k := range tags {
tagKeys[i] = string(k) tagKeys[i] = string(k)
i++ i++
} }
@ -123,20 +122,19 @@ func (s *Session) ToText(result interface{}) string {
return out return out
} }
func (s *Session) BuildJson(result interface{}) { func (s *Session) BuildJSON(result interface{}) {
s.currentQuery.treeifyResult(result.(map[string]graph.Value)) s.currentQuery.treeifyResult(result.(map[string]graph.Value))
} }
func (s *Session) GetJson() ([]interface{}, error) { func (s *Session) GetJSON() ([]interface{}, error) {
s.currentQuery.buildResults() s.currentQuery.buildResults()
if s.currentQuery.isError() { if s.currentQuery.isError() {
return nil, s.currentQuery.err return nil, s.currentQuery.err
} else {
return s.currentQuery.results, nil
} }
return s.currentQuery.results, nil
} }
func (s *Session) ClearJson() { func (s *Session) ClearJSON() {
// Since we create a new Query underneath every query, clearing isn't necessary. // Since we create a new Query underneath every query, clearing isn't necessary.
return return
} }


@ -32,14 +32,14 @@ type Session interface {
ToggleDebug() ToggleDebug()
} }
type HttpSession interface { type HTTP interface {
// Return whether the string is a valid expression. // Return whether the string is a valid expression.
InputParses(string) (ParseResult, error) InputParses(string) (ParseResult, error)
// Runs the query and returns individual results on the channel. // Runs the query and returns individual results on the channel.
ExecInput(string, chan interface{}, int) ExecInput(string, chan interface{}, int)
GetQuery(string, chan map[string]interface{}) GetQuery(string, chan map[string]interface{})
BuildJson(interface{}) BuildJSON(interface{})
GetJson() ([]interface{}, error) GetJSON() ([]interface{}, error)
ClearJson() ClearJSON()
ToggleDebug() ToggleDebug()
} }
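With the interface renamed from HttpSession to HTTP and its Json methods to JSON, every implementation has to expose the renamed methods. A compile-time assertion is a common way to catch a missed rename; the sketch below is illustrative only, and the gremlin import path is an assumption rather than something taken from the commit:

    // Illustrative compile-time check; import paths are assumed.
    package check

    import (
        "github.com/google/cayley/gremlin" // hypothetical path for the gremlin session
        "github.com/google/cayley/query"
    )

    // Fails to compile if *gremlin.Session no longer satisfies query.HTTP,
    // e.g. because a Json method was not renamed to JSON.
    var _ query.HTTP = (*gremlin.Session)(nil)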


@ -53,7 +53,7 @@ func (s *Session) InputParses(input string) (query.ParseResult, error) {
if (i - 10) > min { if (i - 10) > min {
min = i - 10 min = i - 10
} }
return query.ParseFail, errors.New(fmt.Sprintf("Too many close parens at char %d: %s", i, input[min:i])) return query.ParseFail, fmt.Errorf("too many close parentheses at char %d: %s", i, input[min:i])
} }
} }
} }
@ -63,7 +63,7 @@ func (s *Session) InputParses(input string) (query.ParseResult, error) {
if len(ParseString(input)) > 0 { if len(ParseString(input)) > 0 {
return query.Parsed, nil return query.Parsed, nil
} }
return query.ParseFail, errors.New("Invalid Syntax") return query.ParseFail, errors.New("invalid syntax")
} }
func (s *Session) ExecInput(input string, out chan interface{}, limit int) { func (s *Session) ExecInput(input string, out chan interface{}, limit int) {
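The parser hunks above replace errors.New(fmt.Sprintf(...)) with fmt.Errorf and switch to lowercase, unpunctuated error strings, as golint recommends. A generic before/after illustration (example code, not from the repository):

    // Generic illustration of the error-string lint fixes in this commit.
    package example

    import (
        "errors"
        "fmt"
    )

    // Before: golint flags errors.New(fmt.Sprintf(...)) and capitalized,
    // punctuated error strings.
    func parseErrBefore(i int) error {
        return errors.New(fmt.Sprintf("Too many close parens at char %d.", i))
    }

    // After: fmt.Errorf with a lowercase, unpunctuated message.
    func parseErrAfter(i int) error {
        return fmt.Errorf("too many close parentheses at char %d", i)
    }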