Combine AND(Fixed, SQL) into a single IN clause, reducing roundtrips. Add a test and benchmark.
parent 99283d5412
commit 277fc748e3
4 changed files with 74 additions and 11 deletions
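At a glance, the optimizer previously had to touch the database once per fixed value whenever an AND combined a Fixed iterator with a SQL iterator; after this change the fixed values ride along in the SQL iterator's WHERE clause as one parameterized IN (...) list. A minimal sketch of that clause-building idea, using a hypothetical inClause helper and illustrative column names that are not taken from the diff:

    package main

    import (
        "fmt"
        "strings"
    )

    // inClause builds one "?" placeholder per value so a whole fixed set can be
    // matched by a single query instead of one query per value.
    func inClause(column string, vals []string) (string, []string) {
        placeholders := make([]string, len(vals))
        for i := range placeholders {
            placeholders[i] = "?"
        }
        return fmt.Sprintf("%s IN (%s)", column, strings.Join(placeholders, ", ")), vals
    }

    func main() {
        clause, args := inClause("nodes.subject_hash", []string{"h1", "h2", "h3"})
        fmt.Println(clause) // nodes.subject_hash IN (?, ?, ?)
        fmt.Println(args)   // [h1 h2 h3]
    }

The hunks below apply the same idea inside the iterator optimizer and the WHERE-clause builders.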
@@ -156,9 +156,12 @@ func (qs *QuadStore) optimizeLinksTo(it *iterator.LinksTo) (graph.Iterator, bool) {
 	switch primary.Type() {
 	case graph.Fixed:
 		size, _ := primary.Size()
+		if size == 0 {
+			return iterator.NewNull(), true
+		}
 		if size == 1 {
 			if !graph.Next(primary) {
-				panic("unexpected size during optimize")
+				panic("sql: unexpected size during optimize")
 			}
 			val := primary.Result()
 			newIt := qs.QuadIterator(it.Direction(), val)
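The new size == 0 branch above recognizes an empty Fixed iterator under a LinksTo as provably empty, so the whole branch collapses to a Null iterator instead of reaching the database. A hypothetical illustration, assuming the public QuadStore and iterator API names of this era of the codebase (qs.FixedIterator, iterator.NewLinksTo, qs.OptimizeIterator):

    // empty holds no values, so no quad can link to it in any direction.
    empty := qs.FixedIterator()
    lt := iterator.NewLinksTo(qs, empty, quad.Subject)
    if newIt, ok := qs.OptimizeIterator(lt); ok {
        _ = newIt // with this change, newIt is the Null iterator from iterator.NewNull()
    }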
@@ -238,26 +241,57 @@ func (qs *QuadStore) optimizeAnd(it *iterator.And) (graph.Iterator, bool) {
 	changed := false
 	var err error
 
-	for _, it := range subs {
-		if it.Type() == sqlType {
+	// Combine SQL iterators
+	glog.V(4).Infof("Combining SQL %#v", subs)
+	for _, subit := range subs {
+		if subit.Type() == sqlType {
 			if newit == nil {
-				newit = it.(*SQLIterator)
+				newit = subit.(*SQLIterator)
 			} else {
 				changed = true
-				newit, err = intersect(newit.sql, it.(*SQLIterator).sql, qs)
+				newit, err = intersect(newit.sql, subit.(*SQLIterator).sql, qs)
 				if err != nil {
 					glog.Error(err)
 					return it, false
 				}
 			}
 		} else {
-			unusedIts = append(unusedIts, it)
+			unusedIts = append(unusedIts, subit)
+		}
+	}
+
+	if newit == nil {
+		return it, false
+	}
+
+	// Combine fixed iterators into the SQL iterators.
+	glog.V(4).Infof("Combining fixed %#v", unusedIts)
+	var nodeit *SQLNodeIterator
+	if n, ok := newit.sql.(*SQLNodeIterator); ok {
+		nodeit = n
+	} else if n, ok := newit.sql.(*SQLNodeIntersection); ok {
+		nodeit = n.nodeIts[0].(*SQLNodeIterator)
+	}
+	if nodeit != nil {
+		passOneIts := unusedIts
+		unusedIts = nil
+		for _, subit := range passOneIts {
+			if subit.Type() != graph.Fixed {
+				unusedIts = append(unusedIts, subit)
+				continue
+			}
+			changed = true
+			for graph.Next(subit) {
+				nodeit.fixedSet = append(nodeit.fixedSet, qs.NameOf(subit.Result()))
+			}
 		}
 	}
 
 	if !changed {
 		return it, false
 	}
 
+	// Clean up if we're done.
 	if len(unusedIts) == 0 {
 		newit.Tagger().CopyFrom(it)
 		return newit, true
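Two passes happen above: the first folds every SQL sub-iterator into one via intersect, and the second drains any remaining Fixed sub-iterators into that iterator's fixedSet. When the merged iterator is a SQLNodeIntersection, the values are attached to its first node iterator, which is enough because constraining one member constrains the whole intersection. A hypothetical illustration of the drain step, using API calls that appear in the diff but with made-up values:

    // A Fixed iterator holding two nodes...
    fixed := qs.FixedIterator()
    fixed.Add(qs.ValueOf("alice"))
    fixed.Add(qs.ValueOf("bob"))

    // ...is drained into plain node names, exactly as the loop above does.
    var fixedSet []string
    for graph.Next(fixed) {
        fixedSet = append(fixedSet, qs.NameOf(fixed.Result()))
    }
    // fixedSet == []string{"alice", "bob"}; buildWhere later turns this into
    // "<table>.<dir>_hash IN (?, ?)" with the two hashes as bind values.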
@@ -224,9 +224,8 @@ func (l *SQLLinkIterator) buildWhere() (string, []string) {
 		q = append(q, fmt.Sprintf("%s.%s_hash = ?", l.tableName, c.dir))
 		vals = append(vals, hashOf(c.vals[0]))
 	} else if len(c.vals) > 1 {
-		subq := fmt.Sprintf("%s.%s_hash IN ", l.tableName, c.dir)
 		valslots := strings.Join(strings.Split(strings.Repeat("?", len(c.vals)), ""), ", ")
-		subq += fmt.Sprintf("(%s)", valslots)
+		subq := fmt.Sprintf("%s.%s_hash IN (%s)", l.tableName, c.dir, valslots)
 		q = append(q, subq)
 		for _, v := range c.vals {
 			vals = append(vals, hashOf(v))
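The valslots expression kept above is a compact way to build the placeholder list: strings.Repeat("?", n) gives "??...?", strings.Split on the empty separator breaks it into single-character strings, and strings.Join stitches them back with ", ". A quick standalone check of that behavior, assuming three values:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        n := 3 // pretend len(c.vals) == 3
        valslots := strings.Join(strings.Split(strings.Repeat("?", n), ""), ", ")
        fmt.Println(valslots) // prints: ?, ?, ?
    }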
@@ -46,9 +46,10 @@ func newNodeTableName() string {
 type SQLNodeIterator struct {
 	tableName string
 
 	linkIt sqlItDir
 	size int64
 	tagger graph.Tagger
+	fixedSet []string
 
 	result string
 }
@@ -61,8 +62,10 @@ func (n *SQLNodeIterator) sqlClone() sqlIterator {
 			dir: n.linkIt.dir,
 			it: n.linkIt.it.sqlClone(),
 		},
+		fixedSet: make([]string, len(n.fixedSet)),
 	}
 	m.tagger.CopyFromTagger(n.Tagger())
+	copy(m.fixedSet, n.fixedSet)
 	return m
 }
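The clone above allocates a fresh slice and copies into it rather than sharing n.fixedSet. That is ordinary Go slice hygiene rather than anything specific to this patch: two iterators sharing one backing array would see each other's writes. A small generic demonstration:

    package main

    import "fmt"

    func main() {
        orig := []string{"alice", "bob"}

        shared := orig                      // aliases the same backing array
        cloned := make([]string, len(orig)) // independent storage, as sqlClone does
        copy(cloned, orig)

        shared[0] = "mallory" // also visible through orig
        cloned[0] = "carol"   // orig is unaffected

        fmt.Println(orig)   // [mallory bob]
        fmt.Println(cloned) // [carol bob]
    }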
@@ -157,6 +160,15 @@ func (n *SQLNodeIterator) buildWhere() (string, []string) {
 		q = append(q, s)
 		vals = append(vals, v...)
 	}
+	if len(n.fixedSet) != 0 {
+		topData := n.tableID()
+		var valueChain []string
+		for _, v := range n.fixedSet {
+			vals = append(vals, hashOf(v))
+			valueChain = append(valueChain, "?")
+		}
+		q = append(q, fmt.Sprintf("%s.%s_hash IN (%s)", topData.table, topData.dir, strings.Join(valueChain, ", ")))
+	}
 	query := strings.Join(q, " AND ")
 	return query, vals
 }
@@ -193,6 +205,7 @@ func (n *SQLNodeIterator) buildSQL(next bool, val graph.Value) (string, []string) {
 		constraint += fmt.Sprintf("%s.%s_hash = ?", topData.table, topData.dir)
 		values = append(values, hashOf(v))
 	}
 
 	query += constraint
 	query += ";"
@@ -394,6 +394,19 @@ var benchmarkQueries = []struct {
 			map[string]string{"costar1_actor": "Sandra Bullock", "costar1_movie": "In Love and War", "costar2_actor": "Keanu Reeves", "costar2_movie": "The Lake House", "id": "Sandra Bullock"},
 		},
 	},
+	{
+		message: "Save a number of predicates around a set of nodes",
+		query: `
+		g.V("_:9037", "_:49278", "_:44112", "_:44709", "_:43382").Save("/film/performance/character", "char").Save("/film/performance/actor", "act").SaveR("/film/film/starring", "film").All()
+		`,
+		expect: []interface{}{
+			map[string]string{"act": "/en/humphrey_bogart", "char": "Rick Blaine", "film": "/en/casablanca_1942", "id": "_:9037"},
+			map[string]string{"act": "/en/humphrey_bogart", "char": "Sam Spade", "film": "/en/the_maltese_falcon_1941", "id": "_:49278"},
+			map[string]string{"act": "/en/humphrey_bogart", "char": "Philip Marlowe", "film": "/en/the_big_sleep_1946", "id": "_:44112"},
+			map[string]string{"act": "/en/humphrey_bogart", "char": "Captain Queeg", "film": "/en/the_caine_mutiny_1954", "id": "_:44709"},
+			map[string]string{"act": "/en/humphrey_bogart", "char": "Charlie Allnut", "film": "/en/the_african_queen", "id": "_:43382"},
+		},
+	},
 }
 
 const common = `
@@ -666,6 +679,10 @@ func BenchmarkKeanuBullockOther(b *testing.B) {
 	runBench(10, b)
 }
 
+func BenchmarkSaveBogartPerformances(b *testing.B) {
+	runBench(11, b)
+}
+
 // reader is a test helper to filter non-io.Reader methods from the contained io.Reader.
 type reader struct {
 	r io.Reader
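To exercise just the new case locally, something along the lines of go test -run NONE -bench SaveBogartPerformances in the SQL backend's test package should work; the exact package path is not visible in this diff, and runBench(11, b) presumably selects the benchmarkQueries entry added above.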