Gisle Aune
5 years ago
24 changed files with 1078 additions and 95 deletions
43   database/drivers/mysqldriver/projects.go
1    database/repositories/projectrepository.go
1    go.mod
6    graph/graph.go
58   graph/loaders/activityloader.go
203  graph/loaders/activityloader_gen.go
18   graph/loaders/context.go
54   graph/loaders/issueloader.go
203  graph/loaders/issueloader_gen.go
46   graph/loaders/logsbyissueloader.go
204  graph/loaders/logsbyissueloader_gen.go
56   graph/loaders/projectpermissionloader.go
23   graph/loaders/projectpermissionloader_gen.go
8    graph/loaders/userloader.go
14   graph/loaders/userloader_gen.go
4    graph/resolvers/issue.resolvers.go
30   graph/resolvers/issueitem.resolvers.go
46   graph/resolvers/issuetask.resolvers.go
5    graph/resolvers/log.resolvers.go
29   graph/resolvers/mutation.resolvers.go
16   graph/resolvers/project.resolvers.go
17   graph/resolvers/query.resolvers.go
66   models/log.go
22   services/auth.go
graph/loaders/activityloader.go
@@ -0,0 +1,58 @@
package loaders

import (
	"context"
	"git.aiterp.net/stufflog/server/database/repositories"
	"git.aiterp.net/stufflog/server/internal/slerrors"
	"git.aiterp.net/stufflog/server/models"
	"time"
)

// go run github.com/vektah/dataloaden ActivityLoader string \*git.aiterp.net/stufflog/server/models.Activity

var activityLoaderCtxKey = "ctx.stufflog.ActivityLoader"

func ActivityLoaderFromContext(ctx context.Context) *ActivityLoader {
	return ctx.Value(activityLoaderCtxKey).(*ActivityLoader)
}

func NewActivityLoader(ctx context.Context, activityRepo repositories.ActivityRepository) *ActivityLoader {
	return &ActivityLoader{
		fetch: func(keys []string) ([]*models.Activity, []error) {
			results := make([]*models.Activity, len(keys))
			errors := make([]error, len(keys))

			activities, err := activityRepo.List(ctx, models.ActivityFilter{ActivityIDs: keys})
			if err != nil {
				for i := range errors {
					errors[i] = err
				}

				return results, errors
			}

			for i, key := range keys {
				found := false
				for j, activity := range activities {
					if activity.ID == key {
						found = true
						results[i] = activity

						// Swap-remove the matched activity to shrink the
						// search space for the remaining keys.
						activities[j] = activities[len(activities)-1]
						activities = activities[:len(activities)-1]

						break
					}
				}

				if !found {
					errors[i] = slerrors.NotFound("Activity")
				}
			}

			return results, errors
		},
		wait:     time.Millisecond / 2,
		maxBatch: 16,
	}
}
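The typical consumer of this constructor is a gqlgen field resolver: instead of hitting ActivityRepository directly, it pulls the loader from the request context so sibling resolvers share one batched query. A minimal sketch, assuming a Log model with an ActivityID field (the resolver changes are in the files listed above but not shown in this section):

func (r *logResolver) Activity(ctx context.Context, obj *models.Log) (*models.Activity, error) {
	// All Activity lookups made within the same half-millisecond window
	// are collapsed into a single activityRepo.List call.
	return loaders.ActivityLoaderFromContext(ctx).Load(obj.ActivityID)
}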
graph/loaders/activityloader_gen.go
@@ -0,0 +1,203 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"

	"git.aiterp.net/stufflog/server/models"
)

// ActivityLoader batches and caches requests
type ActivityLoader struct {
	// this method provides the data for the loader
	fetch func(keys []string) ([]*models.Activity, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[string]*models.Activity

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *activityLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

type activityLoaderBatch struct {
	keys    []string
	data    []*models.Activity
	error   []error
	closing bool
	done    chan struct{}
}

// Load a Activity by key, batching and caching will be applied automatically
func (l *ActivityLoader) Load(key string) (*models.Activity, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for a Activity.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ActivityLoader) LoadThunk(key string) func() (*models.Activity, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*models.Activity, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &activityLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*models.Activity, error) {
		<-batch.done

		var data *models.Activity
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// its convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *ActivityLoader) LoadAll(keys []string) ([]*models.Activity, []error) {
	results := make([]func() (*models.Activity, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	activitys := make([]*models.Activity, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		activitys[i], errors[i] = thunk()
	}
	return activitys, errors
}

// LoadAllThunk returns a function that when called will block waiting for a Activitys.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ActivityLoader) LoadAllThunk(keys []string) func() ([]*models.Activity, []error) {
	results := make([]func() (*models.Activity, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]*models.Activity, []error) {
		activitys := make([]*models.Activity, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			activitys[i], errors[i] = thunk()
		}
		return activitys, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *ActivityLoader) Prime(key string, value *models.Activity) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *ActivityLoader) Clear(key string) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

func (l *ActivityLoader) unsafeSet(key string, value *models.Activity) {
	if l.cache == nil {
		l.cache = map[string]*models.Activity{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *activityLoaderBatch) keyIndex(l *ActivityLoader, key string) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

func (b *activityLoaderBatch) startTimer(l *ActivityLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

func (b *activityLoaderBatch) end(l *ActivityLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}
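Read together with the hand-written constructor, the generated flow is: the first LoadThunk on an empty batch starts the 0.5ms timer via startTimer, later keys join through keyIndex, and the 16th key (maxBatch) finalizes the batch early; end runs fetch once and closes the done channel, unblocking every waiting thunk. A minimal sketch of that collapsing behavior, with hypothetical key names:

// Three thunks created back-to-back share one batch, so the repository's
// List method is called once with all three keys.
func loadThree(ctx context.Context, l *loaders.ActivityLoader) {
	thunkA := l.LoadThunk("activity-1") // opens the batch, starts the wait timer
	thunkB := l.LoadThunk("activity-2") // joins the open batch
	thunkC := l.LoadThunk("activity-3") // joins the open batch

	a, _ := thunkA() // blocks until end() closes the done channel
	b, _ := thunkB() // batch already resolved; returns immediately
	c, _ := thunkC()
	_, _, _ = a, b, c
}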
graph/loaders/context.go
@@ -0,0 +1,18 @@
package loaders

import (
	"context"

	"git.aiterp.net/stufflog/server/database"
	"git.aiterp.net/stufflog/server/models"
)

func ContextWithLoaders(ctx context.Context, user *models.User, database database.Database) context.Context {
	ctx = context.WithValue(ctx, logsByIssueLoaderCtxKey, NewLogsByIssueLoader(ctx, database.Logs()))
	ctx = context.WithValue(ctx, userLoaderCtxKey, NewUserLoader(ctx, database.Users()))
	ctx = context.WithValue(ctx, projectPermissionCtxKey, NewProjectPermissionLoader(ctx, user, database.Projects()))
	ctx = context.WithValue(ctx, issueLoaderCtxKey, NewIssueLoader(ctx, database.Issues()))
	ctx = context.WithValue(ctx, activityLoaderCtxKey, NewActivityLoader(ctx, database.Activities()))

	return ctx
}
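Because every loader closes over the request context and keeps a per-instance cache, ContextWithLoaders is presumably called once per incoming request, so cached values never leak between users. A sketch of that wiring under assumed names (LoaderMiddleware and userFromRequest are illustrative, not part of this commit):

func LoaderMiddleware(db database.Database, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		user := userFromRequest(r) // assumed helper: returns *models.User, or nil for guests
		ctx := loaders.ContextWithLoaders(r.Context(), user, db)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}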
graph/loaders/issueloader.go
@@ -0,0 +1,54 @@
package loaders

import (
	"context"
	"git.aiterp.net/stufflog/server/database/repositories"
	"git.aiterp.net/stufflog/server/internal/slerrors"
	"git.aiterp.net/stufflog/server/models"
	"time"
)

// go run github.com/vektah/dataloaden IssueLoader string \*git.aiterp.net/stufflog/server/models.Issue

var issueLoaderCtxKey = "ctx.stufflog.IssueLoader"

func IssueLoaderFromContext(ctx context.Context) *IssueLoader {
	return ctx.Value(issueLoaderCtxKey).(*IssueLoader)
}

func NewIssueLoader(ctx context.Context, issueRepo repositories.IssueRepository) *IssueLoader {
	return &IssueLoader{
		fetch: func(keys []string) ([]*models.Issue, []error) {
			results := make([]*models.Issue, len(keys))
			errors := make([]error, len(keys))

			issues, err := issueRepo.List(ctx, models.IssueFilter{IssueIDs: keys})
			if err != nil {
				for i := range errors {
					errors[i] = err
				}

				return results, errors
			}

			for i, key := range keys {
				found := false
				for _, issue := range issues {
					if issue.ID == key {
						found = true
						results[i] = issue
						break
					}
				}

				if !found {
					errors[i] = slerrors.NotFound("Issue")
				}
			}

			return results, errors
		},
		wait:     time.Millisecond / 2,
		maxBatch: 16,
	}
}
graph/loaders/issueloader_gen.go
@@ -0,0 +1,203 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"

	"git.aiterp.net/stufflog/server/models"
)

// IssueLoader batches and caches requests
type IssueLoader struct {
	// this method provides the data for the loader
	fetch func(keys []string) ([]*models.Issue, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[string]*models.Issue

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *issueLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

type issueLoaderBatch struct {
	keys    []string
	data    []*models.Issue
	error   []error
	closing bool
	done    chan struct{}
}

// Load a Issue by key, batching and caching will be applied automatically
func (l *IssueLoader) Load(key string) (*models.Issue, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for a Issue.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *IssueLoader) LoadThunk(key string) func() (*models.Issue, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*models.Issue, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &issueLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*models.Issue, error) {
		<-batch.done

		var data *models.Issue
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// its convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *IssueLoader) LoadAll(keys []string) ([]*models.Issue, []error) {
	results := make([]func() (*models.Issue, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	issues := make([]*models.Issue, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		issues[i], errors[i] = thunk()
	}
	return issues, errors
}

// LoadAllThunk returns a function that when called will block waiting for a Issues.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *IssueLoader) LoadAllThunk(keys []string) func() ([]*models.Issue, []error) {
	results := make([]func() (*models.Issue, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]*models.Issue, []error) {
		issues := make([]*models.Issue, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			issues[i], errors[i] = thunk()
		}
		return issues, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *IssueLoader) Prime(key string, value *models.Issue) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *IssueLoader) Clear(key string) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

func (l *IssueLoader) unsafeSet(key string, value *models.Issue) {
	if l.cache == nil {
		l.cache = map[string]*models.Issue{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *issueLoaderBatch) keyIndex(l *IssueLoader, key string) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

func (b *issueLoaderBatch) startTimer(l *IssueLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

func (b *issueLoaderBatch) end(l *IssueLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}
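The generated Prime and Clear methods are how mutations can keep the per-request cache honest; Prime deliberately stores a copy so a reused loop variable cannot alias every cache entry. A hedged example of refreshing the cache after an update (the mutation resolvers changed by this commit are not shown in this section):

// After updating an issue, refresh the request-local cache so later
// fields resolved in the same request see the new value.
func refreshIssueCache(ctx context.Context, issue *models.Issue) {
	loader := loaders.IssueLoaderFromContext(ctx)
	loader.Clear(issue.ID)        // drop any stale cached copy
	loader.Prime(issue.ID, issue) // returns true because the key was just cleared
}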
graph/loaders/logsbyissueloader.go
@@ -0,0 +1,46 @@
package loaders

import (
	"context"
	"git.aiterp.net/stufflog/server/database/repositories"
	"git.aiterp.net/stufflog/server/models"
	"time"
)

// go run github.com/vektah/dataloaden LogsByIssueLoader string []\*git.aiterp.net/stufflog/server/models.Log

var logsByIssueLoaderCtxKey = "ctx.stufflog.IssuesByLogLoader"

func LogsByIssueLoaderFromContext(ctx context.Context) *LogsByIssueLoader {
	return ctx.Value(logsByIssueLoaderCtxKey).(*LogsByIssueLoader)
}

func NewLogsByIssueLoader(ctx context.Context, logsRepo repositories.LogRepository) *LogsByIssueLoader {
	return &LogsByIssueLoader{
		fetch: func(keys []string) ([][]*models.Log, []error) {
			results := make([][]*models.Log, len(keys))
			errors := make([]error, len(keys))

			logs, err := logsRepo.List(ctx, models.LogFilter{IssueIDs: keys})
			if err != nil {
				for i := range errors {
					errors[i] = err
				}

				return results, errors
			}

			for i, key := range keys {
				for _, log := range logs {
					if log.MatchesIssue(key) {
						results[i] = append(results[i], log)
					}
				}
			}

			return results, nil
		},
		wait:     time.Millisecond / 2,
		maxBatch: 16,
	}
}
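Unlike the other loaders, this one is one-to-many: each issue ID maps to a slice of logs, and a log that matches several issues (via MatchesIssue) is appended under each of them; a key with no logs simply yields an empty slice rather than a NotFound error. A resolver would likely consume it like this (resolver and field names are illustrative):

func (r *issueResolver) Logs(ctx context.Context, obj *models.Issue) ([]*models.Log, error) {
	// One List query covers every issue whose logs are requested
	// within the same batch window.
	return loaders.LogsByIssueLoaderFromContext(ctx).Load(obj.ID)
}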
graph/loaders/logsbyissueloader_gen.go
@@ -0,0 +1,204 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"

	"git.aiterp.net/stufflog/server/models"
)

// LogsByIssueLoader batches and caches requests
type LogsByIssueLoader struct {
	// this method provides the data for the loader
	fetch func(keys []string) ([][]*models.Log, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[string][]*models.Log

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *logsByIssueLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

type logsByIssueLoaderBatch struct {
	keys    []string
	data    [][]*models.Log
	error   []error
	closing bool
	done    chan struct{}
}

// Load a Log by key, batching and caching will be applied automatically
func (l *LogsByIssueLoader) Load(key string) ([]*models.Log, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for a Log.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *LogsByIssueLoader) LoadThunk(key string) func() ([]*models.Log, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() ([]*models.Log, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &logsByIssueLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() ([]*models.Log, error) {
		<-batch.done

		var data []*models.Log
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// its convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *LogsByIssueLoader) LoadAll(keys []string) ([][]*models.Log, []error) {
	results := make([]func() ([]*models.Log, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	logs := make([][]*models.Log, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		logs[i], errors[i] = thunk()
	}
	return logs, errors
}

// LoadAllThunk returns a function that when called will block waiting for a Logs.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *LogsByIssueLoader) LoadAllThunk(keys []string) func() ([][]*models.Log, []error) {
	results := make([]func() ([]*models.Log, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([][]*models.Log, []error) {
		logs := make([][]*models.Log, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			logs[i], errors[i] = thunk()
		}
		return logs, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *LogsByIssueLoader) Prime(key string, value []*models.Log) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := make([]*models.Log, len(value))
		copy(cpy, value)
		l.unsafeSet(key, cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *LogsByIssueLoader) Clear(key string) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

func (l *LogsByIssueLoader) unsafeSet(key string, value []*models.Log) {
	if l.cache == nil {
		l.cache = map[string][]*models.Log{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *logsByIssueLoaderBatch) keyIndex(l *LogsByIssueLoader, key string) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

func (b *logsByIssueLoaderBatch) startTimer(l *LogsByIssueLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

func (b *logsByIssueLoaderBatch) end(l *LogsByIssueLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}
graph/loaders/projectpermissionloader.go
@@ -0,0 +1,56 @@
package loaders

import (
	"context"
	"git.aiterp.net/stufflog/server/database/repositories"
	"git.aiterp.net/stufflog/server/models"
	"time"
)

var projectPermissionCtxKey = "ctx.stufflog.ProjectPermissionLoader"

func ProjectPermissionLoaderFromContext(ctx context.Context) *ProjectPermissionLoader {
	return ctx.Value(projectPermissionCtxKey).(*ProjectPermissionLoader)
}

func NewProjectPermissionLoader(ctx context.Context, user *models.User, projectRepo repositories.ProjectRepository) *ProjectPermissionLoader {
	return &ProjectPermissionLoader{
		wait:     time.Millisecond / 2,
		maxBatch: 16,
		fetch: func(keys []string) ([]*models.ProjectPermission, []error) {
			errors := make([]error, len(keys))
			results := make([]*models.ProjectPermission, len(keys))

			if user == nil {
				for i, key := range keys {
					results[i] = &models.ProjectPermission{
						ProjectID: key,
						UserID:    "",
						Level:     models.ProjectPermissionLevelNoAccess,
					}
				}
				return results, errors
			}

			indices := make(map[string]int)
			for i, key := range keys {
				indices[key] = i
			}

			permissions, err := projectRepo.GetUserPermissions(ctx, *user, keys)
			if err != nil {
				for i := range errors {
					errors[i] = err
				}

				return results, errors
			}

			for _, permission := range permissions {
				results[indices[permission.ProjectID]] = permission
			}

			return results, errors
		},
	}
}
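A guard in a resolver can then load the caller's permission level for a project without issuing one query per project; note that the nil-user branch above resolves unauthenticated requests to NoAccess rather than erroring. A sketch under assumed names (the real check lives in the resolver files, which this section does not show, and likely uses the slerrors package rather than a plain error):

func requireProjectAccess(ctx context.Context, projectID string) (*models.ProjectPermission, error) {
	perm, err := loaders.ProjectPermissionLoaderFromContext(ctx).Load(projectID)
	if err != nil {
		return nil, err
	}
	// perm can be nil if the repository returned no row for this key.
	if perm == nil || perm.Level == models.ProjectPermissionLevelNoAccess {
		return nil, errors.New("permission denied")
	}
	return perm, nil
}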