
add logging and a lot of loaders.

master
Gisle Aune, 4 years ago
commit bd024a01a4
  1. database/drivers/mysqldriver/projects.go (43)
  2. database/repositories/projectrepository.go (1)
  3. go.mod (1)
  4. graph/graph.go (6)
  5. graph/loaders/activityloader.go (58)
  6. graph/loaders/activityloader_gen.go (203)
  7. graph/loaders/context.go (18)
  8. graph/loaders/issueloader.go (54)
  9. graph/loaders/issueloader_gen.go (203)
  10. graph/loaders/logsbyissueloader.go (46)
  11. graph/loaders/logsbyissueloader_gen.go (204)
  12. graph/loaders/projectpermissionloader.go (56)
  13. graph/loaders/projectpermissionloader_gen.go (23)
  14. graph/loaders/userloader.go (8)
  15. graph/loaders/userloader_gen.go (14)
  16. graph/resolvers/issue.resolvers.go (4)
  17. graph/resolvers/issueitem.resolvers.go (30)
  18. graph/resolvers/issuetask.resolvers.go (46)
  19. graph/resolvers/log.resolvers.go (5)
  20. graph/resolvers/mutation.resolvers.go (29)
  21. graph/resolvers/project.resolvers.go (16)
  22. graph/resolvers/query.resolvers.go (17)
  23. models/log.go (66)
  24. services/auth.go (22)

43
database/drivers/mysqldriver/projects.go

@@ -107,6 +107,7 @@ func (r *projectRepository) GetPermission(ctx context.Context, project models.Pr
return &models.ProjectPermission{
ProjectID: project.ID,
UserID: user.ID,
Level: models.ProjectPermissionLevelNoAccess,
}, nil
}
@@ -116,6 +117,40 @@ func (r *projectRepository) GetPermission(ctx context.Context, project models.Pr
return &permission, nil
}
func (r *projectRepository) GetUserPermissions(ctx context.Context, user models.User, projectIDs []string) ([]*models.ProjectPermission, error) {
query, args, err := sq.Select("*").From("project_permission").Where(sq.Eq{
"user_id": user.ID,
"project_id": projectIDs,
}).ToSql()
if err != nil {
return nil, err
}
permissions := make([]*models.ProjectPermission, 0, 8)
err = r.db.SelectContext(ctx, &permissions, query, args...)
if err != nil && err != sql.ErrNoRows {
return nil, err
}
filled := make(map[string]bool, len(permissions))
for _, permission := range permissions {
filled[permission.ProjectID] = true
}
for _, projectID := range projectIDs {
if filled[projectID] {
continue
}
permissions = append(permissions, &models.ProjectPermission{
ProjectID: projectID,
UserID: user.ID,
Level: models.ProjectPermissionLevelNoAccess,
})
}
return permissions, nil
}
func (r *projectRepository) GetIssuePermission(ctx context.Context, issue models.Issue, user models.User) (*models.ProjectPermission, error) {
return r.GetPermission(ctx, models.Project{ID: issue.ProjectID}, user)
}
@@ -140,10 +175,10 @@ func (r *projectRepository) Delete(ctx context.Context, project models.Project)
return err
}
//_, err = r.db.ExecContext(ctx, "DELETE FROM project_status WHERE project_id=?", project.ID)
//if err != nil {
// return err
//}
_, err = r.db.ExecContext(ctx, "DELETE FROM project_status WHERE project_id=?", project.ID)
if err != nil {
return err
}
return nil
}
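
A note on the new GetUserPermissions: assuming the sq alias is github.com/Masterminds/squirrel (the import is not shown in this hunk), an sq.Eq whose value is a slice expands into an IN clause, so all requested project IDs are fetched in one query, and the loop afterwards backfills a no-access permission for every project that returned no row. A minimal sketch of that builder behaviour:

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel" // assumed to be the library behind the sq alias
)

func main() {
	// A slice value inside sq.Eq becomes an IN (...) clause, which is what
	// GetUserPermissions relies on to fetch every project in one round trip.
	query, args, err := sq.Select("*").
		From("project_permission").
		Where(sq.Eq{
			"user_id":    "user-1",
			"project_id": []string{"proj-1", "proj-2"},
		}).
		ToSql()
	if err != nil {
		panic(err)
	}

	fmt.Println(query) // ... WHERE project_id IN (?,?) AND user_id = ? (clause order may vary)
	fmt.Println(args)  // [proj-1 proj-2 user-1]
}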

1
database/repositories/projectrepository.go

@@ -12,6 +12,7 @@ type ProjectRepository interface {
Save(ctx context.Context, project models.Project) error
ListPermissions(ctx context.Context, project models.Project) ([]*models.ProjectPermission, error)
GetPermission(ctx context.Context, project models.Project, user models.User) (*models.ProjectPermission, error)
GetUserPermissions(ctx context.Context, user models.User, projectIDs []string) ([]*models.ProjectPermission, error)
GetIssuePermission(ctx context.Context, issue models.Issue, user models.User) (*models.ProjectPermission, error)
SetPermission(ctx context.Context, permission models.ProjectPermission) error
Delete(ctx context.Context, project models.Project) error

1
go.mod

@@ -18,5 +18,6 @@ require (
github.com/urfave/cli/v2 v2.2.0
github.com/vektah/gqlparser/v2 v2.0.1
golang.org/x/crypto v0.0.0-20200403201458-baeed622b8d8
golang.org/x/tools v0.0.0-20200114235610-7ae403b6b589
gopkg.in/ini.v1 v1.56.0 // indirect
)

6
graph/graph.go

@@ -31,7 +31,11 @@ func Gin(bundle services.Bundle, database database.Database) gin.HandlerFunc {
bundle.Auth.CheckGinSession(c)
c.Request = c.Request.WithContext(
loaders.NewUserLoaderContext(c.Request.Context(), database.Users()),
loaders.ContextWithLoaders(
c.Request.Context(),
bundle.Auth.UserFromContext(c.Request.Context()),
database,
),
)
gqlHandler.ServeHTTP(c.Writer, c.Request)

58
graph/loaders/activityloader.go

@@ -0,0 +1,58 @@
package loaders
import (
"context"
"git.aiterp.net/stufflog/server/database/repositories"
"git.aiterp.net/stufflog/server/internal/slerrors"
"git.aiterp.net/stufflog/server/models"
"time"
)
// go run github.com/vektah/dataloaden ActivityLoader string \*git.aiterp.net/stufflog/server/models.Activity
var activityLoaderCtxKey = "ctx.stufflog.ActivityLoader"
func ActivityLoaderFromContext(ctx context.Context) *ActivityLoader {
return ctx.Value(activityLoaderCtxKey).(*ActivityLoader)
}
func NewActivityLoader(ctx context.Context, activityRepo repositories.ActivityRepository) *ActivityLoader {
return &ActivityLoader{
fetch: func(keys []string) ([]*models.Activity, []error) {
results := make([]*models.Activity, len(keys))
errors := make([]error, len(keys))
activities, err := activityRepo.List(ctx, models.ActivityFilter{ActivityIDs: keys})
if err != nil {
for i := range errors {
errors[i] = err
}
return results, errors
}
for i, key := range keys {
found := false
for j, activity := range activities {
if activity.ID == key {
found = true
results[i] = activity
activities[j] = activities[len(activities)-1]
activities = activities[:len(activities)-1]
break
}
}
if !found {
errors[i] = slerrors.NotFound("Activity")
}
}
return results, errors
},
wait: time.Millisecond / 2,
maxBatch: 16,
}
}

203
graph/loaders/activityloader_gen.go

@@ -0,0 +1,203 @@
// Code generated by github.com/vektah/dataloaden, DO EDIT.
package loaders
import (
"sync"
"time"
"git.aiterp.net/stufflog/server/models"
)
// ActivityLoader batches and caches requests
type ActivityLoader struct {
// this method provides the data for the loader
fetch func(keys []string) ([]*models.Activity, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[string]*models.Activity
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *activityLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type activityLoaderBatch struct {
keys []string
data []*models.Activity
error []error
closing bool
done chan struct{}
}
// Load a Activity by key, batching and caching will be applied automatically
func (l *ActivityLoader) Load(key string) (*models.Activity, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Activity.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ActivityLoader) LoadThunk(key string) func() (*models.Activity, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*models.Activity, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &activityLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*models.Activity, error) {
<-batch.done
var data *models.Activity
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *ActivityLoader) LoadAll(keys []string) ([]*models.Activity, []error) {
results := make([]func() (*models.Activity, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
activitys := make([]*models.Activity, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
activitys[i], errors[i] = thunk()
}
return activitys, errors
}
// LoadAllThunk returns a function that when called will block waiting for a Activitys.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ActivityLoader) LoadAllThunk(keys []string) func() ([]*models.Activity, []error) {
results := make([]func() (*models.Activity, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*models.Activity, []error) {
activitys := make([]*models.Activity, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
activitys[i], errors[i] = thunk()
}
return activitys, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *ActivityLoader) Prime(key string, value *models.Activity) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *ActivityLoader) Clear(key string) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *ActivityLoader) unsafeSet(key string, value *models.Activity) {
if l.cache == nil {
l.cache = map[string]*models.Activity{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *activityLoaderBatch) keyIndex(l *ActivityLoader, key string) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *activityLoaderBatch) startTimer(l *ActivityLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *activityLoaderBatch) end(l *ActivityLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}

18
graph/loaders/context.go

@@ -0,0 +1,18 @@
package loaders
import (
"context"
"git.aiterp.net/stufflog/server/database"
"git.aiterp.net/stufflog/server/models"
)
func ContextWithLoaders(ctx context.Context, user *models.User, database database.Database) context.Context {
ctx = context.WithValue(ctx, logsByIssueLoaderCtxKey, NewLogsByIssueLoader(ctx, database.Logs()))
ctx = context.WithValue(ctx, userLoaderCtxKey, NewUserLoader(ctx, database.Users()))
ctx = context.WithValue(ctx, projectPermissionCtxKey, NewProjectPermissionLoader(ctx, user, database.Projects()))
ctx = context.WithValue(ctx, issueLoaderCtxKey, NewIssueLoader(ctx, database.Issues()))
ctx = context.WithValue(ctx, activityLoaderCtxKey, NewActivityLoader(ctx, database.Activities()))
return ctx
}
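
For orientation: ContextWithLoaders above is what graph.go now installs on every request, so any code holding the request context can reach the loaders without touching the repositories directly. A minimal sketch using only functions added in this commit (db and user are supplied by the caller):

package example

import (
	"context"

	"git.aiterp.net/stufflog/server/database"
	"git.aiterp.net/stufflog/server/graph/loaders"
	"git.aiterp.net/stufflog/server/models"
)

// loadOneIssue builds a loader-carrying context, then resolves an issue
// through the batching IssueLoader instead of calling the repository directly.
func loadOneIssue(db database.Database, user *models.User, issueID string) (*models.Issue, error) {
	ctx := loaders.ContextWithLoaders(context.Background(), user, db)
	return loaders.IssueLoaderFromContext(ctx).Load(issueID)
}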

54
graph/loaders/issueloader.go

@@ -0,0 +1,54 @@
package loaders
import (
"context"
"git.aiterp.net/stufflog/server/database/repositories"
"git.aiterp.net/stufflog/server/internal/slerrors"
"git.aiterp.net/stufflog/server/models"
"time"
)
// go run github.com/vektah/dataloaden IssueLoader string \*git.aiterp.net/stufflog/server/models.Issue
var issueLoaderCtxKey = "ctx.stufflog.IssueLoader"
func IssueLoaderFromContext(ctx context.Context) *IssueLoader {
return ctx.Value(issueLoaderCtxKey).(*IssueLoader)
}
func NewIssueLoader(ctx context.Context, issueRepo repositories.IssueRepository) *IssueLoader {
return &IssueLoader{
fetch: func(keys []string) ([]*models.Issue, []error) {
results := make([]*models.Issue, len(keys))
errors := make([]error, len(keys))
issues, err := issueRepo.List(ctx, models.IssueFilter{IssueIDs: keys})
if err != nil {
for i := range errors {
errors[i] = err
}
return results, errors
}
for i, key := range keys {
found := false
for _, issue := range issues {
if issue.ID == key {
found = true
results[i] = issue
break
}
}
if !found {
errors[i] = slerrors.NotFound("Issue")
}
}
return results, errors
},
wait: time.Millisecond / 2,
maxBatch: 16,
}
}

203
graph/loaders/issueloader_gen.go

@@ -0,0 +1,203 @@
// Code generated by github.com/vektah/dataloaden, DO EDIT.
package loaders
import (
"sync"
"time"
"git.aiterp.net/stufflog/server/models"
)
// IssueLoader batches and caches requests
type IssueLoader struct {
// this method provides the data for the loader
fetch func(keys []string) ([]*models.Issue, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[string]*models.Issue
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *issueLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type issueLoaderBatch struct {
keys []string
data []*models.Issue
error []error
closing bool
done chan struct{}
}
// Load a Issue by key, batching and caching will be applied automatically
func (l *IssueLoader) Load(key string) (*models.Issue, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Issue.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *IssueLoader) LoadThunk(key string) func() (*models.Issue, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*models.Issue, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &issueLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*models.Issue, error) {
<-batch.done
var data *models.Issue
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *IssueLoader) LoadAll(keys []string) ([]*models.Issue, []error) {
results := make([]func() (*models.Issue, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
issues := make([]*models.Issue, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
issues[i], errors[i] = thunk()
}
return issues, errors
}
// LoadAllThunk returns a function that when called will block waiting for a Issues.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *IssueLoader) LoadAllThunk(keys []string) func() ([]*models.Issue, []error) {
results := make([]func() (*models.Issue, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*models.Issue, []error) {
issues := make([]*models.Issue, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
issues[i], errors[i] = thunk()
}
return issues, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *IssueLoader) Prime(key string, value *models.Issue) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *IssueLoader) Clear(key string) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *IssueLoader) unsafeSet(key string, value *models.Issue) {
if l.cache == nil {
l.cache = map[string]*models.Issue{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *issueLoaderBatch) keyIndex(l *IssueLoader, key string) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *issueLoaderBatch) startTimer(l *IssueLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *issueLoaderBatch) end(l *IssueLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}

46
graph/loaders/logsbyissueloader.go

@@ -0,0 +1,46 @@
package loaders
import (
"context"
"git.aiterp.net/stufflog/server/database/repositories"
"git.aiterp.net/stufflog/server/models"
"time"
)
// go run github.com/vektah/dataloaden LogsByIssueLoader string []\*git.aiterp.net/stufflog/server/models.Log
var logsByIssueLoaderCtxKey = "ctx.stufflog.IssuesByLogLoader"
func LogsByIssueLoaderFromContext(ctx context.Context) *LogsByIssueLoader {
return ctx.Value(logsByIssueLoaderCtxKey).(*LogsByIssueLoader)
}
func NewLogsByIssueLoader(ctx context.Context, logsRepo repositories.LogRepository) *LogsByIssueLoader {
return &LogsByIssueLoader{
fetch: func(keys []string) ([][]*models.Log, []error) {
results := make([][]*models.Log, len(keys))
errors := make([]error, len(keys))
logs, err := logsRepo.List(ctx, models.LogFilter{IssueIDs: keys})
if err != nil {
for i := range errors {
errors[i] = err
}
return results, errors
}
for i, key := range keys {
for _, log := range logs {
if log.MatchesIssue(key) {
results[i] = append(results[i], log)
}
}
}
return results, nil
},
wait: time.Millisecond / 2,
maxBatch: 16,
}
}

204
graph/loaders/logsbyissueloader_gen.go

@@ -0,0 +1,204 @@
// Code generated by github.com/vektah/dataloaden, DO EDIT.
package loaders
import (
"sync"
"time"
"git.aiterp.net/stufflog/server/models"
)
// LogsByIssueLoader batches and caches requests
type LogsByIssueLoader struct {
// this method provides the data for the loader
fetch func(keys []string) ([][]*models.Log, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[string][]*models.Log
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *logsByIssueLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type logsByIssueLoaderBatch struct {
keys []string
data [][]*models.Log
error []error
closing bool
done chan struct{}
}
// Load a Log by key, batching and caching will be applied automatically
func (l *LogsByIssueLoader) Load(key string) ([]*models.Log, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Log.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *LogsByIssueLoader) LoadThunk(key string) func() ([]*models.Log, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() ([]*models.Log, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &logsByIssueLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() ([]*models.Log, error) {
<-batch.done
var data []*models.Log
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *LogsByIssueLoader) LoadAll(keys []string) ([][]*models.Log, []error) {
results := make([]func() ([]*models.Log, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
logs := make([][]*models.Log, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
logs[i], errors[i] = thunk()
}
return logs, errors
}
// LoadAllThunk returns a function that when called will block waiting for a Logs.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *LogsByIssueLoader) LoadAllThunk(keys []string) func() ([][]*models.Log, []error) {
results := make([]func() ([]*models.Log, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([][]*models.Log, []error) {
logs := make([][]*models.Log, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
logs[i], errors[i] = thunk()
}
return logs, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *LogsByIssueLoader) Prime(key string, value []*models.Log) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := make([]*models.Log, len(value))
copy(cpy, value)
l.unsafeSet(key, cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *LogsByIssueLoader) Clear(key string) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *LogsByIssueLoader) unsafeSet(key string, value []*models.Log) {
if l.cache == nil {
l.cache = map[string][]*models.Log{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *logsByIssueLoaderBatch) keyIndex(l *LogsByIssueLoader, key string) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *logsByIssueLoaderBatch) startTimer(l *LogsByIssueLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *logsByIssueLoaderBatch) end(l *LogsByIssueLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}

56
graph/loaders/projectpermissionloader.go

@@ -0,0 +1,56 @@
package loaders
import (
"context"
"git.aiterp.net/stufflog/server/database/repositories"
"git.aiterp.net/stufflog/server/models"
"time"
)
var projectPermissionCtxKey = "ctx.stufflog.ProjectPermissionLoader"
func ProjectPermissionLoaderFromContext(ctx context.Context) *ProjectPermissionLoader {
return ctx.Value(projectPermissionCtxKey).(*ProjectPermissionLoader)
}
func NewProjectPermissionLoader(ctx context.Context, user *models.User, projectRepo repositories.ProjectRepository) *ProjectPermissionLoader {
return &ProjectPermissionLoader{
wait: time.Millisecond / 2,
maxBatch: 16,
fetch: func(keys []string) ([]*models.ProjectPermission, []error) {
errors := make([]error, len(keys))
results := make([]*models.ProjectPermission, len(keys))
if user == nil {
for i, key := range keys {
results[i] = &models.ProjectPermission{
ProjectID: key,
UserID: "",
Level: models.ProjectPermissionLevelNoAccess,
}
}
return results, errors
}
indices := make(map[string]int)
for i, key := range keys {
indices[key] = i
}
permissions, err := projectRepo.GetUserPermissions(ctx, *user, keys)
if err != nil {
for i := range errors {
errors[i] = err
}
return results, errors
}
for _, permission := range permissions {
results[indices[permission.ProjectID]] = permission
}
return results, errors
},
}
}

23
graph/loaders/projectpermissionloader_gen.go

@@ -1,4 +1,4 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
// Code generated by github.com/vektah/dataloaden, DO EDIT.
package loaders
@@ -9,27 +9,6 @@ import (
"git.aiterp.net/stufflog/server/models"
)
// ProjectPermissionLoaderConfig captures the config to create a new ProjectPermissionLoader
type ProjectPermissionLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*models.ProjectPermission, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewProjectPermissionLoader creates a new ProjectPermissionLoader given a fetch, wait, and maxBatch
func NewProjectPermissionLoader(config ProjectPermissionLoaderConfig) *ProjectPermissionLoader {
return &ProjectPermissionLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// ProjectPermissionLoader batches and caches requests
type ProjectPermissionLoader struct {
// this method provides the data for the loader

8
graph/loaders/userloader.go

@@ -12,18 +12,14 @@ import (
var userLoaderCtxKey = "ctx.stufflog.UserLoader"
func NewUserLoaderContext(ctx context.Context, userRepo repositories.UserRepository) context.Context {
return context.WithValue(ctx, userLoaderCtxKey, NewUserLoader(ctx, userRepo))
}
func UserLoaderFromContext(ctx context.Context) *UserLoader {
return ctx.Value(userLoaderCtxKey).(*UserLoader)
}
func NewUserLoader(ctx context.Context, userRepo repositories.UserRepository) *UserLoader {
return &UserLoader{
wait: 2 * time.Millisecond,
maxBatch: 100,
wait: time.Millisecond / 4,
maxBatch: 4,
fetch: func(keys []string) ([]*models.User, []error) {
results := make([]*models.User, len(keys))
errors := make([]error, len(keys))

14
graph/loaders/userloader_gen.go

@@ -1,4 +1,4 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
// Code generated by github.com/vektah/dataloaden, DO EDIT.
package loaders
@@ -9,18 +9,6 @@ import (
"git.aiterp.net/stufflog/server/models"
)
// UserLoaderConfig captures the config to create a new UserLoader
type UserLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*models.User, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// UserLoader batches and caches requests
type UserLoader struct {
// this method provides the data for the loader

4
graph/resolvers/issue.resolvers.go

@@ -94,7 +94,9 @@ func (r *issueResolver) Items(ctx context.Context, obj *models.Issue, filter *mo
}
func (r *issueResolver) Logs(ctx context.Context, obj *models.Issue) ([]*models.Log, error) {
logs, err := r.Database.Logs().List(ctx, models.LogFilter{IssueIDs: []string{obj.ID}})
loader := loaders.LogsByIssueLoaderFromContext(ctx)
logs, err := loader.Load(obj.ID)
if err != nil {
return nil, err
}

30
graph/resolvers/issueitem.resolvers.go

@@ -5,14 +5,14 @@ package resolvers
import (
"context"
"fmt"
"git.aiterp.net/stufflog/server/graph/loaders"
"git.aiterp.net/stufflog/server/graph/graphcore"
"git.aiterp.net/stufflog/server/models"
)
func (r *issueItemResolver) Issue(ctx context.Context, obj *models.IssueItem) (*models.Issue, error) {
return r.Database.Issues().Find(ctx, obj.IssueID)
return loaders.IssueLoaderFromContext(ctx).Load(obj.IssueID)
}
func (r *issueItemResolver) Item(ctx context.Context, obj *models.IssueItem) (*models.Item, error) {
@@ -24,21 +24,23 @@ func (r *issueItemResolver) Remaining(ctx context.Context, obj *models.IssueItem
return 0, nil
}
// TODO: Use logs
return obj.Quantity, nil
loader := loaders.LogsByIssueLoaderFromContext(ctx)
logs, err := loader.Load(obj.IssueID)
if err != nil {
return 0, err
}
remaining := obj.Quantity
for _, log := range logs {
if item := log.Item(obj.ID); item != nil {
remaining -= item.Amount
}
}
return remaining, nil
}
// IssueItem returns graphcore.IssueItemResolver implementation.
func (r *Resolver) IssueItem() graphcore.IssueItemResolver { return &issueItemResolver{r} }
type issueItemResolver struct{ *Resolver }
// !!! WARNING !!!
// The code below was going to be deleted when updating resolvers. It has been copied here so you have
// one last chance to move it out of harms way if you want. There are two reasons this happens:
// - When renaming or deleting a resolver the old code will be put in here. You can safely delete
// it when you're done.
// - You have helper methods in this file. Move them out to keep these resolver files clean.
func (r *issueItemResolver) Quanity(ctx context.Context, obj *models.IssueItem) (int, error) {
panic(fmt.Errorf("not implemented"))
}

46
graph/resolvers/issuetask.resolvers.go

@@ -6,19 +6,19 @@ package resolvers
import (
"context"
"errors"
"fmt"
"strings"
"time"
"git.aiterp.net/stufflog/server/graph/graphcore"
"git.aiterp.net/stufflog/server/graph/graphutil"
"git.aiterp.net/stufflog/server/graph/loaders"
"git.aiterp.net/stufflog/server/internal/slerrors"
"git.aiterp.net/stufflog/server/models"
)
func (r *issueTaskResolver) EstimatedUnits(ctx context.Context, obj *models.IssueTask) (*int, error) {
// TODO: Data loader
activity, err := r.Database.Activities().Find(ctx, obj.ActivityID)
activity, err := loaders.ActivityLoaderFromContext(ctx).Load(obj.ActivityID)
if err != nil {
return nil, err
}
@@ -30,11 +30,11 @@ func (r *issueTaskResolver) EstimatedUnits(ctx context.Context, obj *models.Issu
}
func (r *issueTaskResolver) Issue(ctx context.Context, obj *models.IssueTask) (*models.Issue, error) {
return r.Database.Issues().Find(ctx, obj.IssueID)
return loaders.IssueLoaderFromContext(ctx).Load(obj.IssueID)
}
func (r *issueTaskResolver) Activity(ctx context.Context, obj *models.IssueTask) (*models.Activity, error) {
return r.Database.Activities().Find(ctx, obj.ActivityID)
return loaders.ActivityLoaderFromContext(ctx).Load(obj.ActivityID)
}
func (r *issueTaskResolver) Status(ctx context.Context, obj *models.IssueTask) (*models.ProjectStatus, error) {
@@ -79,11 +79,45 @@ func (r *issueTaskResolver) Status(ctx context.Context, obj *models.IssueTask) (
}
func (r *issueTaskResolver) RemainingTime(ctx context.Context, obj *models.IssueTask) (time.Duration, error) {
panic(fmt.Errorf("not implemented"))
loader := loaders.LogsByIssueLoaderFromContext(ctx)
logs, err := loader.Load(obj.IssueID)
if err != nil {
return 0, err
}
remaining := obj.EstimatedTime
for _, log := range logs {
if task := log.Task(obj.ID); task != nil {
remaining -= task.Duration
}
}
return remaining, nil
}
func (r *issueTaskResolver) RemainingUnits(ctx context.Context, obj *models.IssueTask) (*int, error) {
panic(fmt.Errorf("not implemented"))
activity, err := loaders.ActivityLoaderFromContext(ctx).Load(obj.ActivityID)
if err != nil {
return nil, err
}
if !activity.Countable || activity.UnitIsTimeSpent {
return nil, nil
}
loader := loaders.LogsByIssueLoaderFromContext(ctx)
logs, err := loader.Load(obj.IssueID)
if err != nil {
return nil, err
}
remaining := obj.EstimatedUnits
for _, log := range logs {
if task := log.Task(obj.ID); task != nil && task.Units != nil {
remaining -= *task.Units
}
}
return &remaining, nil
}
// IssueTask returns graphcore.IssueTaskResolver implementation.

5
graph/resolvers/log.resolvers.go

@@ -5,6 +5,7 @@ package resolvers
import (
"context"
"git.aiterp.net/stufflog/server/graph/loaders"
"git.aiterp.net/stufflog/server/graph/graphcore"
"git.aiterp.net/stufflog/server/models"
@@ -15,7 +16,7 @@ func (r *logResolver) User(ctx context.Context, obj *models.Log) (*models.User,
}
func (r *logItemResolver) Issue(ctx context.Context, obj *models.LogItem) (*models.Issue, error) {
return r.Database.Issues().Find(ctx, obj.IssueID)
return loaders.IssueLoaderFromContext(ctx).Load(obj.IssueID)
}
func (r *logItemResolver) Item(ctx context.Context, obj *models.LogItem) (*models.IssueItem, error) {
@@ -23,7 +24,7 @@ func (r *logItemResolver) Item(ctx context.Context, obj *models.LogItem) (*model
}
func (r *logTaskResolver) Issue(ctx context.Context, obj *models.LogTask) (*models.Issue, error) {
return r.Database.Issues().Find(ctx, obj.IssueID)
return loaders.IssueLoaderFromContext(ctx).Load(obj.IssueID)
}
func (r *logTaskResolver) Task(ctx context.Context, obj *models.LogTask) (*models.IssueTask, error) {

29
graph/resolvers/mutation.resolvers.go

@@ -6,6 +6,7 @@ package resolvers
import (
"context"
"errors"
"git.aiterp.net/stufflog/server/graph/loaders"
"log"
"sort"
"time"
@@ -59,7 +60,7 @@ func (r *mutationResolver) CreateActivity(ctx context.Context, input graphcore.A
if err != nil {
return nil, err
}
if perm, err := r.Auth.ProjectPermission(ctx, *project); err != nil || !perm.CanManageActivities() {
if perm, err := r.Auth.ProjectPermission(ctx, project.ID); err != nil || !perm.CanManageActivities() {
return nil, slerrors.PermissionDenied
}
@@ -101,7 +102,7 @@ func (r *mutationResolver) EditActivity(ctx context.Context, input graphcore.Act
if err != nil {
return nil, err
}
if perm, err := r.Auth.ProjectPermission(ctx, *project); err != nil || !perm.CanManageActivities() {
if perm, err := r.Auth.ProjectPermission(ctx, project.ID); err != nil || !perm.CanManageActivities() {
return nil, slerrors.PermissionDenied
}
@@ -261,7 +262,7 @@ func (r *mutationResolver) CreateIssue(ctx context.Context, input graphcore.Issu
if err != nil {
return nil, err
}
if perm, err := r.Auth.ProjectPermission(ctx, *project); err != nil || !perm.CanManageOwnIssue() {
if perm, err := r.Auth.ProjectPermission(ctx, project.ID); err != nil || !perm.CanManageOwnIssue() {
return nil, slerrors.PermissionDenied
}
@@ -295,6 +296,8 @@ func (r *mutationResolver) CreateIssue(ctx context.Context, input graphcore.Issu
return nil, err
}
loaders.IssueLoaderFromContext(ctx).Prime(issue.ID, issue)
return issue, nil
}
@@ -304,7 +307,7 @@ func (r *mutationResolver) CreateIssueTask(ctx context.Context, input graphcore.
return nil, slerrors.PermissionDenied
}
issue, err := r.Database.Issues().Find(ctx, input.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(input.IssueID)
if err != nil {
return nil, err
}
@@ -317,7 +320,7 @@ func (r *mutationResolver) CreateIssueTask(ctx context.Context, input graphcore.
return nil, err
}
activity, err := r.Database.Activities().Find(ctx, input.ActivityID)
activity, err := loaders.ActivityLoaderFromContext(ctx).Load(input.ActivityID)
if err != nil {
return nil, err
} else if activity.ProjectID != issue.ProjectID {
@@ -361,7 +364,7 @@ func (r *mutationResolver) EditIssueTask(ctx context.Context, input graphcore.Is
return nil, err
}
issue, err := r.Database.Issues().Find(ctx, task.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(task.IssueID)
if err != nil {
return nil, err
}
@@ -382,7 +385,7 @@ func (r *mutationResolver) EditIssueTask(ctx context.Context, input graphcore.Is
task.EstimatedTime = *input.SetEstimatedTime
}
if input.SetEstimatedUnits != nil {
activity, err := r.Database.Activities().Find(ctx, task.ActivityID)
activity, err := loaders.ActivityLoaderFromContext(ctx).Load(task.ActivityID)
if err != nil {
return nil, err
}
@@ -418,7 +421,7 @@ func (r *mutationResolver) CreateIssueItem(ctx context.Context, input graphcore.
return nil, slerrors.PermissionDenied
}
issue, err := r.Database.Issues().Find(ctx, input.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(input.IssueID)
if err != nil {
return nil, err
}
@@ -452,7 +455,7 @@ func (r *mutationResolver) EditIssueItem(ctx context.Context, input graphcore.Is
return nil, err
}
issue, err := r.Database.Issues().Find(ctx, item.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(item.IssueID)
if err != nil {
return nil, err
}
@@ -495,7 +498,7 @@ func (r *mutationResolver) CreateLog(ctx context.Context, input graphcore.LogCre
return nil, err
}
issue, err := r.Database.Issues().Find(ctx, item.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(item.IssueID)
if err != nil {
return nil, err
}
@@ -516,7 +519,7 @@ func (r *mutationResolver) CreateLog(ctx context.Context, input graphcore.LogCre
return nil, err
}
issue, err := r.Database.Issues().Find(ctx, task.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(task.IssueID)
if err != nil {
return nil, err
}
@@ -583,7 +586,7 @@ func (r *mutationResolver) EditLog(ctx context.Context, input graphcore.LogEditI
return nil, err
}
issue, err := r.Database.Issues().Find(ctx, item.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(item.IssueID)
if err != nil {
return nil, err
}
@@ -605,7 +608,7 @@ func (r *mutationResolver) EditLog(ctx context.Context, input graphcore.LogEditI
return nil, err
}
issue, err := r.Database.Issues().Find(ctx, task.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(task.IssueID)
if err != nil {
return nil, err
}

16
graph/resolvers/project.resolvers.go

@@ -17,7 +17,7 @@ func (r *projectResolver) Issues(ctx context.Context, obj *models.Project, filte
filter = &graphcore.ProjectIssueFilter{}
}
return r.Database.Issues().List(ctx, models.IssueFilter{
issues, err := r.Database.Issues().List(ctx, models.IssueFilter{
ProjectIDs: []string{obj.ID},
AssigneeIDs: filter.AssigneeIds,
Search: filter.Search,
@@ -25,10 +25,20 @@ func (r *projectResolver) Issues(ctx context.Context, obj *models.Project, filte
MaxStage: filter.MaxStage,
Limit: filter.Limit,
})
if err != nil {
return nil, err
}
loader := loaders.IssueLoaderFromContext(ctx)
for _, issue := range issues {
loader.Prime(issue.ID, issue)
}
return issues, nil
}
func (r *projectResolver) Permissions(ctx context.Context, obj *models.Project) ([]*models.ProjectPermission, error) {
if perm, err := r.Auth.ProjectPermission(ctx, *obj); err != nil || !perm.CanManagePermissions() {
if perm, err := r.Auth.ProjectPermission(ctx, obj.ID); err != nil || !perm.CanManagePermissions() {
return nil, slerrors.PermissionDenied
}
@@ -36,7 +46,7 @@ func (r *projectResolver) Permissions(ctx context.Context, obj *models.Project)
}
func (r *projectResolver) UserPermissions(ctx context.Context, obj *models.Project) (*models.ProjectPermission, error) {
return r.Auth.ProjectPermission(ctx, *obj)
return r.Auth.ProjectPermission(ctx, obj.ID)
}
func (r *projectResolver) Statuses(ctx context.Context, obj *models.Project, filter *models.ProjectStatusFilter) ([]*models.ProjectStatus, error) {

17
graph/resolvers/query.resolvers.go

@@ -6,6 +6,7 @@ package resolvers
import (
"context"
"errors"
"git.aiterp.net/stufflog/server/graph/loaders"
"git.aiterp.net/stufflog/server/graph/graphcore"
"git.aiterp.net/stufflog/server/internal/slerrors"
@@ -18,7 +19,8 @@ func (r *queryResolver) Issue(ctx context.Context, id string) (*models.Issue, er
return nil, slerrors.PermissionDenied
}
issue, err := r.Database.Issues().Find(ctx, id)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(id)
if err != nil {
return nil, err
}
@@ -44,6 +46,11 @@ func (r *queryResolver) Issues(ctx context.Context, filter *models.IssueFilter)
return nil, err
}
loader := loaders.IssueLoaderFromContext(ctx)
for _, issue := range issues {
loader.Prime(issue.ID, issue)
}
deleteList := make([]int, 0, len(issues))
for i, issue := range issues {
_, err := r.Auth.IssuePermission(ctx, *issue)
@@ -100,7 +107,7 @@ func (r *queryResolver) IssueItem(ctx context.Context, id string) (*models.Issue
return nil, err
}
issue, err := r.Database.Issues().Find(ctx, item.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(item.IssueID)
if err != nil {
return nil, err
}
@@ -138,7 +145,7 @@ func (r *queryResolver) IssueItems(ctx context.Context, filter *models.IssueItem
continue
}
issue, err := r.Database.Issues().Find(ctx, item.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(item.IssueID)
if err != nil {
deleteList = append(deleteList, i-len(deleteList))
accessMap[item.IssueID] = true
@@ -169,7 +176,7 @@ func (r *queryResolver) Project(ctx context.Context, id string) (*models.Project
if err != nil {
return nil, err
}
_, err = r.Auth.ProjectPermission(ctx, *project)
_, err = r.Auth.ProjectPermission(ctx, project.ID)
if err != nil {
return nil, err
}
@@ -203,7 +210,7 @@ func (r *queryResolver) Projects(ctx context.Context, filter *models.ProjectFilt
if !skipCheck && len(projects) > 0 {
deleteList := make([]int, 0, 4)
for i, project := range projects {
if _, err := r.Auth.ProjectPermission(ctx, *project); err != nil {
if _, err := r.Auth.ProjectPermission(ctx, project.ID); err != nil {
deleteList = append(deleteList, i-len(deleteList))
}
}

66
models/log.go

@@ -12,10 +12,76 @@ type Log struct {
Tasks []LogTask
}
func (log *Log) Copy() *Log {
newLog := *log
newLog.Items = append(newLog.Items[:0:0], log.Items...)
newLog.Tasks = append(newLog.Tasks[:0:0], log.Tasks...)
return &newLog
}
func (log *Log) Empty() bool {
return len(log.Items) == 0 && len(log.Tasks) == 0
}
func (log *Log) MatchesIssue(issueID string) bool {
for _, item := range log.Items {
if item.IssueID == issueID {
return true
}
}
for _, task := range log.Tasks {
if task.IssueID == issueID {
return true
}
}
return false
}
func (log *Log) MatchesIssueTask(issueTaskID string) bool {
for _, task := range log.Tasks {
if task.IssueTaskID == issueTaskID {
return true
}
}
return false
}
func (log *Log) MatchesIssueItem(issueItemID string) bool {
for _, item := range log.Items {
if item.IssueItemID == issueItemID {
return true
}
}
return false
}
func (log *Log) Task(issueTaskID string) *LogTask {
for i := range log.Tasks {
task := &log.Tasks[i]
if task.IssueTaskID == issueTaskID {
return task
}
}
return nil
}
func (log *Log) Item(issueItemID string) *LogItem {
for i := range log.Items {
item := &log.Items[i]
if item.IssueItemID == issueItemID {
return item
}
}
return nil
}
type LogTask struct {
LogID string `db:"log_id"`
IssueID string `db:"issue_id"`

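The new Log.Copy above duplicates the Items and Tasks slices, which is what lets FilterLogListCopy in services/auth.go below filter per user without mutating the shared log models. A small sketch of that property (field names as in this diff):

package main

import (
	"fmt"

	"git.aiterp.net/stufflog/server/models"
)

func main() {
	original := &models.Log{
		Tasks: []models.LogTask{{IssueID: "issue-1", IssueTaskID: "task-1"}},
	}

	// Copy duplicates the Items and Tasks slices, so trimming the clone
	// leaves the original untouched.
	clone := original.Copy()
	clone.Tasks = clone.Tasks[:0]

	fmt.Println(original.MatchesIssue("issue-1")) // true
	fmt.Println(clone.Empty())                    // true
}
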
22
services/auth.go

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"git.aiterp.net/stufflog/server/database/repositories"
"git.aiterp.net/stufflog/server/graph/loaders"
"git.aiterp.net/stufflog/server/internal/generate"
"git.aiterp.net/stufflog/server/internal/slerrors"
"git.aiterp.net/stufflog/server/models"
@@ -112,13 +113,13 @@ func (auth *Auth) UserFromContext(ctx context.Context) *models.User {
return user
}
func (auth *Auth) ProjectPermission(ctx context.Context, project models.Project) (*models.ProjectPermission, error) {
func (auth *Auth) ProjectPermission(ctx context.Context, projectID string) (*models.ProjectPermission, error) {
user := auth.UserFromContext(ctx)
if user == nil || !user.Active {
return nil, slerrors.PermissionDenied
}
permission, err := auth.projects.GetPermission(ctx, project, *user)
permission, err := loaders.ProjectPermissionLoaderFromContext(ctx).Load(projectID)
if err != nil {
return nil, ErrInternalPermissionFailure
}
@@ -137,7 +138,7 @@ func (auth *Auth) IssuePermission(ctx context.Context, issue models.Issue) (*mod
isOwnedOrAssigned := issue.AssigneeID == user.ID || issue.OwnerID == user.ID
permission, err := auth.projects.GetIssuePermission(ctx, issue, *user)
permission, err := loaders.ProjectPermissionLoaderFromContext(ctx).Load(issue.ProjectID)
if err != nil {
return nil, ErrInternalPermissionFailure
}
@@ -227,6 +228,17 @@ func (auth *Auth) EditUser(ctx context.Context, username string, setName *string
return user, nil
}
func (auth *Auth) FilterLogListCopy(ctx context.Context, logs []*models.Log) []*models.Log {
logs2 := make([]*models.Log, 0, len(logs))
for _, log := range logs {
logs2 = append(logs2, log.Copy())
}
auth.FilterLogList(ctx, &logs2)
return logs2
}
func (auth *Auth) FilterLogList(ctx context.Context, logs *[]*models.Log) {
user := auth.UserFromContext(ctx)
if user == nil {
@@ -270,7 +282,7 @@ func (auth *Auth) FilterLog(ctx context.Context, logs ...*models.Log) {
continue
}
issue, err := auth.issues.Find(ctx, item.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(item.IssueID)
if err != nil {
deleteList = append(deleteList, i-len(deleteList))
accessMap[item.IssueID] = true
@@ -296,7 +308,7 @@ func (auth *Auth) FilterLog(ctx context.Context, logs ...*models.Log) {
continue
}
issue, err := auth.issues.Find(ctx, task.IssueID)
issue, err := loaders.IssueLoaderFromContext(ctx).Load(task.IssueID)
if err != nil {
deleteList = append(deleteList, i-len(deleteList))
accessMap[task.IssueID] = true
