stufflog graphql server
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
    "sync"
    "time"

    "git.aiterp.net/stufflog/server/models"
)
// LogsByIssueLoader batches and caches requests
type LogsByIssueLoader struct {
    // this method provides the data for the loader
    fetch func(keys []string) ([][]*models.Log, []error)

    // how long to wait before sending a batch
    wait time.Duration

    // this will limit the maximum number of keys to send in one batch, 0 = no limit
    maxBatch int

    // INTERNAL

    // lazily created cache
    cache map[string][]*models.Log

    // the current batch. keys will continue to be collected until timeout is hit,
    // then everything will be sent to the fetch method and out to the listeners
    batch *logsByIssueLoaderBatch

    // mutex to prevent races
    mu sync.Mutex
}
type logsByIssueLoaderBatch struct {
    keys    []string
    data    [][]*models.Log
    error   []error
    closing bool
    done    chan struct{}
}
// Load a Log by key, batching and caching will be applied automatically
func (l *LogsByIssueLoader) Load(key string) ([]*models.Log, error) {
    return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Log.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *LogsByIssueLoader) LoadThunk(key string) func() ([]*models.Log, error) {
    l.mu.Lock()
    if it, ok := l.cache[key]; ok {
        l.mu.Unlock()
        return func() ([]*models.Log, error) {
            return it, nil
        }
    }
    if l.batch == nil {
        l.batch = &logsByIssueLoaderBatch{done: make(chan struct{})}
    }
    batch := l.batch
    pos := batch.keyIndex(l, key)
    l.mu.Unlock()

    return func() ([]*models.Log, error) {
        <-batch.done

        var data []*models.Log
        if pos < len(batch.data) {
            data = batch.data[pos]
        }

        var err error
        // it's convenient to be able to return a single error for everything
        if len(batch.error) == 1 {
            err = batch.error[0]
        } else if batch.error != nil {
            err = batch.error[pos]
        }

        if err == nil {
            l.mu.Lock()
            l.unsafeSet(key, data)
            l.mu.Unlock()
        }

        return data, err
    }
}
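// A hedged usage sketch (not part of the generated code): because LoadThunk
// only queues the key, one goroutine can enqueue several keys before blocking,
// and the keys coalesce into a single call to fetch:
//
//	thunkA := loader.LoadThunk("issue-1")
//	thunkB := loader.LoadThunk("issue-2")
//	// Both keys now sit in the same batch. Calling the thunks blocks until
//	// the wait timer (or maxBatch) closes the batch and fetch runs once.
//	logsA, errA := thunkA()
//	logsB, errB := thunkB()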
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *LogsByIssueLoader) LoadAll(keys []string) ([][]*models.Log, []error) {
    results := make([]func() ([]*models.Log, error), len(keys))

    for i, key := range keys {
        results[i] = l.LoadThunk(key)
    }

    logs := make([][]*models.Log, len(keys))
    errors := make([]error, len(keys))
    for i, thunk := range results {
        logs[i], errors[i] = thunk()
    }
    return logs, errors
}
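// A hedged example (not generated code): the returned slices are parallel to
// the input keys, one result and one error slot per key:
//
//	logsPerIssue, errs := loader.LoadAll([]string{"issue-1", "issue-2"})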
// LoadAllThunk returns a function that when called will block waiting for the Logs.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *LogsByIssueLoader) LoadAllThunk(keys []string) func() ([][]*models.Log, []error) {
    results := make([]func() ([]*models.Log, error), len(keys))
    for i, key := range keys {
        results[i] = l.LoadThunk(key)
    }
    return func() ([][]*models.Log, []error) {
        logs := make([][]*models.Log, len(keys))
        errors := make([]error, len(keys))
        for i, thunk := range results {
            logs[i], errors[i] = thunk()
        }
        return logs, errors
    }
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.Clear(key),
// then call loader.Prime(key, value).)
func (l *LogsByIssueLoader) Prime(key string, value []*models.Log) bool {
    l.mu.Lock()
    var found bool
    if _, found = l.cache[key]; !found {
        // make a copy when writing to the cache, it's easy to pass a pointer in from a loop var
        // and end up with the whole cache pointing to the same value.
        cpy := make([]*models.Log, len(value))
        copy(cpy, value)
        l.unsafeSet(key, cpy)
    }
    l.mu.Unlock()
    return !found
}
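// A hedged sketch of the force-prime pattern referenced above; since Clear
// returns nothing, the two calls cannot be chained (freshLogs is hypothetical):
//
//	loader.Clear(key)
//	loader.Prime(key, freshLogs)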
// Clear the value at key from the cache, if it exists
func (l *LogsByIssueLoader) Clear(key string) {
    l.mu.Lock()
    delete(l.cache, key)
    l.mu.Unlock()
}
// unsafeSet writes a value to the cache without locking; callers must hold l.mu.
func (l *LogsByIssueLoader) unsafeSet(key string, value []*models.Log) {
    if l.cache == nil {
        l.cache = map[string][]*models.Log{}
    }
    l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *logsByIssueLoaderBatch) keyIndex(l *LogsByIssueLoader, key string) int {
    for i, existingKey := range b.keys {
        if key == existingKey {
            return i
        }
    }

    pos := len(b.keys)
    b.keys = append(b.keys, key)
    if pos == 0 {
        go b.startTimer(l)
    }

    if l.maxBatch != 0 && pos >= l.maxBatch-1 {
        if !b.closing {
            b.closing = true
            l.batch = nil
            go b.end(l)
        }
    }

    return pos
}
// startTimer sleeps for the configured wait, then finalizes the batch unless
// it was already closed by reaching maxBatch.
func (b *logsByIssueLoaderBatch) startTimer(l *LogsByIssueLoader) {
    time.Sleep(l.wait)
    l.mu.Lock()

    // we must have hit a batch limit and are already finalizing this batch
    if b.closing {
        l.mu.Unlock()
        return
    }

    l.batch = nil
    l.mu.Unlock()

    b.end(l)
}
// end runs the fetch for the whole batch and unblocks every waiting thunk.
func (b *logsByIssueLoaderBatch) end(l *LogsByIssueLoader) {
    b.data, b.error = l.fetch(b.keys)
    close(b.done)
}
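
For context, a minimal sketch of how such a loader might be wired up inside package loaders; the constructor name, the timings, and the getLogsForIssues bulk-query helper are illustrative assumptions, not part of the generated file:

// newLogsByIssueLoader is a hypothetical helper, not generated code. It shows
// one way to satisfy the fetch contract: results must line up index-for-index
// with keys, and a single-element error slice fans out to every caller.
func newLogsByIssueLoader(getLogsForIssues func(issueIDs []string) (map[string][]*models.Log, error)) *LogsByIssueLoader {
    return &LogsByIssueLoader{
        wait:     2 * time.Millisecond, // collect keys briefly before fetching
        maxBatch: 100,                  // flush early once 100 keys are queued
        fetch: func(keys []string) ([][]*models.Log, []error) {
            // one bulk query for the whole batch instead of one per key
            byIssue, err := getLogsForIssues(keys)
            if err != nil {
                // a single error is returned to every caller in the batch
                return nil, []error{err}
            }
            results := make([][]*models.Log, len(keys))
            for i, key := range keys {
                results[i] = byIssue[key]
            }
            return results, nil
        },
    }
}

A request-scoped instance built this way would let each issue resolver simply call loader.Load(issue.ID), collapsing N per-issue lookups into one batched fetch.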