stufflog graphql server

// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"

	"git.aiterp.net/stufflog/server/models"
)

// IssueLoader batches and caches requests
type IssueLoader struct {
	// this method provides the data for the loader
	fetch func(keys []string) ([]*models.Issue, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[string]*models.Issue

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *issueLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

type issueLoaderBatch struct {
	keys    []string
	data    []*models.Issue
	error   []error
	closing bool
	done    chan struct{}
}

// Load an Issue by key, batching and caching will be applied automatically
func (l *IssueLoader) Load(key string) (*models.Issue, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for an Issue.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *IssueLoader) LoadThunk(key string) func() (*models.Issue, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*models.Issue, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &issueLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*models.Issue, error) {
		<-batch.done

		var data *models.Issue
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub batches depending on how the loader is configured
func (l *IssueLoader) LoadAll(keys []string) ([]*models.Issue, []error) {
	results := make([]func() (*models.Issue, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	issues := make([]*models.Issue, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		issues[i], errors[i] = thunk()
	}
	return issues, errors
}

// LoadAllThunk returns a function that when called will block waiting for the Issues.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *IssueLoader) LoadAllThunk(keys []string) func() ([]*models.Issue, []error) {
	results := make([]func() (*models.Issue, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]*models.Issue, []error) {
		issues := make([]*models.Issue, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			issues[i], errors[i] = thunk()
		}
		return issues, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *IssueLoader) Prime(key string, value *models.Issue) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, it's easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *IssueLoader) Clear(key string) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

func (l *IssueLoader) unsafeSet(key string, value *models.Issue) {
	if l.cache == nil {
		l.cache = map[string]*models.Issue{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if it's not found
// it will add the key to the batch
func (b *issueLoaderBatch) keyIndex(l *IssueLoader, key string) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

func (b *issueLoaderBatch) startTimer(l *IssueLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

func (b *issueLoaderBatch) end(l *IssueLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}
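
For context, below is a minimal sketch of how this generated loader might be wired up from inside the loaders package. The helper name, the fetch callback, and the wait/maxBatch values are illustrative assumptions, not code taken from this repository.

// newIssueLoaderSketch is a hypothetical helper showing how the generated
// IssueLoader could be configured. The fetch callback, wait duration and
// batch limit here are assumptions chosen for illustration only.
func newIssueLoaderSketch(fetch func(keys []string) ([]*models.Issue, []error)) *IssueLoader {
	return &IssueLoader{
		fetch:    fetch,                // resolves a whole batch of issue IDs in one query
		wait:     2 * time.Millisecond, // collect keys for up to 2ms before fetching
		maxBatch: 100,                  // flush early once 100 keys are queued
	}
}

A GraphQL resolver would then call Load(id) once per issue; all calls arriving within the wait window are collapsed into a single fetch, and repeated keys are served from the cache.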