stufflog graphql server

// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"

	"git.aiterp.net/stufflog/server/models"
)
// UserLoaderConfig captures the config to create a new UserLoader
type UserLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []string) ([]*models.User, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}
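// exampleUserLoaderConfig is an illustrative sketch, not part of the
// dataloaden output: it shows how the fields above are typically filled in.
// The Fetch stub stands in for a real batch query against the user store.
var exampleUserLoaderConfig = UserLoaderConfig{
	Fetch: func(keys []string) ([]*models.User, []error) {
		// a real Fetch resolves every key in one round trip; the returned
		// errors slice is index-aligned with keys, or holds a single element
		// to report one error for the whole batch
		return make([]*models.User, len(keys)), nil
	},
	Wait:     2 * time.Millisecond, // collect keys for up to 2ms before fetching
	MaxBatch: 100,                  // flush early once 100 keys are queued
}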
// UserLoader batches and caches requests
type UserLoader struct {
	// this method provides the data for the loader
	fetch func(keys []string) ([]*models.User, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[string]*models.User

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *userLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}
type userLoaderBatch struct {
	keys    []string
	data    []*models.User
	error   []error
	closing bool
	done    chan struct{}
}
// Load a User by key, batching and caching will be applied automatically
func (l *UserLoader) Load(key string) (*models.User, error) {
	return l.LoadThunk(key)()
}
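// resolveUser is an illustrative sketch, not generated code: a typical
// GraphQL resolver calls Load with a single key, and concurrent resolvers
// doing the same are coalesced into one fetch instead of N separate queries.
func resolveUser(l *UserLoader, userID string) (*models.User, error) {
	return l.Load(userID) // blocks until this key's batch has been fetched
}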
// LoadThunk returns a function that when called will block waiting for a User.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UserLoader) LoadThunk(key string) func() (*models.User, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*models.User, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &userLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*models.User, error) {
		<-batch.done

		var data *models.User
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}
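// loadTwoUsersExample is an illustrative sketch, not generated code: with
// LoadThunk a single goroutine can queue several keys (possibly across
// different loaders) before blocking on any of the results.
func loadTwoUsersExample(l *UserLoader, ownerID, assigneeID string) (*models.User, *models.User, error) {
	ownerThunk := l.LoadThunk(ownerID)       // key queued, nothing fetched yet
	assigneeThunk := l.LoadThunk(assigneeID) // joins the same pending batch

	owner, err := ownerThunk() // first call blocks; both keys resolve in one fetch
	if err != nil {
		return nil, nil, err
	}
	assignee, err := assigneeThunk() // already resolved, returns immediately
	return owner, assignee, err
}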
// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub-batches depending on how the loader is configured
func (l *UserLoader) LoadAll(keys []string) ([]*models.User, []error) {
	results := make([]func() (*models.User, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	users := make([]*models.User, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		users[i], errors[i] = thunk()
	}
	return users, errors
}
// LoadAllThunk returns a function that when called will block waiting for the Users.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UserLoader) LoadAllThunk(keys []string) func() ([]*models.User, []error) {
	results := make([]func() (*models.User, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]*models.User, []error) {
		users := make([]*models.User, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			users[i], errors[i] = thunk()
		}
		return users, errors
	}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *UserLoader) Prime(key string, value *models.User) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, it's easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}
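// primeFromListExample is an illustrative sketch, not generated code: after a
// list query has already returned full users, priming the cache lets later
// Load calls for the same keys skip the fetch entirely. It assumes models.User
// has an ID field usable as the cache key.
func primeFromListExample(l *UserLoader, users []*models.User) {
	for _, u := range users {
		// Prime stores a copy, so passing the loop variable's pointer is safe;
		// call l.Clear(u.ID) first if an existing entry must be overwritten
		l.Prime(u.ID, u)
	}
}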
// Clear the value at key from the cache, if it exists
func (l *UserLoader) Clear(key string) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

func (l *UserLoader) unsafeSet(key string, value *models.User) {
	if l.cache == nil {
		l.cache = map[string]*models.User{}
	}
	l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if it's not found
// it will add the key to the batch
func (b *userLoaderBatch) keyIndex(l *UserLoader, key string) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}
func (b *userLoaderBatch) startTimer(l *UserLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

func (b *userLoaderBatch) end(l *UserLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}
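// concurrentLoadExample is an illustrative sketch, not generated code, tying
// the pieces above together: concurrent Load calls arriving within the wait
// window (or before maxBatch keys have queued) are served by a single fetch.
func concurrentLoadExample(l *UserLoader, ids []string) []*models.User {
	var wg sync.WaitGroup
	out := make([]*models.User, len(ids))
	for i, id := range ids {
		wg.Add(1)
		go func(i int, id string) {
			defer wg.Done()
			// these coalesce into one batch, or several if len(ids) exceeds maxBatch
			out[i], _ = l.Load(id)
		}(i, id)
	}
	wg.Wait()
	return out
}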