GraphQL API and utilities for the rpdata project

// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"

	"git.aiterp.net/rpdata/api/models"
)

// ChannelLoaderConfig captures the config to create a new ChannelLoader
type ChannelLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []string) ([]*models.Channel, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}
// NewChannelLoader creates a new ChannelLoader given a fetch, wait, and maxBatch
func NewChannelLoader(config ChannelLoaderConfig) *ChannelLoader {
	return &ChannelLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}
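// Illustrative sketch, not part of the generated output: constructing a loader.
// The Fetch function below is a hypothetical stand-in for a real lookup; it must
// return one *models.Channel (or one error) per key, in the same order as keys.
//
//	loader := NewChannelLoader(ChannelLoaderConfig{
//		Fetch: func(keys []string) ([]*models.Channel, []error) {
//			channels := make([]*models.Channel, len(keys))
//			errs := make([]error, len(keys))
//			for i := range keys {
//				channels[i] = &models.Channel{} // fill in from storage here
//			}
//			return channels, errs
//		},
//		Wait:     2 * time.Millisecond,
//		MaxBatch: 100,
//	})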
// ChannelLoader batches and caches requests
type ChannelLoader struct {
	// this method provides the data for the loader
	fetch func(keys []string) ([]*models.Channel, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[string]*models.Channel

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *channelLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}
type channelLoaderBatch struct {
	keys    []string
	data    []*models.Channel
	error   []error
	closing bool
	done    chan struct{}
}
// Load a Channel by key, batching and caching will be applied automatically
func (l *ChannelLoader) Load(key string) (*models.Channel, error) {
	return l.LoadThunk(key)()
}
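// Illustrative sketch, not part of the generated output: the common case is a
// plain blocking Load. Concurrent Load calls that land within the same Wait
// window are coalesced into a single Fetch call.
//
//	channel, err := loader.Load("#rpdata")
//	if err != nil {
//		return err
//	}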
// LoadThunk returns a function that when called will block waiting for a Channel.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ChannelLoader) LoadThunk(key string) func() (*models.Channel, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*models.Channel, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &channelLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*models.Channel, error) {
		<-batch.done

		var data *models.Channel
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}
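// Illustrative sketch, not part of the generated output: the thunk pattern lets
// one goroutine queue keys on several loaders first and block only afterwards,
// so the keys can share batches. userLoader is a hypothetical second loader
// generated the same way for a different model.
//
//	channelThunk := channelLoader.LoadThunk("#rpdata")
//	userThunk := userLoader.LoadThunk("alice")
//
//	channel, err := channelThunk() // blocks here, after both batches are queued
//	user, userErr := userThunk()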
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *ChannelLoader) LoadAll(keys []string) ([]*models.Channel, []error) {
	results := make([]func() (*models.Channel, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	channels := make([]*models.Channel, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		channels[i], errors[i] = thunk()
	}
	return channels, errors
}
// LoadAllThunk returns a function that when called will block waiting for the Channels.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ChannelLoader) LoadAllThunk(keys []string) func() ([]*models.Channel, []error) {
	results := make([]func() (*models.Channel, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]*models.Channel, []error) {
		channels := make([]*models.Channel, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			channels[i], errors[i] = thunk()
		}
		return channels, errors
	}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.Clear(key), then
// call loader.Prime(key, value).)
func (l *ChannelLoader) Prime(key string, value *models.Channel) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, it's easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}
// Clear the value at key from the cache, if it exists
func (l *ChannelLoader) Clear(key string) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}
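// Illustrative sketch, not part of the generated output: refreshing a cached
// entry. Prime refuses to overwrite an existing key, so a forced refresh is
// Clear followed by Prime; updated is a hypothetical *models.Channel fetched
// elsewhere.
//
//	loader.Clear("#rpdata")
//	loader.Prime("#rpdata", updated) // returns true now that the key is absent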
func (l *ChannelLoader) unsafeSet(key string, value *models.Channel) {
	if l.cache == nil {
		l.cache = map[string]*models.Channel{}
	}
	l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if it's not found
// it will add the key to the batch
func (b *channelLoaderBatch) keyIndex(l *ChannelLoader, key string) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}
func (b *channelLoaderBatch) startTimer(l *ChannelLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}
func (b *channelLoaderBatch) end(l *ChannelLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}
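
Because the cache above is never evicted on its own (only Clear removes entries), a loader like this is normally created per request rather than shared globally. A minimal sketch of that pattern as net/http middleware, assuming a hypothetical fetchChannels batch lookup and the standard context and net/http imports:

	type ctxKey struct{}

	var channelLoaderKey ctxKey

	func withLoaders(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// a fresh loader per request, so the cache lives no longer than the request
			loader := NewChannelLoader(ChannelLoaderConfig{
				Fetch:    fetchChannels, // hypothetical batch lookup
				Wait:     2 * time.Millisecond,
				MaxBatch: 100,
			})
			ctx := context.WithValue(r.Context(), channelLoaderKey, loader)
			next.ServeHTTP(w, r.WithContext(ctx))
		})
	}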