// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"

	"git.aiterp.net/stufflog/server/models"
)

// IssueLoader batches and caches requests
type IssueLoader struct {
	// this method provides the data for the loader
	fetch func(keys []string) ([]*models.Issue, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[string]*models.Issue

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *issueLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}
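
// Illustrative sketch (not part of the generated file): one way to construct
// an IssueLoader within this package. fetchByIDs is a hypothetical
// single-round-trip lookup; the real project may wire this up differently.
// The fetch function must return results positionally aligned with keys.
func newIssueLoaderSketch(fetchByIDs func(ids []string) (map[string]*models.Issue, error)) *IssueLoader {
	return &IssueLoader{
		wait:     2 * time.Millisecond, // collect keys for up to 2ms before fetching
		maxBatch: 100,                  // flush early once 100 keys are queued
		fetch: func(keys []string) ([]*models.Issue, []error) {
			byID, err := fetchByIDs(keys)
			if err != nil {
				// a single-element error slice is fanned out to every caller in the batch
				return nil, []error{err}
			}
			issues := make([]*models.Issue, len(keys))
			for i, key := range keys {
				issues[i] = byID[key] // nil when the key was not found
			}
			return issues, nil
		},
	}
}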

// issueLoaderBatch holds the keys and, once fetched, the results for one
// in-flight batch. done is closed when the batch has completed.
type issueLoaderBatch struct {
	keys    []string
	data    []*models.Issue
	error   []error
	closing bool
	done    chan struct{}
}

// Load an Issue by key, batching and caching will be applied automatically
func (l *IssueLoader) Load(key string) (*models.Issue, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for an Issue.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *IssueLoader) LoadThunk(key string) func() (*models.Issue, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*models.Issue, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &issueLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*models.Issue, error) {
		<-batch.done

		var data *models.Issue
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}
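
// Illustrative sketch (not part of the generated file): LoadThunk queues the
// key into the current batch immediately but defers blocking until the thunk
// is invoked, so one goroutine can request several keys and let them share a
// single batched fetch.
func loadPairSketch(l *IssueLoader, a, b string) (*models.Issue, *models.Issue, error) {
	thunkA := l.LoadThunk(a) // queued, not yet resolved
	thunkB := l.LoadThunk(b) // usually lands in the same batch as a
	issueA, err := thunkA()  // blocks until the shared batch completes
	if err != nil {
		return nil, nil, err
	}
	issueB, err := thunkB() // returns promptly; the batch is already done
	if err != nil {
		return nil, nil, err
	}
	return issueA, issueB, nil
}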

// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub-batches depending on how the loader is configured
func (l *IssueLoader) LoadAll(keys []string) ([]*models.Issue, []error) {
	results := make([]func() (*models.Issue, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	issues := make([]*models.Issue, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		issues[i], errors[i] = thunk()
	}
	return issues, errors
}
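
// Illustrative sketch (not part of the generated file): LoadAll returns one
// result and one error per key, in key order. A caller that only needs
// all-or-nothing semantics can collapse the error slice like this.
func loadManySketch(l *IssueLoader, ids []string) ([]*models.Issue, error) {
	issues, errs := l.LoadAll(ids)
	for _, err := range errs {
		if err != nil {
			return nil, err // surface the first per-key failure
		}
	}
	return issues, nil
}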

// LoadAllThunk returns a function that when called will block waiting for the Issues.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *IssueLoader) LoadAllThunk(keys []string) func() ([]*models.Issue, []error) {
	results := make([]func() (*models.Issue, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]*models.Issue, []error) {
		issues := make([]*models.Issue, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			issues[i], errors[i] = thunk()
		}
		return issues, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.Clear(key), then call Prime(key, value).)
func (l *IssueLoader) Prime(key string, value *models.Issue) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, it's easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}
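
// Illustrative sketch (not part of the generated file): after a mutation,
// Clear followed by Prime guarantees the cache holds the fresh value even if
// the key was already cached. Prime stores a copy, so later changes to issue
// do not mutate the cached entry.
func cacheAfterWriteSketch(l *IssueLoader, id string, issue *models.Issue) {
	l.Clear(id)            // drop any stale entry for this key
	_ = l.Prime(id, issue) // succeeds because the key was just cleared
}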

// Clear the value at key from the cache, if it exists
func (l *IssueLoader) Clear(key string) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

// unsafeSet writes to the cache without locking; callers must hold l.mu.
func (l *IssueLoader) unsafeSet(key string, value *models.Issue) {
	if l.cache == nil {
		l.cache = map[string]*models.Issue{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if it's not found
// it will add the key to the batch
func (b *issueLoaderBatch) keyIndex(l *IssueLoader, key string) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

func (b *issueLoaderBatch) startTimer(l *IssueLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

func (b *issueLoaderBatch) end(l *IssueLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}
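
// Illustrative sketch (not part of the generated file): concurrent Load calls
// issued within the wait window are coalesced into a single fetch, which is
// the loader's main purpose. Each goroutine blocks only until its batch ends.
func concurrentLoadSketch(l *IssueLoader, ids []string) []*models.Issue {
	issues := make([]*models.Issue, len(ids))
	var wg sync.WaitGroup
	for i, id := range ids {
		wg.Add(1)
		go func(i int, id string) {
			defer wg.Done()
			issues[i], _ = l.Load(id) // errors ignored for brevity
		}(i, id)
	}
	wg.Wait()
	return issues
}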