Class: Berkshelf::API::CacheManager
- Inherits: Object
- Extended by: Forwardable
- Includes: GenericServer, Logging
- Defined in: lib/berkshelf/api/cache_manager.rb
Constant Summary
- SAVE_INTERVAL = 30.0
Instance Attribute Summary
- #cache ⇒ DependencyCache (readonly)
Class Method Summary
- .cache_file ⇒ String
Instance Method Summary
- #add(cookbook, metadata) ⇒ Hash
- #has_cookbook?(name, version) ⇒ Boolean
  Check if the cache knows about the given cookbook version.
- #initialize ⇒ CacheManager (constructor)
  A new instance of CacheManager.
- #load_save ⇒ Object
- #process_worker(worker) ⇒ Object
- #process_workers(workers) ⇒ Boolean
  Loops through a list of workers and merges their cookbook sets into the cache.
- #remove(name, version) ⇒ DependencyCache
  Remove the cached item matching the given name and version.
- #to_s ⇒ String
Methods included from Logging
Methods included from GenericServer
Constructor Details
#initialize ⇒ CacheManager
Returns a new instance of CacheManager.
# File 'lib/berkshelf/api/cache_manager.rb', line 25

def initialize
  log.info "#{self} starting..."
  @cache = DependencyCache.new
  load_save if File.exist?(self.class.cache_file)
  every(SAVE_INTERVAL) { save }
end
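The constructor loads a previously saved cache from disk (if one exists) and schedules a periodic save. Below is a dependency-free sketch of that start-up pattern: the real class uses Celluloid's every timer and a DependencyCache, while a plain background thread, a Hash, and a Marshal file format stand in here. TinyCacheManager and its file layout are hypothetical, not the berkshelf-api implementation.

# Sketch only: class name, Marshal format, and the saver thread are
# illustrative stand-ins for the Celluloid-based CacheManager above.
require "tmpdir"

class TinyCacheManager
  SAVE_INTERVAL = 30.0

  def initialize(cache_file)
    @cache_file = cache_file
    @cache = {}                               # stand-in for DependencyCache
    load_save if File.exist?(@cache_file)     # warm start from a previous run
    @saver = Thread.new do                    # plays the role of every(SAVE_INTERVAL) { save }
      loop { sleep SAVE_INTERVAL; save }
    end
  end

  def load_save
    @cache = Marshal.load(File.binread(@cache_file))
  end

  def save
    File.binwrite(@cache_file, Marshal.dump(@cache))
  end
end

manager = TinyCacheManager.new(File.join(Dir.tmpdir, "cerch"))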
Instance Attribute Details
#cache ⇒ DependencyCache (readonly)
# File 'lib/berkshelf/api/cache_manager.rb', line 23

def cache
  @cache
end
Class Method Details
.cache_file ⇒ String
# File 'lib/berkshelf/api/cache_manager.rb', line 5

def cache_file
  File.join(Application.home_path, "cerch")
end
Instance Method Details
#add(cookbook, metadata) ⇒ Hash
# File 'lib/berkshelf/api/cache_manager.rb', line 36

def add(cookbook, metadata)
  log.debug "#{self} adding (#{cookbook.name}, #{cookbook.version})"
  cache.add(cookbook, metadata)
end
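A hedged usage sketch: cache_manager is assumed to be a running CacheManager actor, and cookbook/metadata are the objects a worker hands back (a Ridley cookbook resource and its metadata). None of them are constructed here.

# Hypothetical objects, not constructed here: a running CacheManager actor
# plus a cookbook/metadata pair returned by a worker.
cache_manager.add(cookbook, metadata)
cache_manager.has_cookbook?(cookbook.name, cookbook.version) # => true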
#has_cookbook?(name, version) ⇒ Boolean
Check if the cache knows about the given cookbook version
# File 'lib/berkshelf/api/cache_manager.rb', line 117

def has_cookbook?(name, version)
  @cache.has_cookbook?(name, version)
end
#load_save ⇒ Object
# File 'lib/berkshelf/api/cache_manager.rb', line 121

def load_save
  log.info "Loading save from #{self.class.cache_file}"
  @cache = DependencyCache.from_file(self.class.cache_file)
  log.info "Cache contains #{@cache.cookbooks.size} items"
end
#process_worker(worker) ⇒ Object
# File 'lib/berkshelf/api/cache_manager.rb', line 81

def process_worker(worker)
  log.info "Processing #{worker}"
  remote_cookbooks = worker.cookbooks
  log.info "Found #{remote_cookbooks.size} cookbooks from #{worker}"
  created_cookbooks, deleted_cookbooks = diff(remote_cookbooks, worker.priority)
  log.debug "#{created_cookbooks.size} cookbooks to be added to the cache from #{worker}"
  log.debug "#{deleted_cookbooks.size} cookbooks to be removed from the cache from #{worker}"

  # Process metadata in chunks - Ridley cookbook resource uses a
  # task_class TaskThread, which means each future gets its own
  # thread. If we have many (>2000) cookbooks we can easily
  # exhaust the available threads on the system.
  metadata = []
  until created_cookbooks.empty?
    work = created_cookbooks.slice!(0, 500)
    log.info "Processing metadata for #{work.size} cookbooks with #{created_cookbooks.size} remaining on #{worker}"
    work.map! do |remote|
      [ remote, worker.future(:metadata, remote) ]
    end.map! do |remote, metadata|
      metadata.value ? [remote, metadata.value] : nil
    end
    metadata += work.compact
  end

  log.info "About to merge cookbooks"
  merge(metadata, deleted_cookbooks)
  log.info "#{self} cache updated."
end
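The chunking above exists because each Ridley metadata future gets its own thread, so fetching metadata for thousands of cookbooks at once can exhaust system threads. Here is a standalone sketch of the same batch-and-collect pattern using plain threads instead of Celluloid futures; fetch_metadata is a hypothetical stand-in for worker.future(:metadata, remote).

# Keep at most BATCH_SIZE fetches in flight at a time, drop entries whose
# fetch returned nil, and accumulate [remote, metadata] pairs.
BATCH_SIZE = 500

def fetch_metadata(remote)
  { "name" => remote }                  # dummy payload for illustration
end

def fetch_all_metadata(remotes)
  results = []
  until remotes.empty?
    batch = remotes.slice!(0, BATCH_SIZE)
    threads = batch.map { |remote| [remote, Thread.new { fetch_metadata(remote) }] }
    results += threads.map { |remote, t| (md = t.value) ? [remote, md] : nil }.compact
  end
  results
end

fetch_all_metadata(%w[apache2 mysql nginx]) # => [["apache2", {"name"=>"apache2"}], ...]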
#process_workers(workers) ⇒ Boolean
Loops through a list of workers and merges their cookbook sets into the cache
# File 'lib/berkshelf/api/cache_manager.rb', line 58

def process_workers(workers)
  # If the cache has been warmed already, we want to spawn
  # workers for all the endpoints concurrently. However, if the
  # cache is cold we want to run sequentially, so higher priority
  # endpoints can work before lower priority, avoiding duplicate
  # downloads.
  # We don't want crashing workers to crash the CacheManager.
  # Crashes are logged so just ignore the exceptions
  if warmed?
    Array(workers).flatten.collect do |worker|
      self.future(:process_worker, worker)
    end.each do |f|
      f.value rescue nil
    end
  else
    Array(workers).flatten.each do |worker|
      process_worker(worker) rescue nil
    end
  end

  self.set_warmed
end
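A sketch of the warm/cold dispatch above with plain threads instead of Celluloid futures; the process block stands in for process_worker and the warmed: flag for the actor's warmed? state, both assumptions for illustration.

# Warm cache: refresh all endpoints concurrently.
# Cold cache: run in priority order so higher-priority endpoints fill the
# cache first and lower-priority ones avoid duplicate downloads.
# Worker crashes are swallowed so they cannot take the manager down.
def run_workers(workers, warmed:, &process)
  workers = Array(workers).flatten
  if warmed
    workers.map { |w| Thread.new { process.call(w) rescue nil } }.each(&:join)
  else
    workers.each { |w| process.call(w) rescue nil }
  end
end

run_workers(%w[endpoint_a endpoint_b], warmed: false) { |w| puts "processing #{w}" }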
#remove(name, version) ⇒ DependencyCache
Remove the cached item matching the given name and version
# File 'lib/berkshelf/api/cache_manager.rb', line 47

def remove(name, version)
  log.debug "#{self} removing (#{name}, #{version})"
  cache.remove(name, version)
end
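Continuing the hypothetical usage from #add above, with the same assumed cache_manager actor:

# Hypothetical usage: evict a specific version, then confirm it is gone.
cache_manager.remove("apache2", "1.2.3")
cache_manager.has_cookbook?("apache2", "1.2.3") # => false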
#to_s ⇒ String
# File 'lib/berkshelf/api/cache_manager.rb', line 128

def to_s
  "Cache manager"
end