Mirror of https://github.com/kemko/paperclip.git (synced 2026-01-01 16:05:40 +03:00)
Gemfile
@@ -7,12 +7,12 @@ gemspec
 
 gem 'pg'
 
-gem 'aws-sdk-s3'
+gem 'aws-sdk-s3', '=1.143.0'
 gem 'fog-local'
 
 gem 'delayed_paperclip', github: 'insales/delayed_paperclip'
 gem 'rails'
-gem 'sidekiq'
+gem 'sidekiq', '~>6.5' # in 6.4.2 Worker started being renamed to Job; removed in 7
 
 gem 'test-unit'
 gem 'simplecov', require: false
@@ -26,7 +26,7 @@ unless defined?(Appraisal)
 gem 'appraisal'
 
 group :lint do
-  gem 'rubocop', '~>0.81'
+  gem 'rubocop'
   gem 'rubocop-rails'
   gem 'rubocop-rspec'
   gem 'rubocop-performance'
@@ -10,3 +10,18 @@ services:
       POSTGRES_HOST_AUTH_METHOD: trust
     ports:
       - 5432:5432
+
+  minio:
+    # image: bitnami/minio:2024.3.30 # fs backend removed, xl2 only
+    image: bitnami/minio:2022.10.29
+    ports:
+      - '9002:9000'
+      - '9003:9001'
+    # volumes:
+    #   - './tmp/minio:/bitnami/minio/data:rw'
+    #   - './tmp/minio:/data:rw'
+    environment:
+      - MINIO_DEFAULT_BUCKETS=bucketname
+      - MINIO_ROOT_USER=test
+      - MINIO_ROOT_PASSWORD=testpassword
+      - MINIO_STORAGE_USE_HTTPS=false
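For reference, the integration test later in this diff points aws-sdk-s3 at this MinIO container roughly as sketched below; the credentials, endpoint and bucket mirror the compose environment above, and the region value is arbitrary since MinIO does not check it.

    # Sketch: connecting aws-sdk-s3 to the local MinIO defined above.
    require 'aws-sdk-s3'

    client = Aws::S3::Client.new(
      access_key_id: 'test',
      secret_access_key: 'testpassword',
      endpoint: 'http://localhost:9002',
      region: 'laplandia',        # any non-empty region works against MinIO
      force_path_style: true      # address buckets by path, not by virtual host
    )
    bucket = Aws::S3::Resource.new(client: client).bucket('bucketname')
    bucket.object('smoke-test.txt').put(body: 'hello') # bucket name comes from MINIO_DEFAULT_BUCKETS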
@@ -2,8 +2,6 @@ module Paperclip
   module Storage
     autoload :Filesystem, 'paperclip/storage/filesystem'
     autoload :DelayedUpload, 'paperclip/storage/delayed_upload'
-    autoload :Delayeds3, 'paperclip/storage/delayeds3'
-    autoload :Cached, 'paperclip/storage/cached'
     autoload :NoCacheS3, 'paperclip/storage/no_cache_s3'
   end
 end
@@ -1,226 +0,0 @@
module Paperclip
  module Storage
    # Saves the file to the `:cache` store and runs jobs to copy it to one or more `:store` stores.
    # All stores are Fog::Storage::Directory instances (fog has S3 and filesystem adapters).
    #
    # Options:
    # - `:cache` - temporary storage,
    # - `:stores` - one or more permanent storages (hash of {id => fog_directory}),
    #   the first one is the main store, the others are mirrors,
    # - `:key` - identifier template.
    # - `:url` - hash of templates {cache: t1, store: t2}.
    #   Values support :key interpolation, which is performed at configuration time.
    # - `:to_file_using_fog` - use the fog interface in #to_file to fetch the file from the store.
    #   If disabled, downloads the file by URL via a usual HTTP request.
    #
    # It uses the `#{attachment_name}_synced_to_#{store_id}` field to mark that the file
    # has been uploaded to a particular storage.
    module Cached
      class << self
        def included(base)
          base.extend(ClassMethods)
        end
      end

      module ClassMethods
        attr_reader :key_template,
                    :url_templates,
                    :directories,
                    :store_ids,
                    :main_store_id,
                    :download_by_url

        def setup(*)
          super

          @key_template = options.fetch(:key)
          @key_template = key_template[1..-1] if key_template.start_with?('/')

          @url_templates = options.fetch(:url).map { |k, v| [k, v.gsub(':key', key_template)] }.to_h

          @directories = options.fetch(:stores).symbolize_keys
          @directories[:cache] = @options.fetch(:cache)

          @store_ids = options[:stores].keys.map(&:to_sym)
          @main_store_id = store_ids.first

          @download_by_url = options[:download_by_url]
        end

        def directory_for(store_id)
          directories.fetch(store_id.to_sym)
        end

        def synced_field_name(store_id)
          @synced_field_names ||= store_ids.each_with_object({}) do |key, result|
            result[key] = :"#{attachment_name}_synced_to_#{key}"
          end
          @synced_field_names[store_id.to_sym]
        end
      end

      def initialize(*)
        super
        @queued_jobs = []
      end

      def key(style = default_style)
        interpolate(self.class.key_template, style)
      end

      def storage_url(style = default_style)
        current_store = synced_to?(self.class.main_store_id) ? :store : :cache
        interpolate(self.class.url_templates.fetch(current_store), style)
      end

      def reprocess!
        super
        flush_jobs
      end

      # If store_id is given, it forces a download from that specific store using the fog interface.
      # Otherwise it tries to download from the cache store and finally uses the URL to download the
      # file via HTTP. This is the way most compatible with delayeds3.
      def to_file(style = default_style, store_id = nil)
        style_key = key(style)
        return download_from_fog(store_id, style_key) if store_id
        result = super(style) || download_from_fog(:cache, style_key)
        return result if result
        # Download by URL only if the file is synced to the main store. Similar to delayeds3.
        return unless synced_to?(self.class.main_store_id)
        if self.class.download_by_url
          # FIXME: do we need to escape here?
          uri = URI(URI::DEFAULT_PARSER.escape(storage_url(style)))
          response = Net::HTTP.get_response(uri)
          create_tempfile(response.body) if response.is_a?(Net::HTTPOK)
        else
          download_from_fog(self.class.main_store_id, style_key)
        end
      end

      def path(*)
        raise '#path is not available for this type of storage, use #to_file instead'
      end

      # Checks if the attached file exists. When store_id is not given,
      # it uses a fast check and does not perform an API request for synced files.
      def exists?(style = default_style, store_id = nil)
        return true if !store_id && synced_to?(self.class.main_store_id)
        store_id ||= :cache
        !self.class.directory_for(store_id).files.head(key(style)).nil?
      end

      def flush_writes #:nodoc:
        return if queued_for_write.empty?
        write_to_directory(:cache, queued_for_write)
        unless delay_processing? && dirty?
          self.class.store_ids.each { |store_id| enqueue_sync_job(store_id) }
        end
        queued_for_write.clear
      end

      # Important: it does not delete files from permanent stores.
      def delete_styles_later(styles)
        # If the image was uploaded to the clouds, it has most likely already been deleted,
        # so we can avoid loading the storage with checks.
        return if all_synced?
        keys = styles.map { |x| key(x) }
        -> { delete_keys_from(:cache, keys) }
      end

      # Enqueues all pending jobs. First, jobs are placed into the internal queue in flush_writes
      # (in after_save) and this method pushes them for execution (in after_commit).
      def flush_jobs
        queued_jobs&.each(&:call)&.clear
      end

      def upload_to(store_id)
        sync_to(store_id)
        clear_cache
      end

      # Writes files from the cache to a permanent store.
      def sync_to(store_id)
        synced_field_name = self.class.synced_field_name(store_id)
        return unless instance.respond_to?(synced_field_name)
        return true if instance.public_send(synced_field_name)
        files = self.class.all_styles.each_with_object({}) do |style, result|
          file = to_file(style, :cache)
          # For easier monitoring
          unless file
            raise "Missing cached files for #{instance.class.name}:#{instance.id}:#{style}"
          end
          result[style] = file
        end
        write_to_directory(store_id, files)
        # ignore deleted objects and skip callbacks
        if instance.class.unscoped.where(id: instance.id).update_all(synced_field_name => true) == 1
          instance.touch
          instance[synced_field_name] = true
        end
      end

      def clear_cache
        delete_styles_from(:cache) if all_synced?
      end

      private

      def synced_to?(store_id)
        instance.try(self.class.synced_field_name(store_id))
      end

      def all_synced?
        self.class.store_ids.all? do |store_id|
          synced_field_name = self.class.synced_field_name(store_id)
          !instance.respond_to?(synced_field_name) || instance[synced_field_name]
        end
      end

      attr_reader :queued_jobs

      def enqueue_sync_job(store_id)
        synced_field_name = self.class.synced_field_name(store_id)
        return unless instance.respond_to?(synced_field_name)
        instance.update_column(synced_field_name, false) if instance[synced_field_name]
        queued_jobs.push -> { DelayedUpload.upload_later(self, store_id) }
      end

      def download_from_fog(store_id, key)
        body = self.class.directory_for(store_id).files.get(key)&.body
        create_tempfile(body) if body
      end

      def write_to_directory(store_id, files)
        directory = self.class.directory_for(store_id)
        common_options = {
          content_type: instance_read(:content_type),
          cache_control: "max-age=#{10.years.to_i}",
        }
        files.each do |style, file|
          path = key(style)
          log "Saving to #{store_id}:#{path}"
          directory.files.create(
            key: path,
            public: true,
            body: file,
            **common_options,
          )
        end
      end

      def delete_styles_from(store_id, styles = self.class.all_styles)
        keys = styles.map { |x| key(x) }
        delete_keys_from(store_id, keys)
      end

      def delete_keys_from(store_id, keys)
        directory = self.class.directory_for(store_id)
        keys.each do |key|
          log("Deleting #{store_id}:#{key}")
          directory.files.head(key)&.destroy
        end
      end
    end
  end
end
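For context, an attachment using the removed Cached storage was configured roughly as in the sketch below, following the options documented in the module header and the test setup further down in this diff; the model name, fog directories and URL templates are illustrative only, not taken from the repository.

    # Rough sketch of a has_attached_file configuration for the Cached storage
    # (model, directories and URLs are hypothetical).
    require 'fog/local'

    cache_dir = Fog::Storage.new(provider: 'Local', local_root: 'tmp/cache')
                            .directories.new(key: '', public: true)
    store_dir = Fog::Storage.new(provider: 'Local', local_root: 'tmp/store')
                            .directories.new(key: '', public: true)

    class User < ActiveRecord::Base
      # expects a boolean column avatar_synced_to_store_1 (see synced_field_name)
      has_attached_file :avatar,
                        storage: :cached,
                        key: ':class/:id/:style-:filename',
                        url: { cache: 'http://cache.example/:key',
                               store: 'http://cdn.example/:key' },
                        cache: cache_dir,
                        stores: { store_1: store_dir }
    end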
@@ -1,318 +0,0 @@
begin
  require "aws-sdk-s3"
rescue LoadError => e
  e.message << " (You may need to install the aws-sdk-s3 gem)"
  raise e
end

module Paperclip
  module Storage
    # Requires a boolean field synced_to_s3 on the model.
    module Delayeds3
      class << self
        def included(base)
          base.extend(ClassMethods)
        end

        def parse_credentials(creds)
          creds = find_credentials(creds).stringify_keys
          (creds[Rails.env] || creds).symbolize_keys
        end

        def find_credentials(creds)
          case creds
          when File
            YAML.load_file(creds.path)
          when String
            YAML.load_file(creds)
          when Hash
            creds
          else
            raise ArgumentError, "Credentials are not a path, file, or hash."
          end
        end
      end

      module ClassMethods
        attr_reader :s3_url_template, :s3_path_template,
                    :filesystem_url_template, :filesystem_path_template,
                    :s3_credentials, :s3_bucket, :synced_to_s3_field,
                    :synced_to_yandex_field, :yandex_bucket_name,
                    :yandex_credentials,
                    :synced_to_sbercloud_field,
                    :sbercloud_bucket_name,
                    :sbercloud_credentials

        def setup(*)
          super

          @s3_url_template = options[:s3_url]
          @s3_path_template = options[:s3_path]
          @filesystem_url_template = options[:filesystem_url]
          @filesystem_path_template = options[:filesystem_path]

          @s3_credentials = Delayeds3.parse_credentials(options[:s3_credentials])
          @yandex_credentials = Delayeds3.parse_credentials(options[:yandex_credentials])
          @sbercloud_credentials = Delayeds3.parse_credentials(options[:sbercloud_credentials])

          @s3_bucket = options[:bucket] || @s3_credentials[:bucket]
          @yandex_bucket_name = options[:yandex_bucket]
          @sbercloud_bucket_name = options[:sbercloud_bucket]

          @synced_to_s3_field ||= :"#{attachment_name}_synced_to_s3"
          @synced_to_yandex_field ||= :"#{attachment_name}_synced_to_yandex"
          @synced_to_sbercloud_field ||= :"#{attachment_name}_synced_to_sbercloud"
        end

        def aws_bucket
          @aws_bucket ||= begin
            params = s3_credentials.reject { |_k, v| v.blank? }
            params[:region] ||= 'us-east-1'
            s3_client = Aws::S3::Client.new(params)
            s3_resource = Aws::S3::Resource.new(client: s3_client)
            s3_resource.bucket(s3_bucket)
          end
        end

        def yandex_bucket
          @yandex_bucket ||= begin
            params = yandex_credentials.reject { |_k, v| v.blank? }
            params[:region] ||= 'ru-central1'
            s3_client = Aws::S3::Client.new(params)
            s3_resource = Aws::S3::Resource.new(client: s3_client)
            s3_resource.bucket(yandex_bucket_name)
          end
        end

        def sbercloud_bucket
          @sbercloud_bucket ||= begin
            params = sbercloud_credentials.reject { |_k, v| v.blank? }
            params[:region] ||= 'ru-moscow'
            s3_client = Aws::S3::Client.new(params)
            s3_resource = Aws::S3::Resource.new(client: s3_client)
            s3_resource.bucket(sbercloud_bucket_name)
          end
        end
      end

      delegate :synced_to_s3_field, :synced_to_yandex_field, :synced_to_sbercloud_field, to: :class

      def initialize(*)
        super
        @queued_jobs = []
      end

      def storage_url(style = default_style)
        interpolate(self.class.s3_url_template, style)
      end

      def path(style = default_style)
        return if original_filename.nil?
        interpolate(self.class.s3_path_template, style)
      end

      def filesystem_path(style = default_style)
        return if original_filename.nil?
        interpolate(self.class.filesystem_path_template, style)
      end

      def reprocess!
        super
        flush_jobs
      end

      def to_file(style = default_style)
        super || (File.new(filesystem_path(style), 'rb') if exists?(style, :cache)) || download_file(style)
      end

      def download_file(style = default_style)
        return unless instance_read(:synced_to_yandex)
        # FIXME: do we need to escape here?
        uri = URI(URI::DEFAULT_PARSER.escape(url(style)))
        response = Net::HTTP.get_response(uri)
        create_tempfile(response.body) if response.is_a?(Net::HTTPOK)
      end

      # Checks if the attached file exists. When store_id is not given,
      # it uses a fast check and does not perform an API request for synced files.
      def exists?(style = default_style, store_id = nil)
        return true if !store_id && instance_read(:synced_to_yandex)
        store_id ||= :cache
        case store_id
        when :cache
          File.exist?(filesystem_path(style))
        when :s3
          self.class.aws_bucket.object(s3_path(style)).exists?
        when :yandex
          self.class.yandex_bucket.object(s3_path(style)).exists?
        when :sbercloud
          self.class.sbercloud_bucket.object(s3_path(style)).exists?
        else
          raise 'Unknown store'
        end
      end

      def s3_path(style)
        result = interpolate(self.class.s3_path_template, style)
        result.start_with?('/') ? result[1..-1] : result
      end

      def filesystem_paths(styles = self.class.all_styles)
        h = {}
        styles.uniq.map do |style|
          path = filesystem_path(style)
          h[style] = path if File.exist?(path)
        end
        h
      end

      def file_content_type(path)
        Paperclip::Upfile.content_type_from_file path
      end

      def write_to_s3
        return true if instance_read(:synced_to_s3)
        paths = filesystem_paths
        if paths.length < styles.length || paths.empty? # To make monitoring easier
          raise "Local files not found for #{instance.class.name}:#{instance.id}"
        end
        paths.each do |style, file|
          log("saving to s3 #{file}")
          content_type = style == :original ? instance_read(:content_type) : file_content_type(file)
          s3_object = self.class.aws_bucket.object(s3_path(style))
          s3_object.upload_file(file,
                                cache_control: "max-age=#{10.year.to_i}",
                                content_type: content_type,
                                expires: 10.year.from_now.httpdate,
                                acl: 'public-read')
        end
        if instance.class.unscoped.where(id: instance.id).update_all(synced_to_s3_field => true) == 1
          instance.touch
        end
      end

      def write_to_yandex
        return true if instance_read(:synced_to_yandex)
        paths = filesystem_paths
        if paths.length < styles.length || paths.empty? # To make monitoring easier
          raise "Local files not found for #{instance.class.name}:#{instance.id}"
        end
        paths.each do |style, file|
          log("saving to yandex #{file}")
          content_type = style == :original ? instance_read(:content_type) : file_content_type(file)
          s3_object = self.class.yandex_bucket.object(s3_path(style))
          s3_object.upload_file(file,
                                cache_control: "max-age=#{10.year.to_i}",
                                content_type: content_type,
                                expires: 10.year.from_now.httpdate,
                                acl: 'public-read')
        end
        if instance.class.unscoped.where(id: instance.id).update_all(synced_to_yandex_field => true) == 1
          instance.touch
        end
      end

      def write_to_sbercloud
        return true if instance_read(:synced_to_sbercloud)
        paths = filesystem_paths
        if paths.length < styles.length || paths.empty? # To make monitoring easier
          raise "Local files not found for #{instance.class.name}:#{instance.id}"
        end
        paths.each do |style, file|
          log("saving to sbercloud #{file}")
          content_type = style == :original ? instance_read(:content_type) : file_content_type(file)
          s3_object = self.class.sbercloud_bucket.object(s3_path(style))
          s3_object.upload_file(file,
                                cache_control: "max-age=#{10.year.to_i}",
                                content_type: content_type,
                                expires: 10.year.from_now.httpdate,
                                acl: 'public-read')
        end
        if instance.class.unscoped.where(id: instance.id).update_all(synced_to_sbercloud_field => true) == 1
          instance.touch
        end
      end

      def flush_writes #:nodoc:
        return if queued_for_write.empty?

        queued_for_write.each do |style, file|
          file.close
          FileUtils.mkdir_p(File.dirname(filesystem_path(style)))
          log("saving to filesystem #{filesystem_path(style)}")
          FileUtils.mv(file.path, filesystem_path(style))
          FileUtils.chmod(0644, filesystem_path(style))
        end

        unless delay_processing? && dirty?
          %i[yandex sbercloud].each do |storage|
            storage_field = send("synced_to_#{storage}_field")
            if instance.respond_to?(storage_field) && instance_read("synced_to_#{storage}")
              instance.update_column(storage_field, false)
            end
            # It seems that without a delay the images do not have time to propagate over NFS.
            queued_jobs.push -> { DelayedUpload.upload_later(self, storage, 10.seconds) }
          end
        end
        queued_for_write.clear
      end

      # Deletes a file and all parent directories if they are empty.
      def delete_recursive(path)
        initial_path = path
        begin
          FileUtils.rm(path)
        rescue Errno::ENOENT, Errno::ESTALE, Errno::EEXIST
          nil
        end
        begin
          while true
            path = File.dirname(path)
            FileUtils.rmdir(path)
            break if File.exist?(path) # Ruby 1.9.2 does not raise if the removal failed.
          end
        rescue Errno::EEXIST, Errno::EACCES, Errno::ENOTEMPTY,
               Errno::ENOENT, Errno::EINVAL, Errno::ENOTDIR, Errno::ESTALE
          # already deleted
        rescue SystemCallError => e
          Rollbar.error(e, { path: path, initial_path: initial_path })
        end
      end

      def delete_styles_later(styles)
        # If the image was uploaded to the clouds, it has most likely already been deleted,
        # so we can avoid loading the storage with checks.
        return if instance_read(:synced_to_yandex) && instance_read(:synced_to_sbercloud)
        filenames = filesystem_paths(styles).values
        -> { delete_local_files!(filenames) }
      end

      def delete_local_files!(_filenames = filesystem_paths.values)
        # TODO: should _filenames be used here somehow?
        filesystem_paths.values.each do |filename|
          log("Deleting local file #{filename}")
          delete_recursive(filename)
        end
      end

      def flush_jobs
        queued_jobs&.each(&:call)&.clear
      end

      def upload_to(store_id)
        case store_id.to_s
        when 's3' then write_to_s3
        when 'yandex' then write_to_yandex
        when 'sbercloud' then write_to_sbercloud
        else raise 'Unknown store id'
        end
        instance.reload
        delete_local_files! if instance_read(:synced_to_yandex) && instance_read(:synced_to_sbercloud)
      end

      private

      attr_reader :queued_jobs
    end
  end
end
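Both Delayeds3 and the storages that replace it track upload state in per-store boolean columns named `#{attachment_name}_synced_to_<store>`. A migration adding them might look roughly like the sketch below; the table name, attachment name and Rails version are illustrative, while `null: false, default: false` follows the pattern used by the test schema later in this diff.

    # Hypothetical migration adding synced flags for an :avatar attachment on users.
    class AddAvatarSyncedFlags < ActiveRecord::Migration[6.1]
      def change
        add_column :users, :avatar_synced_to_s3, :boolean, null: false, default: false
        add_column :users, :avatar_synced_to_yandex, :boolean, null: false, default: false
        add_column :users, :avatar_synced_to_sbercloud, :boolean, null: false, default: false
      end
    end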
@@ -170,13 +170,9 @@ module Paperclip
       return true if instance.public_send(synced_field_name)
 
       styles_to_upload = subject_to_post_process? ? self.class.all_styles : [:original]
-      files ||= styles_to_upload.each_with_object({}) do |style, result|
-        file = to_file(style, self.class.main_store_id)
-        # For easier monitoring
-        unless file
-          raise "Missing files in #{self.class.main_store_id} for #{instance.class.name}:#{instance.id}:#{style}"
-        end
-        result[style] = file
+      files ||= styles_to_upload.index_with do |style|
+        to_file(style, self.class.main_store_id) ||
+          raise("Missing files in #{self.class.main_store_id} for #{instance.class.name}:#{instance.id}:#{style}")
       end
       write_to_store(store_id, files)
       # ignore deleted objects and skip callbacks
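The refactor above swaps an each_with_object loop for ActiveSupport's `Enumerable#index_with`, which builds a hash keyed by the collection elements; a raise inside the block still aborts the build, so the fail-fast behaviour for missing files is preserved. A rough equivalence:

    # index_with (ActiveSupport) vs. an explicit each_with_object hash build
    %i[original thumb].index_with { |style| style.to_s.upcase }
    # => { original: "ORIGINAL", thumb: "THUMB" }
    %i[original thumb].each_with_object({}) { |style, h| h[style] = style.to_s.upcase }
    # => { original: "ORIGINAL", thumb: "THUMB" }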
@@ -1,86 +0,0 @@
# frozen_string_literal: true

require 'test_helper'
require 'fog/local'
require 'sidekiq'
require 'sidekiq/testing'

require 'delayed_paperclip'
DelayedPaperclip::Railtie.insert

# rubocop:disable Naming/VariableNumber

class FakeModel
  attr_accessor :synced_to_store_1, :synced_to_store_2
end

class CachedStorageTest < Test::Unit::TestCase
  TEST_ROOT = Pathname(__dir__).join('test')

  def fog_directory(suffix)
    Fog::Storage.new(provider: 'Local', local_root: TEST_ROOT.join(suffix.to_s))
                .directories.new(key: '', public: true)
  end

  def stub_file(name, content)
    StringIO.new(content).tap { |string_io| string_io.stubs(:original_filename).returns(name) }
  end

  setup do
    rebuild_model(
      storage: :cached,
      key: ':filename',
      url: {
        cache: 'http://cache.local/:key',
        store: 'http://store.local/:key'
      },
      cache: fog_directory(:cache),
      stores: {
        store_1: fog_directory(:store_1),
        store_2: fog_directory(:store_2)
      }
    )
    modify_table(:dummies) do |table|
      table.boolean :avatar_synced_to_store_1, null: false, default: false
      table.boolean :avatar_synced_to_store_2, null: false, default: false
    end
    @instance = Dummy.create
  end

  teardown { TEST_ROOT.rmtree if TEST_ROOT.exist? }

  context 'assigning file' do
    setup { Sidekiq::Testing.fake! }

    should 'write to cache and enqueue jobs' do
      @instance.update!(avatar: stub_file('test.txt', 'qwe'))
      @instance.reload
      attachment = @instance.avatar
      key = attachment.key
      assert_equal true, attachment.exists?
      assert_equal false, attachment.class.directory_for(:cache).files.head(key).nil?
      assert_equal true, attachment.class.directory_for(:store_1).files.head(key).nil?
      assert_equal true, attachment.class.directory_for(:store_2).files.head(key).nil?
      assert_equal 'http://cache.local/test.txt', attachment.url(:original, false)
    end

    context 'with inline jobs' do
      setup { Sidekiq::Testing.inline! }
      teardown { Sidekiq::Testing.fake! }

      should 'write to permanent stores and clear cache' do
        @instance.update!(avatar: stub_file('test.txt', 'qwe'))
        @instance.run_callbacks(:commit)
        @instance.reload
        attachment = @instance.avatar
        key = attachment.key
        assert_equal true, attachment.class.directory_for(:cache).files.head(key).nil?
        assert_equal false, attachment.class.directory_for(:store_1).files.head(key).nil?
        assert_equal false, attachment.class.directory_for(:store_2).files.head(key).nil?
        assert_equal 'http://store.local/test.txt', attachment.url(:original, false)
      end
    end
  end
end

# rubocop:enable Naming/VariableNumber
@@ -11,10 +11,6 @@ DelayedPaperclip::Railtie.insert
 
 # rubocop:disable Naming/VariableNumber
 
-class FakeModel
-  attr_accessor :synced_to_store_1, :synced_to_store_2
-end
-
 class NoCacheS3Test < Test::Unit::TestCase
   TEST_ROOT = Pathname(__dir__).join('test')
 
@@ -25,17 +21,25 @@ class NoCacheS3Test < Test::Unit::TestCase
   setup do
     rebuild_model(
       storage: :no_cache_s3,
-      key: ':filename',
+      key: "dummy_imgs/:id/:style-:filename",
       url: 'http://store.local/:key',
       stores: {
         store_1: { access_key_id: '123', secret_access_key: '123', region: 'r', bucket: 'buck' },
         store_2: { access_key_id: '456', secret_access_key: '456', region: 'r', bucket: 'buck' }
       },
+      # styles: {
+      #   original: { geometry: '4x4>', processors: %i[thumbnail optimizer] }, # '4x4>' to limit size
+      #   medium: '3x3',
+      #   small: { geometry: '2x2', processors: [:recursive_thumbnail], thumbnail: :medium },
+      #   micro: { geometry: '1x1', processors: [:recursive_thumbnail], thumbnail: :small }
+      # }
       styles: {
-        original: { geometry: '4x4>', processors: %i[thumbnail optimizer] },
-        medium: '3x3',
-        small: { geometry: '2x2', processors: [:recursive_thumbnail], thumbnail: :medium },
-        micro: { geometry: '1x1', processors: [:recursive_thumbnail], thumbnail: :small }
+        original: { geometry: '2048x2048>', processors: %i[thumbnail optimizer] },
+        large: '480x480',
+        medium: '240x240',
+        compact: { geometry: '160x160', processors: [:recursive_thumbnail], thumbnail: :medium },
+        thumb: { geometry: '100x100', processors: [:recursive_thumbnail], thumbnail: :compact },
+        micro: { geometry: '48x48', processors: [:recursive_thumbnail], thumbnail: :thumb }
       }
     )
     modify_table(:dummies) do |table|
@@ -48,15 +52,16 @@ class NoCacheS3Test < Test::Unit::TestCase
     @store1_stub.stubs(:url).returns('http://store.local')
     @store2_stub.stubs(:url).returns('http://store.local')
     @instance.avatar.class.stubs(:stores).returns({ store_1: @store1_stub, store_2: @store2_stub })
-    Dummy::AvatarAttachment.any_instance.stubs(:to_file).returns(
-      stub_file('pixel.gif', Base64.decode64('R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw'))
-    )
+    @gif_pixel = Base64.decode64('R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw')
   end
 
   teardown { TEST_ROOT.rmtree if TEST_ROOT.exist? }
 
   context 'assigning file' do
-    setup { Sidekiq::Testing.fake! }
+    setup do
+      Sidekiq::Testing.fake!
+      Dummy::AvatarAttachment.any_instance.stubs(:to_file).returns(stub_file('pixel.gif', @gif_pixel))
+    end
 
     should 'set synced_fields to false' do
       @instance.avatar_synced_to_store_1 = true
@@ -73,7 +78,7 @@ class NoCacheS3Test < Test::Unit::TestCase
       @instance.run_callbacks(:commit)
       @instance.reload
       attachment = @instance.avatar
-      assert_equal 'http://store.local/test.txt', attachment.url(:original, false)
+      assert_equal 'http://store.local/dummy_imgs/1/original-test.txt', attachment.url(:original, false)
     end
 
     context 'with inline jobs' do
@@ -87,27 +92,61 @@ class NoCacheS3Test < Test::Unit::TestCase
         @instance.run_callbacks(:commit)
         @instance.reload
         attachment = @instance.avatar
-        assert_equal 'http://store.local/test.txt', attachment.url(:original, false)
+        assert_equal 'http://store.local/dummy_imgs/1/original-test.txt', attachment.url(:original, false)
       end
     end
   end
 
+  def assert_no_leftover_tmp
+    existing_files = Dir.children(Dir.tmpdir)
+    yield
+    leftover_files = (Dir.children(Dir.tmpdir) - existing_files).sort
+    assert_empty(leftover_files)
+  end
+
   context "reprocess" do
     setup do
       Sidekiq::Testing.fake!
-      @instance.update_columns avatar_file_name: 'foo.gif', avatar_content_type: 'image/gif'
+      Dummy::AvatarAttachment.any_instance.stubs(:download_from_store).returns(stub_file('pixel.gif', @gif_pixel))
+      @instance.update_columns avatar_file_name: 'foo.gif', avatar_content_type: 'image/gif',
+                               avatar_synced_to_store_1: true
     end
 
     should "delete tmp files" do
       @store1_stub.expects(:put_object).times(1 + (@instance.avatar.options[:styles].keys - [:original]).size)
       # Paperclip.expects(:log).with { puts "Log: #{_1}"; true }.at_least(3)
-      existing_files = Dir.children(Dir.tmpdir)
-      @instance.avatar.reprocess!
-      leftover_files = (Dir.children(Dir.tmpdir) - existing_files).sort
-      assert_empty(leftover_files)
+      assert_no_leftover_tmp { @instance.avatar.reprocess! }
     end
   end
 
+  context "with delayed_paperclip process_in_background" do # rubocop:disable Style/MultilineIfModifier
+    setup do
+      Dummy.process_in_background(:avatar)
+      Sidekiq::Testing.fake!
+      Sidekiq::Queues.clear_all
+
+      # local minio
+      bucket = ::Aws::S3::Resource.new(client: ::Aws::S3::Client.new(
+        access_key_id: 'test', secret_access_key: 'testpassword',
+        endpoint: 'http://localhost:9002', region: 'laplandia', force_path_style: true
+      )).bucket("bucketname")
+      @instance.avatar.class.stubs(:stores).returns({ store_1: bucket })
+    end
+
+    should "add job and process" do
+      # @store1_stub.expects(:put_object).once
+      # @store2_stub.expects(:put_object).never
+      assert_no_leftover_tmp do
+        @instance.update!(avatar: stub_file('pixel.gif', @gif_pixel))
+        # @instance.update!(avatar: File.open('sample_notebook_1.jpg'))
+      end
+      assert_equal(1, DelayedPaperclip::Jobs::Sidekiq.jobs.size)
+
+      @instance = Dummy.find(@instance.id)
+      assert_no_leftover_tmp { DelayedPaperclip::Jobs::Sidekiq.perform_one }
+    end
+  end unless ENV['CI']
+
   context 'generating presigned_url' do
     setup do
       Dummy::AvatarAttachment.any_instance.stubs(:storage_url).returns('http://домен.pф/ключ?param1=параметр')
@@ -1,20 +1,20 @@
 # frozen_string_literal: true
 
 require 'test_helper'
 require 'aws-sdk-s3'
 
 class StorageTest < Test::Unit::TestCase
-  context 'An attachment with Delayeds3 storage' do
+  context 'An attachment with no_cache_s3 storage' do
     setup do
-      rebuild_model storage: :delayeds3,
-                    bucket: 'testing',
-                    path: ':attachment/:style/:basename.:extension',
-                    s3_credentials: {},
-                    yandex_credentials: {},
-                    sbercloud_credentials: {}
+      rebuild_model storage: :no_cache_s3,
+                    key: ':filename',
+                    stores: {
+                      store1: { access_key_id: '123', secret_access_key: '123', region: 'r', bucket: 'buck' }
+                    }
     end
 
-    should 'be extended by the Delayeds3 module' do
-      assert Dummy.new.avatar.is_a?(Paperclip::Storage::Delayeds3)
+    should 'be extended by the NoCacheS3 module' do
+      assert Dummy.new.avatar.is_a?(Paperclip::Storage::NoCacheS3)
     end
 
     should 'not be extended by the Filesystem module' do