AtOM/atom
Vincent Riquer d680d52425 Comment atom
2026-02-20 04:07:38 +01:00

1125 lines
29 KiB
Bash
Executable File

#!/usr/bin/env bash
# Copyright © 2012-2026 ScriptFanix
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# A copy of the GNU General Public License v3 is included in the LICENSE file
# at the root of the project.
# Directories for various required data. Set by the configure script and the
# Makefile (the %...% tokens are placeholders substituted at build time).
# Also save $IFS just in case.
declare -r \
        DOCDIR=%DOCDIR% \
        LIBDIR=%LIBDIR% \
        SHAREDIR=%SHAREDIR%
declare -r \
        exampleconf=$DOCDIR/example.cfg \
        schema=$SHAREDIR/schema.sql \
        \
        oldIFS="$IFS"
## Define exit codes
source "$SHAREDIR"/errorcodes
# Config structures.
# One associative array per per-destination setting, keyed by destination
# name. `declare -A` requires bash >= 4.0: if it fails we are running an
# older shell, so bail out early with a clear message.
declare -A \
        destinationenabled \
        destinationascii \
        destinationchannels \
        destinationfat32compat \
        destinationcopymime \
        destinationcopyext \
        destinationformat \
        destinationfrequency \
        destinationid \
        destinationloss \
        destinationmaxbps \
        destinationnormalize \
        destinationpath \
        destinationquality \
        destinationrename \
        destinationnoresample \
        destinationrenamepath \
        destinationskipmime \
        || {
                echo "Check your Bash version. You need >= 4.0" >&2
                exit $EBASHVERS
        }
# Locales break parsing.
LC_ALL=C
# Enable extended globbing for some filename manipulations.
shopt -s extglob
# Array of ID3v1 genres, number to name mapping.
source "$SHAREDIR"/id3genres
# Load every function library shipped under $LIBDIR (one directory level
# of grouping, then one file per function/topic).
for function in "$LIBDIR"/*/*
do
        source "$function"
done
# Migrate old config to XDG where required
if ! [[ -f "${XDG_CONFIG_HOME:-$HOME/.config}/AtOM/atom.cfg" ]] \
        && [[ -f "$HOME/.atom/atom.cfg" ]]
then
        echo "Configuration found in legacy location $HOME/.atom/atom.cfg."\
                "Migrating configuration and data to XDG standard"
        xdgMigrate
fi
# Default config location; may be overridden by the -c option below.
cffile="${XDG_CONFIG_HOME:-$HOME/.config}/AtOM/atom.cfg"
help() {
        # Print the command-line option summary on stdout, one line per flag.
        local -a usage=(
                'Options:'
                '-c <file> Load configuration file <file>'
                '-C Dump configuration and exit'
                '-l <load> Override max-load'
                '-f <workers> Use exactly <workers> child processes'
                '-T <seconds> override load-interval'
                '-F <destination> Force re-generation of all files in'
                '<destination>'
                '-B <batch size> Create/update no more than <batch size> files'
                '-S Run setup'
                '-h Show this text'
                '-D Increase debug'
                '-q Cron mode'
        )
        printf '%s\n' "${usage[@]}"
}
# Parse arguments. Values land in globals consumed later in the script;
# error diagnostics go to stderr so they don't pollute captured output.
while getopts ':c:Cl:T:F:f:B:ShDq' opt
do
        case $opt in
        c)      # Alternate configuration file.
                cffile="$OPTARG"
                ;;
        C)      # Dump configuration and exit.
                cfgdump=1
                ;;
        l)      # Override max-load from the config.
                cliload="$OPTARG"
                ;;
        T)      # Override load-interval from the config.
                cliltimer="$OPTARG"
                ;;
        F)      # May be given several times, one destination each.
                forceall+=("$OPTARG")
                ;;
        f)      # Fixed worker count; disables load-based scaling.
                fixed_workers="$OPTARG"
                ;;
        B)      # Cap the number of files created/updated this run.
                maxbatch="$OPTARG"
                ;;
        S)      # Force interactive setup.
                forcesetup=1
                ;;
        h)
                help
                exit 0
                ;;
        D)      # Repeatable: each -D increases verbosity.
                (( debug++ ))
                ;;
        q)      # Cron mode: suppress progress/interactive output.
                cron=1
                ;;
        :)
                # Missing option argument is an error: report on stderr.
                echo "-$OPTARG requires an argument" >&2
                help >&2
                exit $EINVARG
                ;;
        *)
                echo "Unrecognized option: -$OPTARG" >&2
                help >&2
                exit $EINVARG
                ;;
        esac
done
askconf() {
        # Prompt user to interactively create a config if it doesn't exist and
        # we're not in cron mode.
        # Called recursively until a valid answer is given.
        # Globals: cron (read), createconf (written). Exits with $ENOCFG on
        # refusal; otherwise hands off to setup.
        if (( cron ))
        then
                echo 'Non-interactive, not running setup. Please run atom -S.' >&2
                createconf=n
        else
                # -r: don't let backslashes in the answer be interpreted
                # (plain `read` mangles them).
                read -rp"Create one now? [Y/n/q] " createconf
        fi
        case $createconf in
        ''|[yY])
                setup
                ;;
        [nNqQ])
                echo "You need a configuration file. If you" \
                        "want to create it yourself, please" \
                        "read doc/config and doc/example.cfg." >&2
                exit $ENOCFG
                ;;
        *)
                # Unrecognized answer: ask again.
                echo "Come again?" >&2
                askconf
                ;;
        esac
}
# Read config if it exists, call askconf otherwise.
if [ ! -f "$cffile" ]
then
        # Make sure the config directory exists before setup tries to
        # write into it.
        if [ ! -d "${cffile%/*}" ]
        then
                mkdir -p "${cffile%/*}"
        fi
        echo "No configuration file found!" >&2
        askconf
fi
getConfig
# If user wants to change config, run setup.
(( forcesetup )) && setup
# Deactivate `!` history expansion, just in case.
set +H
# Apply CLI overrides
[ -n "$cliload" ] && maxload=$cliload
[ -n "$cliltimer" ] && loadinterval=$cliltimer
# Print config if requested or in debug mode. Exit if only dumping config.
(( debug || cfgdump )) && printConfig
(( cfgdump )) && exit
# check sanity
sanityCheck
openDatabase
# create missing destination directories and DB entries, get destination IDs
# for later use
createDestinations
# Apply destinations "enabled" status in DB with respect to config.
for destination in "${destinations[@]}"
do
        # Normalize the config value to an explicit 0/1 flag, then issue a
        # single Update instead of duplicating the call in both branches.
        enabled=0
        if (( ${destinationenabled["$destination"]} ))
        then
                enabled=1
        fi
        Update destinations enabled "$enabled" <<<"name = $destination"
done
# get source files. Update DB.
getFiles
# Scan mime-types (for new/changed files). Update DB
updateMimes
# Remove source files that are gone from DB. (`last_seen` column).
# FOREIGN KEY `source_file_id` on table `destination_files` gets set to NULL
# by `ON DELETE` parameter. We can use that to find destination files that need
# to be removed.
removeObsoleteFiles
# remove destination files for which the source file is gone. (`source_file_id`
# NULL -- `rm` them if they exist, remove the DB entry in any case)
# Protocol: SQL statements are written to fd 3; NUL-delimited result rows
# come back on fd 4 (fds presumably wired up by openDatabase -- see that
# function). A sentinel SELECT marks the end of each result set.
(( cron )) || echo -n 'Gathering files for cleaning...'
echo '
        SELECT COUNT(id)
        FROM destination_files
        WHERE source_file_id is NULL;' >&3
read -u4 -r -d $'\0' removecount
# Gather in 500 files batches to avoid pipe overflow.
until (( ${#removefile[@]} == removecount ))
do
        echo '
                SELECT destination_files.id,
                        destinations.name,
                        destination_files.filename
                FROM destination_files
                INNER JOIN destinations
                        ON destination_files.destination_id
                        = destinations.id
                WHERE source_file_id is NULL
                LIMIT 500 OFFSET '${#removefile[@]}';
                SELECT "AtOM:NoMoreFiles";
        ' >&3
        read -u4 -r -d $'\0' line
        until [[ $line == AtOM:NoMoreFiles ]]
        do
                # Fields are `::AtOM:SQL:Sep::`-separated:
                # id, destination name, filename (relative to destination).
                removeFileId=${line%%::AtOM:SQL:Sep::*}
                rest=${line#*::AtOM:SQL:Sep::}
                removeFileDestName=${rest%%::AtOM:SQL:Sep::*}
                rest=${rest#*::AtOM:SQL:Sep::}
                # Store the absolute path, indexed by destination_files.id.
                removefile[$removeFileId]="${destinationpath["$removeFileDestName"]}/${rest%%::AtOM:SQL:Sep::*}"
                read -u4 -r -d $'\0' line
        done
done
unset deleted
unset removed
echo 'BEGIN TRANSACTION;' >&3
# Remove the files if they exist. Unconditionally remove from DB.
# Run in transaction to speed up the process, COMMIT every 1000th file in case
# process gets killed.
for id in ${!removefile[@]}
do
        filename=${removefile[id]}
        if [ -n "$filename" ]
        then
                # File still on disk: delete the DB row only if the rm
                # succeeded, so a failed delete is retried on the next run.
                if rm "$filename"
                then
                        Delete destination_files <<<"id = $id"
                        (( ++deleted ))
                fi
        else
                # Nothing on disk: just drop the stale DB row.
                Delete destination_files <<<"id = $id"
                (( ++removed ))
        fi
        if (( (deleted + removed) % 1000 == 0 ))
        then
                echo 'COMMIT;BEGIN TRANSACTION;' >&3
        fi
        (( cron )) || echo -en "\rClean obsolete data: $(((deleted+removed)*100/removecount))%\033[K"
done
echo 'COMMIT;' >&3
(( cron )) || echo -n $'\r'
echo -n "${deleted+$deleted files deleted${removed:+, }}${removed:+$removed removed from database}"
(( cron )) || echo -ne "\033[K"
(( deleted || removed )) && echo
unset removecount deleted removed removefile
# Update tags for new/changed files and updated tag parsers. Update DB.
# Uses `tags.last_change` vs `source_files.last_change`
# + `tags.tagreader` vs tagreader versions declared in running version's
# source code.
updateTags
# Reset timestamps for files in destinations that were requested to be fully
# rebuilt (-F). A zeroed last_change makes the selection query below pick
# every file of that destination up again.
for forcedest in "${forceall[@]}"
do
        if forcedestid=$(Select destinations id <<<"name = $forcedest")
        then
                echo "Resetting destination files timestamps on" \
                        "$forcedest ($forcedestid)..."
                Update destination_files last_change 0 \
                        <<<"destination_id = $forcedestid"
        else
                echo "Full rebuild of destination $forcedest was requested," \
                        "but it does not exist!" >&2
        fi
done
# Create TEMPORARY (in-memory) tables for tasks. Up to 60 arguments per task
# (including command).
#
# `requires` designates the id of a task that must be completed before this
# one can be run.
# `required_by` is a counter of tasks that require this one. Used for temp
# files cleanup.
# `status` is 0 for pending tasks, 2 for failed tasks, 4 for completed tasks
# depended upon (temp file should be left intact).
#
# TRIGGER `fail_depends` sets status of all tasks that depend on a failed task
# to 2
echo '
        CREATE TEMPORARY TABLE tasks(
                id INTEGER PRIMARY KEY,
                requires INTEGER,
                required_by INTEGER DEFAULT 0,
                status INTEGER NOT NULL,
                key TEXT UNIQUE,
                rename_pattern TEXT,
                fat32compat INTEGER,
                ascii INTEGER,
                source_file INTEGER,
                fileid INTEGER,
                destdir TEXT,
                filename TEXT,
                cmd_arg0 TEXT,
                cmd_arg1 TEXT,
                cmd_arg2 TEXT,
                cmd_arg3 TEXT,
                cmd_arg4 TEXT,
                cmd_arg5 TEXT,
                cmd_arg6 TEXT,
                cmd_arg7 TEXT,
                cmd_arg8 TEXT,
                cmd_arg9 TEXT,
                cmd_arg10 TEXT,
                cmd_arg11 TEXT,
                cmd_arg12 TEXT,
                cmd_arg13 TEXT,
                cmd_arg14 TEXT,
                cmd_arg15 TEXT,
                cmd_arg16 TEXT,
                cmd_arg17 TEXT,
                cmd_arg18 TEXT,
                cmd_arg19 TEXT,
                cmd_arg20 TEXT,
                cmd_arg21 TEXT,
                cmd_arg22 TEXT,
                cmd_arg23 TEXT,
                cmd_arg24 TEXT,
                cmd_arg25 TEXT,
                cmd_arg26 TEXT,
                cmd_arg27 TEXT,
                cmd_arg28 TEXT,
                cmd_arg29 TEXT,
                cmd_arg30 TEXT,
                cmd_arg31 TEXT,
                cmd_arg32 TEXT,
                cmd_arg33 TEXT,
                cmd_arg34 TEXT,
                cmd_arg35 TEXT,
                cmd_arg36 TEXT,
                cmd_arg37 TEXT,
                cmd_arg38 TEXT,
                cmd_arg39 TEXT,
                cmd_arg40 TEXT,
                cmd_arg41 TEXT,
                cmd_arg42 TEXT,
                cmd_arg43 TEXT,
                cmd_arg44 TEXT,
                cmd_arg45 TEXT,
                cmd_arg46 TEXT,
                cmd_arg47 TEXT,
                cmd_arg48 TEXT,
                cmd_arg49 TEXT,
                cmd_arg50 TEXT,
                cmd_arg51 TEXT,
                cmd_arg52 TEXT,
                cmd_arg53 TEXT,
                cmd_arg54 TEXT,
                cmd_arg55 TEXT,
                cmd_arg56 TEXT,
                cmd_arg57 TEXT,
                cmd_arg58 TEXT,
                cmd_arg59 TEXT,
                cleanup TEXT,
                FOREIGN KEY(requires) REFERENCES tasks(id)
                        ON DELETE SET NULL
        );
        CREATE INDEX tasks_by_key ON tasks ( key );
        CREATE INDEX tasks_by_sourcefile ON tasks ( source_file );
        CREATE TEMPORARY TRIGGER fail_depends
                AFTER UPDATE OF
                        status
                ON
                        tasks
                WHEN
                        NEW.status=2
                BEGIN
                        UPDATE tasks SET status=2 WHERE requires=NEW.id;
                END;
' >&3
# Get number of files to process. Apply `maxbatch` limit if specified.
# A file needs (re)processing when its destination copy's timestamp differs
# from the source's and the destination's mime action for it is 1 (encode).
echo '
        SELECT COUNT(source_files.id)
        FROM source_files
        INNER JOIN destination_files
                ON source_files.id
                = destination_files.source_file_id
        INNER JOIN destinations
                ON destination_files.destination_id=destinations.id
        INNER JOIN mime_type_actions
                ON mime_type_actions.id = source_files.mime_type
        INNER JOIN tags
                ON source_files.id = tags.source_file
        WHERE destinations.enabled = 1
        AND CAST(destination_files.last_change AS TEXT)
                <> CAST(source_files.last_change AS TEXT)
        AND mime_type_actions.destination_id = destinations.id
        AND mime_type_actions.action = 1;' >&3
read -u4 -r -d $'\0' filecount
if [ -n "$maxbatch" ] && (( maxbatch < filecount ))
then
        # Remember how many files are postponed, for the final report.
        (( togo = filecount - maxbatch ))
        filecount=$maxbatch
fi
# Get files to process. Apply `maxbatch` limit if specified.
# The column order here must match the field-splitting order in the
# task-generation loop below.
echo '
        SELECT
                source_files.id,
                source_files.filename,
                mime_type_actions.mime_text,
                destinations.name,
                destination_files.id,
                tags.album,
                tags.albumartist,
                tags.artist,
                tags.bitrate,
                tags.channels,
                tags.composer,
                tags.depth,
                tags.disc,
                tags.genre,
                tags.performer,
                tags.rate,
                tags.releasecountry,
                tags.replaygain_alb,
                tags.replaygain_trk,
                tags.title,
                tags.track,
                tags.year
        FROM source_files
        INNER JOIN destination_files
                ON source_files.id
                = destination_files.source_file_id
        INNER JOIN destinations
                ON destination_files.destination_id=destinations.id
        INNER JOIN mime_type_actions
                ON mime_type_actions.id = source_files.mime_type
        INNER JOIN tags
                ON source_files.id = tags.source_file
        WHERE destinations.enabled = 1
        AND CAST(destination_files.last_change AS TEXT)
                <> CAST(source_files.last_change AS TEXT)
        AND mime_type_actions.destination_id = destinations.id
        AND mime_type_actions.action = 1
        ORDER BY source_files.id' >&3
(( maxbatch )) && echo "LIMIT $maxbatch" >&3
echo ';
SELECT "AtOM:NoMoreFiles";' >&3
read -u4 -r -d $'\0' line
while ! [[ $line = AtOM:NoMoreFiles ]]
do
        # Append `::AtOM:SQL:Sep::` at the end of the line to make sure we can
        # parse empty fields.
        decodefiles+=("$line::AtOM:SQL:Sep::")
        read -u4 -r -d $'\0' line
done
(( cron )) || echo -n $'Creating tasks...\033[K'
# Spawn perl coprocess for unicode to ascii conversion if needed.
(( textunidecodeneeded )) && ascii
# Generate tasks for each file. Tasks that depend on other tasks (e.g. encoding
# depends on decoding) get the ID of the task they depend on in `requires`
# column. This is used to make sure that encoding doesn't start before decoding
# and other transforms have completed.
echo 'BEGIN TRANSACTION;' >&3
for line in "${decodefiles[@]}"
do
        # Parsing SQL output is fun. We use `::AtOM:SQL:Sep::` as separator
        # between fields at the end of the line to make sure we can parse empty
        # fields. The field order matches the SELECT column list above.
        fileid=${line%%::AtOM:SQL:Sep::*}
        rest=${line#*::AtOM:SQL:Sep::}
        filename=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        mimetype=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        destination=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        destfileid=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        album=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        albumartist=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        artist=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        bitrate=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        channels=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        composer=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        depth=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        disc=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        genre=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        performer=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        rate=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        releasecountry=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        replaygain_alb=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        replaygain_trk=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        title=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        track=${rest%%::AtOM:SQL:Sep::*}
        rest=${rest#*::AtOM:SQL:Sep::}
        year=${rest%%::AtOM:SQL:Sep::*}
        unset rest
        # Skip destinations with formats for which tools are missing.
        case ${destinationformat["$destination"]} in
        vorbis) (( disableoggenc )) && continue ;;
        opus)   (( disableopusenc )) && continue ;;
        mp3)    (( disablelame )) && continue ;;
        esac
        # Create decoding task depending on mimetype.
        decodeFile
        # Build target directory path from source file path OR from rename
        # pattern if set.
        getDestDir
        # Same for filename.
        getDestFile
        # Set copied to 1 for files with extension in `copy_extension`.
        # BUG FIX: the previous test was `[[ $filename =~ '.*\.'"$copy_ext"'$' ]]`.
        # Inside `[[ =~ ]]` any quoted part of the pattern is matched
        # LITERALLY, so it only matched filenames containing the literal
        # text `.*\.<ext>$` and never fired for real extensions. A glob
        # suffix match does what was intended.
        # NOTE(review): this scans the copy-extension values of ALL
        # destinations, not just $destination -- confirm that is intended.
        for copy_ext in "${destinationcopyext[@]}"
        do
                if [[ $filename == *."$copy_ext" ]]
                then
                        copied=1
                        break
                fi
        done
        if (( copied ))
        then
                # Copy file as-is to destination.
                copyFiles_matching
        else
                # Call suitable function to create encoding task depending on
                # destination format.
                # encodeFile::mp3
                # encodeFile::opus
                # encodeFile::vorbis
                encodeFile::${destinationformat[$destination]}
        fi
        # Cleanup variables. Avoids leaking data between iterations.
        unset \
                album \
                albumartist \
                artist \
                bitrate \
                channels \
                commandline \
                composer \
                copied \
                decodetaskid \
                depth \
                destfileid \
                destination \
                disc \
                fileid \
                filename \
                mimetype \
                performer \
                rate \
                releasecountry \
                replaygain_alb \
                replaygain_trk \
                rest \
                sox_needed \
                soxoptions_in \
                soxoptions_out \
                soxtaskid \
                title \
                track \
                year \
                tmpfile
done
echo 'COMMIT;' >&3
(( cron )) || echo -n $'\r\033[K'
(( count )) \
        && echo "Created $count tasks for $filecount files" \
                "${togo:+($togo left) }" \
                "${copies:+($copies immediate copies)}"
# remove perl unicode to ascii coprocess
# (closing ${toascii[1]} -- presumably its input fd -- lets perl exit)
(( textunidecodeneeded )) && eval exec "${toascii[1]}>&-"
# Main loop. Run up to `concurrency` tasks in parallel, depending on system
# load. Spawn new tasks as old ones complete. Update progress info and commit
# DB every minute.
#
# Start with concurrency = maxload / 2, which seems like a good starting point
# on most systems. If result is 0, force to 1 to make sure we get going. If
# `-f <workers>` option was used, use that value instead and don't change it.
concurrency=$(( maxload / 2 ))
(( concurrency )) || concurrency=1
active=0
concurrencychange=$EPOCHSECONDS
starttime=$concurrencychange
taskcount=$count
remaining=$taskcount
failed=0
echo 'BEGIN TRANSACTION;' >&3
committime=$EPOCHSECONDS
while (( remaining || ${#workers[@]} ))
do
        timestamp=$EPOCHSECONDS
        # Commit at most once a minute so a crash loses little work.
        if (( $timestamp - committime >= 60 ))
        then
                echo $'COMMIT;\nBEGIN TRANSACTION;' >&3
                committime=$timestamp
        fi
        # 1-minute load average; keep only the integer part for comparisons.
        read humanload garbage < /proc/loadavg
        load=${humanload%.*}
        # If `-f <workers>` option was used, keep concurrency fixed to that
        # value. Otherwise, adjust concurrency according to load and `maxload`
        # value. If load is above `maxload`, reduce concurrency by 1 (down to 0
        # if `allow_zero_running` is set). If load is below `maxload`, increase
        # concurrency by 1 (only if all slots are populated).
        # Don't update concurrency more often than every `load-interval` seconds
        # to reduce hysteresis.
        if (( fixed_workers ))
        then
                concurrency="$fixed_workers"
        else
                if (( timestamp - concurrencychange >= loadinterval ))
                then
                        if (( concurrency > 1 || allow_zero_running )) \
                                && (( load > maxload && concurrency ))
                        then
                                concurrencychange=$timestamp
                                (( --concurrency ))
                        elif (( load < maxload )) \
                                && (( active > concurrency - 1 ))
                        then
                                concurrencychange=$timestamp
                                (( ++concurrency ))
                        fi
                fi
        fi
        # check if workers have finished.
        # If a worker finished with non-zero exit code, mark the task as failed
        # in DB. TRIGGER `fail_depends` will fail all tasks that depend on it.
        checkworkers
        # If task failed, set status to 2, remove temp files if any and
        # increment `failed` counter. Don't delete task from DB to keep track
        # of failed tasks for final report.
        # If task was successful, update destination_files.last_change to
        # source_files.last_change, update old_filename to previous
        # (destination file) filename, store rename_pattern, fat32compat and
        # ascii settings.
        # Set status to 4 for tasks that were depended upon by other tasks to
        # avoid cleaning up temp files before all tasks depending on them have
        # completed, delete task otherwise.
        cleaner
        # Look for pending tasks that can be started (required task's status
        # is 4 or requires is NULL) and start them.
        # Trigger a dump of the tasks table if it's inconsistent (no running
        # tasks, no ready tasks, but pending tasks exist) and exit with error
        # $ETASKLEFT.
        master
        # "Fancy" progress info. Only calculate if at least one task has
        # succeeded to avoid division by zero.
        if (( ran - failed ))
        then
                currenttime=$timestamp
                (( runtime = currenttime - starttime ))
                # Average task duration in milliseconds.
                avgduration=$((
                        ( runtime * 1000)
                        /
                        ( ran - failed )
                ))
                secsremaining=$(( remaining * avgduration / 1000 ))
                # `|| true` guards: (( )) returns 1 when the result is 0.
                (( days =
                        secsremaining
                        /
                        ( 24*60*60 )
                )) || true
                (( hours =
                        ( secsremaining - ( days*24*60*60 ) )
                        /
                        ( 60*60 )
                )) || true
                (( minutes =
                        ( secsremaining - ( ( days*24 + hours ) *60*60 ) )
                        /
                        60
                )) || true
                (( seconds =
                        secsremaining
                        -
                        ( ( ( ( days*24 + hours ) *60 ) + minutes ) *60 )
                )) || true
                # Zero-pad to at least 4 digits, then split milliseconds
                # into whole seconds + fractional part for display.
                avgduration=$(printf %04i $avgduration)
                avgdsec=${avgduration:0:-3}
                avgdmsec=${avgduration#$avgdsec}
        fi
        fmtload='L:%4.1f/%i'
        fmtworkers='W:%i/%i'
        fmtprogress="T:%${#taskcount}i/%i (F:%i) %3i%%"
        fmttime='%2id %2ih%02im%02is (A:%4.1fs/task)'
        # Abuse timeformatting to get ETA.
        eta="ETA:$(
                printf "%(%c)T" "$(( currenttime + secsremaining ))"
        )"
        (( cron )) || printf \
                "\r$fmtload $fmtworkers $fmtprogress $fmttime $eta\033[K"\
                $humanload \
                $maxload \
                ${active:-0} \
                ${concurrency:-0} \
                ${ran:-0} \
                ${taskcount:-0} \
                ${failed:-0} \
                $(( ran * 100 / taskcount )) \
                ${days:-0} \
                ${hours:-0} \
                ${minutes:-0} \
                ${seconds:-0} \
                ${avgdsec:-0}.${avgdmsec:-0}
        # If 0 concurrency is allowed, show paused status when concurrency is 0
        if ! (( concurrency )) && ! (( cron ))
        then
                if (( active ))
                then
                        echo -n ' | (pause)'
                else
                        echo -n ' | PAUSED'
                fi
        fi
done
echo 'COMMIT;' >&3
unset count
# Final report. Calculate elapsed time and format it in human readable way.
# Units that come out as 0 are unset, so the ${var:+...} expansions below
# drop them from the message entirely.
endtime=$EPOCHSECONDS
(( elapsedseconds = endtime - starttime ))
(( days =
        elapsedseconds
        /
        ( 24*60*60 )
)) || unset days
(( hours =
        ( elapsedseconds - ( days*24*60*60 ) )
        /
        ( 60*60 )
)) || (( days )) || unset hours
(( minutes =
        ( elapsedseconds - ( ( days*24 + hours ) *60*60 ) )
        /
        60
)) || (( days || hours )) || unset minutes
(( seconds =
        elapsedseconds
        -
        ( ( ( ( days*24 + hours ) *60 ) + minutes ) *60 )
)) || true
(( cron )) || echo -n $'\r'
(( ran )) \
        && echo -n "Ran $ran tasks${failed:+, $failed of which failed,}" \
                "in ${days:+$days days,}" \
                "${hours:+$hours hours,}" \
                "${minutes:+$minutes minutes and}" \
                "$seconds seconds."
(( cron )) || echo -en "\033[K"
(( ran )) && echo
# If some tasks failed, print them. Don't print failed tasks that did not run
# to avoid confusing tasks marked as failed because they depended on ones that
# failed.
if (( failed ))
then
        echo $'\nFailed tasks:\n'
        echo '
                SELECT source_files.filename,
                        tasks.cmd_arg0,
                        tasks.cmd_arg1,
                        tasks.cmd_arg2,
                        tasks.cmd_arg3,
                        tasks.cmd_arg4,
                        tasks.cmd_arg5,
                        tasks.cmd_arg6,
                        tasks.cmd_arg7,
                        tasks.cmd_arg8,
                        tasks.cmd_arg9,
                        tasks.cmd_arg10,
                        tasks.cmd_arg11,
                        tasks.cmd_arg12,
                        tasks.cmd_arg13,
                        tasks.cmd_arg14,
                        tasks.cmd_arg15,
                        tasks.cmd_arg16,
                        tasks.cmd_arg17,
                        tasks.cmd_arg18,
                        tasks.cmd_arg19,
                        tasks.cmd_arg20,
                        tasks.cmd_arg21,
                        tasks.cmd_arg22,
                        tasks.cmd_arg23,
                        tasks.cmd_arg24,
                        tasks.cmd_arg25,
                        tasks.cmd_arg26,
                        tasks.cmd_arg27,
                        tasks.cmd_arg28,
                        tasks.cmd_arg29,
                        tasks.cmd_arg30,
                        tasks.cmd_arg31,
                        tasks.cmd_arg32,
                        tasks.cmd_arg33,
                        tasks.cmd_arg34,
                        tasks.cmd_arg35,
                        tasks.cmd_arg36,
                        tasks.cmd_arg37,
                        tasks.cmd_arg38,
                        tasks.cmd_arg39,
                        tasks.cmd_arg40,
                        tasks.cmd_arg41,
                        tasks.cmd_arg42,
                        tasks.cmd_arg43,
                        tasks.cmd_arg44,
                        tasks.cmd_arg45,
                        tasks.cmd_arg46,
                        tasks.cmd_arg47,
                        tasks.cmd_arg48,
                        tasks.cmd_arg49,
                        tasks.cmd_arg50,
                        tasks.cmd_arg51,
                        tasks.cmd_arg52,
                        tasks.cmd_arg53,
                        tasks.cmd_arg54,
                        tasks.cmd_arg55,
                        tasks.cmd_arg56,
                        tasks.cmd_arg57,
                        tasks.cmd_arg58,
                        tasks.cmd_arg59
                FROM tasks
                INNER JOIN source_files
                        ON tasks.source_file=source_files.id
                WHERE tasks.status = 2;
                SELECT "AtOM:NoMoreFiles";' >&3
        read -u4 -r -d $'\0' line
        while ! [[ $line = AtOM:NoMoreFiles ]]
        do
                failedtasks+=("$line")
                read -u4 -r -d $'\0' line
        done
        for line in "${failedtasks[@]}"
        do
                # First field is the source filename; the remaining fields
                # are the failed command line, one field per argument.
                echo "${line%%::AtOM:SQL:Sep::*}"
                line="${line#*::AtOM:SQL:Sep::}"
                line="${line//::AtOM:SQL:Sep::/ }"
                # NOTE(review): `${line/+( )$/}` removes spaces followed by a
                # literal `$` character -- it looks like it was meant to strip
                # trailing spaces (`${line%%+( )}`); confirm intent.
                echo $'\t'"${line/+( )$/}"$'\n'
        done
fi
# Check if there are files that need to be renamed because their rename pattern
# changed.
for destination in "${!destinationpath[@]}"
do
        # Select destination files whose stored rename settings no longer
        # match the current configuration for this destination.
        echo '
                SELECT
                        destination_files.filename,
                        destination_files.id,
                        source_files.filename,
                        tags.album,
                        tags.albumartist,
                        tags.artist,
                        tags.composer,
                        tags.disc,
                        tags.genre,
                        tags.performer,
                        tags.releasecountry,
                        tags.title,
                        tags.track,
                        tags.year
                FROM destination_files
                INNER JOIN destinations
                        ON destination_files.destination_id
                        =destinations.id
                INNER JOIN tags
                        ON destination_files.source_file_id
                        =tags.source_file
                INNER JOIN source_files
                        ON destination_files.source_file_id
                        =source_files.id
                INNER JOIN mime_actions
                        ON source_files.mime_type
                        =mime_actions.mime_type
                WHERE destinations.name="'"$destination"'"
                AND (destination_files.rename_pattern
                        !=
                        "'"${destinationrenamepath[$destination]}/${destinationrename[$destination]}"'"
                        OR fat32compat != '${destinationfat32compat["$destination"]}'
                        OR ascii != '${destinationascii["$destination"]}'
                        OR destination_files.rename_pattern is NULL)
                AND destination_files.last_change > 0
                AND mime_actions.action=1
                ;
                SELECT "AtOM:NoMoreFiles";
        ' >&3
        read -u4 -r -d $'\0' line
        while [[ $line != AtOM:NoMoreFiles ]]
        do
                renamefiles+=("$line")
                read -u4 -r -d $'\0' line
        done
        if (( ${#renamefiles[@]} ))
        then
                # Map the destination format to the extension used on disk.
                case "${destinationformat[$destination]}" in
                'mp3') extension=mp3 ;;
                'opus') extension=opus ;;
                'vorbis') extension=ogg ;;
                esac
                (( cron )) || echo -en "$destination: rename pattern changed, renaming files...\033[K"
                # Spawn perl coprocess for unicode to ascii conversion if
                # needed.
                (( textunidecodeneeded )) && ascii
                echo 'BEGIN TRANSACTION;' >&3
                for line in "${renamefiles[@]}"
                do
                        # A separator is appended after the first split so the
                        # trailing (possibly empty) field still parses.
                        oldfilename=${line%%::AtOM:SQL:Sep::*}
                        rest=${line#*::AtOM:SQL:Sep::}'::AtOM:SQL:Sep::'
                        destfileid=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        filename=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        album=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        albumartist=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        artist=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        composer=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        disc=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        genre=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        performer=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        releasecountry=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        title=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        track=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        year=${rest%%::AtOM:SQL:Sep::*}
                        rest=${rest#*::AtOM:SQL:Sep::}
                        # NOTE(review): `[ ... -a ... ]` is obsolescent; also
                        # the -f test checks $oldfilename without the
                        # destination path prefix that the mv below uses --
                        # confirm which path the DB actually stores.
                        if [ -n "$oldfilename" -a -f "$oldfilename" ]
                        then
                                getDestDir
                                getDestFile
                                destfilename="$destdir/$destfile.$extension"
                                progressSpin
                                if [[ "$oldfilename" != "$destfilename" ]]
                                then
                                        mv \
                                                "${destinationpath[$destination]}/$oldfilename" \
                                                "${destinationpath[$destination]}/$destfilename"
                                        (( changedcount++ ))
                                        commit=1
                                fi
                                # Record the new name plus the settings that
                                # produced it, so the change-detection query
                                # above stays accurate on the next run.
                                echo "UPDATE destination_files" \
                                        "SET filename=\"${destfilename//\"/\"\"}\"," \
                                        " rename_pattern=" \
                                        "\"${destinationrenamepath[$destination]}/${destinationrename[$destination]}\","\
                                        " fat32compat=" \
                                        "${destinationfat32compat["$destination"]}," \
                                        " ascii=" \
                                        "${destinationascii["$destination"]}" \
                                        "WHERE id=$destfileid;" \
                                        >&3
                                if (( commit ))
                                then
                                        echo $'COMMIT;\nBEGIN TRANSACTION;' >&3
                                        unset commit
                                fi
                        fi
                done
                # remove perl unicode to ascii coprocess
                (( textunidecodeneeded )) && eval exec "${toascii[1]}>&-"
                echo 'COMMIT;' >&3
                (( cron )) || echo -n $'\r'
                (( changedcount )) \
                        && echo -n "$destination: Renamed $changedcount files"
                (( cron )) || echo -en "\033[K"
                (( changedcount )) && echo
        fi
        unset count changedcount renamefiles
done
# Copy files of mime-types matching `copy_mime-type`
copyFiles_action
# Remove files obsoleted by `rename_pattern`, `ascii` or `fat32compat` changes.
# Based on `destination_files.old_filename` field, populated upon task
# completion.
echo '
        SELECT destination_files.id,
                destination_files.filename,
                destination_files.old_filename,
                destinations.name
        FROM destination_files
        INNER JOIN destinations
                ON destination_files.destination_id
                = destinations.id
        WHERE old_filename IS NOT NULL;
        SELECT "AtOM:NoMoreFiles";
' >&3
(( cron )) || echo -n 'Removing obsolete files...'$'\033[K'
lines=()
read -u4 -r -d $'\0' line
while [[ $line != AtOM:NoMoreFiles ]]
do
        lines+=("$line")
        read -u4 -r -d $'\0' line
done
echo 'BEGIN TRANSACTION;' >&3
# For each row: delete the superseded file from disk (if the name really
# changed), then clear old_filename so it is not processed again.
for line in "${lines[@]}"
do
        # Fields: id, current filename, old filename, destination name.
        id=${line%%::AtOM:SQL:Sep::*}
        rest=${line#*::AtOM:SQL:Sep::}
        filename=${rest%%::AtOM:SQL:Sep::*}
        # BUG FIX: this used to re-split $line instead of $rest, so
        # `oldfilename` received the current filename (making the rm branch
        # unreachable) and `destination` kept an extra field glued on.
        rest=${rest#*::AtOM:SQL:Sep::}
        oldfilename=${rest%%::AtOM:SQL:Sep::*}
        destination=${rest#*::AtOM:SQL:Sep::}
        # Filenames in the DB are relative to the destination root, so test
        # the same prefixed path that rm operates on.
        if [[ $oldfilename != "$filename" ]] \
                && [ -f "${destinationpath[$destination]}/$oldfilename" ]
        then
                rm -f "${destinationpath[$destination]}/$oldfilename"
        fi
        Update destination_files old_filename NULL <<<"id = $id"
        (( count++ ))
        (( cron )) || printf '\b\b\b\b%3i%%' $(( (100 * count) / ${#lines[@]} ))
done
echo 'COMMIT;' >&3
(( cron )) || echo -n $'\r'
(( count )) \
        && echo -n "Removed $count obsolete files."
(( cron )) || echo -en "\033[K"
(( count )) && echo
# Remove empty directories in destinations.
# We blindly duplicate the source tree in the destination, so we may end up
# with empty directories.
(( debug )) && echo "Purging empty directories..."
for path in "${destinationpath[@]}"
do
        find "$path" -type d -empty -delete
done
closeDatabase
# vim:set ts=8 sw=8: