Compare commits

...

2 Commits

Author SHA1 Message Date
Vincent Riquer
d680d52425 Comment atom 2026-02-20 04:07:38 +01:00
Vincent Riquer
756ce7ec01 atom: License 2026-02-20 02:03:15 +01:00

149
atom
View File

@ -1,5 +1,21 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# Copyright © 2012-2026 ScriptFanix
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# A copy of the GNU General Public License v3 is included in the LICENSE file
# at the root of the project.
# Directories for various required data. Set by the configure script and the
# Makefile.
# Also save $IFS just in case.
declare -r \ declare -r \
DOCDIR=%DOCDIR% \ DOCDIR=%DOCDIR% \
LIBDIR=%LIBDIR% \ LIBDIR=%LIBDIR% \
@ -13,7 +29,7 @@ declare -r \
## Define exit codes ## Define exit codes
source "$SHAREDIR"/errorcodes source "$SHAREDIR"/errorcodes
# config structures # Config structures.
declare -A \ declare -A \
destinationenabled \ destinationenabled \
destinationascii \ destinationascii \
@ -38,10 +54,13 @@ declare -A \
exit $EBASHVERS exit $EBASHVERS
} }
# Locales break parsing.
LC_ALL=C LC_ALL=C
# Enable extended globbing for some filename manipulations.
shopt -s extglob shopt -s extglob
# Array of ID3v1 genres, number to name mapping.
source "$SHAREDIR"/id3genres source "$SHAREDIR"/id3genres
for function in "$LIBDIR"/*/* for function in "$LIBDIR"/*/*
@ -49,6 +68,7 @@ do
source "$function" source "$function"
done done
# Migrate old config to XDG where required
if ! [[ -f "${XDG_CONFIG_HOME:-$HOME/.config}/AtOM/atom.cfg" ]] \ if ! [[ -f "${XDG_CONFIG_HOME:-$HOME/.config}/AtOM/atom.cfg" ]] \
&& [[ -f "$HOME/.atom/atom.cfg" ]] && [[ -f "$HOME/.atom/atom.cfg" ]]
then then
@ -128,6 +148,9 @@ do
done done
askconf() { askconf() {
# Prompt user to interactively create a config if it doesn't exist and
# we're not in cron mode.
# Called recursively until a valid answer is given.
if (( cron )) if (( cron ))
then then
echo 'Non-interactive, not running setup. Please run atom -S.' >&2 echo 'Non-interactive, not running setup. Please run atom -S.' >&2
@ -152,6 +175,7 @@ askconf() {
esac esac
} }
# Read config if it exists, call askconf otherwise.
if [ ! -f "$cffile" ] if [ ! -f "$cffile" ]
then then
if [ ! -d "${cffile%/*}" ] if [ ! -d "${cffile%/*}" ]
@ -163,14 +187,17 @@ then
fi fi
getConfig getConfig
# If user wants to change config, run setup.
(( forcesetup )) && setup (( forcesetup )) && setup
# Deactivate `!` history expansion, just in case.
set +H set +H
# Apply CLI overrides # Apply CLI overrides
[ -n "$cliload" ] && maxload=$cliload [ -n "$cliload" ] && maxload=$cliload
[ -n "$cliltimer" ] && loadinterval=$cliltimer [ -n "$cliltimer" ] && loadinterval=$cliltimer
# Print config if requested or in debug mode. Exit if only dumping config.
(( debug || cfgdump )) && printConfig (( debug || cfgdump )) && printConfig
(( cfgdump )) && exit (( cfgdump )) && exit
@ -178,6 +205,11 @@ set +H
sanityCheck sanityCheck
openDatabase openDatabase
# create missing destination directories and DB entries, get destination IDs
# for later use
createDestinations
# Apply destinations "enabled" status in DB with respect to config.
for destination in "${destinations[@]}" for destination in "${destinations[@]}"
do do
if (( ${destinationenabled["$destination"]} )) if (( ${destinationenabled["$destination"]} ))
@ -188,14 +220,20 @@ do
fi fi
done done
createDestinations # get source files. Update DB.
getFiles getFiles
# Scan mime-types (for new/changed files). Update DB
updateMimes updateMimes
# Remove source files that are gone from DB. (`last_seen` column).
# FOREIGN KEY `source_file_id` on table `destination_files` gets set to NULL
# by `ON DELETE` parameter. We can use that to find destination files that need
# to be removed.
removeObsoleteFiles removeObsoleteFiles
# remove destination files for which the source file is gone. (`source_file_id`
# NULL -- `rm` them if they exist, remove the DB entry in any case)
(( cron )) || echo -n 'Gathering files for cleaning...' (( cron )) || echo -n 'Gathering files for cleaning...'
echo ' echo '
SELECT COUNT(id) SELECT COUNT(id)
@ -203,6 +241,7 @@ echo '
WHERE source_file_id is NULL;' >&3 WHERE source_file_id is NULL;' >&3
read -u4 -r -d $'\0' removecount read -u4 -r -d $'\0' removecount
# Gather in 500 files batches to avoid pipe overflow.
until (( ${#removefile[@]} == removecount )) until (( ${#removefile[@]} == removecount ))
do do
echo ' echo '
@ -234,6 +273,10 @@ done
unset deleted unset deleted
unset removed unset removed
echo 'BEGIN TRANSACTION;' >&3 echo 'BEGIN TRANSACTION;' >&3
# Remove the files if they exist. Unconditionally remove from DB.
# Run in transaction to speed up the process, COMMIT every 1000th file in case
# process gets killed.
for id in ${!removefile[@]} for id in ${!removefile[@]}
do do
filename=${removefile[id]} filename=${removefile[id]}
@ -261,8 +304,14 @@ echo -n "${deleted+$deleted files deleted${removed:+, }}${removed:+$removed remo
(( deleted || removed )) && echo (( deleted || removed )) && echo
unset removecount deleted removed removefile unset removecount deleted removed removefile
# Update tags for new/changed files and updated tag parsers. Update DB.
# Uses `tags.last_change` vs `source_files.last_change`
# + `tags.tagreader` vs tagreader versions declared in running version's
# source code.
updateTags updateTags
# Reset timestamps for files in destinations that were requested to be fully
# rebuilt.
for forcedest in "${forceall[@]}" for forcedest in "${forceall[@]}"
do do
if forcedestid=$(Select destinations id <<<"name = $forcedest") if forcedestid=$(Select destinations id <<<"name = $forcedest")
@ -277,6 +326,18 @@ do
fi fi
done done
# Create TEMPORARY (in-memory) tables for tasks. Up to 60 arguments per task
# (including command).
#
# `requires` designates the id of a tasks that must be completed before this
# one can be run.
# `required_by` is a counter of tasks that require this one. Used for temp
# files cleanup.
# `status` is 0 for pending tasks, 2 for failed tasks, 4 for completed tasks
# depended upon (temp file should be left intact).
#
# TRIGGER `fail_depends` sets status of all tasks that depend on a failed task
# to 2
echo ' echo '
CREATE TEMPORARY TABLE tasks( CREATE TEMPORARY TABLE tasks(
id INTEGER PRIMARY KEY, id INTEGER PRIMARY KEY,
@ -370,6 +431,7 @@ echo '
END; END;
' >&3 ' >&3
# Get number of files to process. Apply `maxbatch` limit if specified.
echo ' echo '
SELECT COUNT(source_files.id) SELECT COUNT(source_files.id)
FROM source_files FROM source_files
@ -393,6 +455,8 @@ then
(( togo = filecount - maxbatch )) (( togo = filecount - maxbatch ))
filecount=$maxbatch filecount=$maxbatch
fi fi
# Get files to process. Apply `maxbatch` limit if specified.
echo ' echo '
SELECT SELECT
source_files.id, source_files.id,
@ -439,16 +503,26 @@ echo ';
read -u4 -r -d $'\0' line read -u4 -r -d $'\0' line
while ! [[ $line = AtOM:NoMoreFiles ]] while ! [[ $line = AtOM:NoMoreFiles ]]
do do
# Append `::AtOM:SQL:Sep::` at the end of the line to make sure we can
# parse empty fields.
decodefiles+=("$line::AtOM:SQL:Sep::") decodefiles+=("$line::AtOM:SQL:Sep::")
read -u4 -r -d $'\0' line read -u4 -r -d $'\0' line
done done
(( cron )) || echo -n $'Creating tasks...\033[K' (( cron )) || echo -n $'Creating tasks...\033[K'
# Spawn perl coprocess for unicode to ascii conversion if needed.
(( textunidecodeneeded )) && ascii (( textunidecodeneeded )) && ascii
# Generate tasks for each file. Tasks that depend on other tasks (e.g. encoding
# depends on decoding) get the ID of the task they depend on in `requires`
# column. This is used to make sure that encoding doesn't start before decoding
# and other transforms have completed.
echo 'BEGIN TRANSACTION;' >&3 echo 'BEGIN TRANSACTION;' >&3
for line in "${decodefiles[@]}" for line in "${decodefiles[@]}"
do do
# Parsing SQL output is fun. We use `::AtOM:SQL:Sep::` as separator
# between fields at the end of the line to make sure we can parse empty
# fields.
fileid=${line%%::AtOM:SQL:Sep::*} fileid=${line%%::AtOM:SQL:Sep::*}
rest=${line#*::AtOM:SQL:Sep::} rest=${line#*::AtOM:SQL:Sep::}
filename=${rest%%::AtOM:SQL:Sep::*} filename=${rest%%::AtOM:SQL:Sep::*}
@ -493,14 +567,24 @@ do
rest=${rest#*::AtOM:SQL:Sep::} rest=${rest#*::AtOM:SQL:Sep::}
year=${rest%%::AtOM:SQL:Sep::*} year=${rest%%::AtOM:SQL:Sep::*}
unset rest unset rest
# Skip destinations with formats for which tools are missing.
case ${destinationformat["$destination"]} in case ${destinationformat["$destination"]} in
vorbis) (( disableoggenc )) && continue ;; vorbis) (( disableoggenc )) && continue ;;
opus) (( disableopusenc )) && continue ;; opus) (( disableopusenc )) && continue ;;
mp3) (( disablelame )) && continue ;; mp3) (( disablelame )) && continue ;;
esac esac
# Create decoding task depending on mimetype.
decodeFile decodeFile
# Build target directory path from source file path OR from rename
# pattern if set.
getDestDir getDestDir
# Same for filename.
getDestFile getDestFile
# Set copied to 1 for files with extension in `copy_extension`.
for copy_ext in "${destinationcopyext[@]}" for copy_ext in "${destinationcopyext[@]}"
do do
if [[ $filename =~ '.*\.'"$copy_ext"'$' ]] if [[ $filename =~ '.*\.'"$copy_ext"'$' ]]
@ -511,10 +595,17 @@ do
done done
if (( copied )) if (( copied ))
then then
# Copy file as-is to destination.
copyFiles_matching copyFiles_matching
else else
# Call suitable function to create encoding task depending on
# destination format.
# encodeFile::mp3
# encodeFile::opus
# encodeFile::vorbis
encodeFile::${destinationformat[$destination]} encodeFile::${destinationformat[$destination]}
fi fi
# Cleanup variables. Avoids leaking data between iterations.
unset \ unset \
album \ album \
albumartist \ albumartist \
@ -557,6 +648,13 @@ echo 'COMMIT;' >&3
# remove perl unicode to ascii coprocess # remove perl unicode to ascii coprocess
(( textunidecodeneeded )) && eval exec "${toascii[1]}>&-" (( textunidecodeneeded )) && eval exec "${toascii[1]}>&-"
# Main loop. Run up to `concurrency` tasks in parallel, depending on system
# load. Spawn new tasks as old ones complete. Update progress info and commit
# DB every minute.
#
# Start with concurrency = maxload / 2, which seems like a good starting point
# on most systems. If result is 0, force to 1 to make sure we get going. If
# `-f <workers>` option was used, use that value instead and don't change it.
concurrency=$(( maxload / 2 )) concurrency=$(( maxload / 2 ))
(( concurrency )) || concurrency=1 (( concurrency )) || concurrency=1
active=0 active=0
@ -577,6 +675,13 @@ do
fi fi
read humanload garbage < /proc/loadavg read humanload garbage < /proc/loadavg
load=${humanload%.*} load=${humanload%.*}
# If `-f <workers>` option was used, keep concurrency fixed to that
# value. Otherwise, adjust concurrency according to load and `maxload`
# value. If load is above `maxload`, reduce concurrency by 1 (down to 0
# if `allow_zero_running` is set). If load is below `maxload`, increase
# concurrency by 1 (only if all slots are populated).
# Don't update concurrency more often than every `load-interval` seconds
# to reduce hysteresis.
if (( fixed_workers )) if (( fixed_workers ))
then then
concurrency="$fixed_workers" concurrency="$fixed_workers"
@ -596,9 +701,30 @@ do
fi fi
fi fi
fi fi
# check if workers have finished.
# If a worker finished with non-zero exit code, mark the task as failed
# in DB. TRIGGER `fail_depends` will fail all tasks that depend on it.
checkworkers checkworkers
# If task failed, set status to 2, remove temp files if any and
# increment `failed` counter. Don't delete task from DB to keep track
# of failed tasks for final report.
# If task was successful, update destination_files.last_change to
# source_files.last_change, update old_filename to previous
# (destination file) filename, store rename_pattern, fat32compat and
# ascii settings.
# Set status to 4 for tasks that were depended upon by other tasks to
# avoid cleaning up temp files before all tasks depending on them have
# completed, delete task otherwise.
cleaner cleaner
# Look for pending tasks that can be started (required tasks's status
# is 4 or requires is NULL) and start them.
# Trigger a dump of the tasks table if it's inconsistent (no running
# tasks, no ready tasks, but pending tasks exist) and exit with error
# $ETASKLEFT.
master master
# "Fancy" progress info. Only calculate if at least one task has
# succeeded to avoid division by zero.
if (( ran - failed )) if (( ran - failed ))
then then
currenttime=$timestamp currenttime=$timestamp
@ -637,6 +763,7 @@ do
fmtworkers='W:%i/%i' fmtworkers='W:%i/%i'
fmtprogress="T:%${#taskcount}i/%i (F:%i) %3i%%" fmtprogress="T:%${#taskcount}i/%i (F:%i) %3i%%"
fmttime='%2id %2ih%02im%02is (A:%4.1fs/task)' fmttime='%2id %2ih%02im%02is (A:%4.1fs/task)'
# Abuse time formatting to get ETA.
eta="ETA:$( eta="ETA:$(
printf "%(%c)T" "$(( currenttime + secsremaining ))" printf "%(%c)T" "$(( currenttime + secsremaining ))"
)" )"
@ -655,6 +782,7 @@ do
${minutes:-0} \ ${minutes:-0} \
${seconds:-0} \ ${seconds:-0} \
${avgdsec:-0}.${avgdmsec:-0} ${avgdsec:-0}.${avgdmsec:-0}
# If 0 concurrency is allowed, show paused status when concurrency is 0
if ! (( concurrency )) && ! (( cron )) if ! (( concurrency )) && ! (( cron ))
then then
if (( active )) if (( active ))
@ -668,6 +796,7 @@ done
echo 'COMMIT;' >&3 echo 'COMMIT;' >&3
unset count unset count
# Final report. Calculate elapsed time and format it in human readable way.
endtime=$EPOCHSECONDS endtime=$EPOCHSECONDS
(( elapsedseconds = endtime - starttime )) (( elapsedseconds = endtime - starttime ))
@ -702,6 +831,9 @@ endtime=$EPOCHSECONDS
(( cron )) || echo -en "\033[K" (( cron )) || echo -en "\033[K"
(( ran )) && echo (( ran )) && echo
# If some tasks failed, print them. Don't print failed tasks that did not run
# to avoid confusing tasks marked as failed because they depended on ones that
# failed.
if (( failed )) if (( failed ))
then then
echo $'\nFailed tasks:\n' echo $'\nFailed tasks:\n'
@ -788,6 +920,8 @@ then
done done
fi fi
# Check if there are files that need to be renamed because their rename pattern
# changed.
for destination in "${!destinationpath[@]}" for destination in "${!destinationpath[@]}"
do do
echo ' echo '
@ -847,6 +981,8 @@ do
'vorbis') extension=ogg ;; 'vorbis') extension=ogg ;;
esac esac
(( cron )) || echo -en "$destination: rename pattern changed, renaming files...\033[K" (( cron )) || echo -en "$destination: rename pattern changed, renaming files...\033[K"
# Spawn perl coprocess for unicode to ascii conversion if
# needed.
(( textunidecodeneeded )) && ascii (( textunidecodeneeded )) && ascii
echo 'BEGIN TRANSACTION;' >&3 echo 'BEGIN TRANSACTION;' >&3
for line in "${renamefiles[@]}" for line in "${renamefiles[@]}"
@ -922,8 +1058,12 @@ do
unset count changedcount renamefiles unset count changedcount renamefiles
done done
# Copy files of mime-types matching `copy_mime-type`
copyFiles_action copyFiles_action
# Remove files obsoleted by `rename_pattern`, `ascii` or `fat32compat` changes.
# Based on `destination_files.old_filename` field, populated upon task
# completion.
echo ' echo '
SELECT destination_files.id, SELECT destination_files.id,
destination_files.filename, destination_files.filename,
@ -970,6 +1110,9 @@ echo 'COMMIT;' >&3
(( cron )) || echo -en "\033[K" (( cron )) || echo -en "\033[K"
(( count )) && echo (( count )) && echo
# Remove empty directories in destinations.
# We blindly duplicate the source tree in the destination, so we may end up
# with empty directories.
(( debug )) && echo "Purging empty directories..." (( debug )) && echo "Purging empty directories..."
for path in "${destinationpath[@]}" for path in "${destinationpath[@]}"
do do