

@jart
Created December 12, 2023 15:24
Shell script for renaming all images in a folder
#!/bin/sh
# rename-pictures.sh
# Author: Justine Tunney <[email protected]>
# License: Apache 2.0
#
# This shell script can be used to ensure all the images in a folder
# have good descriptive filenames that are written in English. It's
# based on the Mistral 7b and LLaVA v1.5 models.
#
# For example, the following command:
#
# ./rename-pictures.sh ~/Pictures
#
# Will iterate recursively through the specified directories. For each
# file, it'll ask the Mistral model if the filename looks reasonable. If
# Mistral doesn't like the filename, then this script will ask LLaVA to
# analyze the picture and generate a new filename with lowercase letters
# and underscores. Most image formats are supported (e.g. png/jpg/gif)
# and newer, more exotic ones (e.g. webp) are also supported if
# ImageMagick is installed.
#
# You need a system with at least 8 GB of RAM. This will work
# even on older computers without GPUs; just let it run overnight!
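#
# A typical run prints progress like the following (illustrative output
# only; the generated names will differ on your machine):
#
#   skipping /home/user/Pictures/golden_gate_sunset.jpg (mistral says it's good)
#   renaming /home/user/Pictures/IMG_4821.jpg to /home/user/Pictures/a_cat_sleeping_on_a_couch.jpg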
abort() {
  printf '%s\n' "renaming terminated." >&2
  exit 1
}
if ! LLAVA=$(command -v llava-v1.5-7b-q4-main.llamafile); then
  printf '%s\n' "llava-v1.5-7b-q4-main.llamafile: fatal error: update this script with the path of your llava llamafile" >&2
  printf '%s\n' "please download https://huggingface.co/jartine/llava-v1.5-7B-GGUF/resolve/main/llava-v1.5-7b-q4-main.llamafile and put it on the system path" >&2
  abort
fi
if ! MISTRAL=$(command -v mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile); then
  printf '%s\n' "mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile: fatal error: update this script with the path of your mistral llamafile" >&2
  printf '%s\n' "please download https://huggingface.co/jartine/mistral-7b.llamafile/resolve/main/mistral-7b-instruct-v0.1-Q4_K_M-main.llamafile and put it on the system path" >&2
  abort
fi
if ! CONVERT=$(command -v convert); then
  printf '%s\n' "${0##*/}: warning: convert command not found (please install imagemagick so we can analyze image formats like webp)" >&2
fi
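# ask mistral whether the basename of $1 already reads like descriptive
# english. the GBNF grammar constrains the reply to the literal strings
# "yes" or "no", so the caller can compare the output directly.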
isgood() {
  "$MISTRAL" \
    --temp 0 -ngl 35 \
    --grammar 'root ::= "yes" | "no"' \
    -p "[INST]Does the filename '${1##*/}' look like readable english text?[/INST]" \
    --silent-prompt 2>/dev/null
}
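# ask llava to describe the image at $1. the grammar forces two or more
# lowercase words separated by single spaces, and -n 10 caps the length
# so the description stays short enough to use as a filename.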
pickname() {
  "$LLAVA" \
    --image "$1" --temp 0.3 -ngl 35 \
    --grammar 'root ::= [a-z]+ (" " [a-z]+)+' -n 10 \
    -p '### User: The image has...
### Assistant:' \
    --silent-prompt 2>/dev/null
}
# https://stackoverflow.com/a/30133294/1653720
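# portable shuffle: tag every input line with a random number, sort on
# that key, then strip the key, since not every system ships a shuf(1)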
shuf() {
  awk 'BEGIN {srand(); OFMT="%.17f"} {print rand(), $0}' "$@" |
    sort -k1,1n |
    cut -d ' ' -f2-
}
if [ $# -eq 0 ]; then
  printf '%s\n' "${0##*/}: fatal error: missing operand" >&2
  abort
fi
if [ x"$1" = x"--help" ]; then
  printf '%s\n' "usage: ${0##*/} PATH..."
  exit
fi
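# split the unquoted $(find ...) expansion below on newlines only, so
# paths containing spaces survive word splitting (paths containing
# literal newlines will not)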
OIFS=$IFS
IFS='
'
for arg; do
  # ensure argument is a file or directory
  if [ ! -e "$arg" ]; then
    printf '%s\n' "$arg: fatal error: file not found" >&2
    abort
  fi
  # find all regular files under path argument
  for path in $(find "$arg" -type f -print0 | tr '\0' '\n' | shuf); do
    # ask mistral if filename needs renaming
    if ! answer=$(isgood "$path"); then
      printf '%s\n' "$path: fatal error: failed to ask mistral if file needs renaming" >&2
      abort
    fi
    if [ "$answer" = "yes" ]; then
      printf '%s\n' "skipping $path (mistral says it's good)" >&2
      continue
    fi
    # ask the llm to generate a new filename. if it's a format like webp
    # that our stb library doesn't support yet, then we'll ask imagemagick
    # to convert it to png and then try again.
    if ! newname=$(pickname "$path"); then
      png="${TMPDIR:-/tmp}/$$.png"
      if [ -z "$CONVERT" ]; then
        printf '%s\n' "$path: warning: llava failed to describe image (probably due to unsupported file format)" >&2
        continue
      fi
      if "$CONVERT" "$path" "$png" 2>/dev/null; then
        if newname=$(pickname "$png"); then
          rm -f "$png"
        else
          printf '%s\n' "$path: warning: llava llm failed" >&2
          rm -f "$png"
          continue
        fi
      else
        printf '%s\n' "skipping $path (not an image)" >&2
        continue
      fi
    fi
    # replace spaces with underscores
    newname=$(printf '%s\n' "$newname" | sed 's/ /_/g')
    # append the original file extension to the new name
    if [ x"${path%.*}" != x"$path" ]; then
      newname="$newname.${path##*.}"
    fi
    # prefix the original directory to the new name
    if [ x"${path%/*}" != x"$path" ]; then
      newname="${path%/*}/$newname"
    fi
    # ensure new name is unique
    if [ -e "$newname" ]; then
      i=2
      while [ -e "${newname%.*}-$i.${newname##*.}" ]; do
        i=$((i + 1))
      done
      newname="${newname%.*}-$i.${newname##*.}"
    fi
    # rename the file
    printf '%s\n' "renaming $path to $newname"
    if ! mv -n "$path" "$newname"; then
      printf '%s\n' "$newname: fatal error: failed to rename file" >&2
      abort
    fi
  done
done
IFS=$OIFS

@michapixel commented Jan 2, 2024

sorry to bother, windows user here ...

for me it seems the "newname" generation is completely borked under Windows.
$ ./rename-pictures.sh ./photos
results in:
`renaming ./photos/x001Foo (11).png to ./photos/usage:_/D/_llama/llamafile-0.4.1/llamafile-0.4.1/llamafile.exe[options]

options:
__-h,--help________________show_this_help_message_and_exit
__-v,
--verbose_____________verbose_output_(default:disabled)
__-t_N,
--threads_N_________number_of_threads_to_use_during_computation_(default:6)
__-tb_N,
--threads-batch_N__number_of_threads_to_use_during_batch_and_prompt_processing_(default:same_as--threads)
__-c_N,--ctx-size_N________size_of_the_prompt_context(default:_512)
_--rope-scaling{none,linear,yarn}
____________________________RoPE_frequency_scaling_method,_defaults_to_linear_unless_specified_by_the_model
_--rope-freq-base_N________RoPE_base_frequency(default:_loaded_from_model)
__--rope-freq-scale_N_______RoPE_frequency_scaling_factor,_expands_context_by_a_factor_of_1/N
__--yarn-ext-factor_N_______YaRN:extrapolation_mix_factor(default:_1.0,0.0=_full_interpolation)
__--yarn-attn-factor_N______YaRN:_scale_sqrt(t)or_attention_magnitude(default:_1.0)
__--yarn-beta-slow_N________YaRN:high_correction_dim_or_alpha(default:_1.0)
_--yarn-beta-fast_N________YaRN:low_correction_dim_or_beta(default:32.0)
__-b_N,
--batch-size_N______batch_size_for_prompt_processing
(default:_512)
_--memory-f32______________use_f32_instead_of_f16_for_memory_key+value(default:_disabled)
____________________________not_recommended:doubles_context_memory_required_and_no_measurable_increase_in_quality
__--mlock_______________force_system_to_keep_model_in_RAM_rather_than_swapping_or_compressing
_--no-mmap_____________do_not_memory-map_model(slower_load_but_may_reduce_pageouts_if_not_using_mlock)
__--numa________________attempt_optimizations_that_help_on_some_NUMA_systems
__-ngl_N,
--n-gpu-layers_N
________________________number_of_layers_to_store_in_VRAM
_-ts_SPLIT--tensor-split_SPLIT
________________________how_to_split_tensors_across_multiple_GPUs,_comma-separated_list_of_proportions,e.g.3,1
__-mg_i,
--main-gpu_i___the_GPU_to_use_for_scratch_and_small_tensors
__-nommq,
--no-mul-mat-q
________________________use_cuBLAS_instead_of_custom_mul_mat_q_CUDA_kernels.
_______________________Not_recommended_since_this_is_both_slower_and_uses_more_VRAM.
__-m_FNAME,
--model_FNAME
_____________________model_path(default:models/7B/ggml-model-f16.gguf)
__-a_ALIAS,
--alias_ALIAS
set_an_alias_for_the_model,will_be_added_asmodelfield_in_completion_response
--lora_FNAME__________apply_LoRA_adapter(implies--no-mmap)
--lora-base_FNAME_____optional_model_to_use_as_a_base_for_the_layers_modified_by_the_LoRA_adapter
--host________________ip_address_to_listen(default
(default:127.0.0.1)
--port_PORT___________port_to_listen(default
(default:8080)
--path_PUBLIC_PATH____path_from_which_to_serve_static_files(default/zip/llama.cpp/server/public)
__-to_N,
--timeout_N____server_read/write_timeout_in_seconds
(default:600)
--embedding___________enable_embedding_vector_output(default:disabled)
__-np_N,
--parallel_N___number_of_slots_for_process_requests
(default:1)
__-cb,
--cont-batching__enable_continuous_batching
(a.k.a_dynamic_batching)
(default:_disabled)
___-spf_FNAME,--system-prompt-file_FNAME
Set_a_file_to_load_a_system_prompt(initial_prompt_of_all_slots),this_is_useful_for_chat_applications.
__--mmproj_MMPROJ_FILE__path_to_a_multimodal_projector_file_for_LLaVA.
__--log-disable_________disables_logging_to_a_file.
__--nobrowser___________Do_not_attempt_to_open_a_web_browser_tab_at_startup.
_--unsecure____________disables_pledge()sandboxing_on_Linux_and_OpenBSD.png
mv: failed to access './photos/usage:
/D/llama/llamafile-0.4.1/llamafile-0.4.1/llamafile.exe[options]'$'\n\n''options:'$'\n''
-h,
--help________________show_this_help_message_and_exit'$'\n''
-v,
--verbose_____________verbose_output
(default:_disabled)'$'\n''
-t_N,
--threads_N_________number_of_threads_to_use_during_computation
(default:_6)'$'\n''
-tb_N,
--threads-batch_N__number_of_threads_to_use_during_batch_and_prompt_processing
(default:same_as--threads)'$'\n''
-c_N,
--ctx-size_N________size_of_the_prompt_context
(default:_512)'$'\n''
--rope-scaling
{none,linear,yarn}'$'\n''____________________________RoPE_frequency_scaling_method,_defaults_to_linear_unless_specified_by_the_model'$'\n''
--rope-freq-base_N________RoPE_base_frequency
(default:_loaded_from_model)'$'\n''
--rope-freq-scale_N_______RoPE_frequency_scaling_factor,_expands_context_by_a_factor_of_1/N'$'\n''--yarn-ext-factor_N_______YaRN:extrapolation_mix_factor(default:_1.0,0.0=_full_interpolation)'$'\n''--yarn-attn-factor_N______YaRN:_scale_sqrt(t)or_attention_magnitude(default:_1.0)'$'\n''--yarn-beta-slow_N________YaRN:high_correction_dim_or_alpha(default:_1.0)'$'\n''--yarn-beta-fast_N________YaRN:low_correction_dim_or_beta(default:_32.0)'$'\n''
-b_N,
--batch-size_N______batch_size_for_prompt_processing
(default:_512)'$'\n''
--memory-f32______________use_f32_instead_of_f16_for_memory_key+value
(default:_disabled)'$'\n''____________________________not_recommended:_doubles_context_memory_required_and_no_measurable_increase_in_quality'$'\n''--mlock_______________force_system_to_keep_model_in_RAM_rather_than_swapping_or_compressing'$'\n''
--no-mmap_____________do_not_memory-map_model
(slower_load_but_may_reduce_pageouts_if_not_using_mlock)'$'\n''
--numa________________attempt_optimizations_that_help_on_some_NUMA_systems'$'\n''
-ngl_N,
--n-gpu-layers_N'$'\n''_____________________number_of_layers_to_store_in_VRAM'$'\n''-ts_SPLIT--tensor-split_SPLIT'$'\n''__________________how_to_split_tensors_across_multiple_GPUs,_comma-separated_list_of_proportions,_e.g._3,1'$'\n''-mg_i,--main-gpu_i___the_GPU_to_use_for_scratch_and_small_tensors'$'\n''-nommq,--no-mul-mat-q'$'\n''________________________use_cuBLAS_instead_of_custom_mul_mat_q_CUDA_kernels.'$'\n''_____________________Not_recommended_since_this_is_both_slower_and_uses_more_VRAM.'$'\n''-m_FNAME,--model_FNAME'$'\n''____model_path(default:_models/7B/ggml-model-f16.gguf)'$'\n''-a_ALIAS,--alias_ALIAS'$'\n''set_an_alias_for_the_model,will_be_added_asmodel_field_in_completion_response'$'\n''--lora_FNAME__________apply_LoRA_adapter(implies--no-mmap)'$'\n''--lora-base_FNAME_____optional_model_to_use_as_a_base_for_the_layers_modified_by_the_LoRA_adapter'$'\n''--host________________ip_address_to_listen(default(default:_127.0.0.1)'$'\n''--port_PORT___________port_to_listen(default(default:_8080)'$'\n''--path_PUBLIC_PATH____path_from_which_to_serve_static_files(default/zip/llama.cpp/server/public)'$'\n''-to_N,--timeout_N____server_read/write_timeout_in_seconds(default:_600)'$'\n''--embedding___________enable_embedding_vector_output(default:_disabled)'$'\n''-np_N,--parallel_N___number_of_slots_for_process_requests(default:_1)'$'\n''-cb,--cont-batching__enable_continuous_batching(a.k.a_dynamic_batching)(default:disabled)'$'\n''-spf_FNAME,--system-prompt-file_FNAME'$'\n''_______________Set_a_file_to_load_a_system_prompt(initial_prompt_of_all_slots),_this_is_useful_for_chat_applications.'$'\n''--mmproj_MMPROJ_FILE__path_to_a_multimodal_projector_file_for_LLaVA.'$'\n''--log-disable_________disables_logging_to_a_file.'$'\n''--nobrowser___________Do_not_attempt_to_open_a_web_browser_tab_at_startup.'$'\n''--unsecure____________disables_pledge()sandboxing_on_Linux_and_OpenBSD.png': File name too long
./photos/usage:
/D/_llama/llamafile-0.4.1/llamafile-0.4.1/llamafile.exe[options]

options:
__-h,--help________________show_this_help_message_and_exit
__-v,
--verbose_____________verbose_output_(default:disabled)
__-t_N,
--threads_N_________number_of_threads_to_use_during_computation_(default:6)
__-tb_N,
--threads-batch_N__number_of_threads_to_use_during_batch_and_prompt_processing_(default:same_as--threads)
__-c_N,--ctx-size_N________size_of_the_prompt_context(default:_512)
_--rope-scaling{none,linear,yarn}
____________________________RoPE_frequency_scaling_method,_defaults_to_linear_unless_specified_by_the_model
_--rope-freq-base_N________RoPE_base_frequency(default:_loaded_from_model)
__--rope-freq-scale_N_______RoPE_frequency_scaling_factor,_expands_context_by_a_factor_of_1/N
__--yarn-ext-factor_N_______YaRN:extrapolation_mix_factor(default:_1.0,0.0=_full_interpolation)
__--yarn-attn-factor_N______YaRN:_scale_sqrt(t)or_attention_magnitude(default:_1.0)
__--yarn-beta-slow_N________YaRN:high_correction_dim_or_alpha(default:_1.0)
_--yarn-beta-fast_N________YaRN:low_correction_dim_or_beta(default:32.0)
__-b_N,
--batch-size_N______batch_size_for_prompt_processing
(default:_512)
_--memory-f32______________use_f32_instead_of_f16_for_memory_key+value(default:_disabled)
____________________________not_recommended:doubles_context_memory_required_and_no_measurable_increase_in_quality
__--mlock_______________force_system_to_keep_model_in_RAM_rather_than_swapping_or_compressing
_--no-mmap_____________do_not_memory-map_model(slower_load_but_may_reduce_pageouts_if_not_using_mlock)
__--numa________________attempt_optimizations_that_help_on_some_NUMA_systems
__-ngl_N,
--n-gpu-layers_N
________________________number_of_layers_to_store_in_VRAM
_-ts_SPLIT--tensor-split_SPLIT
________________________how_to_split_tensors_across_multiple_GPUs,_comma-separated_list_of_proportions,e.g.3,1
__-mg_i,
--main-gpu_i___the_GPU_to_use_for_scratch_and_small_tensors
__-nommq,
--no-mul-mat-q
________________________use_cuBLAS_instead_of_custom_mul_mat_q_CUDA_kernels.
_______________________Not_recommended_since_this_is_both_slower_and_uses_more_VRAM.
__-m_FNAME,
--model_FNAME
_______________________model_path(default:models/7B/ggml-model-f16.gguf)
__-a_ALIAS,
--alias_ALIAS
________________________set_an_alias_for_the_model,will_be_added_asmodelfield_in_completion_response
--lora_FNAME__________apply_LoRA_adapter(implies--no-mmap)
--lora-base_FNAME_____optional_model_to_use_as_a_base_for_the_layers_modified_by_the_LoRA_adapter
--host________________ip_address_to_listen(default
(default:127.0.0.1)
--port_PORT___________port_to_listen(default
(default:8080)
--path_PUBLIC_PATH____path_from_which_to_serve_static_files(default/zip/llama.cpp/server/public)
__-to_N,
--timeout_N____server_read/write_timeout_in_seconds
(default:600)
--embedding___________enable_embedding_vector_output(default:disabled)
__-np_N,
--parallel_N___number_of_slots_for_process_requests
(default:1)
__-cb,
--cont-batching__enable_continuous_batching
(a.k.a_dynamic_batching)
(default:_disabled)
___-spf_FNAME,--system-prompt-file_FNAME
_______________________Set_a_file_to_load_a_system_prompt(initial_prompt_of_all_slots),_this_is_useful_for_chat_applications.
__--mmproj_MMPROJ_FILE__path_to_a_multimodal_projector_file_for_LLaVA.
__--log-disable_________disables_logging_to_a_file.
__--nobrowser___________Do_not_attempt_to_open_a_web_browser_tab_at_startup.
__--unsecure____________disables_pledge()_sandboxing_on_Linux_and_OpenBSD.png: fatal error: failed to rename file`

i'm lost i guess ... can you please help?

@alextremblay

> sorry to bother, windows user here ...
>
> for me it seems the "newname" generation is completely borked under windows. $ ./rename-pictures.sh ./photos results in : `renaming ./photos/x001Foo (11).png to ./photos/usage:_/D/_llama/llamafile-0.4.1/llamafile-0.4.1/llamafile.exe[options]
>
> i'm lost i guess ... can you please help?

this is a UNIX shell script. How are you running it on Windows?

Cygwin? WSL? Git Bash? Something else?

that, I think, will determine why some of this shell code isn't working properly

@aibdance22

@echo off
REM rename-pictures.bat
REM Windows batch version of the Unix shell script for renaming images
REM Author: Converted from Justine Tunney's original script
REM License: Apache 2.0
REM
REM This batch script can be used to ensure all the images in a folder
REM have good descriptive filenames that are written in English. It's
REM based on the Mistral 7b and LLaVA v1.5 models.
REM
REM For example, the following command:
REM
REM rename-pictures.bat "C:\Users\YourName\Pictures"
REM
REM Will iterate recursively through the specified directories. For each
REM file, it'll ask the Mistral model if the filename looks reasonable. If
REM Mistral doesn't like the filename, then this script will ask LLaVA to
REM analyze the picture and generate a new filename with lowercase letters
REM and underscores. Most image formats are supported (e.g. png/jpg/gif)
REM and newer more exotic ones (e.g. webp) are also supported if ImageMagick
REM is installed.
REM
REM You need to have a system with at minimum 8gb of RAM. This will work
REM even on older computers without GPUs; just let it run overnight!

setlocal enabledelayedexpansion
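REM Delayed expansion lets !var! reads pick up values assigned inside
REM parenthesized blocks and called labels at run time.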

REM Check if help is requested
if "%1"=="--help" (
echo usage: %~nx0 PATH...
exit /b 0
)

REM Check if argument is provided
if "%1"=="" (
echo %~nx0: fatal error: missing operand
echo renaming terminated.
exit /b 1
)

REM Check for required files
where llamafile.exe >nul 2>&1
if errorlevel 1 (
echo llamafile.exe: fatal error: llamafile.exe not found in PATH
echo please download llamafile.exe and put it on the system path
echo renaming terminated.
exit /b 1
)

REM Check for LLaVA model file
if not exist "llava-v1.5-7b-Q4_K.gguf" (
echo llava-v1.5-7b-Q4_K.gguf: fatal error: LLaVA model file not found
echo please ensure llava-v1.5-7b-Q4_K.gguf is in the same directory as this script
echo renaming terminated.
exit /b 1
)

REM Check for Mistral model file
if not exist "mistral-7b-instruct-v0.1.Q4_K_M.gguf" (
echo mistral-7b-instruct-v0.1.Q4_K_M.gguf: fatal error: Mistral model file not found
echo please ensure mistral-7b-instruct-v0.1.Q4_K_M.gguf is in the same directory as this script
echo renaming terminated.
exit /b 1
)

REM Check for LLaVA multimodal projector (required for vision)
if not exist "llava-v1.5-7b-mmproj-Q4_0.gguf" (
echo llava-v1.5-7b-mmproj-Q4_0.gguf: fatal error: LLaVA multimodal projector not found
echo please ensure llava-v1.5-7b-mmproj-Q4_0.gguf is in the same directory as this script
echo renaming terminated.
exit /b 1
)

REM Check for ImageMagick (optional)
where convert >nul 2>&1
if errorlevel 1 (
echo %~nx0: warning: convert command not found ^(please install imagemagick so we can analyze image formats like webp^)
set CONVERT_AVAILABLE=0
) else (
set CONVERT_AVAILABLE=1
)

REM Create a temporary file for storing file list
set TEMP_FILE=%TEMP%\rename_files_%RANDOM%.txt

REM Process each argument
:process_args
if "%1"=="" goto :end_args

REM Check if path exists
if not exist "%~1" (
echo %~1: fatal error: file not found
echo renaming terminated.
exit /b 1
)

REM Find all files recursively and create a shuffled list
echo Finding files in "%~1"...
dir /s /b /a-d "%~1\*.*" > "%TEMP_FILE%"

REM Process each file
for /f "usebackq delims=" %%F in ("%TEMP_FILE%") do (
call :process_file "%%F"
)

shift
goto :process_args

:end_args
REM Clean up
if exist "%TEMP_FILE%" del "%TEMP_FILE%"
echo All done!
exit /b 0

REM Function to check if filename is good
:is_good
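REM Returns errorlevel 0 when Mistral answers "yes" (the filename is
REM good) and errorlevel 1 otherwise. Argument 1 is the file's full path.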
set "filename=%~nx1"
set "temp_response=%TEMP%\mistral_response_%RANDOM%.txt"

REM Call Mistral via llamafile.exe to check filename
llamafile.exe -m "mistral-7b-instruct-v0.1.Q4_K_M.gguf" --temp 0 -ngl 35 --grammar "root ::= "yes" | "no"" -p "[INST]Does the filename '%filename%' look like readable english text?[/INST]" --silent-prompt > "%temp_response%" 2>nul

if errorlevel 1 (
if exist "%temp_response%" del "%temp_response%"
exit /b 1
)

set /p response=<"%temp_response%"
del "%temp_response%"

if /i "%response%"=="yes" (
exit /b 0
) else (
exit /b 1
)

REM Function to pick a new name
:pick_name
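REM On success, sets new_name (with spaces replaced by underscores) and
REM returns errorlevel 0; returns errorlevel 1 if LLaVA cannot describe
REM the image even after an optional ImageMagick conversion to PNG.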
set "image_path=%~1"
set "temp_response=%TEMP%\llava_response_%RANDOM%.txt"

REM Call LLaVA via llamafile.exe to generate new filename
llamafile.exe -m "llava-v1.5-7b-Q4_K.gguf" --mmproj "llava-v1.5-7b-mmproj-Q4_0.gguf" --image "%image_path%" --temp 0.3 -ngl 35 --grammar "root ::= [a-z]+ (" " [a-z]+)+" -n 10 -p "### User: The image has...### Assistant:" --silent-prompt > "%temp_response%" 2>nul

if errorlevel 1 (
REM Try with ImageMagick conversion if available
if !CONVERT_AVAILABLE! equ 1 (
set "temp_png=%TEMP%\temp_image_%RANDOM%.png"
convert "%image_path%" "!temp_png!" 2>nul
if exist "!temp_png!" (
llamafile.exe -m "llava-v1.5-7b-Q4_K.gguf" --mmproj "llava-v1.5-7b-mmproj-Q4_0.gguf" --image "!temp_png!" --temp 0.3 -ngl 35 --grammar "root ::= [a-z]+ (" " [a-z]+)+" -n 10 -p "### User: The image has...### Assistant:" --silent-prompt > "%temp_response%" 2>nul
del "!temp_png!"
if errorlevel 1 (
if exist "%temp_response%" del "%temp_response%"
exit /b 1
)
) else (
if exist "%temp_response%" del "%temp_response%"
exit /b 1
)
) else (
if exist "%temp_response%" del "%temp_response%"
exit /b 1
)
)

set /p new_name=<"%temp_response%"
del "%temp_response%"

REM Replace spaces with underscores
set "new_name=!new_name: =_!"

exit /b 0

REM Function to process individual file
:process_file
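REM Skips non-image extensions, asks Mistral whether the current name is
REM good, otherwise asks LLaVA for a new name, makes it unique within the
REM same directory, and renames the file.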
set "filepath=%~1"
set "filename=%~nx1"
set "directory=%~dp1"
set "extension=%~x1"

REM Check if it's likely an image file
echo !extension! | findstr /i /c:".jpg" /c:".jpeg" /c:".png" /c:".gif" /c:".bmp" /c:".webp" /c:".tiff" /c:".tif" >nul
if errorlevel 1 (
goto :eof
)

echo Processing: !filepath!

REM Check if filename is already good
call :is_good "!filepath!"
if !errorlevel! equ 0 (
echo Skipping !filepath! ^(mistral says it's good^)
goto :eof
)

REM Generate new filename
call :pick_name "!filepath!"
if !errorlevel! neq 0 (
echo Warning: failed to generate new name for !filepath!
goto :eof
)

REM Construct full new path
set "new_filepath=!directory!!new_name!!extension!"

REM Ensure new name is unique
set "counter=2"
set "base_name=!new_name!"
:check_unique
if exist "!new_filepath!" (
set "new_name=!base_name!-!counter!"
set "new_filepath=!directory!!new_name!!extension!"
set /a counter+=1
goto :check_unique
)

REM Rename the file
echo Renaming "!filepath!" to "!new_filepath!"
ren "!filepath!" "!new_name!!extension!"
if errorlevel 1 (
echo !new_filepath!: fatal error: failed to rename file
echo renaming terminated.
exit /b 1
)

goto :eof
