Compare commits

...

39 Commits

SHA1 Message Date
d3132b0ca3 yt-dlp-ntfy: mention PID when already running 2024-11-16 20:34:17 +01:00
b9f7855afc yt-dlp-ntfy: silent kill command 2024-11-16 20:32:45 +01:00
eaa9059fb5 yt-dlp-ntfy: fix non-working trap statement 2024-11-16 20:29:20 +01:00
bb1eaa30ad yt-dlp-ntfy: more messages 2024-11-16 20:29:05 +01:00
7728ebfb16 yt-dlp-ntfy: do not output all commands 2024-11-16 20:28:31 +01:00
9a64735f3c yt-dlp-ntfy: filter out keepalive events 2024-11-16 20:23:25 +01:00
0a9617626c experimental: youtube comment fs 2024-11-16 20:18:58 +01:00
e547a58026 add yt-dlp-ntfy 2024-11-14 19:53:46 +01:00
64e999d93c youtube-dl.sh: update
- fix hash bang
- use yt-dlp
- get cookies from my current browser of choice
2024-07-22 18:14:38 +02:00
e309917948 setvolumeto50.sh: add 2024-07-17 20:41:29 +02:00
e2dc9c8a8d merge-csv-lists: refactor again 2023-12-29 11:31:27 +01:00
a7c1936acf merge-csv-lists: refactor 2023-12-29 11:29:57 +01:00
7ff425cb2f ytdlqueue: add 2023-12-29 11:25:27 +01:00
523f959ad3 GetContainerLayerContents: add 2023-12-29 11:25:20 +01:00
163a53b972 steam_owned_games_readable: add 2023-12-29 11:25:04 +01:00
f9faf4f8a4 merge-csv-lists: do not sort similar same words together 2023-10-22 17:48:45 +02:00
07430482d4 merge-csv-lists: allow any number of arguments 2023-10-22 17:43:02 +02:00
68a60ea873 Improve name handling
Considered reversed first and last name order for uniqueness
Sort names with same words close to each other
2023-10-22 17:36:49 +02:00
019813cc30 bullet_list_to_unique_list: add 2023-10-22 17:35:16 +02:00
52becb8909 split_and_flat: add 2023-10-22 17:35:07 +02:00
7477c13c34 merge-csv-lists: add 2023-10-22 17:34:48 +02:00
03495ab43d embed_lyrics: add 2023-02-05 14:32:03 +01:00
9278aea593 add forced to list of preserved track comments 2022-12-28 11:52:12 +01:00
2e029290f9 copy_potd.sh: add TODO 2022-10-26 12:19:13 +02:00
647c207091 convert_to_flac.sh: add 2022-10-26 12:19:03 +02:00
d7c082249f strip_track_except: use complete TMPFILE name, exit when ffmpeg errors out 2022-10-08 16:24:22 +02:00
8dcb77789d strip_track_except: fix UNDESIRED_TRACK_COUNT 2022-10-08 16:23:30 +02:00
c33104c552 strip_track_except: copy to tempfile, then overwrite 2022-10-08 01:11:44 +02:00
69255ee577 strip_track_except: actually use UNDESIRED_TRACK_COUNT 2022-10-08 01:11:10 +02:00
df1f8f4889 strip_track_except: improve
detect if more than 1 track is present
detect if undesired tracks are present
show progress bar using pv
2022-10-08 00:30:24 +02:00
ec5f7d9379 strip_track_except: add a new script 2022-10-07 22:52:55 +02:00
e6bd94c27d rename script 2022-10-07 22:29:25 +02:00
9d435e9216 delete_video_track_titles: add more information 2022-10-07 22:28:28 +02:00
17574160da delete_video_track_titles: add colorprint, print processed file 2022-10-07 22:28:06 +02:00
0e53f76531 delete_video_track_titles: set -euo pipefail 2022-10-07 22:27:18 +02:00
50d9961d19 Add more whitelisted words 2022-07-28 13:37:18 +02:00
d3aef45acc Add video_convert_normalize 2022-07-09 21:19:00 +02:00
f646e81c11 update sub_extract_convert 2022-07-09 21:19:00 +02:00
82dafd7e90 add delete_video_track_titles 2022-07-09 21:19:00 +02:00
20 changed files with 663 additions and 7 deletions


@ -0,0 +1,51 @@
param(
    [Parameter(Mandatory=$true)]
    [string]$imageName
)
# Save the Docker image as a tarball
$imageTar = "$imageName.tar"
docker save -o $imageTar $imageName
# Create a temporary directory to extract layers
$tempDir = [System.IO.Path]::GetTempPath() + [System.Guid]::NewGuid().ToString()
New-Item -ItemType Directory -Force -Path $tempDir | Out-Null
# Extract the image tarball using 'tar' command
tar -xf $imageTar -C $tempDir
# Function to list files in a layer
function ListLayerFiles {
    param (
        [string]$layerPath
    )
    $layerTar = Get-ChildItem -Path $layerPath -Filter "*.tar" -File
    $layerDir = [System.IO.Path]::Combine($tempDir, [System.IO.Path]::GetRandomFileName())
    New-Item -ItemType Directory -Force -Path $layerDir | Out-Null
    # Extract the layer tarball using 'tar'
    tar -xf $layerTar.FullName -C $layerDir
    # List files in the layer
    Write-Host "Files in layer:"
    Get-ChildItem -Path $layerDir -Recurse
}
# List all layers and ask user to choose one
$layerDirs = Get-ChildItem -Path $tempDir -Directory
$layerDirs | ForEach-Object -Begin { $i = 0 } -Process {
    Write-Host "[$i]: $($_.Name)"
    $i++
}
# Cast to [int]: Read-Host returns a string, and a string comparison would misorder indices like "10" and "2"
[int]$userChoice = Read-Host "Enter the index of the layer to list"
if ($userChoice -lt $layerDirs.Count -and $userChoice -ge 0) {
    ListLayerFiles -layerPath $layerDirs[$userChoice].FullName
} else {
    Write-Host "Invalid index selected."
}
# Cleanup
Remove-Item -Path $imageTar
Remove-Item -Path $tempDir -Recurse
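
Assuming the script is saved under the name from the commit log (GetContainerLayerContents.ps1 — the filename is not shown in this view), a typical invocation could look like:

.\GetContainerLayerContents.ps1 -imageName alpine

Note this sketch relies on the legacy 'docker save' tarball layout, where each layer sits in its own directory containing a layer tarball; images exported in OCI layout store their layers differently.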


@ -0,0 +1,39 @@
#!/usr/bin/python
import sys
import io


def extract_unique_values(input_string):
    # Split the input string by newline to get the list of entries
    input_list = input_string.strip().split("\n")
    # Extract values from each entry in the format $VALUE1 - $VALUE2
    values = [item.strip() for entry in input_list for item in entry.split("-")]
    # Remove duplicates by converting to a set and back to a list
    unique_values = list(set(values))
    # Sort the list
    sorted_values = sorted(unique_values)
    # Convert the list back to a comma-separated string
    output = ",".join(sorted_values)
    return output


def main():
    sys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding="utf-8")
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
    # Read the input from standard input
    input_string = sys.stdin.read()
    # Extract unique values
    result = extract_unique_values(input_string)
    print(result)


if __name__ == "__main__":
    main()
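
As a worked example (invoking the file by its name from the commit log, here assumed to be bullet_list_to_unique_list.py), two 'A - B' entries sharing a value collapse into one sorted, comma-separated line:

printf 'Alpha - Beta\nBeta - Gamma\n' | python bullet_list_to_unique_list.py
Alpha,Beta,Gamma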

comment_fs/comments_fs.py Normal file

@ -0,0 +1,104 @@
#!/usr/bin/env python3
import os
import stat
import errno
import fuse
from time import time
import json
from collections import defaultdict

fuse.fuse_python_api = (0, 2)


class MyStat(fuse.Stat):
    def __init__(self):
        self.st_mode = stat.S_IFDIR | 0o755
        self.st_ino = 0
        self.st_dev = 0
        self.st_nlink = 2
        self.st_uid = 0
        self.st_gid = 0
        self.st_size = 4096
        self.st_atime = 0
        self.st_mtime = 0
        self.st_ctime = 0


class CommentFS(fuse.Fuse):
    def __init__(self, *args, **kw):
        fuse.Fuse.__init__(self, *args, **kw)
        with open('comments.jsonl', 'r', encoding='utf-8') as f:
            self.comments = [json.loads(line) for line in f]
        self.tree = self.build_comment_tree(self.comments)
        self.files = {}
        self.build_file_structure()

    def build_comment_tree(self, comments):
        # Map each parent id to its replies; top-level comments go under ''
        tree = defaultdict(list)
        for comment in comments:
            parent = comment['parent'] if comment['parent'] != 'root' else ''
            tree[parent].append(comment)
        return tree

    def build_file_structure(self):
        def add_comment(comment, path):
            comment_path = os.path.join(path, comment['id'])
            self.files[comment_path] = comment
            for reply in self.tree.get(comment['id'], []):
                add_comment(reply, comment_path)
        for comment in self.tree['']:
            add_comment(comment, '/')

    def getattr(self, path):
        st = MyStat()
        st.st_atime = int(time())
        st.st_mtime = st.st_atime
        st.st_ctime = st.st_atime
        if path == '/':
            return st
        elif path in self.files:
            st.st_mode = stat.S_IFREG | 0o444
            st.st_nlink = 1
            content = f"ID: {self.files[path]['id']}\nText: {self.files[path]['text']}\nParent: {self.files[path]['parent']}\n"
            st.st_size = len(content.encode('utf-8'))
            return st
        else:
            return -errno.ENOENT

    def readdir(self, path, offset):
        dirents = ['.', '..']
        if path == '/':
            dirents.extend(comment['id'] for comment in self.tree[''])
        # Note: only top-level comment paths are handled below; deeper reply
        # paths are not browsable in this version (comments_fs2.py extends this)
        elif path[1:] in [comment['id'] for comment in self.comments]:
            dirents.extend(reply['id'] for reply in self.tree.get(path[1:], []))
        for r in dirents:
            yield fuse.Direntry(r)

    def open(self, path, flags):
        if path not in self.files:
            return -errno.ENOENT
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            return -errno.EACCES
        return 0

    def read(self, path, size, offset):
        if path not in self.files:
            return -errno.ENOENT
        comment = self.files[path]
        content = f"ID: {comment['id']}\nText: {comment['text']}\nParent: {comment['parent']}\n"
        return content.encode('utf-8')[offset:offset+size]


def main():
    usage = "YouTubeCommentFS: A filesystem to browse YouTube comments"
    server = CommentFS(version="%prog " + fuse.__version__, usage=usage, dash_s_do='setsingle')
    server.parse(errex=1)
    server.main()


if __name__ == '__main__':
    main()

comment_fs/comments_fs2.py Normal file

@ -0,0 +1,117 @@
#!/usr/bin/env python3
import os
import stat
import errno
import fuse
from time import time
import json
from collections import defaultdict

fuse.fuse_python_api = (0, 2)


class MyStat(fuse.Stat):
    def __init__(self):
        self.st_mode = stat.S_IFDIR | 0o755
        self.st_ino = 0
        self.st_dev = 0
        self.st_nlink = 2
        self.st_uid = 0
        self.st_gid = 0
        self.st_size = 4096
        self.st_atime = 0
        self.st_mtime = 0
        self.st_ctime = 0


class CommentFS(fuse.Fuse):
    def __init__(self, *args, **kw):
        fuse.Fuse.__init__(self, *args, **kw)
        with open('comments.jsonl', 'r', encoding='utf-8') as f:
            self.comments = [json.loads(line) for line in f]
        self.tree = self.build_comment_tree(self.comments)
        self.files = {}
        self.directories = set()
        self.build_file_structure()

    def build_comment_tree(self, comments):
        tree = defaultdict(list)
        for comment in comments:
            parent = comment['parent'] if comment['parent'] != 'root' else ''
            tree[parent].append(comment)
        return tree

    def build_file_structure(self):
        def add_comment(comment, path):
            comment_path = os.path.join(path, comment['id'])
            self.files[comment_path] = comment
            if comment['id'] in self.tree:
                self.directories.add(comment_path)
                parent_file_path = os.path.join(comment_path, 'parent')
                self.files[parent_file_path] = comment
                for reply in self.tree[comment['id']]:
                    add_comment(reply, comment_path)
        for comment in self.tree['']:
            add_comment(comment, '/')

    def getattr(self, path):
        st = MyStat()
        st.st_atime = int(time())
        st.st_mtime = st.st_atime
        st.st_ctime = st.st_atime
        if path == '/' or path in self.directories:
            st.st_mode = stat.S_IFDIR | 0o755
            return st
        elif path in self.files:
            st.st_mode = stat.S_IFREG | 0o444
            st.st_nlink = 1
            content = f"ID: {self.files[path]['id']}\nText: {self.files[path]['text']}\nParent: {self.files[path]['parent']}\n"
            st.st_size = len(content.encode('utf-8'))
            return st
        else:
            return -errno.ENOENT

    def readdir(self, path, offset):
        dirents = ['.', '..']
        if path == '/':
            dirents.extend(comment['id'] for comment in self.tree[''])
        elif path in self.directories:
            dirents.append('parent')
            dirents.extend(reply['id'] for reply in self.tree[path.split('/')[-1]])
        for r in dirents:
            yield fuse.Direntry(r)

    def open(self, path, flags):
        if path not in self.files:
            return -errno.ENOENT
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            return -errno.EACCES
        return 0

    def read(self, path, size, offset):
        if path not in self.files:
            return -errno.ENOENT
        comment = self.files[path]
        content = f"ID: {comment['id']}\nText: {comment['text']}\nParent: {comment['parent']}\n"
        return content.encode('utf-8')[offset:offset+size]


def main():
    usage = "YouTubeCommentFS: A filesystem to browse YouTube comments"
    server = CommentFS(version="%prog " + fuse.__version__, usage=usage, dash_s_do='setsingle')
    server.parser.add_option(mountopt="uid", metavar="UID", default=os.getuid(),
                             help="Set the owner of the mounted filesystem")
    server.parser.add_option(mountopt="gid", metavar="GID", default=os.getgid(),
                             help="Set the group of the mounted filesystem")
    server.multithreaded = False
    server.allow_other = True
    server.parse(errex=1)
    server.main()


if __name__ == '__main__':
    main()


@ -0,0 +1,37 @@
#!/usr/bin/env python3
import json
from collections import defaultdict


def build_comment_tree(comments):
    tree = defaultdict(list)
    root_comments = []
    print(f"Total comments: {len(comments)}")  # Debug info
    for comment in comments:
        if comment['parent'] == "root":
            root_comments.append(comment)
        else:
            tree[comment['parent']].append(comment)
    print(f"Root comments: {len(root_comments)}")  # Debug info

    def build_subtree(comment):
        return {
            # "id": comment['id'],
            "text": comment['text'],
            "replies": [build_subtree(reply) for reply in tree[comment['id']]]
        }

    return [build_subtree(comment) for comment in root_comments]


with open('comments.jsonl', 'r', encoding='utf-8') as f:
    comments = [json.loads(line) for line in f]

comment_tree = build_comment_tree(comments)
print(f"Final tree length: {len(comment_tree)}")  # Debug info

# encoding='utf-8' so ensure_ascii=False output doesn't depend on the locale
with open('comment_tree.json', 'w', encoding='utf-8') as f:
    json.dump(comment_tree, f, ensure_ascii=False, indent=2)
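
All three comment_fs scripts read a comments.jsonl file with one JSON object per line, using the id, text, and parent fields (parent is "root" for top-level comments). A hypothetical two-line input:

{"id": "abc123", "text": "Great video!", "parent": "root"}
{"id": "def456", "text": "Agreed.", "parent": "abc123"}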

convert_to_flac.sh Executable file

@ -0,0 +1,2 @@
#!/usr/bin/env bash
for i in *.m4a; do ffmpeg -i "$i" -c:a flac "${i%.*}.flac"; done


@ -21,3 +21,6 @@ else
echo "$ORIGINAL does not exist." echo "$ORIGINAL does not exist."
exit 1 exit 1
fi fi
# TODO: use below to create "copy all wallpapers" function
# for file in $HOME/.cache/plasma_engine_potd/*; do bfile=$(basename $file); cp "$file" "$HOME/Pictures/wallpapers/${bfile}_${TODAY}.jpg"; done

delete_track_titles.sh Executable file

@ -0,0 +1,33 @@
#!/usr/bin/env bash
set -euo pipefail

colorprint() {
    echo -e "\e[31m${1}\e[0m"
}

SCRIPT_NAME=$(basename "$0")
# ${1-} avoids an unbound-variable error under 'set -u' when no argument is given
test -z "${1-}" && echo "File not specified or doesn't exist, aborting. Usage: $SCRIPT_NAME \$FILENAME" && exit 1
FILE=$1
# TODO: use -J and jq '.tracks[] | [.properties.number, .type, .codec, .properties.track_name]'
TRACKS=$(mkvmerge --identify "$FILE" | awk -F"[: ]" '$0 ~ /Track ID/{print $3}')
# capture JSON output and remove the excessively large codec_private_data field
JSON_OUTPUT=$(mkvmerge -J "$FILE" | jq '.tracks[] | del(.properties.codec_private_data)')
echo -n "Processing "
colorprint "$FILE"
for TRACK in $TRACKS; do
    TRACK_DATA=$(echo "$JSON_OUTPUT" | jq "select(.id == $TRACK)")
    TRACK_NAME=$(echo "$TRACK_DATA" | jq ".properties.track_name")
    TRACK_TYPE=$(echo "$TRACK_DATA" | jq ".type")
    TRACK_CODEC=$(echo "$TRACK_DATA" | jq ".codec")
    TRACK_LANGUAGE=$(echo "$TRACK_DATA" | jq ".properties.language")
    echo -en "\tTrack no. ${TRACK} (${TRACK_TYPE}, ${TRACK_CODEC}, ${TRACK_LANGUAGE})... "
    [[ $TRACK_NAME == "null" ]] && echo "has no title, skipping." && continue
    echo -n "currently set to $TRACK_NAME... "
    shopt -s nocasematch
    # [[:space:]] instead of \s: bash's ERE syntax has no \s escape
    [[ $TRACK_NAME =~ full|[[:space:]]titles|comment|director|sdh|sign|forced ]] && echo "includes whitelisted word, not deleting." && continue
    echo -n "deleting... "
    # mkvmerge --identify indexes tracks from 0, while mkvpropedit indexes from 1
    mkvpropedit "$FILE" --edit track:"$((TRACK+1))" --delete name &>/dev/null
    echo "done."
done
echo -en "\tDeleting file title... "
mkvpropedit "$FILE" --edit info --delete title &>/dev/null
echo "done."

embed_lyrics Normal file

@ -0,0 +1,5 @@
#!/usr/bin/env fish
metaflac \
--set-tag-from-file=LYRICS=\
(env FZF_DEFAULT_COMMAND="fdfind . /srv/mergerfs/storage/download/music -e lrc" fzf) \
(env FZF_DEFAULT_COMMAND="fdfind . /srv/mergerfs/storage/media/music2/ -e flac" fzf)

merge-csv-lists.py Normal file

@ -0,0 +1,45 @@
#!/usr/bin/python
import argparse


def flatten_and_split(input_string):
    elements = input_string.replace("\n", "").split(",")
    flat_list = [item.strip() for element in elements for item in element.split("-")]
    return ",".join(flat_list)


def combine_and_uniquify(*inputs):
    combined_list = [
        item.strip().title() for input_list in inputs for item in input_list.split(",")
    ]
    unique_names = set(combined_list)
    final_set = set()
    for name in unique_names:
        parts = name.split()
        if len(parts) == 2:
            first, last = parts
            reversed_name = f"{last} {first}"
            # Add the name if its reversed variant is not already in the final set
            if reversed_name not in final_set:
                final_set.add(name)
        else:
            final_set.add(name)
    return ",".join(sorted(final_set))


def main():
    parser = argparse.ArgumentParser(
        description="Combine multiple comma-separated lists into one unique sorted list."
    )
    parser.add_argument("lists", nargs="+", type=str, help="Comma-separated lists.")
    args = parser.parse_args()
    processed_lists = [flatten_and_split(lst) for lst in args.lists]
    result = combine_and_uniquify(*processed_lists)
    print(result)


if __name__ == "__main__":
    main()
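
A worked example: names are title-cased, and a reversed duplicate such as "Smith John" is dropped when its counterpart is already kept (which of the two variants survives depends on set iteration order):

python merge-csv-lists.py "john smith, jane doe" "Smith John, alice"
Alice,Jane Doe,John Smith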

setvolumeto50.sh Executable file

@ -0,0 +1,2 @@
#!/usr/bin/env bash
amixer set Master,0 50% unmute

split_and_flat.py Normal file

@ -0,0 +1,38 @@
import sys


def split_and_flat(input_string):
    if "\n" in input_string:
        _without_newlines = input_string.split("\n")
        _without_empty = filter(lambda x: x != "", _without_newlines)
        input_string = ",".join(_without_empty)
    # Split the input string on commas
    comma_split = input_string.split(",")
    # Initialize an empty flat list
    flat_list = []
    # Iterate through the comma-separated values
    for item in comma_split:
        # Split each item on dashes
        dash_split = item.split("-")
        # Extend the flat list with the dash-separated values,
        # stripped here so no further cleanup pass is needed
        flat_list.extend([value.strip().replace("\n", "") for value in dash_split])
    return ",".join(flat_list)


if __name__ == "__main__":
    # Check if a single command-line argument is provided
    if len(sys.argv) != 2:
        print("Usage: python split_and_flat.py <input_string>")
        sys.exit(1)
    # Get the input string from the command-line argument
    input_string = sys.argv[1]
    # Call the split_and_flat function and print the result
    result = split_and_flat(input_string)
    print(result)
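
A worked example, per the script's own usage line:

python split_and_flat.py "Alpha - Beta, Gamma"
Alpha,Beta,Gamma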


@ -0,0 +1,43 @@
import json
import argparse
from datetime import datetime, timezone


def process_json_file(filename):
    # Read JSON data from file
    with open(filename, "r", encoding="utf-8") as file:
        data = json.load(file)
    # Process each game
    for game in data["response"]["games"]:
        # Convert playtime from minutes to hours
        game["playtime_forever"] /= 60
        # Convert Unix timestamp to readable date and time in UTC
        if game["rtime_last_played"] != 0:
            game["rtime_last_played"] = datetime.fromtimestamp(
                game["rtime_last_played"], timezone.utc
            ).strftime("%Y-%m-%d %H:%M:%S")
        else:
            game["rtime_last_played"] = "Not Played"
    # Return the modified data
    return data


def main():
    parser = argparse.ArgumentParser(
        description="Process a JSON file containing game data."
    )
    parser.add_argument("filename", help="JSON file to be processed")
    args = parser.parse_args()
    # Process the JSON file
    modified_data = process_json_file(args.filename)
    # Print the modified data
    print(json.dumps(modified_data, indent=4))


if __name__ == "__main__":
    main()
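
The script expects the shape returned by Steam's GetOwnedGames Web API; a minimal hypothetical input showing only the fields the script touches:

{"response": {"games": [{"appid": 70, "playtime_forever": 120, "rtime_last_played": 1665158662}]}}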

strip_track_except.sh Executable file

@ -0,0 +1,28 @@
#!/usr/bin/env bash
set -euo pipefail

colorprint() {
    echo -e "\e[31m${1}\e[0m"
}

SCRIPT_NAME=$(basename "$0")
# ${1-} avoids an unbound-variable error under 'set -u' when no argument is given
test -z "${1-}" && echo "File not specified or doesn't exist, aborting. Usage: $SCRIPT_NAME \$FILENAME \$LANGUAGE_TO_KEEP" && exit 1
FILENAME=$1
# language to keep is the second parameter, or Japanese if unspecified
LANGUAGE_TO_KEEP="${2:-jpn}"
echo -n "Processing "
colorprint "$FILENAME"
# capture JSON output and remove the excessively large codec_private_data field
JSON_OUTPUT=$(mkvmerge -J "$FILENAME" | jq '[ .tracks[] | select(.type=="audio") | del(.properties.codec_private_data) ]')
TRACK_COUNT=$(echo "$JSON_OUTPUT" | jq length)
# check if more than 1 audio track is present
[[ $TRACK_COUNT -lt 2 ]] && echo -e "\tFile has only 1 track, skipping." && exit 1
# check if tracks other than the one to keep are present
UNDESIRED_TRACK_COUNT=$(echo "$JSON_OUTPUT" | jq "[.[] | select(.properties.language!=\"${LANGUAGE_TO_KEEP}\")] | length")
[[ $UNDESIRED_TRACK_COUNT -eq 0 ]] && echo -e "\tFile has only '$LANGUAGE_TO_KEEP' tracks, skipping." && exit 1
FILENAME_EXT=${FILENAME##*.}
echo -e "\tRemoving all audio tracks except '$LANGUAGE_TO_KEEP' tracks..."
TMPFILE=$(mktemp --dry-run --suffix=".${FILENAME_EXT}")
echo -e "\tOutputting to temporary file ${TMPFILE}..."
# exit 1, not a bare exit, so a failed conversion is reported as an error
! pv "$FILENAME" | ffmpeg -i pipe:0 -map 0:v -map 0:a:m:language:"${LANGUAGE_TO_KEEP}" -map 0:s -c copy -v error "${TMPFILE}" && echo "Something went wrong, aborting." && exit 1
echo -e "\tReplacing ${FILENAME} with ${TMPFILE}..."
mv "${TMPFILE}" "$FILENAME"


@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 # usage: $scriptname $files $output_sub_format
 output_sub_format=${@: -1}
-for i in $@
+for i in "$@"
 do
 if [ ! -e "$i" ];
 then
@ -9,16 +9,16 @@ do
 continue
 fi
 index=$(ffprobe -v 8 -show_streams -select_streams s "$i" | grep index | cut --delimiter="=" -f 2)
-if [ -z $index ];
+if [ -z "$index" ];
 then
 echo "No subtitle streams found."
 exit
 fi
 ffprobe -v 8 -show_streams -select_streams "$i"
 filename=${i%.*}
-mkvextract "$i" tracks $index:"${filename}.en.ass"
+mkvextract "$i" tracks "$index":"${filename}.en.ass"
-if [ ! -z $1 ];
+if [ -n "$1" ];
 then
-subtitleedit /convert "${filename}.en.ass" $output_sub_format 2>/dev/null | grep --color=none ass
+subtitleedit /convert "${filename}.en.ass" "$output_sub_format" 2>/dev/null | grep --color=none ass
 fi
 done

video_convert_normalize Executable file

@ -0,0 +1,8 @@
#!/usr/bin/env bash
if [ ! -e "$1" ];
then
echo "File ${1} does not exist."
exit 1
fi
filename=${1%.*}
ffmpeg -i "$1" -metadata title="" -map 0:v -map 0:a:m:language:jpn -c:v copy -c:a ac3 "${filename}.converted.mkv"

watch-magnets.sh Executable file

@ -0,0 +1,5 @@
#!/usr/bin/env bash
# command -v notify-send 1>/dev/null || echo "The \"notify-send\" command is not available. Aborting." && exit 1
SAVEDIR=$(xdg-user-dir DOWNLOAD)
[[ "$1" =~ ^magnet ]] && notify-send -a "Clipboard Watcher" "Magnet URL copied to the clipboard."
aria2c -d "$SAVEDIR" --input-file <( echo "$1" ) --bt-metadata-only=true --bt-save-metadata=true
# TODO: spawn aria2c in a subshell in case a magnet link takes forever to load
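
A hypothetical invocation with a (truncated) magnet link; the script fetches and saves only the torrent metadata into the XDG download directory:

./watch-magnets.sh "magnet:?xt=urn:btih:..."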


@ -1,7 +1,8 @@
-#!/run/current-system/sw/bin/bash
+#!/usr/bin/env bash
-youtube-dl "$1" \
+yt-dlp "$1" \
 --download-archive "~/Videos/YT/history.conf" \
 --prefer-free-formats \
 --write-description \
---output "~/Videos/YT/%(uploader)s/%(upload_date)s - %(title)s.%(ext)s"
+--output "~/Videos/YT/%(uploader)s/%(upload_date)s - %(title)s.%(ext)s" \
+--cookies-from-browser vivaldi

yt-dlp-ntfy.sh Executable file

@ -0,0 +1,45 @@
#!/bin/bash
set -ueo pipefail
# Define PID file location
PID_FILE="/tmp/yt-dlp-ntfy.pid"
# Check if the script is already running
if [ -e "$PID_FILE" ] && kill -0 "$(cat "$PID_FILE")" &>/dev/null; then
    echo "Script is already running as PID $(cat "$PID_FILE"). Exiting."
    exit 1
fi
# Store the PID of the current process
echo $$ > "$PID_FILE"
echo "Running as PID $(cat "$PID_FILE")"
# Clean up the PID file on exit; the trap is registered early so it is
# actually in effect (placed after the infinite loop, it would never run)
trap 'rm -f "$PID_FILE"' EXIT
# Define ntfy server and channel
NTFY_SERVER="https://notify.kucharczyk.xyz"
CHANNEL="clipboard"
ACCESS_TOKEN="$NTFY_ACCESS_TOKEN"
echo "Monitoring channel $CHANNEL of server $NTFY_SERVER"
# Run an infinite loop to listen for new messages; curl sits inside the
# outer loop so that a dropped connection is actually re-established
while true; do
    while read -r message; do
        event=$(echo "$message" | jq -r '.event')
        [[ "$event" == "keepalive" ]] && continue
        video_url=$(echo "$message" | jq -r '.message')
        if [[ $video_url =~ ^https?:// ]]; then
            echo "Downloading video from $video_url"
            yt-dlp "$video_url"
            curl -s -H "Authorization: Bearer $ACCESS_TOKEN" -d "Finished downloading." "$NTFY_SERVER/$CHANNEL"
        fi
    done < <(curl --no-buffer -s -H "Authorization: Bearer $ACCESS_TOKEN" "$NTFY_SERVER/$CHANNEL/json")
    # Wait a bit before reconnecting if the connection drops
    echo "Reconnecting..."
    sleep 5
done
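
For reference, the messages the script parses come from ntfy's JSON stream endpoint; the events look roughly like this (illustrative values):

{"id":"h6Y2hKA5sy0U","time":1731784405,"event":"keepalive","topic":"clipboard"}
{"id":"k9ZnQ1b7xw2P","time":1731784440,"event":"message","topic":"clipboard","message":"https://example.com/watch?v=abc"}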

ytdlqueue.py Normal file

@ -0,0 +1,50 @@
#!/usr/bin/env python3
import subprocess
import sys
import time
from queue import Queue
from threading import Thread

# Queue to hold the URLs
download_queue = Queue()


def download_video(url):
    """
    Function to download a video using yt-dlp.
    """
    try:
        print(f"Downloading {url}...")
        subprocess.run(["yt-dlp", url], check=True)
        print(f"Finished downloading {url}")
    except subprocess.CalledProcessError as e:
        print(f"Failed to download {url}: {e}")


def worker():
    """
    Worker function to process items in the queue.
    """
    while True:
        url = download_queue.get()
        download_video(url)
        download_queue.task_done()


def main():
    # Start the worker thread
    thread = Thread(target=worker)
    thread.daemon = True
    thread.start()
    print("Enter URLs to download. Type 'exit' to quit.")
    while True:
        url = input("URL: ").strip()
        if url.lower() == 'exit':
            break
        download_queue.put(url)
    # Wait for all downloads to finish
    download_queue.join()


if __name__ == "__main__":
    main()
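
A hypothetical interactive session (downloader output interleaves with the prompt):

$ ./ytdlqueue.py
Enter URLs to download. Type 'exit' to quit.
URL: https://example.com/watch?v=abc
Downloading https://example.com/watch?v=abc...
URL: exit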