Compare commits
29 commits (df1f8f4889...main):

d3132b0ca3, b9f7855afc, eaa9059fb5, bb1eaa30ad, 7728ebfb16, 9a64735f3c,
0a9617626c, e547a58026, 64e999d93c, e309917948, e2dc9c8a8d, a7c1936acf,
7ff425cb2f, 523f959ad3, 163a53b972, f9faf4f8a4, 07430482d4, 68a60ea873,
019813cc30, 52becb8909, 7477c13c34, 03495ab43d, 9278aea593, 2e029290f9,
647c207091, d7c082249f, 8dcb77789d, c33104c552, 69255ee577
GetContainerLayerContents.ps1 (new file, 51 lines)
@@ -0,0 +1,51 @@
param(
    [Parameter(Mandatory=$true)]
    [string]$imageName
)

# Save the Docker image as a tarball
$imageTar = "$imageName.tar"
docker save -o $imageTar $imageName

# Create a temporary directory to extract layers
$tempDir = [System.IO.Path]::GetTempPath() + [System.Guid]::NewGuid().ToString()
New-Item -ItemType Directory -Force -Path $tempDir

# Extract the image tarball using 'tar' command
tar -xf $imageTar -C $tempDir

# Function to list files in a layer
function ListLayerFiles {
    param (
        [string]$layerPath
    )

    $layerTar = Get-ChildItem -Path $layerPath -Filter "*.tar" -File
    $layerDir = [System.IO.Path]::Combine($tempDir, [System.IO.Path]::GetRandomFileName())
    New-Item -ItemType Directory -Force -Path $layerDir

    # Extract the layer tarball using 'tar'
    tar -xf $layerTar.FullName -C $layerDir

    # List files in the layer
    Write-Host "Files in layer:"
    Get-ChildItem -Path $layerDir -Recurse
}

# List all layers and ask user to choose one
$layerDirs = Get-ChildItem -Path $tempDir -Directory
$layerDirs | ForEach-Object -Begin { $i = 0 } -Process {
    Write-Host "[$i]: $($_.Name)"
    $i++
}

$userChoice = [int](Read-Host "Enter the index of the layer to list")
if ($userChoice -lt $layerDirs.Count -and $userChoice -ge 0) {
    ListLayerFiles -layerPath $layerDirs[$userChoice].FullName
} else {
    Write-Host "Invalid index selected."
}

# Cleanup
Remove-Item -Path $imageTar
Remove-Item -Path $tempDir -Recurse
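A minimal invocation sketch, assuming Docker and PowerShell are available; the image name is just an example:

# Dump the layer list of a locally available image and pick one interactively.
pwsh ./GetContainerLayerContents.ps1 -imageName alpine:latest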
bullet_list_to_unique_list.py (new file, 39 lines)
@@ -0,0 +1,39 @@
#!/usr/bin/python

import sys
import io


def extract_unique_values(input_string):
    # Split the input string by newline to get the list of entries
    input_list = input_string.strip().split("\n")

    # Extract values from each entry in the format $VALUE1 - $VALUE2
    values = [item.strip() for entry in input_list for item in entry.split("-")]

    # Remove duplicates by converting to a set and back to a list
    unique_values = list(set(values))

    # Sort the list
    sorted_values = sorted(unique_values)

    # Convert the list back to a comma-separated string
    output = ",".join(sorted_values)

    return output


def main():
    sys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding="utf-8")
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
    # Read the input from standard input
    input_string = sys.stdin.read()

    # Extract unique values
    result = extract_unique_values(input_string)

    print(result)


if __name__ == "__main__":
    main()
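A usage sketch, with made-up input in the "$VALUE1 - $VALUE2" shape the script expects:

# Dash-separated pairs on stdin come out deduplicated, sorted, comma-joined.
printf 'Alice - Bob\nBob - Carol\n' | python3 bullet_list_to_unique_list.py
# Expected output: Alice,Bob,Carol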
comment_fs/comments_fs.py (new file, 104 lines)
@@ -0,0 +1,104 @@
#!/usr/bin/env python3

import os
import stat
import errno
import fuse
from time import time
import json
from collections import defaultdict

fuse.fuse_python_api = (0, 2)

class MyStat(fuse.Stat):
    def __init__(self):
        self.st_mode = stat.S_IFDIR | 0o755
        self.st_ino = 0
        self.st_dev = 0
        self.st_nlink = 2
        self.st_uid = 0
        self.st_gid = 0
        self.st_size = 4096
        self.st_atime = 0
        self.st_mtime = 0
        self.st_ctime = 0

class CommentFS(fuse.Fuse):
    def __init__(self, *args, **kw):
        fuse.Fuse.__init__(self, *args, **kw)

        with open('comments.jsonl', 'r', encoding='utf-8') as f:
            self.comments = [json.loads(line) for line in f]

        self.tree = self.build_comment_tree(self.comments)
        self.files = {}
        self.build_file_structure()

    def build_comment_tree(self, comments):
        tree = defaultdict(list)
        for comment in comments:
            parent = comment['parent'] if comment['parent'] != 'root' else ''
            tree[parent].append(comment)
        return tree

    def build_file_structure(self):
        def add_comment(comment, path):
            comment_path = os.path.join(path, comment['id'])
            self.files[comment_path] = comment
            for reply in self.tree.get(comment['id'], []):
                add_comment(reply, comment_path)

        for comment in self.tree['']:
            add_comment(comment, '/')

    def getattr(self, path):
        st = MyStat()
        st.st_atime = int(time())
        st.st_mtime = st.st_atime
        st.st_ctime = st.st_atime

        if path == '/':
            return st
        elif path in self.files:
            st.st_mode = stat.S_IFREG | 0o444
            st.st_nlink = 1
            content = f"ID: {self.files[path]['id']}\nText: {self.files[path]['text']}\nParent: {self.files[path]['parent']}\n"
            st.st_size = len(content.encode('utf-8'))
            return st
        else:
            return -errno.ENOENT

    def readdir(self, path, offset):
        dirents = ['.', '..']
        if path == '/':
            dirents.extend(comment['id'] for comment in self.tree[''])
        elif path[1:] in [comment['id'] for comment in self.comments]:
            dirents.extend(reply['id'] for reply in self.tree.get(path[1:], []))

        for r in dirents:
            yield fuse.Direntry(r)

    def open(self, path, flags):
        if path not in self.files:
            return -errno.ENOENT
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            return -errno.EACCES
        return 0

    def read(self, path, size, offset):
        if path not in self.files:
            return -errno.ENOENT
        comment = self.files[path]
        content = f"ID: {comment['id']}\nText: {comment['text']}\nParent: {comment['parent']}\n"
        return content.encode('utf-8')[offset:offset+size]

def main():
    usage = "YouTubeCommentFS: A filesystem to browse YouTube comments"
    server = CommentFS(version="%prog " + fuse.__version__, usage=usage, dash_s_do='setsingle')
    server.parse(errex=1)
    server.main()

if __name__ == '__main__':
    main()
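A mount sketch, assuming the python-fuse bindings are installed and a comments.jsonl sits in the working directory; the mountpoint and comment ID are placeholders:

mkdir -p /tmp/comments
python3 comments_fs.py /tmp/comments
ls /tmp/comments              # top-level comment IDs appear as entries
cat /tmp/comments/SOME_ID     # prints the ID, text, and parent fields
fusermount -u /tmp/comments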
comment_fs/comments_fs2.py (new file, 117 lines)
@@ -0,0 +1,117 @@
#!/usr/bin/env python3

import os
import stat
import errno
import fuse
from time import time
import json
from collections import defaultdict

fuse.fuse_python_api = (0, 2)

class MyStat(fuse.Stat):
    def __init__(self):
        self.st_mode = stat.S_IFDIR | 0o755
        self.st_ino = 0
        self.st_dev = 0
        self.st_nlink = 2
        self.st_uid = 0
        self.st_gid = 0
        self.st_size = 4096
        self.st_atime = 0
        self.st_mtime = 0
        self.st_ctime = 0

class CommentFS(fuse.Fuse):
    def __init__(self, *args, **kw):
        fuse.Fuse.__init__(self, *args, **kw)

        with open('comments.jsonl', 'r', encoding='utf-8') as f:
            self.comments = [json.loads(line) for line in f]

        self.tree = self.build_comment_tree(self.comments)
        self.files = {}
        self.directories = set()
        self.build_file_structure()

    def build_comment_tree(self, comments):
        tree = defaultdict(list)
        for comment in comments:
            parent = comment['parent'] if comment['parent'] != 'root' else ''
            tree[parent].append(comment)
        return tree

    def build_file_structure(self):
        def add_comment(comment, path):
            comment_path = os.path.join(path, comment['id'])
            self.files[comment_path] = comment
            if comment['id'] in self.tree:
                self.directories.add(comment_path)
                parent_file_path = os.path.join(comment_path, 'parent')
                self.files[parent_file_path] = comment
                for reply in self.tree[comment['id']]:
                    add_comment(reply, comment_path)

        for comment in self.tree['']:
            add_comment(comment, '/')

    def getattr(self, path):
        st = MyStat()
        st.st_atime = int(time())
        st.st_mtime = st.st_atime
        st.st_ctime = st.st_atime

        if path == '/' or path in self.directories:
            st.st_mode = stat.S_IFDIR | 0o755
            return st
        elif path in self.files:
            st.st_mode = stat.S_IFREG | 0o444
            st.st_nlink = 1
            content = f"ID: {self.files[path]['id']}\nText: {self.files[path]['text']}\nParent: {self.files[path]['parent']}\n"
            st.st_size = len(content.encode('utf-8'))
            return st
        else:
            return -errno.ENOENT

    def readdir(self, path, offset):
        dirents = ['.', '..']
        if path == '/':
            dirents.extend(comment['id'] for comment in self.tree[''])
        elif path in self.directories:
            dirents.append('parent')
            dirents.extend(reply['id'] for reply in self.tree[path.split('/')[-1]])

        for r in dirents:
            yield fuse.Direntry(r)

    def open(self, path, flags):
        if path not in self.files:
            return -errno.ENOENT
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            return -errno.EACCES
        return 0

    def read(self, path, size, offset):
        if path not in self.files:
            return -errno.ENOENT
        comment = self.files[path]
        content = f"ID: {comment['id']}\nText: {comment['text']}\nParent: {comment['parent']}\n"
        return content.encode('utf-8')[offset:offset+size]

def main():
    usage = "YouTubeCommentFS: A filesystem to browse YouTube comments"
    server = CommentFS(version="%prog " + fuse.__version__, usage=usage, dash_s_do='setsingle')
    server.parser.add_option(mountopt="uid", metavar="UID", default=os.getuid(),
                             help="Set the owner of the mounted filesystem")
    server.parser.add_option(mountopt="gid", metavar="GID", default=os.getgid(),
                             help="Set the group of the mounted filesystem")
    server.multithreaded = False
    server.allow_other = True
    server.parse(errex=1)
    server.main()

if __name__ == '__main__':
    main()
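The second version registers uid/gid mount options and sets allow_other; a hedged sketch of passing them through -o (allow_other additionally requires user_allow_other in /etc/fuse.conf):

# Hypothetical mount handing the registered uid/gid options to the filesystem.
python3 comments_fs2.py -o uid=$(id -u),gid=$(id -g) /tmp/comments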
comment_fs/create_comment_tree.py (new file, 37 lines)
@@ -0,0 +1,37 @@
#!/usr/bin/env python3
import json
from collections import defaultdict


def build_comment_tree(comments):
    tree = defaultdict(list)
    root_comments = []

    print(f"Total comments: {len(comments)}")  # Debug info

    for comment in comments:
        if comment['parent'] == "root":
            root_comments.append(comment)
        else:
            tree[comment['parent']].append(comment)

    print(f"Root comments: {len(root_comments)}")  # Debug info

    def build_subtree(comment):
        return {
            #"id": comment['id'],
            "text": comment['text'],
            "replies": [build_subtree(reply) for reply in tree[comment['id']]]
        }

    return [build_subtree(comment) for comment in root_comments]


with open('comments.jsonl', 'r', encoding='utf-8') as f:
    comments = [json.loads(line) for line in f]

comment_tree = build_comment_tree(comments)

print(f"Final tree length: {len(comment_tree)}")  # Debug info

with open('comment_tree.json', 'w') as f:
    json.dump(comment_tree, f, ensure_ascii=False, indent=2)
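A run sketch; the jq probe afterwards is just one way to inspect the result:

python3 create_comment_tree.py                 # reads comments.jsonl, writes comment_tree.json
jq '.[0].replies | length' comment_tree.json   # e.g. reply count of the first root comment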
convert_to_flac.sh (new executable file, 2 lines)
@@ -0,0 +1,2 @@
#!/usr/bin/env bash
for i in *.m4a; do ffmpeg -i "$i" -c:a flac "${i%.*}.flac"; done
@@ -21,3 +21,6 @@ else
 echo "$ORIGINAL does not exist."
 exit 1
 fi
+
+# TODO: use below to create "copy all wallpapers" function
+# for file in $HOME/.cache/plasma_engine_potd/*; do bfile=$(basename $file); cp "$file" "$HOME/Pictures/wallpapers/${bfile}_${TODAY}.jpg"; done
@@ -22,7 +22,7 @@ for TRACK in $TRACKS; do
 [[ $TRACK_NAME == "null" ]] && echo "has no title, skipping." && continue
 echo -n "currently set to $TRACK_NAME... "
 shopt -s nocasematch
-[[ $TRACK_NAME =~ full|\stitles|comment|director|sdh|sign ]] && echo "includes whitelisted word, not deleting." && continue
+[[ $TRACK_NAME =~ full|\stitles|comment|director|sdh|sign|forced ]] && echo "includes whitelisted word, not deleting." && continue
 echo -n "deleting... "
 # mkvmerge --identify indexes tracks from 0, while mkvpropedit indexes from 1
 mkvpropedit "$FILE" --edit track:"$((TRACK+1))" --delete name &>/dev/null
embed_lyrics (new file, 5 lines)
@@ -0,0 +1,5 @@
#!/bin/fish
metaflac \
--set-tag-from-file=LYRICS=\
(env FZF_DEFAULT_COMMAND="fdfind . /srv/mergerfs/storage/download/music -e lrc" fzf) \
(env FZF_DEFAULT_COMMAND="fdfind . /srv/mergerfs/storage/media/music2/ -e flac" fzf)
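For reference, the non-interactive shape of the same call, with placeholder file names standing in for the two fzf picks:

# The first path is the .lrc lyrics file, the second the .flac receiving the LYRICS tag.
metaflac --set-tag-from-file=LYRICS=song.lrc song.flac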
merge-csv-lists.py (new file, 45 lines)
@@ -0,0 +1,45 @@
#!/usr/bin/python
import argparse


def flatten_and_split(input_string):
    elements = input_string.replace("\n", "").split(",")
    flat_list = [item.strip() for element in elements for item in element.split("-")]
    return ",".join(flat_list)


def combine_and_uniquify(*inputs):
    combined_list = [
        item.strip().title() for input_list in inputs for item in input_list.split(",")
    ]
    unique_names = set(combined_list)

    final_set = set()
    for name in unique_names:
        parts = name.split()
        if len(parts) == 2:
            first, last = parts
            reversed_name = f"{last} {first}"
            # Add the name if its reversed variant is not already in the final set
            if reversed_name not in final_set:
                final_set.add(name)
        else:
            final_set.add(name)

    return ",".join(sorted(final_set))


def main():
    parser = argparse.ArgumentParser(
        description="Combine multiple comma-separated lists into one unique sorted list."
    )
    parser.add_argument("lists", nargs="+", type=str, help="Comma-separated lists.")
    args = parser.parse_args()

    processed_lists = [flatten_and_split(lst) for lst in args.lists]
    result = combine_and_uniquify(*processed_lists)
    print(result)


if __name__ == "__main__":
    main()
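A hypothetical invocation with two overlapping lists; names are title-cased, deduplicated, and sorted, and of a "First Last"/"Last First" pair only one survives (which one depends on set iteration order):

python3 merge-csv-lists.py "jane doe,John Smith" "Doe Jane,alice brown"
# Possible output: Alice Brown,Jane Doe,John Smith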
setvolumeto50.sh (new executable file, 2 lines)
@@ -0,0 +1,2 @@
#!/usr/bin/env bash
amixer set Master,0 50% unmute
split_and_flat.py (new file, 38 lines)
@@ -0,0 +1,38 @@
import sys


def split_and_flat(input_string):
    if "\n" in input_string:
        _without_newlines = input_string.split("\n")
        _without_empty = filter(lambda x: x != "", _without_newlines)
        input_string = ",".join(_without_empty)
    # Split the input string on commas
    comma_split = input_string.split(",")

    # Initialize an empty flat list
    flat_list = []

    # Iterate through the comma-separated values
    for item in comma_split:
        # Split each item on dashes
        dash_split = item.split("-")

        # Extend the flat list with the dash-separated values
        flat_list.extend([value.strip().replace("\n", "") for value in dash_split])

    flat_list = [x.strip() for x in flat_list]
    return ",".join(flat_list)


if __name__ == "__main__":
    # Check if a single command-line argument is provided
    if len(sys.argv) != 2:
        print("Usage: python split_and_flat.py <input_string>")
        sys.exit(1)

    # Get the input string from the command-line argument
    input_string = sys.argv[1]

    # Call the split_and_flat function and print the result
    result = split_and_flat(input_string)
    print(result)
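A usage sketch: commas, dashes, and newlines all act as separators, and the pieces come back as one flat comma-separated string:

python3 split_and_flat.py $'a-b, c\nd-e'
# Output: a,b,c,d,e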
steam_owned_games_readable.py (new file, 43 lines)
@@ -0,0 +1,43 @@
import json
import argparse
from datetime import datetime, timezone


def process_json_file(filename):
    # Read JSON data from file
    with open(filename, "r") as file:
        data = json.load(file)

    # Process each game
    for game in data["response"]["games"]:
        # Convert playtime from minutes to hours
        game["playtime_forever"] /= 60

        # Convert Unix timestamp to readable date and time in UTC
        if game["rtime_last_played"] != 0:
            game["rtime_last_played"] = datetime.fromtimestamp(
                game["rtime_last_played"], timezone.utc
            ).strftime("%Y-%m-%d %H:%M:%S")
        else:
            game["rtime_last_played"] = "Not Played"

    # Return the modified data
    return data


def main():
    parser = argparse.ArgumentParser(
        description="Process a JSON file containing game data."
    )
    parser.add_argument("filename", help="JSON file to be processed")
    args = parser.parse_args()

    # Process the JSON file
    modified_data = process_json_file(args.filename)

    # Print the modified data
    print(json.dumps(modified_data, indent=4))


if __name__ == "__main__":
    main()
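A usage sketch; the input file name is hypothetical and would hold a response from Steam's IPlayerService/GetOwnedGames Web API endpoint saved to disk:

python3 steam_owned_games_readable.py owned_games.json > owned_games_readable.json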
@@ -17,8 +17,12 @@ TRACK_COUNT=$(echo "$JSON_OUTPUT" | jq length)
 # check if more than 1 audio track is present
 [[ $TRACK_COUNT -lt 2 ]] && echo -e "\tFile has only 1 track, skipping." && exit 1
 # check if tracks other than the one to keep are present
-UNDESIRED_TRACK_COUNT=$(echo "$JSON_OUTPUT" | jq '[.[] | select(.properties.language!="$LANGUAGE_TO_KEEP")] | length')
-[[ $TRACK_COUNT -lt 2 ]] && echo -e "\tFile has only '$LANGUAGE_TO_KEEP' tracks, skipping." && exit 1
-FILENAME_WITHOUT_EXT=${FILENAME%.*}
+UNDESIRED_TRACK_COUNT=$(echo "$JSON_OUTPUT" | jq "[.[] | select(.properties.language!=\"${LANGUAGE_TO_KEEP}\")] | length")
+[[ $UNDESIRED_TRACK_COUNT -eq 0 ]] && echo -e "\tFile has only '$LANGUAGE_TO_KEEP' tracks, skipping." && exit 1
+FILENAME_EXT=${FILENAME##*.}
 echo -e "\tRemoving all audio tracks except '$LANGUAGE_TO_KEEP' tracks..."
-pv "$FILENAME" | ffmpeg -i pipe:0 -map 0:v -map 0:a:m:language:${LANGUAGE_TO_KEEP} -map 0:s -c copy -v warning "${FILENAME_WITHOUT_EXT}.converted.mkv"
+TMPFILE=$(mktemp --dry-run --suffix=.${FILENAME_EXT})
+echo -e "\tOutputting to temporary file ${TMPFILE}..."
+! pv "$FILENAME" | ffmpeg -i pipe:0 -map 0:v -map 0:a:m:language:${LANGUAGE_TO_KEEP} -map 0:s -c copy -v error "${TMPFILE}" && echo "Something went wrong, aborting." && exit
+echo -e "\tReplacing ${FILENAME} with ${TMPFILE}..."
+mv "${TMPFILE}" "$FILENAME"
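The quoting change is the substance of this hunk: inside single quotes the shell never expands $LANGUAGE_TO_KEEP, so jq compared every track against the literal text "$LANGUAGE_TO_KEEP" and counted all tracks as undesired. An illustrative before/after (jq's --arg flag would avoid the escaping entirely):

LANGUAGE_TO_KEEP=eng
echo '[{"properties":{"language":"eng"}}]' | jq '[.[] | select(.properties.language!="$LANGUAGE_TO_KEEP")] | length'    # prints 1: literal comparison
echo '[{"properties":{"language":"eng"}}]' | jq "[.[] | select(.properties.language!=\"${LANGUAGE_TO_KEEP}\")] | length"  # prints 0: the variable expands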
watch-magnets.sh (new executable file, 5 lines)
@@ -0,0 +1,5 @@
# command -v notify-send 1>/dev/null || echo "The \"notify-send\" command is not available. Aborting." && exit 1
SAVEDIR=$(xdg-user-dir DOWNLOAD)
[[ "$1" =~ ^magnet ]] && notify-send -a "Clipboard Watcher" "Magnet URL copied to the clipboard."
aria2c -d "$SAVEDIR" --input-file <( echo "$1" ) --bt-metadata-only=true --bt-save-metadata=true
# TODO: spawn aria2c in a subshell in case a magnet link takes forever to load
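One hypothetical way to drive this from an actual clipboard watcher on X11 (clipnotify and xclip are assumed to be installed; they are not part of this repo):

# Re-run the script with the clipboard contents whenever the clipboard changes.
while clipnotify; do ./watch-magnets.sh "$(xclip -selection clipboard -o)"; done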
@@ -1,7 +1,8 @@
-#!/run/current-system/sw/bin/bash
+#!/usr/bin/env bash
 
-youtube-dl "$1" \
+yt-dlp "$1" \
 --download-archive "~/Videos/YT/history.conf" \
 --prefer-free-formats \
 --write-description \
---output "~/Videos/YT/%(uploader)s/%(upload_date)s - %(title)s.%(ext)s"
+--output "~/Videos/YT/%(uploader)s/%(upload_date)s - %(title)s.%(ext)s" \
+--cookies-from-browser vivaldi
yt-dlp-ntfy.sh (new executable file, 45 lines)
@@ -0,0 +1,45 @@
#!/bin/bash
set -ueo pipefail

# Define PID file location
PID_FILE="/tmp/yt-dlp-ntfy.pid"

# Check if the script is already running
if [ -e "$PID_FILE" ] && kill -0 $(cat "$PID_FILE") &>/dev/null; then
    echo "Script is already running as PID $(cat $PID_FILE). Exiting."
    exit 1
fi

# Store the PID of the current process
echo $$ > "$PID_FILE"
echo "Running as PID $(cat $PID_FILE)"

# Cleanup PID file on script exit
trap 'rm -f $PID_FILE' EXIT

# Define ntfy server and channel
NTFY_SERVER="https://notify.kucharczyk.xyz"
CHANNEL="clipboard"
ACCESS_TOKEN="$NTFY_ACCESS_TOKEN"

echo "Monitoring channel $CHANNEL of server $NTFY_SERVER"

# Run the script in an infinite loop to listen for new messages
while true; do
    while read -r message; do
        event=$(echo "$message" | jq -r '.event')
        [[ "$event" == "keepalive" ]] && continue
        video_url=$(echo "$message" | jq -r '.message')

        if [[ $video_url =~ ^https?:// ]]; then
            echo "Downloading video from $video_url"
            yt-dlp "$video_url"
            curl -s -H "Authorization: Bearer $ACCESS_TOKEN" -d "Finished downloading." "$NTFY_SERVER/$CHANNEL"
        fi
    done < <(curl --no-buffer -s -H "Authorization: Bearer $ACCESS_TOKEN" "$NTFY_SERVER/$CHANNEL/json")

    # Wait a bit before reconnecting if the connection drops
    echo "Reconnecting..."
    sleep 5
done
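A hedged example of triggering the listener from anywhere: publish a URL to the same channel (server and channel as configured above; the video URL is arbitrary):

curl -s -H "Authorization: Bearer $NTFY_ACCESS_TOKEN" \
  -d "https://www.youtube.com/watch?v=dQw4w9WgXcQ" \
  https://notify.kucharczyk.xyz/clipboard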
ytdlqueue.py (new file, 50 lines)
@@ -0,0 +1,50 @@
#!/usr/bin/env python3

import subprocess
import sys
import time
from queue import Queue
from threading import Thread

# Queue to hold the URLs
download_queue = Queue()

def download_video(url):
    """
    Function to download a video using yt-dlp.
    """
    try:
        print(f"Downloading {url}...")
        subprocess.run(["yt-dlp", url], check=True)
        print(f"Finished downloading {url}")
    except subprocess.CalledProcessError as e:
        print(f"Failed to download {url}: {e}")

def worker():
    """
    Worker function to process items in the queue.
    """
    while True:
        url = download_queue.get()
        download_video(url)
        download_queue.task_done()

def main():
    # Start the worker thread
    thread = Thread(target=worker)
    thread.daemon = True
    thread.start()

    print("Enter URLs to download. Type 'exit' to quit.")
    while True:
        url = input("URL: ").strip()
        if url.lower() == 'exit':
            break
        download_queue.put(url)

    # Wait for all downloads to finish
    download_queue.join()

if __name__ == "__main__":
    main()
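A sketch of an interactive session (the URL is an example); queued URLs download one at a time in the worker thread while the prompt stays responsive:

$ python3 ytdlqueue.py
Enter URLs to download. Type 'exit' to quit.
URL: https://www.youtube.com/watch?v=dQw4w9WgXcQ
URL: exit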