Compare commits

...

15 Commits

10 changed files with 469 additions and 37 deletions

View File

@ -0,0 +1,51 @@
param(
    [Parameter(Mandatory=$true)]
    [string]$imageName
)

# Export the Docker image to a tarball so its layers can be inspected offline.
$imageTar = "$imageName.tar"
docker save -o $imageTar $imageName

# Unique temp directory so concurrent runs cannot collide.
$tempDir = [System.IO.Path]::GetTempPath() + [System.Guid]::NewGuid().ToString()
New-Item -ItemType Directory -Force -Path $tempDir | Out-Null

# Extract and list the files contained in a single layer directory.
# NOTE(review): assumes each layer directory holds one *.tar archive (the
# classic `docker save` layout) -- confirm against newer OCI layouts where
# layer blobs live under blobs/sha256.
function ListLayerFiles {
    param (
        [string]$layerPath
    )
    $layerTar = Get-ChildItem -Path $layerPath -Filter "*.tar" -File | Select-Object -First 1
    $layerDir = [System.IO.Path]::Combine($tempDir, [System.IO.Path]::GetRandomFileName())
    New-Item -ItemType Directory -Force -Path $layerDir | Out-Null
    # Extract the layer tarball using 'tar'
    tar -xf $layerTar.FullName -C $layerDir
    # List files in the layer
    Write-Host "Files in layer:"
    Get-ChildItem -Path $layerDir -Recurse
}

try {
    # Extract the image tarball (manifest, config, per-layer archives).
    tar -xf $imageTar -C $tempDir

    # Present the layer directories and let the user pick one by index.
    $layerDirs = Get-ChildItem -Path $tempDir -Directory
    $layerDirs | ForEach-Object -Begin { $i = 0 } -Process {
        Write-Host "[$i]: $($_.Name)"
        $i++
    }
    $userChoice = Read-Host "Enter the index of the layer to list"

    # BUG FIX: Read-Host returns a string, and `string -lt int` coerces the
    # right side to string, giving lexicographic comparison ("10" -lt "2" is
    # true). Parse to an int first and validate the numeric range.
    $index = 0
    if ([int]::TryParse($userChoice, [ref]$index) -and $index -ge 0 -and $index -lt $layerDirs.Count) {
        ListLayerFiles -layerPath $layerDirs[$index].FullName
    } else {
        Write-Host "Invalid index selected."
    }
} finally {
    # Cleanup runs even if extraction or listing failed.
    Remove-Item -Path $imageTar -ErrorAction SilentlyContinue
    Remove-Item -Path $tempDir -Recurse -ErrorAction SilentlyContinue
}

104
comment_fs/comments_fs.py Normal file
View File

@ -0,0 +1,104 @@
#!/usr/bin/env python3
import os
import stat
import errno
import fuse
from time import time
import json
from collections import defaultdict
fuse.fuse_python_api = (0, 2)
class MyStat(fuse.Stat):
    """stat(2) result holder, pre-populated as a root-owned 0755 directory."""

    def __init__(self):
        self.st_mode = stat.S_IFDIR | 0o755
        self.st_nlink = 2
        self.st_size = 4096
        # Everything else starts zeroed: inode/device numbers, owner ids and
        # all three timestamps (the filesystem fills timestamps per request).
        for zeroed_field in ("st_ino", "st_dev", "st_uid", "st_gid",
                             "st_atime", "st_mtime", "st_ctime"):
            setattr(self, zeroed_field, 0)
class CommentFS(fuse.Fuse):
    """Read-only FUSE filesystem exposing a comment thread as files.

    Every comment becomes a path named after its id; reading that path
    yields a small "ID/Text/Parent" summary. Comments whose 'parent' field
    is 'root' sit at the filesystem root.
    """

    def __init__(self, *args, **kw):
        fuse.Fuse.__init__(self, *args, **kw)
        # comments.jsonl is resolved against the CWD at mount time; one JSON
        # object per line with at least 'id', 'text' and 'parent' keys (the
        # keys this class reads below).
        with open('comments.jsonl', 'r', encoding='utf-8') as f:
            self.comments = [json.loads(line) for line in f]
        self.tree = self.build_comment_tree(self.comments)
        # Maps full path -> comment dict for every registered comment.
        self.files = {}
        self.build_file_structure()

    def build_comment_tree(self, comments):
        # Map parent id -> list of child comments; top-level comments
        # (parent == 'root') are stored under the '' key.
        tree = defaultdict(list)
        for comment in comments:
            parent = comment['parent'] if comment['parent'] != 'root' else ''
            tree[parent].append(comment)
        return tree

    def build_file_structure(self):
        # Register every comment at /<id>/<reply id>/... recursively.
        def add_comment(comment, path):
            comment_path = os.path.join(path, comment['id'])
            self.files[comment_path] = comment
            for reply in self.tree.get(comment['id'], []):
                add_comment(reply, comment_path)
        for comment in self.tree['']:
            add_comment(comment, '/')

    def getattr(self, path):
        # Stat handler: '/' is a directory; every known comment path is a
        # read-only regular file sized to its rendered content.
        st = MyStat()
        st.st_atime = int(time())
        st.st_mtime = st.st_atime
        st.st_ctime = st.st_atime
        if path == '/':
            return st
        elif path in self.files:
            st.st_mode = stat.S_IFREG | 0o444
            st.st_nlink = 1
            # Size must match exactly what read() renders, or reads truncate.
            content = f"ID: {self.files[path]['id']}\nText: {self.files[path]['text']}\nParent: {self.files[path]['parent']}\n"
            st.st_size = len(content.encode('utf-8'))
            return st
        else:
            return -errno.ENOENT

    def readdir(self, path, offset):
        # NOTE(review): getattr() reports every comment path as a regular
        # file, so the reply-listing branch below is unreachable via normal
        # directory traversal; comments_fs2.py appears to address this by
        # tracking directories explicitly. Also, path[1:] keeps interior
        # slashes, so only single-component paths can ever match an id.
        dirents = ['.', '..']
        if path == '/':
            dirents.extend(comment['id'] for comment in self.tree[''])
        elif path[1:] in [comment['id'] for comment in self.comments]:
            dirents.extend(reply['id'] for reply in self.tree.get(path[1:], []))
        for r in dirents:
            yield fuse.Direntry(r)

    def open(self, path, flags):
        # Permit only read-only opens of known comment files.
        if path not in self.files:
            return -errno.ENOENT
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            return -errno.EACCES
        return 0

    def read(self, path, size, offset):
        # Render the comment on every read and return the requested slice.
        if path not in self.files:
            return -errno.ENOENT
        comment = self.files[path]
        content = f"ID: {comment['id']}\nText: {comment['text']}\nParent: {comment['parent']}\n"
        return content.encode('utf-8')[offset:offset+size]
def main():
    """Parse the FUSE command line and mount the comment filesystem."""
    usage = "YouTubeCommentFS: A filesystem to browse YouTube comments"
    fs = CommentFS(version="%prog " + fuse.__version__, usage=usage,
                   dash_s_do='setsingle')
    fs.parse(errex=1)
    fs.main()


if __name__ == '__main__':
    main()

117
comment_fs/comments_fs2.py Normal file
View File

@ -0,0 +1,117 @@
#!/usr/bin/env python3
import os
import stat
import errno
import fuse
from time import time
import json
from collections import defaultdict
fuse.fuse_python_api = (0, 2)
class MyStat(fuse.Stat):
    """Stat buffer with directory defaults (mode 0755, nlink 2, size 4096)."""

    def __init__(self):
        self.st_mode = stat.S_IFDIR | 0o755
        self.st_nlink = 2
        self.st_size = 4096
        # Remaining fields default to zero; the filesystem overwrites the
        # timestamps on every getattr() call.
        self.st_ino = self.st_dev = 0
        self.st_uid = self.st_gid = 0
        self.st_atime = self.st_mtime = self.st_ctime = 0
class CommentFS(fuse.Fuse):
    """Read-only FUSE filesystem exposing a comment thread.

    A comment that has replies becomes a directory holding a synthetic
    'parent' file (the comment's own text) plus one entry per reply; a
    leaf comment is a plain read-only file. File content is a small
    "ID/Text/Parent" summary.
    """

    def __init__(self, *args, **kw):
        fuse.Fuse.__init__(self, *args, **kw)
        # comments.jsonl is read from the CWD at mount time; one JSON object
        # per line with 'id', 'text' and 'parent' keys.
        with open('comments.jsonl', 'r', encoding='utf-8') as f:
            self.comments = [json.loads(line) for line in f]
        self.tree = self.build_comment_tree(self.comments)
        self.files = {}           # path -> comment dict (leaf and 'parent' paths)
        self.directories = set()  # comment paths that have replies
        self.build_file_structure()

    def build_comment_tree(self, comments):
        # Map parent id -> list of replies; top-level comments
        # (parent == 'root') live under the '' key.
        tree = defaultdict(list)
        for comment in comments:
            parent = comment['parent'] if comment['parent'] != 'root' else ''
            tree[parent].append(comment)
        return tree

    def build_file_structure(self):
        # Register each comment at /<id>/<reply id>/...; a comment with
        # replies becomes a directory whose own text is reachable through a
        # nested 'parent' file.
        def add_comment(comment, path):
            comment_path = os.path.join(path, comment['id'])
            self.files[comment_path] = comment
            # Membership test on the defaultdict does NOT insert a key, so
            # only ids that actually have replies become directories.
            if comment['id'] in self.tree:
                self.directories.add(comment_path)
                # NOTE(review): a reply whose id is literally 'parent' would
                # collide with this synthetic file.
                parent_file_path = os.path.join(comment_path, 'parent')
                self.files[parent_file_path] = comment
                for reply in self.tree[comment['id']]:
                    add_comment(reply, comment_path)
        for comment in self.tree['']:
            add_comment(comment, '/')

    def getattr(self, path):
        # Stat handler: root and reply-holding comments are directories;
        # every other known path is a read-only regular file.
        st = MyStat()
        st.st_atime = int(time())
        st.st_mtime = st.st_atime
        st.st_ctime = st.st_atime
        if path == '/' or path in self.directories:
            st.st_mode = stat.S_IFDIR | 0o755
            return st
        elif path in self.files:
            st.st_mode = stat.S_IFREG | 0o444
            st.st_nlink = 1
            # Size must match what read() renders, or reads truncate.
            content = f"ID: {self.files[path]['id']}\nText: {self.files[path]['text']}\nParent: {self.files[path]['parent']}\n"
            st.st_size = len(content.encode('utf-8'))
            return st
        else:
            return -errno.ENOENT

    def readdir(self, path, offset):
        # Directory listing: root lists top-level comments; a comment
        # directory lists its 'parent' file plus its replies. The last path
        # component is the comment id. NOTE(review): indexing the
        # defaultdict here silently inserts an empty list for unknown ids.
        dirents = ['.', '..']
        if path == '/':
            dirents.extend(comment['id'] for comment in self.tree[''])
        elif path in self.directories:
            dirents.append('parent')
            dirents.extend(reply['id'] for reply in self.tree[path.split('/')[-1]])
        for r in dirents:
            yield fuse.Direntry(r)

    def open(self, path, flags):
        # Permit only read-only opens of known comment files.
        if path not in self.files:
            return -errno.ENOENT
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            return -errno.EACCES
        return 0

    def read(self, path, size, offset):
        # Render the comment on every read and return the requested slice.
        if path not in self.files:
            return -errno.ENOENT
        comment = self.files[path]
        content = f"ID: {comment['id']}\nText: {comment['text']}\nParent: {comment['parent']}\n"
        return content.encode('utf-8')[offset:offset+size]
def main():
    """Configure mount options and start the FUSE event loop."""
    usage = "YouTubeCommentFS: A filesystem to browse YouTube comments"
    fs = CommentFS(version="%prog " + fuse.__version__, usage=usage,
                   dash_s_do='setsingle')
    # Expose -o uid=/-o gid= mount options, defaulting to the current user.
    fs.parser.add_option(mountopt="uid", metavar="UID", default=os.getuid(),
                         help="Set the owner of the mounted filesystem")
    fs.parser.add_option(mountopt="gid", metavar="GID", default=os.getgid(),
                         help="Set the group of the mounted filesystem")
    fs.multithreaded = False
    # NOTE(review): allow_other is normally passed as a mount option;
    # confirm that setting it as an attribute has an effect in fuse-python.
    fs.allow_other = True
    fs.parse(errex=1)
    fs.main()


if __name__ == '__main__':
    main()

View File

@ -0,0 +1,37 @@
#!/usr/bin/env python3
import json
from collections import defaultdict
def build_comment_tree(comments):
    """Group a flat comment list into nested {"text", "replies"} dicts.

    Comments whose 'parent' is "root" become the top level; every other
    comment is attached under its parent's id. Ids are intentionally
    dropped from the output.
    """
    tree = defaultdict(list)
    root_comments = []
    print(f"Total comments: {len(comments)}")  # diagnostic
    for entry in comments:
        if entry['parent'] == "root":
            root_comments.append(entry)
        else:
            tree[entry['parent']].append(entry)
    print(f"Root comments: {len(root_comments)}")  # diagnostic

    def as_nested(node):
        # Recursively attach replies found under this node's id.
        return {
            "text": node['text'],
            "replies": [as_nested(child) for child in tree[node['id']]],
        }

    return [as_nested(top) for top in root_comments]
# Load the raw comments (one JSON object per line) and emit the nested tree.
with open('comments.jsonl', 'r', encoding='utf-8') as f:
    comments = [json.loads(line) for line in f]
comment_tree = build_comment_tree(comments)
print(f"Final tree length: {len(comment_tree)}")  # Debug info
# BUG FIX: encoding='utf-8' is required here -- ensure_ascii=False writes
# raw non-ASCII characters, which crashes on platforms whose default locale
# encoding (e.g. cp1252) cannot represent them.
with open('comment_tree.json', 'w', encoding='utf-8') as f:
    json.dump(comment_tree, f, ensure_ascii=False, indent=2)

View File

@ -2,60 +2,42 @@
import argparse
def unique_combined_list(*inputs):
# Combine lists
def flatten_and_split(input_string):
    """Normalize a list string: strip newlines, split on ',' and '-',
    trim whitespace from each piece, and rejoin with commas."""
    pieces = []
    for chunk in input_string.replace("\n", "").split(","):
        for fragment in chunk.split("-"):
            pieces.append(fragment.strip())
    return ",".join(pieces)
def combine_and_uniquify(*inputs):
combined_list = [
item.strip().title() for input_list in inputs for item in input_list.split(",")
]
unique_names = set(combined_list)
# Create an empty list to store the final unique names
final_list = []
# Check for reversed names
for name in combined_list:
final_set = set()
for name in unique_names:
parts = name.split()
# If the name has two words, check for its reversed variant
if len(parts) == 2:
first, last = parts
reversed_name = f"{last} {first}"
# If neither the name nor its reversed variant is in the final list, add the name
if name not in final_list and reversed_name not in final_list:
final_list.append(name)
# If it's a single-word name, simply add it if it's not in the final list
# Add the name if its reversed variant is not already in the final set
if reversed_name not in final_set:
final_set.add(name)
else:
if name not in final_list:
final_list.append(name)
final_set.add(name)
# Sort the list
sorted_list = sorted(final_list)
# Convert the list back to a comma-separated string
output = ",".join(sorted_list)
return output
return ",".join(sorted(final_set))
def main():
# Create an argument parser
parser = argparse.ArgumentParser(
description="Combine multiple comma-separated lists into one unique sorted list."
)
# Add a variable number of input lists
parser.add_argument("lists", nargs="+", type=str, help="Comma-separated lists.")
# Parse the arguments
args = parser.parse_args()
# If only one list is provided, use it twice
if len(args.lists) == 1:
args.lists.append(args.lists[0])
# Get the unique combined list
result = unique_combined_list(*args.lists)
processed_lists = [flatten_and_split(lst) for lst in args.lists]
result = combine_and_uniquify(*processed_lists)
print(result)

2
setvolumeto50.sh Executable file
View File

@ -0,0 +1,2 @@
#!/usr/bin/env bash
# Set the ALSA "Master" mixer control (device index 0) to 50% and unmute it.
amixer set Master,0 50% unmute

View File

@ -0,0 +1,43 @@
import json
import argparse
from datetime import datetime, timezone
def process_json_file(filename):
    """Load a Steam-style owned-games JSON file and humanize its fields.

    For every entry under data["response"]["games"]:
      * "playtime_forever" is converted in place from minutes to hours
        (float division, so 30 minutes becomes 0.5);
      * "rtime_last_played" (a Unix timestamp) becomes a
        "YYYY-MM-DD HH:MM:SS" UTC string, or "Not Played" when it is 0.

    Returns the mutated data structure. Raises KeyError if the expected
    "response"/"games" keys are missing and json.JSONDecodeError on
    malformed input.
    """
    # Explicit encoding: JSON is UTF-8 by spec; don't depend on the locale.
    with open(filename, "r", encoding="utf-8") as file:
        data = json.load(file)
    for game in data["response"]["games"]:
        # Minutes -> hours.
        game["playtime_forever"] /= 60
        if game["rtime_last_played"] != 0:
            # Timezone-aware conversion keeps the output stable across hosts.
            game["rtime_last_played"] = datetime.fromtimestamp(
                game["rtime_last_played"], timezone.utc
            ).strftime("%Y-%m-%d %H:%M:%S")
        else:
            game["rtime_last_played"] = "Not Played"
    return data
def main():
    """CLI entry point: parse the filename argument and print the result."""
    arg_parser = argparse.ArgumentParser(
        description="Process a JSON file containing game data."
    )
    arg_parser.add_argument("filename", help="JSON file to be processed")
    cli_args = arg_parser.parse_args()

    # Transform the file, then pretty-print the whole structure.
    transformed = process_json_file(cli_args.filename)
    print(json.dumps(transformed, indent=4))


if __name__ == "__main__":
    main()

View File

@ -1,7 +1,8 @@
#!/usr/bin/env bash
# Download one video ($1) into an uploader/date-organised archive with yt-dlp.
# NOTE(review): the tildes below are inside double quotes, so the SHELL does
# not expand them; yt-dlp is expected to expand ~ itself for these options --
# confirm before relying on it.
#
# BUG FIX: the --output line was missing its trailing backslash, so the
# appended --cookies-from-browser line ran as a separate (failing) command.
yt-dlp "$1" \
    --download-archive "~/Videos/YT/history.conf" \
    --prefer-free-formats \
    --write-description \
    --output "~/Videos/YT/%(uploader)s/%(upload_date)s - %(title)s.%(ext)s" \
    --cookies-from-browser vivaldi

45
yt-dlp-ntfy.sh Executable file
View File

@ -0,0 +1,45 @@
#!/bin/bash
set -ueo pipefail

# Listen on an ntfy channel and download every URL posted there with yt-dlp.

# Single-instance guard via PID file.
PID_FILE="/tmp/yt-dlp-ntfy.pid"
if [ -e "$PID_FILE" ] && kill -0 "$(cat "$PID_FILE")" &>/dev/null; then
    echo "Script is already running as PID $(cat $PID_FILE). Exiting."
    exit 1
fi
echo $$ > "$PID_FILE"
echo "Running as PID $(cat $PID_FILE)"

# BUG FIX: register the cleanup trap BEFORE the main loop. In the original
# it sat after the infinite loop and was never executed, so a stale PID file
# survived every exit.
trap 'rm -f "$PID_FILE"' EXIT

# ntfy server/channel; the access token comes from the environment
# (set -u aborts with a clear error if NTFY_ACCESS_TOKEN is unset).
NTFY_SERVER="https://notify.kucharczyk.xyz"
CHANNEL="clipboard"
ACCESS_TOKEN="$NTFY_ACCESS_TOKEN"
echo "Monitoring channel $CHANNEL of server $NTFY_SERVER"

# BUG FIX: curl must be (re)started inside the outer loop. The original fed
# the OUTER loop from a single process substitution, so once the connection
# dropped the "Reconnecting" path spun on an exhausted stream without ever
# re-subscribing.
while true; do
    while read -r message; do
        event=$(echo "$message" | jq -r '.event')
        # Skip ntfy keepalive events; only real messages carry a URL.
        [[ "$event" == "keepalive" ]] && continue
        video_url=$(echo "$message" | jq -r '.message')
        if [[ $video_url =~ ^https?:// ]]; then
            echo "Downloading video from $video_url"
            yt-dlp "$video_url"
            curl -s -H "Authorization: Bearer $ACCESS_TOKEN" -d "Finished downloading." "$NTFY_SERVER/$CHANNEL"
        fi
    done < <(curl --no-buffer -s -H "Authorization: Bearer $ACCESS_TOKEN" "$NTFY_SERVER/$CHANNEL/json")
    # Wait a bit before reconnecting after the stream ends.
    echo "Reconnecting..."
    sleep 5
done

50
ytdlqueue.py Normal file
View File

@ -0,0 +1,50 @@
#!/usr/bin/env python3
import subprocess
import sys
import time
from queue import Queue
from threading import Thread
# Queue to hold the URLs
download_queue = Queue()
def download_video(url):
    """
    Download a single video with yt-dlp (the original docstring said
    youtube-dl, but the code invokes yt-dlp).

    Blocks until yt-dlp exits. A non-zero exit status is caught and printed
    rather than propagated, so one failed download never kills the worker
    thread that calls this.
    """
    try:
        print(f"Downloading {url}...")
        # check=True converts a non-zero yt-dlp exit status into
        # CalledProcessError, handled below.
        subprocess.run(["yt-dlp", url], check=True)
        print(f"Finished downloading {url}")
    except subprocess.CalledProcessError as e:
        print(f"Failed to download {url}: {e}")
def worker():
    """Drain the shared download queue forever, one URL at a time.

    Runs as a daemon thread; task_done() is called after each download so
    that main() can join() the queue.
    """
    while True:
        pending_url = download_queue.get()
        download_video(pending_url)
        download_queue.task_done()
def main():
    """Read URLs from stdin, queue them for the background worker, then wait.

    Type 'exit' (case-insensitive) to stop accepting input; downloads that
    were already queued are finished before the process exits.
    """
    # Daemon thread: must not keep the process alive after main() returns.
    thread = Thread(target=worker)
    thread.daemon = True
    thread.start()
    print("Enter URLs to download. Type 'exit' to quit.")
    while True:
        url = input("URL: ").strip()
        if url.lower() == 'exit':
            break
        # BUG FIX: ignore blank lines -- the original queued empty strings,
        # which invoked yt-dlp with an empty argument.
        if not url:
            continue
        download_queue.put(url)
    # Block until the worker has drained everything queued so far.
    download_queue.join()


if __name__ == "__main__":
    main()