Speed up database generation. Generate and use a certificate by default.
media/cache.py (124 changed lines)
@@ -1,10 +1,12 @@
 #
 #
 # Bragi - A Mumble music bot
 # Forked from botamusique by azlux (https://github.com/azlux/botamusque)
 #
 import logging
 import os
+import multiprocessing
+from concurrent.futures import ProcessPoolExecutor, as_completed
 
 import json
 import threading
@@ -23,6 +25,29 @@ class ItemNotCachedError(Exception):
     pass


+def _process_file_for_cache(file_path):
+    """Worker function to process a single file for the cache.
+    This must be a module-level function for multiprocessing to work.
+
+    Args:
+        file_path: Relative path to the audio file
+
+    Returns:
+        dict: Music item dictionary ready for database insertion, or None on error
+    """
+    try:
+        # Import inside function to avoid pickling issues
+        import variables as var
+        from media.item import item_builders
+
+        item = item_builders['file'](path=file_path)
+        return item.to_dict()
+    except Exception as e:
+        # Log errors but don't fail the whole process
+        logging.getLogger("bot").warning(f"library: failed to process file {file_path}: {e}")
+        return None
+
+
 class MusicCache(dict):
     def __init__(self, db: MusicDatabase):
         super().__init__()
@@ -115,27 +140,90 @@ class MusicCache(dict):
 
     def build_dir_cache(self):
         self.dir_lock.acquire()
-        self.log.info("library: rebuild directory cache")
-        files = util.get_recursive_file_list_sorted(var.music_folder)
-
-        # remove deleted files
-        results = self.db.query_music(Condition().or_equal('type', 'file'))
-        for result in results:
-            if result['path'] not in files:
-                self.log.debug("library: music file missed: %s, delete from library." % result['path'])
-                self.db.delete_music(Condition().and_equal('id', result['id']))
-            else:
-                files.remove(result['path'])
-
-        for file in files:
-            results = self.db.query_music(Condition().and_equal('path', file))
-            if not results:
-                item = item_builders['file'](path=file)
-                self.log.debug("library: music save into database: %s" % item.format_debug_string())
-                self.db.insert_music(item.to_dict())
-
-        self.db.manage_special_tags()
-        self.dir_lock.release()
+        try:
+            self.log.info("library: rebuild directory cache")
+            files_list = util.get_recursive_file_list_sorted(var.music_folder)
+            files_on_disk = set(files_list)  # Convert to set for O(1) lookup
+
+            self.log.info(f"library: found {len(files_on_disk)} audio files on disk")
+
+            # Get all existing file paths from the database as a set
+            db_paths = set(self.db.query_all_paths())
+            self.log.info(f"library: found {len(db_paths)} files in database")
+
+            # Find files to delete (in DB but not on disk)
+            files_to_delete = db_paths - files_on_disk
+            if files_to_delete:
+                self.log.info(f"library: removing {len(files_to_delete)} deleted files from database")
+                for path in files_to_delete:
+                    self.log.debug(f"library: music file missing: {path}, deleting from library.")
+                    self.db.delete_music(Condition().and_equal('path', path))
+
+            # Find new files to add (on disk but not in DB)
+            new_files = files_on_disk - db_paths
+            if not new_files:
+                self.log.info("library: no new files to add")
+                self.db.manage_special_tags()
+                return
+
+            self.log.info(f"library: processing {len(new_files)} new files with parallel workers")
+
+            # Determine the number of worker processes from config:
+            # 0 = auto (cpu_count - 1), N = use N workers
+            configured_workers = var.config.getint('bot', 'rebuild_workers', fallback=0)
+            if configured_workers == 0:
+                # Auto mode: use all cores minus one (leave one free for audio/system)
+                num_workers = max(1, multiprocessing.cpu_count() - 1)
+                self.log.info(f"library: auto-detected {multiprocessing.cpu_count()} cores, using {num_workers} workers")
+            else:
+                # User-specified: enforce a minimum of 1
+                num_workers = max(1, configured_workers)
+                if num_workers == 1:
+                    self.log.info("library: using 1 worker (sequential processing)")
+                else:
+                    self.log.info(f"library: using {num_workers} workers (configured)")
+
+            # Process files in parallel
+            processed_items = []
+            with ProcessPoolExecutor(max_workers=num_workers) as executor:
+                # Submit all files for processing
+                future_to_file = {executor.submit(_process_file_for_cache, file_path): file_path
+                                  for file_path in new_files}
+
+                # Collect results as they complete
+                completed = 0
+                for future in as_completed(future_to_file):
+                    file_path = future_to_file[future]
+                    try:
+                        result = future.result()
+                        if result:
+                            processed_items.append(result)
+                        completed += 1
+                        if completed % 100 == 0:
+                            self.log.info(f"library: processed {completed}/{len(new_files)} files")
+                    except Exception as e:
+                        self.log.warning(f"library: failed to process {file_path}: {e}")
+
+            self.log.info(f"library: successfully processed {len(processed_items)} files")
+
+            # Batch insert all new items into the database
+            if processed_items:
+                self.log.info(f"library: inserting {len(processed_items)} items into database")
+                import sqlite3
+                conn = sqlite3.connect(self.db.db_path)
+                try:
+                    for item in processed_items:
+                        self.db.insert_music(item, _conn=conn)
+                    conn.commit()
+                    self.log.info("library: database batch insert completed")
+                finally:
+                    conn.close()
+
+            self.db.manage_special_tags()
+            self.log.info("library: directory cache rebuild complete")
+        finally:
+            self.dir_lock.release()
 
 
 class CachedItemWrapper:
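
ProcessPoolExecutor ships each task to a worker process by pickling the callable and its arguments, and only module-level functions can be pickled by reference; that is why _process_file_for_cache sits at module scope instead of being a method or closure. A minimal standalone sketch of the constraint (the names here are illustrative, not from the bot):

    from concurrent.futures import ProcessPoolExecutor

    def square(n):  # module-level, so it can be pickled by reference
        return n * n

    if __name__ == "__main__":  # guard required under the spawn start method
        with ProcessPoolExecutor(max_workers=2) as pool:
            print(list(pool.map(square, range(5))))  # [0, 1, 4, 9, 16]
        # Submitting a lambda or a nested function instead would fail with a
        # pickling error, because those cannot be serialized by reference.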
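
The new rebuild_workers option is read via var.config.getint('bot', 'rebuild_workers', fallback=0), i.e. from the [bot] section of the bot's INI-style configuration. The diff does not touch the config file itself, so the entry below is an assumed example rather than shipped documentation:

    [bot]
    # 0 = auto: use cpu_count() - 1 worker processes (never fewer than 1)
    # any other value = use exactly that many workers, clamped to a minimum of 1
    rebuild_workers = 0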
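
The rebuilt build_dir_cache follows the standard submit/as_completed fan-out: each new file becomes a future keyed back to its path, results are collected in completion order, and a failure in one file is logged without aborting the rebuild. The same shape as a self-contained script, with work() standing in for _process_file_for_cache:

    import concurrent.futures

    def work(path):
        # Stand-in worker: return an item dict, or raise on a bad file.
        return {"path": path}

    if __name__ == "__main__":
        paths = [f"song_{i}.mp3" for i in range(250)]
        items = []
        with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
            future_to_path = {executor.submit(work, p): p for p in paths}
            done = 0
            for future in concurrent.futures.as_completed(future_to_path):
                try:
                    items.append(future.result())  # re-raises worker exceptions
                except Exception as exc:
                    print(f"failed: {future_to_path[future]}: {exc}")
                done += 1
                if done % 100 == 0:
                    print(f"processed {done}/{len(paths)}")
        print(f"collected {len(items)} items")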
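
The batch insert opens a single sqlite3 connection, routes every insert_music call through it with the _conn parameter (an API of this codebase's MusicDatabase, not of sqlite3), and commits once, so the whole batch pays for one transaction instead of one per row. A generic sqlite3 illustration of that trade-off, using executemany and a hypothetical table that is not the bot's real schema:

    import sqlite3

    items = [{"path": f"song_{i}.mp3", "title": f"Song {i}"} for i in range(1000)]

    conn = sqlite3.connect(":memory:")
    try:
        conn.execute("CREATE TABLE music (path TEXT PRIMARY KEY, title TEXT)")
        # One transaction for the whole batch: a single commit,
        # instead of a journal write per row.
        conn.executemany(
            "INSERT INTO music (path, title) VALUES (:path, :title)", items)
        conn.commit()
        print(conn.execute("SELECT COUNT(*) FROM music").fetchone()[0])  # 1000
    finally:
        conn.close()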