Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -45,3 +45,14 @@ STASHCAST_USER_TOKEN=608AF9E5-E989-4729-9C05-7FFB6EA86FE4
# Optional: Slug generation settings
# STASHCAST_SLUG_MAX_WORDS=6
# STASHCAST_SLUG_MAX_CHARS=40

# Optional: Summarization settings
# Number of sentences in summary (set to 0 to disable summarization)
# STASHCAST_SUMMARY_SENTENCES=8

# Summarization backend: 'extractive' (default, fast, CPU-only) or 'ollama' (better quality, requires Ollama)
# STASHCAST_SUMMARIZER=extractive

# Ollama settings (only used when STASHCAST_SUMMARIZER=ollama)
# STASHCAST_OLLAMA_HOST=http://localhost:11434
# STASHCAST_OLLAMA_MODEL=qwen2.5:1.5b
3 changes: 3 additions & 0 deletions media/context_processors.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,13 @@

from django.conf import settings

from media.service.ollama import get_summarizer_status


def stashcast_settings(request):
    """Context processor exposing StashCast settings to every template.

    NOTE(review): ``get_summarizer_status()`` may issue an HTTP probe of the
    Ollama service when STASHCAST_SUMMARIZER='ollama', so this can run a
    network check on every rendered request — consider caching if that
    shows up in request latency. TODO confirm.
    """
    context = {
        'user_token': settings.STASHCAST_USER_TOKEN,
        'require_user_token_for_feeds': settings.REQUIRE_USER_TOKEN_FOR_FEEDS,
        'summarizer_status': get_summarizer_status(),
    }
    return context
80 changes: 80 additions & 0 deletions media/management/commands/check_ollama.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
"""
Django management command to check Ollama configuration and status.

Usage:
./manage.py check_ollama
"""

from django.conf import settings
from django.core.management.base import BaseCommand

from media.service.ollama import get_ollama_status, get_summarizer_status


class Command(BaseCommand):
    help = 'Check Ollama configuration and availability for summarization'

    def handle(self, *args, **options):
        """Print configuration, overall status, then backend-specific details."""
        self._print_configuration()
        self._print_status()

        summarizer = settings.STASHCAST_SUMMARIZER
        if summarizer == 'ollama':
            self._print_ollama_details()
        elif summarizer == 'extractive':
            # LexRank runs in-process; nothing external to check.
            self.stdout.write(
                self.style.SUCCESS(
                    '\nExtractive summarizer (LexRank) is ready. No external service required.'
                )
            )

        # A non-positive sentence count disables summarization entirely,
        # regardless of which backend is configured.
        if settings.STASHCAST_SUMMARY_SENTENCES <= 0:
            self.stdout.write(
                self.style.WARNING(
                    '\nNote: Summarization is DISABLED (STASHCAST_SUMMARY_SENTENCES=0)'
                )
            )

        self.stdout.write('')

    def _print_configuration(self):
        """Echo the summarization-related settings exactly as configured."""
        self.stdout.write('\n=== Summarization Configuration ===\n')
        self.stdout.write(f'STASHCAST_SUMMARIZER: {settings.STASHCAST_SUMMARIZER}')
        self.stdout.write(f'STASHCAST_SUMMARY_SENTENCES: {settings.STASHCAST_SUMMARY_SENTENCES}')
        self.stdout.write(f'STASHCAST_OLLAMA_HOST: {settings.STASHCAST_OLLAMA_HOST}')
        self.stdout.write(f'STASHCAST_OLLAMA_MODEL: {settings.STASHCAST_OLLAMA_MODEL}')

    def _print_status(self):
        """Print the aggregated summarizer status (mode/status/message)."""
        self.stdout.write('\n=== Status ===\n')
        status = get_summarizer_status()
        self.stdout.write(f"Mode: {status['mode']}")
        self.stdout.write(f"Status: {status['status']}")
        self.stdout.write(f"Message: {status['message']}")

    def _print_ollama_details(self):
        """Probe the Ollama service and report reachability and model state."""
        ollama_model = settings.STASHCAST_OLLAMA_MODEL

        self.stdout.write('\n=== Ollama Details ===\n')
        ollama_status = get_ollama_status()

        if ollama_status.available:
            self.stdout.write(self.style.SUCCESS('Ollama service: Running'))
        else:
            self.stdout.write(self.style.ERROR('Ollama service: Not reachable'))
            self.stdout.write(f' Error: {ollama_status.error}')
            self.stdout.write('\n To start Ollama, run: ollama serve')

        if ollama_status.model_loaded:
            self.stdout.write(self.style.SUCCESS(f'Model {ollama_model}: Available'))
        elif ollama_status.available:
            # Only report a missing model when the service itself answered;
            # an unreachable service already explained the problem above.
            self.stdout.write(self.style.WARNING(f'Model {ollama_model}: Not found'))
            self.stdout.write(f'\n To pull the model, run: ollama pull {ollama_model}')

        if ollama_status.ready:
            self.stdout.write(
                self.style.SUCCESS('\nOllama is ready for summarization!')
            )
        else:
            self.stdout.write(
                self.style.ERROR('\nOllama is NOT ready for summarization.')
            )
180 changes: 180 additions & 0 deletions media/service/ollama.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,180 @@
"""Ollama service for LLM-based summarization."""

import json
import logging
import urllib.error
import urllib.request
from dataclasses import dataclass
from typing import Optional

from django.conf import settings


@dataclass
class OllamaStatus:
    """Snapshot of the Ollama service's reachability and model presence."""

    # True when the Ollama HTTP service responded to the status probe.
    available: bool
    # True when the configured model is present on the service.
    model_loaded: bool
    # Human-readable diagnostic; None when nothing is wrong.
    error: Optional[str] = None

    @property
    def ready(self) -> bool:
        """True only when the service is up AND the model is present."""
        if not self.available:
            return False
        return self.model_loaded


def get_ollama_status() -> OllamaStatus:
    """
    Check if Ollama is running and the configured model is available.

    Queries the Ollama ``/api/tags`` endpoint (which lists locally pulled
    models) with a short timeout so a down service fails fast.

    Returns:
        OllamaStatus with availability information
    """
    host = settings.STASHCAST_OLLAMA_HOST
    model = settings.STASHCAST_OLLAMA_MODEL

    def _normalize(name: str) -> str:
        # Ollama treats an untagged model name as the ':latest' tag, so
        # normalize both sides before comparing.
        return name if ':' in name else f'{name}:latest'

    try:
        # Check if Ollama is running by listing models
        url = f"{host}/api/tags"
        req = urllib.request.Request(url, method='GET')
        req.add_header('Content-Type', 'application/json')

        with urllib.request.urlopen(req, timeout=5) as response:
            data = json.loads(response.read().decode('utf-8'))

        available_models = [m.get('name', '') for m in data.get('models', [])]

        # BUG FIX: require an exact match after tag normalization. The old
        # prefix check (startswith(f"{base}:")) accepted ANY tag of the base
        # model, so a configured 'qwen2.5:1.5b' was reported as loaded when
        # only 'qwen2.5:7b' had been pulled — and generation would then fail.
        target = _normalize(model)
        model_found = any(_normalize(m) == target for m in available_models)

        if model_found:
            return OllamaStatus(available=True, model_loaded=True)
        return OllamaStatus(
            available=True,
            model_loaded=False,
            error=f"Model '{model}' not found. Run: ollama pull {model}",
        )

    except urllib.error.URLError as e:
        # Connection refused / DNS failure / timeout: service not reachable.
        return OllamaStatus(
            available=False,
            model_loaded=False,
            error=f"Ollama not reachable at {host}: {e.reason}",
        )
    except Exception as e:
        # Malformed response, JSON errors, etc. — report, never raise.
        return OllamaStatus(
            available=False, model_loaded=False, error=f"Error checking Ollama: {e}"
        )


def generate_summary_ollama(text: str, max_sentences: int = 8) -> Optional[str]:
    """
    Generate a summary using Ollama.

    Sends one non-streaming request to the Ollama ``/api/generate`` endpoint.
    The 120 s timeout is deliberate: local LLM inference can be slow.

    Args:
        text: The full text to summarize (e.g., from subtitles)
        max_sentences: Target number of sentences for the summary

    Returns:
        The generated summary, or None if generation failed
    """
    host = settings.STASHCAST_OLLAMA_HOST
    model = settings.STASHCAST_OLLAMA_MODEL

    # Construct the prompt
    prompt = f"""Summarize the following transcript in approximately {max_sentences} sentences.
Focus on the main topics, key points, and any important conclusions discussed.
Write in a clear, informative style suitable for a podcast description.
Do not include phrases like "This transcript discusses" or "The speaker talks about".
Just provide the summary directly.

Transcript:
{text}

Summary:"""

    try:
        url = f"{host}/api/generate"
        payload = json.dumps({
            'model': model,
            'prompt': prompt,
            'stream': False,
            'options': {
                'temperature': 0.3,  # Lower temperature for more focused summaries
                'num_predict': 500,  # Limit output length
            },
        }).encode('utf-8')

        req = urllib.request.Request(url, data=payload, method='POST')
        req.add_header('Content-Type', 'application/json')

        with urllib.request.urlopen(req, timeout=120) as response:
            data = json.loads(response.read().decode('utf-8'))

        summary = data.get('response', '').strip()
        return summary if summary else None

    except Exception:
        # Summarization is best-effort: a failure must not block the caller.
        # FIX: log via the logging framework (with traceback) instead of
        # print(), which is lost under most server deployments.
        logging.getLogger(__name__).exception("Ollama summarization failed")
        return None


def get_summarizer_status() -> dict:
    """
    Get the current summarizer configuration and status for display.

    Returns:
        Dict with 'mode', 'status', and 'message' keys
    """
    mode = settings.STASHCAST_SUMMARIZER
    sentence_count = settings.STASHCAST_SUMMARY_SENTENCES

    # A non-positive sentence count disables summarization outright,
    # regardless of the configured backend.
    if sentence_count <= 0:
        return {
            'mode': 'disabled',
            'status': 'disabled',
            'message': 'Summarization disabled (STASHCAST_SUMMARY_SENTENCES=0)',
        }

    # The extractive backend runs in-process and is always ready.
    if mode == 'extractive':
        return {
            'mode': 'extractive',
            'status': 'ready',
            'message': f'Extractive (LexRank, {sentence_count} sentences)',
        }

    if mode == 'ollama':
        model_name = settings.STASHCAST_OLLAMA_MODEL
        backend = get_ollama_status()

        if backend.ready:
            status, message = 'ready', f'Ollama ({model_name})'
        elif backend.available:
            status, message = 'model_missing', f'Ollama: model not found ({model_name})'
        else:
            status, message = 'unavailable', 'Ollama: service not running'

        return {'mode': 'ollama', 'status': status, 'message': message}

    # Unrecognized STASHCAST_SUMMARIZER value.
    return {
        'mode': 'unknown',
        'status': 'error',
        'message': f"Unknown summarizer: {mode}",
    }
Loading