#!/usr/bin/env python3
import datetime
import email.utils
import os
import re
import xml.etree.ElementTree as ET
from xml.dom import minidom

import requests

# Register the Media RSS namespace so serialization emits the "media" prefix
# instead of an auto-generated "ns0".
ET.register_namespace("media", "http://search.yahoo.com/mrss/")

def is_stream_active(url):
    """
    Check whether *url* serves a live HLS (m3u8) playlist.

    Issues a streamed GET and inspects only the first non-empty line of the
    body: a valid HLS playlist must begin with "#EXTM3U".

    Returns True only for a 200 response whose first non-empty line starts
    with "#EXTM3U"; returns False on any other status, an empty body, or any
    network error (timeouts included).
    """
    try:
        # stream=True downloads only what we iterate over; the context manager
        # guarantees the connection is released (the original leaked the
        # streamed response, which was never closed).
        with requests.get(url, timeout=5, stream=True) as response:
            if response.status_code != 200:
                return False
            for line in response.iter_lines(decode_unicode=True):
                if not line:
                    continue
                # decode_unicode can still yield bytes when the server sends
                # no usable charset; normalize defensively.
                if isinstance(line, bytes):
                    line = line.decode('utf-8', errors='replace')
                return line.strip().startswith("#EXTM3U")
        return False
    except requests.RequestException:
        return False

def parse_m3u(file_path):
    """
    Parse a .m3u playlist and return a list of stream dicts.

    Each returned dict has keys:
      - "title": the tvg-name attribute; falls back to the display name after
        the comma in the #EXTINF line, then to "No Title".
      - "logo":  the tvg-logo attribute, or "" when absent.
      - "url":   the stream URL following the #EXTINF header.

    Blank lines and non-#EXTINF directive lines (e.g. #EXTVLCOPT) between a
    #EXTINF header and its URL are skipped; a header with no following URL is
    dropped. (The original blindly took the very next line as the URL, so a
    directive or blank line was recorded as the stream URL.)
    """
    with open(file_path, "r", encoding="utf-8") as f:
        lines = f.read().splitlines()

    streams = []
    i = 0
    while i < len(lines):
        line = lines[i]
        if not line.startswith("#EXTINF:"):
            i += 1
            continue

        name_match = re.search(r'tvg-name="([^"]+)"', line)
        logo_match = re.search(r'tvg-logo="([^"]+)"', line)
        if name_match:
            title = name_match.group(1)
        else:
            # #EXTINF:<duration> [attrs],<display name> — use the display name
            # when tvg-name is missing.
            _, _, display = line.partition(",")
            title = display.strip() or "No Title"
        logo = logo_match.group(1) if logo_match else ""

        # Find this entry's URL: the next non-blank, non-directive line.
        j = i + 1
        while j < len(lines):
            candidate = lines[j].strip()
            if candidate and not candidate.startswith("#"):
                streams.append({"title": title, "logo": logo, "url": candidate})
                j += 1
                break
            if candidate.startswith("#EXTINF:"):
                break  # next entry header reached: this entry has no URL
            j += 1
        i = j
    return streams

def generate_rss(active_streams):
    """
    Build an RSS 2.0 document (with Media RSS extensions) for the given
    streams and return it as pretty-printed, UTF-8 encoded bytes.

    Each entry in *active_streams* is a dict with "title", "logo" and "url"
    keys, as produced by parse_m3u(). The channel metadata mirrors the
    reference ("guía") feed this script was modeled on.
    """
    rss = ET.Element("rss", {"version": "2.0"})
    channel = ET.SubElement(rss, "channel")

    # Channel header, mirroring the reference feed.
    ET.SubElement(channel, "title").text = "Tilingo"
    ET.SubElement(channel, "link")
    ET.SubElement(channel, "description").text = "Feed on video content for testing channel applications built on the Roku developer platform"
    ET.SubElement(channel, "language").text = "en-us"
    # RFC 2822 date of this generation run. (The original emitted a frozen,
    # hard-coded timestamp copied from the reference feed.)
    ET.SubElement(channel, "pubDate").text = email.utils.format_datetime(
        datetime.datetime.now(datetime.timezone.utc), usegmt=True
    )

    image = ET.SubElement(channel, "image")
    ET.SubElement(image, "title").text = "Tilingo"
    ET.SubElement(image, "url").text = "http://axotv.com/logo.png"
    ET.SubElement(image, "width").text = "-1"
    ET.SubElement(image, "height").text = "-1"

    # One <item> per active stream, with a media:content payload typed as HLS.
    for stream in active_streams:
        item = ET.SubElement(channel, "item")
        ET.SubElement(item, "title").text = stream["title"]
        ET.SubElement(item, "link").text = stream["url"]
        ET.SubElement(item, "description")
        ET.SubElement(item, "pubDate")
        ET.SubElement(item, "guid", {"isPermaLink": "false"})

        media_content = ET.SubElement(item, "{http://search.yahoo.com/mrss/}content", {
            "url": stream["url"],
            "type": "stream/hls",
            "channels": "",
            "bitrate": "",
            "duration": "",
            "fileSize": "",
            "framerate": "",
            "height": "",
            "width": "",
            "isDefault": "true"
        })
        ET.SubElement(media_content, "{http://search.yahoo.com/mrss/}description")
        ET.SubElement(media_content, "{http://search.yahoo.com/mrss/}keywords")
        ET.SubElement(media_content, "{http://search.yahoo.com/mrss/}thumbnail", {"url": stream["logo"]})
        ET.SubElement(media_content, "{http://search.yahoo.com/mrss/}title").text = stream["title"]

    # Round-trip through minidom purely for indentation ("pretty") output.
    rough_string = ET.tostring(rss, encoding="utf-8")
    reparsed = minidom.parseString(rough_string)
    pretty_xml = reparsed.toprettyxml(indent="  ", encoding="UTF-8")
    return pretty_xml

def update_log(log_file, active_count):
    """
    Append a timestamped "active streams" entry to *log_file*, keeping only
    the 10 most recent entries.

    The file is created if it does not exist yet. Each line has the form
    "YYYY-MM-DD HH:MM:SS - Streams activos: N".
    """
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    history = []
    if os.path.exists(log_file):
        with open(log_file, "r", encoding="utf-8") as handle:
            history = handle.readlines()
    history.append(f"{timestamp} - Streams activos: {active_count}\n")
    # Rewrite the whole file with at most the last 10 entries.
    with open(log_file, "w", encoding="utf-8") as handle:
        handle.writelines(history[-10:])

def main():
    """
    Entry point: read the playlist, keep only streams that answer as live
    HLS, write the RSS feed and record the run in the rolling log.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    m3u_file = os.path.join(script_dir, "parejas.m3u")
    output_file = "/home/migosu/disco/web/servidor-mitilingo/programacion/parejas.rss"
    log_file = os.path.join(script_dir, "parejas.log")

    # Probe every playlist entry; report the dead ones as we go.
    active_streams = []
    for stream in parse_m3u(m3u_file):
        if not is_stream_active(stream["url"]):
            print(f"Stream not active: {stream['title']} - {stream['url']}")
            continue
        active_streams.append(stream)

    # generate_rss returns encoded bytes, hence the binary write mode.
    rss_content = generate_rss(active_streams)
    with open(output_file, "wb") as f:
        f.write(rss_content)
    print(f"RSS file generated at {output_file}")

    update_log(log_file, len(active_streams))
    print(f"Log updated at {log_file}")

if __name__ == "__main__":
    main()
