# -*- coding: utf-8 -*-

import httplib2, re, json, logging, os, tempfile

from datetime import datetime, timedelta
from lxml import html
from lxml.etree import XMLSyntaxError

from epguide.data_formats import Channel, ChannelDict, Event, EventList

from roman import fromRoman, RomanError

# Maximum number of attempts for each HTTP request before giving up.
MAX_TRIES = 3

# Convenience time deltas.
# NOTE(review): DAY and MIN are not referenced anywhere in this file --
# presumably used by callers of this parser; confirm before removing.
DAY = timedelta(days=1)
MIN = timedelta(minutes=1)

# Common XPath prefix for the broadcast-details AJAX pages.
XROOT = '/html/div/'

class CpNgParser(object):
    """
    Parser pobierajacy dane z portalu Cyfry Plus
    """
    base = "http://cyfraplus.pl/"
    base_url = base + "program.html"
    icon_url = base + "local/img/channel/large/%s.png"
    schedule_url = base + "page/schedule/broadcasts.json?date=%s&channels=%s"
    details_url = base + "%s,broadcast,view-schedule.ajax"
    encoding = "UTF-8"
    channels = None
    categories = None

    tag = 'cplus'
    language = 'pl'
    info_name = "Cyfra+"
    info_url = base
    data_url = base_url

    headers = {
        'Connection': 'keep-alive'
    }

    res = {
        'channel': re.compile("""
                "(?P<icon_id>[A-Z0-9]{3})",     # id ikony
                \s*"(?P<name>[^"]+)"\s*,        # nazwa
                \s*"(?P<type>[A-Z]{3})"\s*,     # typ {DOK, DZI, FIL, INF, POL, ROZ, SPO}
                \s*"(?P<language>[a-z]+)"\s*,   # język
                \s*"(?P<number>\d+)"\s*,        # numer na dekoderze
                \s*"(?P<id>[A-Z0-9]{3})"        # id programu
            """, re.VERBOSE),
        'category': re.compile("""
                "(?P<id>[A-Z0-9]{2})",          # id kategorii
                \s*"(?P<name>[^"]+)"\s*,        # nazwa
                \s*"(?P<group>[A-Z0-9]{2})"     # grupa
            """, re.VERBOSE),
        'updates': {
            'title': re.compile("""
                    ^
                    (?P<title>.+?)                  # tytuł
                    ((?=.*odc\.\s)\s                # jeśli odcinek...
                    (?P<season>([0-9]{1,3})         # ...to może sezon...
                    |(X{0,3})(IX|IV|V?I{0,3})))?    # ...może nawet rzymski
                    (:\s|$)                         # po tytule
                    (?P<subtitle>(.(?!dc.\s|:\s))+?)? # podtytuł / tytuł odcinka
                    (,\s)?                          # po podtytule
                    (odc.\s(?P<episode>[0-9]+)      # odcinek
                    (/(?P<episode_count>[0-9]+))?)? # ilość odcinków
                    $
                """, re.IGNORECASE | re.VERBOSE),
            'info': re.compile("""
                    ^((
                    (?P<year>[0-9]{4}(-[0-9]{4})?)  # rok produkcji
                    |(?P<runtime>[0-9]+)\smin       # czas trwania
                    |(?P<countries>[^,]+)           # produkcja
                    )(,\s|$))+$                     # rozdzielone przecinkami
                """, re.IGNORECASE | re.VERBOSE)
        }
    }
    
    xpaths = {
        'script': '//script[contains(.,"channels.push")]',
        'singlevalue': {
            'original_title': XROOT + 'small/preceding-sibling::h3',
            'category': XROOT + 'small[1]',
            'info': XROOT + 'small[2]',
            'description': XROOT + 'blockquote',
        },
        'multivalue': {
            'directors': XROOT + u'a[preceding::h3[contains(., "reżyseria")] and following::h3[contains(.,"wykonawcy")]]',
            'actors': XROOT + 'a[preceding::h3[contains(., "wykonawcy")] and following::h3[contains(.,"emisja")]]',
        },
        'directors_label': XROOT + u'h3[contains(., "reżyseria")]',
        'actors_label': XROOT + 'h3[contains(., "wykonawcy")]',
        'text': XROOT + 'text()',
    }
    
    ints =  ('season', 'episode', 'episode_count', 'runtime')
    
    renames = {
        'thumb': 'thumb_url',
        'props': 'properties'
    }
    
    cleanups = ('id', 'info', 'directors_elements', 'actors_elements', 'time')

    def Init(self):
        self.http = httplib2.Http(os.path.join(tempfile.gettempdir(), 'httplib2.cplus'))
        self.log = logging.getLogger("epguide")
        i = 0
        while i < MAX_TRIES:
            i += 1
            try:
                resp, content = self.http.request(self.base_url)
            except Exception, e:
                self.log.warn('TCP error: %s Status: %s' % (self.base_url, e))
                continue
            if resp['status'] != '200':
                self.log.warn('HTTP: %s Status: %s' % (self.base_url, resp['status']))
                continue

            try:
                tree = html.fromstring(content.decode(self.encoding))
            except (IOError, XMLSyntaxError), e:
                self.log.warn('HTML Error: %s %s' % (e, self.base_url))
                continue
            else:
                break

        script = tree.xpath(self.xpaths['script'])[0].text
        self.channels = \
            ChannelDict(self,
                        [(m.group('id'),
                         Channel(m.group('name'),
                                 m.group('id'),
                                 language=m.group('language'),
                                 icon_url=self.icon_url % m.group('icon_id'),
                                 type=m.group('type'),
                                 number=int(m.group('number'))
                                )) for m in self.res['channel'].finditer(script)]).sorted()
        self.categories = {m.group('id'): m.group('name').lower() \
                           for m in self.res['category'].finditer(script)}

    def Finish(self):
        pass

    def GetChannelList(self):
        return self.channels
    
    def GetGuide(self, date, channel_id):
        """
        pobiera informacje ze strony oraz parsuje dane, zwraca liste elementow
        klasy Event
        """
        events = EventList(self, date, self.channels[channel_id])

        guide_url = self.schedule_url % (date.strftime('%Y-%m-%d'), channel_id)

        tries = 0
        while tries < MAX_TRIES:
            tries += 1
            try:
                resp, content = self.http.request(guide_url, headers=self.headers)
            except Exception, e:
                self.log.warn('TCP error: %s %s' % (guide_url, e))
                continue
            if resp['status'] != '200':
                self.log.warn('HTTP error: %s Status: %s' % (guide_url, resp['status']))
                continue

            try:
                data = json.loads(content)
            except ValueError, e:
                self.log.warn('JSON error: %s %s\n' % (e, guide_url))
                continue
            else:
                break

        if not data:
            # brak programów dla tej daty
            return events

        try:
            schedule = data[channel_id]
        except (KeyError, TypeError), e:
            self.log.warn('No data: %s %s\n' % (e, self.schedule_url % (date.strftime('%Y-%m-%d'),
                                                                        channel_id)))
            # brak programów dla tej daty
            return events

        for element in schedule:
            element['categories'] = []

            # kategoria podstawowa 
            if element['category']:
                element['categories'].append(self.categories[element['category']])
            element.pop('category')
            
            # kategoria w polu info
            if element['info']:
                element['categories'].append(element['info'])
            element.pop('info')

            # szczegóły programu
            tries = 0
            details_url = self.details_url % element['id']
            while tries < MAX_TRIES:
                tries += 1
                try:
                    resp, content = self.http.request(details_url, headers=self.headers)
                except Exception, e:
                    self.log.warn('Connection error: %s %s' % (details_url, e))
                    continue
                if resp['status'] != '200':
                    self.log.warn("HTTP error: %s Status: %s" % (details_url, resp['status']))
                    continue

                try:
                    h = html.fromstring(content.decode(self.encoding))
                except (IOError, XMLSyntaxError), e:
                    self.log.warn('HTML Error: %s %s' % (e, details_url))
                    continue
                else:
                    break

            if h is not None:
                # wczytanie informacji z html
                for k, v in self.xpaths['singlevalue'].items():
                    e = h.xpath(v)
                    if e and e[0].text:
                        if k == 'category':
                            element['categories'].append(e[0].text)
                        else:
                            element[k] = e[0].text            
    
                # jak wyżej, dane wielowartościowe
                for k, v in self.xpaths['multivalue'].items():
                    e = h.xpath(v)
                    element[k+'_elements'] = e + h.xpath(self.xpaths[k+'_label'])
                    e = h.xpath(v+'/text()')
                    element[k] = e
    
                # osierocony tekst - nazwiska autorów
                for e in h.xpath(self.xpaths['text']):
                    text = e.strip(', ')
                    if not text:
                        continue
                    if e.getparent() in element['directors_elements']:
                        element['directors'].extend(text.split(', '))
                    elif e.getparent() in element['actors_elements']:
                        element['actors'].extend(text.split(', '))

            # dodatkowe przetwarzanie informacji
            for k, v in self.res['updates'].items():
                try:
                    m = v.match(element[k])
                    if m:
                        element.update(m.groupdict())
                    else:
                        self.log.error("Unmatched update %s: %s" % (k, element[k]))
                except KeyError:
                    pass

            # czas rozpoczęcia
            element['time_start'] = datetime.fromtimestamp(element['time'])

            # czyszczenie podtytułu
            if element['subtitle']:
                element['subtitle'] = element['subtitle'].strip('-, \n')

            # produkcja rozdzielona ukośnikami
            try:
                element['countries'] = element['countries'] and element['countries'].split('/')
            except KeyError:
                pass
            
            # wartości dziesiętne
            for k in self.ints:
                try:
                    element[k] = element[k] and int(element[k])
                except ValueError:
                    element[k] = None
                except KeyError:
                    pass
            
            # zmiana nazwy
            for k, v in self.renames.iteritems():
                try:
                    element[v] = element[k]
                    del element[k]
                except KeyError:
                    pass

            # kategorie unikalne, z zachowaniem kolejności
            seen = set()
            element['categories'] = [e for e in element['categories'] if e not in seen and not seen.add(e)]

            # formatowanie podtytułu
            if element['episode']:
                subtitle = 'odc. %d' % element['episode']
                if element['season'] > 1:
                    subtitle = 'seria %d, %s' % (element['season'], subtitle)
                if not element['season'] and element['episode_count']:
                    subtitle = '%s/%d' % (subtitle, element['episode_count'])
                if element['subtitle']:
                    subtitle = '%s: %s' % (subtitle, element['subtitle'])
                element['subtitle'] = subtitle
            
            # czyszczenie słownika
            for k in self.cleanups:
                try:
                    del element[k]
                except KeyError:
                    pass
            
            events.append(Event(**element))
        return events
