# -*- coding: utf-8 -*-

import utils
import re
from datetime import timedelta

class ProleagueMatchesScraper(object):
    """Scrapes a proleague match-listing page for the league name and the
    (up to two) matches it contains."""

    # dive_into_soup index path to the element holding the league name text.
    LEAGUE_INFO_PATH = [0,1,3,1,1,1,0]

    # Paths to the two match tables on the page.
    FIRST_MATCH_PATH = [0,3,1,1,3,1,5]
    SECOND_MATCH_PATH = [0,3,1,5]

    MATCH_PATHS = [FIRST_MATCH_PATH, SECOND_MATCH_PATH]

    def __init__(self, soup):
        self.soup = soup

    def scrape(self):
        """Return dict(league=..., matches=[...]), or None when the page
        names no league (and therefore holds no matches)."""
        league_text = utils.dive_into_soup(self.soup, self.LEAGUE_INFO_PATH)

        # Take everything before the '&nbsp;' marker.  partition() keeps the
        # whole text when the marker is absent, unlike find(), whose -1 would
        # silently slice off the league name's last character.
        league = league_text.partition('&nbsp;')[0].strip()

        # No league, no matches, no reason to continue...
        if not league:
            return None

        matches = []

        for match_path in self.MATCH_PATHS:
            try:
                match_soup = utils.dive_into_soup(self.soup, match_path)
            except utils.DiveError:
                # Match tables appear in order; the first missing one means
                # there are no further matches on this page.
                break

            # skip_first=False: we already dived down to the match table.
            match_scraper = ProleagueMatchScraper(match_soup, skip_first=False)
            match_results = match_scraper.scrape()

            matches.append(match_results)

        return dict(league=league, matches=matches)

class ProleagueMatchScraper(object):
    """Scrapes a single proleague match table.

    Produces the two team names, the overall result (winner and scores),
    the stage name, and a list of per-game dicts: player ids, map id,
    winner, playtime and both players' start locations.

    The *_PATH class constants are child-index paths walked by
    utils.dive_into_soup; the *_ROW / *_COLUMN constants index directly
    into a soup element's .contents list.
    """

    # Path from the match root to the stage/schedule label.
    STAGE_INFO_PATH = [3,1,1,1,1,0]

    # Paths start from their respective rows and columns
    PLAYER_PATH = [0, 0]
    MAP_PATH = [0, 0]
    MATCH_RESULT_PATH = [1,1]
    TEAM_ONE_PATH = [1, 1, 7, 0, 0]
    TEAM_TWO_PATH = [0, 0]
    VICTORY_ICON_PATH = [0]

    PLAYTIME_PATH = [0,1,1,1,5,1,0]
    PLAYER_ONE_START_LOCATION_PATH = [0,1,1,1,1,1,0]
    PLAYER_TWO_START_LOCATION_PATH = [0,1,1,1,9,1]

    # Row with teams and results (if match is complete)
    TEAM_ROW = 7
    TEAM_ONE_COLUMN = 5
    RESULT_COLUMN = 9
    TEAM_TWO_COLUMN = 13

    # Rows with players, maps and results for games (if match is complete)
    GAME_START_ROW = 13
    GAME_ROW_SKIP = 6

    PLAYER_ONE_COLUMN = 5
    PLAYER_ONE_VICTORY_COLUMN = 7
    MAP_COLUMN = 11
    PLAYER_TWO_VICTORY_COLUMN = 15
    PLAYER_TWO_COLUMN = 17

    # Upper bound on games scraped per match (presumably a best-of-7
    # format -- TODO confirm).
    MAX_NUMBER_OF_GAMES = 7


    # Numeric map id out of a 'mapC=map<id>' URL parameter.
    MAP_ID_REGEX = re.compile('mapC=map(\d{3,5})')
    # Numeric player id out of a 'pCode=<id>' URL parameter.
    PLAYER_ID_REGEX = re.compile('pCode=(\d{2,5})')
    # '<strong><winner> WIN(<w>:<l>)</strong>' -- \uc2b9 is Korean for 'win'.
    MATCH_RESULT_REGEX = re.compile(u'<strong>(.*) \uc2b9\((\d):(\d)\)</strong>')
    # '<minutes>MIN <seconds>SEC' (Korean units); the character class also
    # tolerates a '|' separator after the minutes.
    PLAYTIME_REGEX = re.compile(u'(\d{1,2})[분\|]\s*(\d{1,2})초')
    # Clock-position start location, e.g. '7시' ("7 o'clock").
    START_LOCATION_REGEX = re.compile(u'(\d{1,2})시')

    def __init__(self, soup, skip_first=True):
        self.soup = soup

        # Some callers hand us a wrapper element whose first child is the
        # actual match table; ProleagueMatchesScraper passes
        # skip_first=False because it has already dived to the table.
        if skip_first:
            self.soup = self.soup.contents[0]

    def scrape(self):
        """Scrape the whole match.

        Returns a dict with keys team_one, team_two, winner, winner_score,
        loser_score, stage and games (a list of per-game dicts).
        """
        match = dict(games=[])
        match.update(self._scrape_teams())
        match.update(self._scrape_stage_info())

        # Games occupy consecutive row slots; the first missing one (None)
        # ends the match.
        for game_number in range(1,self.MAX_NUMBER_OF_GAMES + 1):
            game_info = self._scrape_game(game_number)

            if game_info is None:
                break

            match['games'].append(game_info)

        return match

    def _scrape_teams(self):
        """Scrape both team names and the overall result.

        Returns a dict with team_one, team_two, winner, winner_score and
        loser_score; the last three are None when no result is posted yet.
        """
        row_soup = self.soup.contents[self.TEAM_ROW]

        # Find the teams
        team_one = utils.dive_into_soup(row_soup,
                [self.TEAM_ONE_COLUMN] + self.TEAM_ONE_PATH)
        team_two = utils.dive_into_soup(row_soup,
                [self.TEAM_TWO_COLUMN] + self.TEAM_TWO_PATH)

        match = {
            'team_one' : unicode(team_one),
            'team_two' : unicode(team_two),
        }

        # Find the results, or no results
        result_text = utils.dive_into_soup(row_soup,
                [self.RESULT_COLUMN] + self.MATCH_RESULT_PATH)

        results = self.MATCH_RESULT_REGEX.findall(unicode(result_text))
        if results:
            results = results[0] # Get first match
            match['winner'] = results[0]
            match['winner_score'] = int(results[1])
            match['loser_score'] = int(results[2])
        else:
            match['winner'] = None
            match['winner_score'] = None
            match['loser_score'] = None

        return match

    def _scrape_stage_info(self):
        """Return dict(stage=...) with the stage name, stripped of its
        '&nbsp;일정명 : ' ('schedule name') label prefix."""
        stage_info = utils.dive_into_soup(self.soup, self.STAGE_INFO_PATH)
        stage_info = unicode(stage_info)
        stage_info = stage_info.replace(u'&nbsp;일정명 : ', '').strip()
        return dict(stage=unicode(stage_info))

    def _scrape_game(self, game_number):
        """Scrape game `game_number` (1-based).

        Returns a dict with player_one, player_two, map, winner, playtime
        and both start locations, or None when the game's row does not
        exist (the game was never played).
        """
        row = self.GAME_START_ROW + \
            (self.GAME_ROW_SKIP * (game_number-1))

        try:
            row_soup = self.soup.contents[row]
        except IndexError:
            # The row does not exist, so the game was never played.
            return None

        player_one = \
            self._scrape_player_url(row_soup, self.PLAYER_ONE_COLUMN)
        player_two = \
            self._scrape_player_url(row_soup, self.PLAYER_TWO_COLUMN)

        info = {
            'player_one' : player_one,
            'player_two' : player_two,
            'map' : self._scrape_map_url(row_soup),
        }

        winner = self._scrape_winner(row_soup)
        if winner is None:
            info['winner'] = None
        else:
            # _scrape_winner returns 0 or 1, used here as a list index.
            info['winner'] = [player_one,player_two][winner]

        # Move to next "row" to get more game info (+2 to skip blank)
        row = row + 2
        row_soup = self.soup.contents[row]

        playtime, player_one_start, player_two_start = self._scrape_game_info(row_soup)
        info['playtime'] = playtime
        info['player_one_start_location'] = player_one_start
        info['player_two_start_location'] = player_two_start

        return info


    def _scrape_game_info(self, row_soup):
        """Return (playtime, player_one_start, player_two_start) from a
        game's detail row; playtime is a timedelta, or None when the site
        shows its '00|00' placeholder."""
        # Playtime
        playtime_text = utils.dive_into_soup(row_soup, self.PLAYTIME_PATH)
        if '00|00' in playtime_text:
            playtime = None
        else:
            minutes, seconds = self.PLAYTIME_REGEX.findall(playtime_text)[0]
            playtime = timedelta(minutes=int(minutes), seconds=int(seconds))

        player_one_start = self._scrape_start_location(
                row_soup, self.PLAYER_ONE_START_LOCATION_PATH)
        player_two_start = self._scrape_start_location(
                row_soup, self.PLAYER_TWO_START_LOCATION_PATH)

        return playtime, player_one_start, player_two_start

    def _scrape_start_location(self, soup, path):
        """Return the start location as an integer clock position (e.g. 7
        for '7시'), or None when no location is listed."""
        text = unicode(utils.dive_into_soup(soup, path))
        start = self.START_LOCATION_REGEX.findall(text)


        if start:
            return int(start[0])
        else:
            return None


    def _scrape_map_url(self, row_soup):
        """Return the numeric map id parsed from the map link's href, or
        None when there is no link or it carries no map id."""
        path = [self.MAP_COLUMN] + self.MAP_PATH
        element = utils.dive_into_soup(row_soup, path)

        # Plain text (no tag attributes) means there is no map link at all.
        if not hasattr(element, 'attrs'):
            return None

        url = dict(element.attrs)['href']
        map_ids_found = self.MAP_ID_REGEX.findall(url)

        if map_ids_found:
            return int(map_ids_found[0])
        else:
            return None

    def _scrape_player_url(self, row_soup, column):
        """Return the numeric player id parsed from the player link in the
        given column, or None when there is no link there."""
        path = [column] + self.PLAYER_PATH
        element = utils.dive_into_soup(row_soup, path)

        if hasattr(element, 'attrs'):
            url = dict(element.attrs)['href']
            return int(self.PLAYER_ID_REGEX.findall(url)[0])
        else:
            return None

    def _scrape_winner(self, row_soup):
        """
        Returns 0 if player one won.
        Returns 1 if player two won.
        Returns None if unknown or no game was played.
        """
        path = [self.PLAYER_ONE_VICTORY_COLUMN] + self.VICTORY_ICON_PATH
        element = utils.dive_into_soup(row_soup, path)

        # No results
        if str(element) == '&nbsp;':
            return None

        # The results are an image: 'succ.gif' next to player one means
        # player one won; anything else means player two won.
        url = dict(element.attrs)['src']
        if url.find('succ.gif') > -1:
            return 0
        else:
            return 1

class MapScraper(object):
    """Scrapes a map detail page for its name, version, dimensions and
    image filename."""

    # dive_into_soup index paths to the individual map attributes.
    NAME_PATH = [0, 1, 3, 1, 1, 1, 1, 3, 7, 0]
    VERSION_PATH = [0, 1, 3, 1, 1, 1, 1, 5, 7, 0]
    SIZE_PATH = [0, 1, 3, 1, 1, 1, 1, 7, 7, 0]
    IMAGE_PATH = [0,1,1,1,1,1,1,1,0]

    # Dimensions appear as '<w>×<h>' (\xd7 is the multiplication sign).
    SIZE_REGEX = re.compile(u'(\d{2,3})\xd7(\d{2,3})')
    IMAGE_REGEX = re.compile(u'/board/file/map/img/(.*).jpg')

    def __init__(self, soup):
        self.soup = soup

    def scrape(self):
        """Return a dict with name, version, width, height and
        image_filename, or None when the page is blank (not a real map)."""
        # If the version is not there, then we have a blank page, so it's
        # not a legit map.
        pre_version_elem_path = self.VERSION_PATH[:-1]
        pre_version_elem = utils.dive_into_soup(self.soup, pre_version_elem_path)
        if len(pre_version_elem.contents) == 0:
            return None

        name_elem = utils.dive_into_soup(self.soup, self.NAME_PATH)
        version_elem = utils.dive_into_soup(self.soup, self.VERSION_PATH)
        size_elem = utils.dive_into_soup(self.soup, self.SIZE_PATH)
        image_elem = utils.dive_into_soup(self.soup, self.IMAGE_PATH)

        width, height = self.SIZE_REGEX.findall(size_elem)[0]
        image_filename = self.IMAGE_REGEX.findall(unicode(image_elem))[0] + '.jpg'

        # Remove extraneous nickname (ex: 'Destination(Destination)').
        # partition() keeps the whole name when there is no '(' -- find()
        # would return -1 and chop off the name's final character.
        name = unicode(name_elem).partition('(')[0]

        return {
            'name' : name,
            'version' : unicode(version_elem),
            'width' : int(width),
            'height' : int(height),
            'image_filename' : image_filename
        }

class PlayerScraper(object):
    """Scrapes a player detail page for the player's name and race."""

    # dive_into_soup index paths to the name and race elements.
    NAME_PATH = [0, 1, 3, 1, 3, 1, 0, 15, 1, 0, 9, 7]
    RACE_PATH = [0, 1, 3, 1, 3, 1, 0, 15, 1, 0, 7, 7]

    def __init__(self, soup):
        self.soup = soup

    def scrape(self):
        """Return dict(name=..., race=..., aliases=[]), or None when the
        page is blank (not a legit player); race is None when absent."""
        name_elem = utils.dive_into_soup(self.soup, self.NAME_PATH)

        # An empty name element means a blank page, so there is no player
        # to scrape here.
        if not name_elem.contents:
            return None

        race_elem = utils.dive_into_soup(self.soup, self.RACE_PATH)
        race = (unicode(race_elem.contents[0]).lower()
                if race_elem.contents else None)

        return {
            'name' : unicode(name_elem.contents[0]).strip(),
            'race' : race,
            'aliases' : []
        }
