#coding: utf-8

import os
import hashlib
import traceback
from bs4 import BeautifulSoup

import config
from utils.gzip_util import extract
from utils.xml_util import parse_xml_to_dict, filter_xml_escape_chars
from dao.data_feed import DataFeed
from dao.data_feed_dao import DataFeedDAO
from utils.http_util import HttpUtil
from utils.logger_factory import LOG_ERR, LOG_INFO

import sys
# NOTE(review): reload()/sys.setdefaultencoding() is a well-known Python 2
# anti-pattern — it globally masks str/unicode mixing bugs instead of fixing
# them, and both calls are gone in Python 3. Prefer explicit encode/decode
# at I/O boundaries.
reload(sys)
sys.setdefaultencoding("UTF8")

# Expected number of <td> cells in each data row of the remote feed-list table.
WEBPAGE_TABLE_COLUMNS = 5

class ConfigLoader(object):
    """Base loader: filters raw feed info dicts and builds DataFeed objects.

    Subclasses provide a ``load()`` that produces the raw ``all_infos``
    mapping (file_name -> info dict) and post-processes the filtered feeds.
    """

    @classmethod
    def filter(cls, all_infos):
        """Keep only feeds matching a configured name prefix that are new
        or changed compared to what the DAO already has.

        ``all_infos`` is mutated in place (non-matching names removed).
        Returns the list of DataFeed objects that still need processing.
        """
        # Iterate a snapshot of the keys so deletion is safe; list() keeps
        # this correct on Python 3, where keys() is a live view.
        for file_name in list(all_infos.keys()):
            # any() replaces the original flag loop, which used `continue`
            # where `break` was intended and kept scanning after a match.
            if not any(prefix in file_name
                       for prefix in config.AMAZON_BOOKS_NAME_PREFIX):
                del all_infos[file_name]

        dao = DataFeedDAO()
        data_feeds = []
        try:
            # .items() works on both Python 2 and 3 (iteritems is Py2-only).
            for file_name, info in all_infos.items():
                data_feed = cls.build_data_feed(info)

                exists_data_feed = dao.select(file_name)
                # Keep the feed when it is unknown to the DAO or differs
                # from the stored version.
                if exists_data_feed is None or not exists_data_feed.equals(data_feed):
                    data_feeds.append(data_feed)
        finally:
            # The original skipped close() if select()/equals() raised,
            # leaking the DAO connection.
            dao.close()
        return data_feeds

    @classmethod
    def build_data_feed(cls, info_map):
        """Construct a DataFeed from one parsed info dict."""
        data_feed = DataFeed()
        data_feed._file_name = info_map.get("file_name")
        data_feed._last_modified = info_map.get("last_modified")
        data_feed._md5_value = info_map.get("md5_value")
        data_feed._path = info_map.get("path")
        # int() suffices: Python 2 auto-promotes to long on overflow, and
        # the Py2-only long() breaks under Python 3.
        data_feed._size = int(info_map.get("size"))
        return data_feed

class FileSystemConfigLoader(ConfigLoader):
    """Loads data feeds described by a local XML config file on disk."""

    @classmethod
    def load(cls):
        """Parse the local config, filter feeds, and extract each .gz file.

        Returns a list of DataFeed objects whose ``_path`` points at the
        extracted, escape-filtered XML file. Returns an empty list when the
        config file is missing (the original returned None here while the
        success path returned a list, forcing callers to guard before
        iterating).
        """
        if not os.path.exists(config.DATA_FEED_LOCAL_CONFIG):
            LOG_ERR("local data feed config not exists, path: %s" % config.DATA_FEED_LOCAL_CONFIG)
            return []

        all_infos = parse_xml_to_dict(config.DATA_FEED_LOCAL_CONFIG,
                                      "data", "file_name")
        data_feeds = cls.filter(all_infos)
        processed_data_feeds = []

        # Extract each .gz archive, re-point the feed's _path at the
        # extracted file, and collect only the feeds that succeeded.
        for data_feed in data_feeds:
            src_path = data_feed._path
            if not os.path.exists(src_path):
                LOG_ERR("local data_feed file not exists! path: %s" % src_path)
                continue

            dst_path = extract(src_path)
            # Strip characters that would make the XML unparseable.
            filter_xml_escape_chars(dst_path)
            data_feed._path = dst_path
            processed_data_feeds.append(data_feed)

        return processed_data_feeds

class WebPageConfigLoader(ConfigLoader):
    """Loads data feeds by scraping the remote feed-list HTML page."""

    @classmethod
    def get_feed_list_page(cls):
        """Download and return the raw HTML of the feed-list page."""
        feed_list_page = HttpUtil.get_for_object(config.DATA_FEED_LIST_URL,
                        config.DATA_FEED_USERNAME, config.DATA_FEED_PASSWORD)
        return feed_list_page

    @classmethod
    def load(cls):
        """Generator yielding DataFeed objects that were downloaded,
        md5-verified, and extracted to a local file.

        Feeds that fail to download or fail the md5 check are logged and
        skipped rather than aborting the whole run.
        """
        html_page = cls.get_feed_list_page()

        doc = BeautifulSoup(html_page, "lxml")
        table = doc.find("table", {"class": "datatable"})
        table_lines = table.find_all("tr")
        all_infos = cls.parse_html_to_dict(table_lines)
        data_feeds = cls.filter(all_infos)

        for data_feed in data_feeds:
            content = None
            try:
                LOG_INFO("begin download file: %s" % data_feed._path)
                content = HttpUtil.get_for_object(data_feed._path,
                                              config.DATA_FEED_USERNAME,
                                              config.DATA_FEED_PASSWORD)

                # Verify the payload against the md5 published on the page.
                md5_generator = hashlib.md5()
                md5_generator.update(content)
                md5_value = md5_generator.hexdigest()
                if md5_value != data_feed._md5_value:
                    raise Exception("md5 check error! expected md5: %s, downloaded file's md5 %s" %
                                    (data_feed._md5_value, md5_value))
                LOG_INFO("downloaded file: %s" % data_feed._path)
            except Exception:
                # The original's ``except Exception, e`` is Python-2-only
                # syntax and never used ``e``; format_exc() has the detail.
                LOG_ERR("failed download file %s, error: %s" % (data_feed._path, traceback.format_exc()))
                continue

            cls.extract_data_to_file(content, data_feed)
            yield data_feed

    @classmethod
    def extract_data_to_file(cls, content, data_feed):
        """Write ``content`` to a temporary .gz file, extract it, filter
        illegal XML escape chars, and re-point ``data_feed._path`` at the
        extracted file."""
        if not os.path.exists(config.DATA_FEED_LOCAL_TMP_DIR):
            os.mkdir(config.DATA_FEED_LOCAL_TMP_DIR)

        path = "%s/%s" % (config.DATA_FEED_LOCAL_TMP_DIR, data_feed._file_name)
        # "wb" already truncates an existing file, so the original's
        # explicit truncate(0) was redundant; the with-statement guarantees
        # the handle is closed even if write() raises.
        with open(path, "wb") as gz_file:
            gz_file.write(content)

        des_file_name = extract(path)
        # Strip characters that would make the XML unparseable.
        filter_xml_escape_chars(des_file_name)
        data_feed._path = des_file_name

    @classmethod
    def parse_html_to_dict(cls, table_lines):
        """Parse the data rows of the feed table into a
        {file_name: info dict} mapping; the first row is the header and is
        skipped (the original's header handling was a dead flag + empty loop)."""
        all_infos = {}
        for table_line in table_lines[1:]:
            info = cls.parse_table_line(table_line)
            if info is None:
                continue
            all_infos[info.get("file_name")] = info
        return all_infos

    @classmethod
    def parse_table_line(cls, table_line):
        """Extract one feed's info dict from a <tr> element.

        Returns None (after logging) when the row is malformed or is not a
        "delta" (incremental) feed."""
        columns = table_line.find_all("td")
        if len(columns) != WEBPAGE_TABLE_COLUMNS:
            LOG_ERR("failed get info from source page, table line info: %s" % table_line)
            return None

        file_name = columns[0].text
        # Only incremental ("delta") feeds are of interest; skip silently.
        if "delta" not in file_name:
            return None

        last_modified = columns[1].text
        if last_modified is None:
            LOG_ERR("failed get last_modified, table line info: %s" % table_line)
            return None

        md5_value = columns[2].text
        if md5_value is None:
            LOG_ERR("failed get md5_value, table line info: %s" % table_line)
            return None

        size = columns[3].text
        if size is None or not size.isdigit():
            LOG_ERR("failed get size, table line info: %s" % table_line)
            return None

        hlink_tag = columns[4].find("a")
        # Guard the missing-<a> case: the original called hlink_tag.get()
        # unconditionally and raised AttributeError on None instead of
        # logging and skipping the row.
        href = hlink_tag.get("href") if hlink_tag is not None else None
        if not href:
            LOG_ERR("failed get download url, table line info: %s" % table_line)
            return None

        return {
            "file_name": file_name.strip(),
            "last_modified": last_modified.strip(),
            # Servers may wrap the md5 in double quotes; strip them too.
            "md5_value": md5_value.strip().strip('"'),
            "size": size.strip(),
            "path": config.DATA_FEED_ROOT_URL + href,
        }
