#!/usr/bin/env python
#-*- coding=utf-8 -*-

import re as regex
from Utility import *
from ProductInfoParser import *

#---------------------------------------------------------------
class QihuiwangPicker:
    """Crawler for qihuiwang.com.

    Walks the site's 4-level product hierarchy
    (level-1 category -> level-2 -> level-3 -> product) and collects
    product/vendor details for every product page found.

    Results are exposed two ways:
      * ``product_tree``  -- nested (name, children) tuples mirroring the site
      * ``product_list``  -- flat list of dicts with keys
                             'name', 'url', 'path', 'detail'
    """

    # Site root URL; relative category links are joined onto this.
    root_url = 'http://www.qihuiwang.com'

    # Level-1 (root page) category patterns.  The site emits two slightly
    # different markup variants, hence two expressions whose results are
    # concatenated in scan_root().  Each captures (href, name).
    root_regex_1 = regex.compile(r'(?m)<h2\s+class="[^"]*">[^<]*<a\s+href="([^"]+)"\s+title="[^"]+"\s+target="[^"]*">([^>]*)</a>')
    root_regex_2 = regex.compile(r'(?m)<h2\s+class="[^"]*">[^<]*<a\s+title="[^"]*"\s+target="[^"]*"\s+href="([^"]*)">([^<]*?)</a>')
    # Level-2/level-3 category link pattern; captures (href, name).
    category_regex = regex.compile(r'(?m)^\s*<li><a\s+href="([^"]+)"\s+rel="\d+">([^<]+)</a>')
    # Product link pattern; captures (href, title).
    # NOTE: the original used [$\s]* between the attributes, which also
    # matched literal '$' characters -- plain whitespace (\s*) is what the
    # HTML actually contains.
    item_regex = regex.compile(r'(?m)^\s*<span class="f14 blue"><a\s+href="([^"]+)"\s*title="([^"]+)"')

    def __init__(self):
        # Nested category tree built by scan_root()/run().
        self.product_tree = []
        # Flat product dicts built by make_product_list().
        self.product_list = []
        # Per-depth regex filters parsed from the path given to set_filter().
        self.filter_list = []

    def set_filter(self, path):
        """Restrict the crawl to one branch of the category tree.

        ``path`` is a '/'-separated list of per-level patterns, e.g.:
            '建材/门/木门'  -- crawl only 建材 -> 门 -> 木门 products
            '建材/门'       -- crawl everything under 建材 -> 门
            '建材'          -- crawl everything under 建材
            ''              -- crawl everything
        Each segment is a regular expression, so
            '建材/门|窗'    -- crawl both 建材 -> 门 and 建材 -> 窗
        """
        self.filter_list = path.split('/')

    def get_filter(self, level):
        """Return the filter pattern for tree depth ``level``, or None
        when no filter was configured for that depth."""
        if level < len(self.filter_list):
            return self.filter_list[level]
        return None

    def filter_item_list(self, level, item_list):
        """Filter (href, name) pairs by the pattern configured for ``level``.

        Always returns a list.  (The original returned the lazy ``filter``
        object on Python 3, which broke the ``len()`` calls in the scan_*
        methods.)  Matching uses ``regex.match`` against the stripped name,
        i.e. the pattern is anchored at the start of the name.
        """
        item_filter = self.get_filter(level)
        if item_filter is None:
            return list(item_list)
        return [item for item in item_list
                if regex.match(item_filter, item[1].strip()) is not None]

    def fetch_product_info(self, url):
        """Open a product page and return the parsed product/vendor info
        dict, or None when the page could not be fetched."""
        page_context = get_webpage(url)
        if page_context is None:
            return None
        parser = ProductInfoParser()
        parser.feed(page_context)
        return parser.get()

    def scan_last_level(self, url, level4_list):
        """Scan a level-3 category page and fetch every product it lists.

        Appends (name, href, product_info) tuples to ``level4_list``;
        ``product_info`` may be None when the product page fetch failed.
        """
        page_context = get_webpage(url)
        if page_context is None:
            return
        item_list = self.filter_item_list(3, self.item_regex.findall(page_context))
        list_num = len(item_list)
        for curr_index, (href, name) in enumerate(item_list, start=1):
            href = href.strip()
            name = name.strip()
            print("      (%d/%d): %s" % (curr_index, list_num, name))
            # Product links are already absolute; no root_url prefix here.
            level4_list.append((name, href, self.fetch_product_info(href)))

    def scan_3rd_level(self, url, level3_list):
        """Scan a level-2 page for level-3 categories (e.g. 木门, 玻璃门,
        卷帘门) and recurse into each one."""
        page_context = get_webpage(url)
        if page_context is None:
            return
        item_list = self.filter_item_list(2, self.category_regex.findall(page_context))
        list_num = len(item_list)
        for curr_index, (href, name) in enumerate(item_list, start=1):
            name = name.strip()
            print("    (%d/%d): %s" % (curr_index, list_num, name))
            # Category links are site-relative; join onto the root URL.
            href = self.root_url + href.strip()
            level4_list = []
            level3_list.append((name, level4_list))
            self.scan_last_level(href, level4_list)

    def scan_2nd_level(self, url, level2_list):
        """Scan a level-1 page for level-2 categories (e.g. 门, 窗, 地板)
        and recurse into each one."""
        page_context = get_webpage(url)
        if page_context is None:
            return
        item_list = self.filter_item_list(1, self.category_regex.findall(page_context))
        list_num = len(item_list)
        for curr_index, (href, name) in enumerate(item_list, start=1):
            name = name.strip()
            print("  (%d/%d): %s" % (curr_index, list_num, name))
            href = self.root_url + href.strip()
            level3_list = []
            level2_list.append((name, level3_list))
            self.scan_3rd_level(href, level3_list)

    def scan_root(self):
        """Scan the site root for level-1 categories (e.g. 建材) and crawl
        each one, rebuilding ``product_tree`` from scratch."""
        self.product_tree = []
        page_context = get_webpage(self.root_url)
        if page_context is None:
            return
        # The root page mixes two markup variants, so combine the matches
        # of both patterns.
        item_list = (self.root_regex_1.findall(page_context)
                     + self.root_regex_2.findall(page_context))
        item_list = self.filter_item_list(0, item_list)
        list_num = len(item_list)
        for curr_index, (href, name) in enumerate(item_list, start=1):
            name = name.strip()
            print("(%d/%d): %s" % (curr_index, list_num, name))
            level2_list = []
            self.product_tree.append((name, level2_list))
            # Level-1 links are absolute on the root page.
            self.scan_2nd_level(href.strip(), level2_list)

    def make_product_list(self):
        """Flatten ``product_tree`` into ``product_list``: one dict per
        product with keys 'name', 'url', 'path' ('L1/L2/L3'), 'detail'."""
        self.product_list = []
        for level1_name, level2_list in self.product_tree:
            for level2_name, level3_list in level2_list:
                for level3_name, level4_list in level3_list:
                    path = '%s/%s/%s' % (level1_name, level2_name, level3_name)
                    for name, url, detail in level4_list:
                        self.product_list.append({
                            'name': name,
                            'url': url,
                            'path': path,
                            'detail': detail,
                        })

    def run(self):
        """Crawl the whole site.  Ctrl-C stops the crawl early; whatever
        was already collected is still flattened into ``product_list``."""
        print('开始从网站中抓取信息')
        try:
            self.scan_root()
            print('完成信息抓取')
        except KeyboardInterrupt:
            print('终止信息抓取')
        finally:
            self.make_product_list()

    def get_tree_data(self):
        """Return the raw nested crawl result."""
        return self.product_tree

    def get_list_data(self):
        """Return the flattened product list."""
        return self.product_list

    def print_data(self):
        """Print every collected product and its vendor details."""
        for prod in self.product_list:
            print('[%s] %s' % (prod['path'], prod['name']))
            print(prod['url'])
            product_info = prod['detail']
            if product_info is None:
                # The product page fetch failed; nothing more to show.
                continue
            print('    名称：' + product_info['name'])
            print('    价格：' + product_info['price'])
            print('详细说明：' + product_info['detail'])
            print('发布日期：' + product_info['start-date'])
            print('  有效至：' + product_info['end-date'])
            print('    公司：' + product_info['company'])
            print('  联系人：' + product_info['contact'])
            print('    地址：' + product_info['address'])
            print('    电话：' + product_info['tel-phone'])
            print('    手机：' + product_info['cell-phone'])
            print('    传真：' + product_info['fax'])
            print('    网址：' + product_info['website'])
