#!/usr/bin/env python3
#-*- coding:utf-8 -*-

import os
import sys
import conf
import multiprocessing
from functools import partial

from crawl_ware_info import DocJsValueSniffer

# 为了程序的灵活性，没有检查配置项是否配置，请确保需要的配置项都配置了

def crawl_map(url, query):
    """Crawl a single *query* against *url* and return a ``(query, data)`` pair.

    ``data`` is whatever ``DocJsValueSniffer.sniff_paragraph("wareid")``
    yields for that page — presumably the ranked ware-id list; confirm
    against crawl_ware_info.
    """
    sniffer = DocJsValueSniffer(url, query)
    return (query, sniffer.sniff_paragraph("wareid"))

def topn_same_ware_count(result1, result2):
    """Return the top-N overlap ratio between two ranked lists.

    For each cut-off N in (5, 10, 20, 30, 60, 180, 300, 600) the value is

        |set(result1[:N]) & set(result2[:N])| / min(N, len(result1), len(result2))

    i.e. the fraction of shared items among the first N entries, normalised
    by the shortest effective prefix length.

    Args:
        result1, result2: ranked sequences of hashable items (ware ids).

    Returns:
        dict mapping each cut-off N to a float ratio in [0.0, 1.0].
        If either input is empty the ratio is 0.0 for every cut-off
        (the original code raised ZeroDivisionError in that case).
    """
    ratios = {}
    for n in (5, 10, 20, 30, 60, 180, 300, 600):
        denom = min(n, len(result1), len(result2))
        if denom == 0:
            # Empty input: nothing can overlap, report 0 instead of crashing.
            ratios[n] = 0.0
        else:
            ratios[n] = len(set(result1[:n]) & set(result2[:n])) / denom
    return ratios

def crawl_query_file(query_file):
    """Crawl every query in *query_file* against conf.url1 and conf.url2,
    dump the raw results, and write a top-N overlap diff per sort type.

    Output files go to conf.output_path (falling back to "result"):
      - "<base>_<sort_type>_url1" / "..._url2": raw (query, data) pairs.
      - "<base>_<sort_type>.diff": per-query top-N overlap ratios.

    Args:
        query_file: path to a text file with one query per line.
    """
    # Context manager closes the handle (the original leaked it).
    with open(query_file, encoding="utf-8") as qf:
        querys = [query.strip() for query in qf]
    file_base_name = os.path.basename(query_file)

    def crawl_with_sort_type(url):
        """Crawl all queries once per conf.sort_type.

        Returns {sort_type: [(query, data), ...]}.
        """
        result = {}
        for sort_type in conf.sort_type:
            # BUGFIX: build the full URL from the original base every
            # iteration. The previous code reassigned `url` itself, so each
            # sort_type after the first was appended to a URL that already
            # carried all earlier "&sort_type=..." parameters.
            full_url = url + "&sort_type=" + sort_type
            partial_crawl_map = partial(crawl_map, full_url)

            with multiprocessing.Pool(conf.parallel_num) as pool:
                result[sort_type] = pool.map(partial_crawl_map, querys)
        return result

    result1 = crawl_with_sort_type(conf.url1)
    result2 = crawl_with_sort_type(conf.url2)

    output_path = conf.output_path if conf.output_path else "result"
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(output_path, exist_ok=True)

    def output_to_file(result, suffix):
        """Write one raw-result file per sort type, one (query, data) per line."""
        for sort_type, value in result.items():
            output_file = os.path.join(
                output_path, file_base_name + "_" + sort_type + "_" + suffix)
            with open(output_file, 'w', encoding="utf-8") as f:
                for val in value:
                    f.write("{}\n".format(val))

    output_to_file(result1, "url1")
    output_to_file(result2, "url2")

    for sort_type in conf.sort_type:
        res1_dict = dict(result1[sort_type])
        res2_dict = dict(result2[sort_type])

        diff_file = os.path.join(
            output_path, file_base_name + "_" + sort_type + ".diff")
        with open(diff_file, 'w', encoding="utf-8") as f:
            for query, ware1 in res1_dict.items():
                # Only diff queries that were crawled on both sides and
                # returned non-empty data.
                if query not in res2_dict:
                    continue

                ware2 = res2_dict[query]
                if not ware1 or not ware2:
                    continue

                # Each ware entry is assumed to be a tuple whose first field
                # is the ware id — TODO confirm against DocJsValueSniffer.
                same_ratio = topn_same_ware_count(
                    list(zip(*ware1))[0], list(zip(*ware2))[0])
                f.write("%s\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n"
                        % (query, same_ratio[5], same_ratio[10], same_ratio[20],
                           same_ratio[30], same_ratio[60], same_ratio[180],
                           same_ratio[300], same_ratio[600]))

def crawl_url_file():
    """Crawl using raw URLs from conf.url_file (production URLs usable as-is).

    NOTE(review): currently a stub — the URLs are read and stripped but
    never actually crawled; the original had the same behavior.
    """
    # Context manager closes the handle (the original left it to the GC).
    with open(conf.url_file, encoding="utf-8") as f:
        urls = [url.strip() for url in f]

def main():
    """Entry point.

    When exactly one command-line argument is given, it is taken as the
    query-list file and all sort types are diffed.
    """
    if len(sys.argv) == 2:
        conf.query_file = sys.argv[1]
        conf.sort_type = [
            'sort_default',
            'sort_dredisprice_desc',
            'sort_dredisprice_asc',
            'sort_commentcount_desc',
            'sort_totalsales15_desc',
        ]

    # A configured query_file takes precedence over url_file.
    if getattr(conf, "query_file", None):
        crawl_query_file(conf.query_file)
    elif getattr(conf, "url_file", None):
        crawl_url_file()

# Run only when executed as a script, not when imported as a module.
if __name__=='__main__':
    main()


