# coding=utf-8
# Python 2 entry-point script: scrapes Amazon ANSI listing pages for a batch
# of category nodes described by task*.json files (see usage example below).
import fnmatch
import sys
# Make the project root importable when this script is run from its own folder.
# sys.path.append('.')
sys.path.append('../')
# sys.path.append('../core')
# sys.path.append('../manager')
# sys.path.append('../middlewares')
# sys.path.append('../outputs')
# sys.path.append('../parsers')
# sys.path.append('../pipelines')
# sys.path.append('../tasks')
# HACK(Python 2 only): reload() restores sys.setdefaultencoding, which
# site.py deletes at startup, then the process-wide default encoding is
# forced to UTF-8 so implicit str<->unicode conversions don't raise.
# This has no Python 3 equivalent and would need removal when porting.
reload(sys)
sys.setdefaultencoding('utf8')

from spiders.nodeSpider import NodeSpider
import codecs
import json
import os
import re

import datetime

# import nodeSpider
from core.m_logging import logger
from manager.fileState import FileState
from spiders import amazonAnsiListingParser, util

# page_no = 20
# text = "https://www.amazon.com/s/ref=lp_289742_pg_2?rh=n%3A1055398%2Cn%3A%211063498%2Cn%3A284507%2Cn%3A289913%2Cn%3A289742&page=2&ie=UTF8&qid=1552525738"
# pattern = re.compile(r'lp_(\d+)+_pg_(\d+)', re.IGNORECASE)
# result = re.sub(pattern, lambda m: ('lp_%s_pg_%s' % (m.group(1), page_no)), text)
#
# pattern = re.compile(r'page=(\d+)', re.IGNORECASE)
# result = re.sub(pattern, lambda m: ('page=%s' % page_no), result)

### test end

# nohup python amazonAnsiListingSpider.py "task1" "/home/ec2-user/tasks1" "batch20190420" > 1.out 2>&1 &

# Command-line configuration: <task_no> <task_dir> <batch_no>.
# When fewer than three arguments are supplied, fall back to defaults so the
# script can still be launched bare for a local run.
if len(sys.argv) < 4:
    task_no = "default_task"
    task_dir = "../tasks"
    batch_no = "default_batch"
else:
    task_no, task_dir, batch_no = sys.argv[1:4]

# Gather every node number to scrape by merging the "list" arrays of all
# task*.json files found directly inside the task directory.
node_no_list = []
logger.info("task_dir is %s..." % task_dir)

task_files = fnmatch.filter(os.listdir(task_dir), 'task*.json')
logger.info("loading tasks %s" % task_files)
for task_file in task_files:
    task_path = "%s/%s" % (task_dir, task_file)
    with codecs.open(task_path, mode='r', encoding='utf-8') as fp:
        task_json = json.load(fp)
        node_no_list.extend(list(task_json["list"]))

# File-backed bookkeeping shared across spider processes:
#   undone_node.json — nodes that errored out ("failed" counter, retried later
#                      by another spider run),
#   done_node.json   — nodes fully scraped ("success" counter).
undone_template = {"status": "0", "spider_no": 0, "failed": 0, "list": [], "id": 1}
done_template = {"status": "0", "spider_no": 0, "success": 0, "list": [], "id": 1}

undone_state = FileState("%s/undone_node.json" % task_dir, undone_template)
# undone_state.init_state()
undone_state.load_state()

done_state = FileState("%s/done_node.json" % task_dir, done_template)
# done_state.init_state()
done_state.load_state()

skipped_count = 0  # nodes skipped because they are already in done/undone lists
nodeSpider = NodeSpider(batch_no, task_no)

def _mark_node_done(node_no):
    """Record *node_no* as successfully scraped in the shared done-state file.

    Extracted helper: the same bookkeeping was previously duplicated for the
    single-page and the multi-page completion paths.
    """
    logger.info("completed scraping node %s..." % node_no)
    done_state.content["status"] = "10"
    done_state.content["list"].append(node_no)
    done_state.content["success"] = int(done_state.content["success"]) + 1
    done_state.save_state()
    logger.info("node %s put in done state..." % node_no)


for current_node_no in node_no_list:
    if current_node_no in done_state.content["list"]:  # skip done nodes
        skipped_count += 1
        logger.info("skipped in done_node %s" % current_node_no)
        continue

    if current_node_no in undone_state.content["list"]:  # skip failed nodes, it shall be processed under another spider
        skipped_count += 1
        logger.info("skipped in undone_node %s" % current_node_no)
        continue

    # Per-node state records which page numbers failed, for later retries.
    node_state_dir = "node"
    node_state_template = {"node_no": current_node_no, "failed_pages": []}
    node_state = FileState("%s/%s/node_%s.json" % (task_dir, node_state_dir, current_node_no), node_state_template)
    node_state.load_state()

    current_page_no = 1
    start_url = 'https://www.amazon.com/b/?node=%s' % current_node_no
    max_page = 20

    logger.info("starting to process node_no: %s" % current_node_no)

    try:
        # Scrape the index page first; it reports the total page count and
        # a template URL for the subsequent pages.
        item = nodeSpider.request(current_node_no, current_page_no, start_url, amazonAnsiListingParser.parse_index)
        if item.content["totalPage"].isdigit():
            total_page = int(item.content["totalPage"])
        else:
            total_page = 1

        # NOTE(review): integer division on Python 2 — when total_page/5
        # exceeds 20 the node is capped at 20 pages, otherwise every page is
        # scraped. Looks intentional (rough size threshold) — confirm.
        max_page = total_page if (total_page / 5) <= max_page else max_page
        logger.info("max page is %s..." % max_page)
        if max_page <= 0:
            raise ValueError("max page error, value is %s..." % max_page)

        if max_page <= 1:
            # Single-page node: the index request above already covered it.
            _mark_node_done(current_node_no)
            continue

        page_url_template = item.content["nextPageUrl"][0]

        # Pages 2..max_page: fetch each listing page, remembering failures.
        for p_no in range(2, max_page + 1):
            current_page_no = p_no
            page_url = 'https://www.amazon.com' + util.generate_page_url(page_url_template, p_no)
            logger.info("processing node no %s, page no %s..." % (current_node_no, current_page_no))

            item = nodeSpider.request(current_node_no, current_page_no, page_url, amazonAnsiListingParser.parse_paging)
            if not item.content["success"]:
                node_state.content["failed_pages"].append(current_page_no)
                node_state.save_state()

        _mark_node_done(current_node_no)
    except Exception as e:  # FIX: Python-3-compatible syntax (was "except Exception, e")
        logger.error("unexpected error(exception) occurred...")
        import traceback

        node_state.content["failed_pages"].append(current_page_no)
        node_state.save_state()

        # Park the node in the undone list so a later spider run retries it.
        if current_node_no not in undone_state.content["list"]:
            undone_state.content["list"].append(current_node_no)
            undone_state.content["failed"] = int(undone_state.content["failed"]) + 1
            undone_state.save_state()
            logger.info("node %s put in undone state..." % current_node_no)
        else:
            logger.info("node %s already in undone state, would be skipped this time..." % current_node_no)
        # FIX: e.message is deprecated (PEP 352) and missing on many exception
        # types; str(e) is the portable spelling.
        logger.error("node scrape error occurred, node %s is skipped, last processing page is %s... e.message: %s" % (current_node_no, current_page_no, str(e)))
        logger.error('traceback.format_exc():\n%s' % traceback.format_exc())
logger.info("process is completed...")




