# coding=utf-8
import codecs
import json
import os
from array import array

from core.m_logging import logger
from core.retryableRequest import RetryableRequest
from core.seleniumRequest import SeleniumRequest
from manager.state import State
from manager.task import Task
from middlewares.randomHeaderMiddleware import RandomHeaderMiddleware
from middlewares.proxyScraperApiMiddleware import ProxyScraperApiMiddleware
from middlewares.proxyNoneMiddleware import ProxyNoneMiddleware
from middlewares.randomSleepMiddleware import RandomSleepMiddleware

from parsers.xpathParser import XPathParser
from outputs.jsonFileOutput import JsonFileOutput

import spiders.amazonAnsiListingParser

# Leftover sanity check: confirm dict membership lookups behave as expected.
a = {"abc": 123}

probe_key = "abc"
if probe_key in a:
    print('exists')


# Smoke test: fetch the Baidu homepage via Selenium with a randomized
# header middleware, then dump the raw response object.
smoke_request = SeleniumRequest("https://www.baidu.com")
smoke_request = smoke_request.middleware(RandomHeaderMiddleware())
response = smoke_request.request()
print(response)


# Collect every node number from the JSON task files under ``tasks/``.
# Each task file is assumed to hold a payload shaped like {"list": [...]}
# (see the json.load access below) — TODO confirm against the task writer.
node_no_list = []
task_dir = "tasks"
for root, dirs, files in os.walk(task_dir):
    for f in files:
        # BUG FIX: join against ``root`` (the directory os.walk is currently
        # visiting), not the fixed ``task_dir``. The old '%s/%s' % (task_dir, f)
        # produced wrong paths for task files located in subdirectories.
        task_path = os.path.join(root, f)
        with codecs.open(task_path, mode='r', encoding='utf-8') as opened_file:
            r = json.load(opened_file)
            node_no_list += list(r["list"])

# Persistent progress tracking across runs: nodes whose paging stopped early
# are recorded in undone_node.json, fully scraped nodes in done_node.json.
undone_state = State("./undone_node.json")
done_state = State("./done_node.json")

undone_state.load_state()
done_state.load_state()

for current_node_no in node_no_list:
    current_page_no = 1
    start_url = 'https://www.amazon.com/b/?node=%s' % current_node_no
    max_page = 20  # hard cap on listing pages fetched per node

    # Skip nodes already completed in a previous run (resume support).
    if current_node_no in done_state.content["list"]:
        print("skipped %s" % current_node_no)
        continue

    # Fetch and parse the first listing page for this node.
    response = RetryableRequest(start_url, retry_check=spiders.amazonAnsiListingParser.robot_check)\
        .middleware(RandomHeaderMiddleware())\
        .middleware(ProxyNoneMiddleware())\
        .middleware(RandomSleepMiddleware())\
        .request()

    parser = XPathParser(spiders.amazonAnsiListingParser.parse_index)
    item = parser.parse(response)

    item.output(JsonFileOutput('amazon_ansi_%s_%s.json' % (current_node_no, current_page_no))).do()

    # Follow "next page" links, one JSON output file per page.
    for _ in range(max_page):
        has_next_page = "nextPageUrl" in item.content and len(item.content["nextPageUrl"]) > 0

        if not has_next_page:
            # BUG FIX: the log call had two %s placeholders but no arguments,
            # and the loop previously fell through to
            # item.content["nextPageUrl"][0] below, raising KeyError/IndexError.
            # Now the message is fully formatted (lazy %-args) and we break out.
            logger.info("node No.: %s, page No.: %s has no next page; stopping",
                        current_node_no, current_page_no)
            # NOTE(review): original flow recorded the node as undone here and
            # then still marked it done below; behavior kept — confirm intent.
            undone_state.content["list"].append(current_node_no)
            undone_state.save_state()
            break

        current_page_no += 1
        next_page_url = "https://www.amazon.com" + item.content["nextPageUrl"][0]
        response = RetryableRequest(next_page_url, retry_check=spiders.amazonAnsiListingParser.robot_check)\
            .middleware(RandomHeaderMiddleware())\
            .middleware(ProxyNoneMiddleware()) \
            .middleware(RandomSleepMiddleware()) \
            .request()

        parser = XPathParser(spiders.amazonAnsiListingParser.parse_paging)
        item = parser.parse(response)

        item.output(JsonFileOutput('amazon_ansi_%s_%s.json' % (current_node_no, current_page_no))).do()

    # Mark this node as fully scraped and persist immediately so a crash
    # later in the run does not lose progress.
    logger.info("completed scraping node %s..." % current_node_no)
    done_state.content["status"] = "10"
    done_state.content["list"].append(current_node_no)
    done_state.save_state()
