# coding=utf-8
import time
from core.request import Request
from core.retryableRequest import RetryableRequest
from core.seleniumRequest import SeleniumRequest
from middlewares.proxyNoneMiddleware import ProxyNoneMiddleware
from middlewares.proxyScraperApiMiddleware import ProxyScraperApiMiddleware
from middlewares.randomHeaderMiddleware import RandomHeaderMiddleware
from middlewares.randomSleepMiddleware import RandomSleepMiddleware
from outputs.parquetFileOutput import ParquetFileOutput
from outputs.jsonFileOutput import JsonFileOutput
from outputs.parquetS3Output import ParquetS3Output
from outputs.redisOutput import RedisOutput
from outputs.s3Output import S3Output
from outputs.s3StampOutput import S3StampOutput
from parsers.xpathParser import XPathParser
import amazonAnsiListingParser

# parquet = ParquetFileOutput(file_name='df2.gzip.parquet', flush_size=1)


class NodeSpider(object):
    """Scrapes one Amazon "node" (category) listing task and fans the parsed
    items out to the configured outputs (S3 + stamp file + Redis in prod).

    One instance corresponds to a (batch_no, task_no) pair; S3 keys are laid
    out as ``<prefix>/<batch_no>/<task_no>.*``.
    """

    def __init__(self, batch_no, task_no,
                 s3_key_prefix="TopProducts",
                 file_base_path=".",
                 redis_host="10.0.58.153"):
        """Build all output sinks for this task.

        :param batch_no: batch identifier, used in S3 key paths and item metadata
        :param task_no: task identifier within the batch, likewise
        :param s3_key_prefix: root prefix for all S3 keys (default "TopProducts")
        :param file_base_path: base directory for local JSON output (debug use)
        :param redis_host: Redis server for the queue/result output
        """
        self.batch_no = batch_no
        self.task_no = task_no
        # flush_size=100: parquet buffer is flushed per 100 ASIN rows
        self.parquetS3Output = ParquetS3Output(
            "%s/%s/%s.gzip.parquet" % (s3_key_prefix, batch_no, task_no),
            flush_size=100)
        self.s3Output = S3Output(s3_key_prefix)
        # flush_size=10: stamp (progress marker) is flushed per 10 pages
        self.s3StampOutput = S3StampOutput(
            "%s/%s/%s.stamp" % (s3_key_prefix, batch_no, task_no),
            flush_size=10)
        self.jsonFileOutput = JsonFileOutput(file_base_path)
        self.redisOutput = RedisOutput(redis_host)

    def request(self, node_no, page_no, url, parse_method):
        """Fetch *url*, parse it, tag the item with task metadata and write it out.

        The HTTP request goes through the retry wrapper (re-issued when the
        robot-check detector fires) and the header/proxy/sleep middlewares.

        :param node_no: Amazon node (category) id, stored in the item metadata
        :param page_no: listing page number, stored in the item metadata
        :param url: page URL to fetch
        :param parse_method: parse callback handed to XPathParser
        :return: the parsed, annotated item (after outputs ran)
        """
        response = RetryableRequest(Request(url), retry_check=amazonAnsiListingParser.robot_check) \
            .middleware(RandomHeaderMiddleware()) \
            .middleware(ProxyScraperApiMiddleware()) \
            .middleware(RandomSleepMiddleware()) \
            .request()
        parser = XPathParser(parse_method)
        item = parser.parse(response)
        item.content["batch_no"] = self.batch_no
        item.content["task_no"] = self.task_no
        item.content["node_no"] = node_no
        item.content["page_no"] = page_no
        # Explicit %H:%M:%S instead of locale-dependent %X so the timestamp
        # format is identical on every host.
        item.content["scrape_date"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

        # Production output chain; for local debugging swap in
        # self.jsonFileOutput (and/or self.parquetS3Output) instead.
        item.output(self.s3Output).output(self.s3StampOutput).output(self.redisOutput).do()
        return item

