# -*- coding: utf-8 -*-
import random
import subprocess
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import settings
import json
import queue
import threading
import re
import requests
import time
from pymongo import MongoClient
from util.log import create_logger, create_email_logger
from util.tool_func import ThreadSafetyList, ThreadSafetyCounter, create_proxy_session
import socket
HOSTNAME = socket.gethostname()


# Python 2 shipped `cookielib`; Python 3 renamed it to `http.cookiejar`.
# Catch ImportError specifically: a bare `except:` would also hide
# unrelated failures (SyntaxError in a dependency, KeyboardInterrupt, ...).
try:
    import cookielib
except ImportError:
    import http.cookiejar as cookielib

class WorkContext:
    """Process-wide, mutable configuration shared by Worker and Workshop.

    PROXY_URL, COOKIE_FILE and TABLE_NAME are overwritten from
    ``sys.argv`` in the ``__main__`` block before any worker starts.
    """
    # Proxy endpoint passed to create_proxy_session(); empty string means
    # plain `requests.session()` (direct connection).
    PROXY_URL = ''
    LOG_LEVEL = 'INFO'
    # Seconds a worker sleeps after each successfully processed task.
    PER_REQ_INTERVAL = 5
    # Seconds between progress/health checks in Workshop.start_work().
    CHECK_WORK_PROCESS_INTERVAL = 10
    # Roughly every N tasks a worker rebuilds its session (new random cookies).
    SWITCH_SESSION_INTERVAL = 20
    # Seconds a worker backs off after a failed request.
    REQ_FAIL_SLEEP = 10
    # PARTIAL_START_URL = '/voters?include=data%5B*%5D.answer_count%2Carticles_count%2Cfollower_count%2Cgender%2Cis_followed%2Cis_following%2Cbadge%5B%3F(type%3Dbest_answerer)%5D.topics&limit=20&offset=0'
    # PARTIAL_START_URL = '/voters?include=data%5B%2A%5D.answer_count%2Carticles_count%2Cfollower_count%2Cgender%2Cis_followed%2Cis_following%2Cbadge%5B%3F%28type%3Dbest_answerer%29%5D.topics&limit=10&offset=0'
    # MongoDB collection holding the failed-request rows to retry.
    TABLE_NAME = ''
    # Candidate cookie file names (under spiders/zhihu/cookies); one is
    # chosen at random per session.
    COOKIE_FILE = []


class BadRequestError(Exception):
    """A recoverable HTTP request failure.

    Carries enough context (url, status code, error text, response body)
    for the caller to persist the failure and retry it in a later run.
    """

    def __init__(self, url, http_code, error, resp_body, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.url = url
        self.http_code = http_code
        self.error = error
        self.resp_body = resp_body

    def __str__(self):
        # Serialize as JSON so log lines stay machine-parseable.
        details = {
            'url': self.url,
            'http_code': self.http_code,
            'error': self.error,
            'resp_body': self.resp_body,
        }
        return json.dumps(details)


class CriticalRequestError(BadRequestError):
    """A fatal request failure (raised for HTTP status >= 401 elsewhere in
    this module) that makes the worker abort instead of retrying.

    The subclass exists only so callers can distinguish fatal failures
    from retryable ``BadRequestError``s; the previous ``__init__`` merely
    delegated to ``super()`` and has been removed as redundant.
    """


class Worker(threading.Thread):
    """Daemon thread that drains (url, answer_id, oid) tasks from a shared
    queue, re-fetches previously failed Zhihu "voters" pages, and writes
    the outcome back to MongoDB.
    """

    def __init__(self, logger, db, thread_id, task_queue, completed_task_counter, req_counter, per_req_interval=WorkContext.PER_REQ_INTERVAL):
        """
        :param logger: shared logger instance
        :param db: pymongo database handle
        :param thread_id: identifier used only for logging
        :param task_queue: queue.Queue of (url, answer_id, oid) tuples
        :param completed_task_counter: shared counter of finished tasks
        :param req_counter: shared counter of HTTP requests issued
        :param per_req_interval: seconds to sleep after each successful task
        """
        super().__init__()
        self.logger = logger
        self.db = db
        self.logger.info('thread_id %s start', thread_id)
        self.thread_id = thread_id
        self.task_queue = task_queue
        self.per_req_interval = per_req_interval
        self.completed_task_counter = completed_task_counter
        self.req_counter = req_counter
        self.session = self.create_session()

    def do_get(self, url, headers=None, timeout=10):
        """GET ``url`` and return the parsed JSON body.

        Returns None on a 404 (resource gone -> skip silently).
        Raises CriticalRequestError for status >= 401 (auth failure / ban)
        and BadRequestError for any other failure (retryable).
        """
        req_headers = {
            "Host": "www.zhihu.com",
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
        }
        req_headers.update(headers or {})
        resp = None
        self.logger.debug('[request]%s', url)
        try:
            resp = self.session.get(url, headers=req_headers, timeout=timeout)
            if resp.status_code == 404:
                # Deleted/withdrawn content: log and skip.
                self.logger.info('[404]%s', url)
                return
            elif resp.status_code >= 401:
                raise CriticalRequestError(url, resp.status_code, resp.reason, resp.content.decode('utf-8'))
            return resp.json()
        except CriticalRequestError:
            raise
        except Exception as e:
            # FIX: requests.Response.__bool__ is False for 4xx/5xx status
            # codes, so the original `resp if resp else ...` reported
            # 'not_code'/'not_resp' even when an error response existed.
            # Test explicitly against None instead.
            raise BadRequestError(
                url,
                (resp.status_code if resp is not None else 'not_code'),
                str(e),
                (resp.content.decode('utf-8') if resp is not None else 'not_resp'),
            )
        finally:
            self.req_counter.add()

    def create_session(self):
        """Build a fresh session (proxied when PROXY_URL is set) and load a
        randomly chosen cookie file, spreading requests over accounts."""
        if WorkContext.PROXY_URL:
            session = create_proxy_session(WorkContext.PROXY_URL)
        else:
            session = requests.session()
        choice_cookie_file = random.choice(WorkContext.COOKIE_FILE)
        session.cookies = cookielib.LWPCookieJar(filename=os.path.join(settings.ROOT_PATH, 'spiders/zhihu/cookies', choice_cookie_file))
        session.cookies.load(ignore_discard=True, ignore_expires=True)
        return session

    def list_all_cookies_file_path(self):
        """Return absolute paths of all '*cookies' files in the cookie dir.

        NOTE(review): not called anywhere in this file; kept for
        compatibility with possible external callers.
        """
        cookie_file_list = []
        cookies_path = os.path.join(settings.ROOT_PATH, 'spiders/zhihu/cookies')
        for f in os.listdir(cookies_path):
            if os.path.isfile(os.path.join(cookies_path, f)) and f.endswith('cookies'):
                cookie_file_list.append(os.path.join(cookies_path, f))
        return cookie_file_list

    def run(self):
        """Main loop: consume tasks forever, rotating the session roughly
        every SWITCH_SESSION_INTERVAL tasks."""
        thread_req_num = 0
        while True:
            try:
                task = self.task_queue.get_nowait()
                self.scrap(*task)
                self.task_queue.task_done()
                thread_req_num += 1
                # Randomized offset so all threads don't rotate in lockstep.
                if (thread_req_num + random.randint(1, 5)) % WorkContext.SWITCH_SESSION_INTERVAL == 0:
                    self.session = self.create_session()
            except queue.Empty:
                time.sleep(1)

    def save_db(self, raw_json, col_name):
        """Insert one raw JSON document into the given collection."""
        self.db[col_name].insert_one(raw_json)

    def scrap(self, url, answer_id, oid):
        """Process one failed-request record.

        Success: archive the response in 'fix_fail_<TABLE_NAME>', delete the
        original failure row (``oid``), and enqueue the next page (if any)
        as a new failure row so it gets crawled too.
        Failure: insert a fresh failure row carrying the error details.
        """
        offset_token = re.search(r'&offset=(.*)', url)
        # FIX: the original called .group(1) unconditionally; a url without
        # "&offset=" raised AttributeError outside the try block below and
        # killed the worker thread without task_done()/counter updates.
        offset = offset_token.group(1) if offset_token else ''
        try:
            json_resp = self.do_get(url, headers={
                'Referer': 'https://www.zhihu.com/',
            })
            if json_resp:
                json_resp['offset_num'] = offset
                json_resp['answer_id'] = answer_id
                json_resp['raw_url'] = url
                if WorkContext.TABLE_NAME:
                    self.db['fix_fail_' + WorkContext.TABLE_NAME].insert_one(json_resp)
                    self.db[WorkContext.TABLE_NAME].delete_one({"_id": oid})
                is_end = json_resp['paging']['is_end']
                next_page = json_resp['paging']['next']
                if not is_end and WorkContext.TABLE_NAME:
                    self.db[WorkContext.TABLE_NAME].insert_one({'answer_id': answer_id, 'http_code': '200', 'error': 'new', 'url': next_page, 'resp_body': 'new_resp'})
            self.completed_task_counter.add()
            time.sleep(self.per_req_interval)
        except KeyboardInterrupt:
            exit(0)
        except CriticalRequestError as e:
            # SystemExit raised in a thread terminates only this thread; the
            # Workshop health check will notice and spawn a replacement.
            self.logger.error('[CriticalReqErr]%s', str(e))
            exit(-1)
        except Exception as e:
            # Record the failure so a later run can retry it.
            self.db[WorkContext.TABLE_NAME].insert_one({'answer_id': answer_id, 'http_code': getattr(e, 'http_code', 'not_code'), 'error': getattr(e, 'error', str(e)), 'url': getattr(e, 'url', url), 'resp_body': getattr(e, 'resp_body', 'not_resp')})
            self.completed_task_counter.add()
            self.logger.error('[BadReqErr]%s', str(e))
            time.sleep(WorkContext.REQ_FAIL_SLEEP)



class Workshop(object):
    """Coordinates the crawl: spawns Worker threads, feeds them batches of
    failed requests from MongoDB, monitors progress, restarts dead workers,
    and emails an alert when the whole pool dies."""

    def __init__(self, offset=0, bulk_size=100, worker_count=8, check_work_process_interval=WorkContext.CHECK_WORK_PROCESS_INTERVAL, limit=0):
        """
        :param offset: starting offset (currently unused by iter_batch_tasks)
        :param bulk_size: number of failure rows fetched per batch
        :param worker_count: number of Worker threads to keep alive
        :param check_work_process_interval: seconds between progress checks
        :param limit: total task limit; must be a multiple of bulk_size
        """
        self.offset = offset
        self.worker_count = worker_count
        self.worker_list = []
        self.bulk_size = bulk_size
        self.completed_task_counter = ThreadSafetyCounter()
        self.req_counter = ThreadSafetyCounter()
        self.task_queue = queue.Queue()
        self.check_work_process_interval = check_work_process_interval
        self.completed_count = 0
        self.limit = limit
        self.current_task_index = 0

        # logger
        module_name = os.path.split(__file__)[1].split('.')[0]
        self.log_file = os.path.join(settings.LOG_PATH, module_name)
        self.logger = create_logger(module_name, log_file=self.log_file, log_level=WorkContext.LOG_LEVEL)
        # SECURITY(review): email credentials are hard-coded; they should be
        # moved to settings or environment variables. Left in place here to
        # avoid breaking the alerting path.
        self.email_logger = create_email_logger('%s_email' % module_name, credentials=('singxle@163.com', 'Samgu@0!2%'), subject='host %s %s attention' % (HOSTNAME, module_name))
        # db
        self.db = MongoClient('localhost', 27017).zhihu_sample_database
        self.logger.info('worker_count=%s, offset=%s, bulk_size=%s, limit=%s, check_work_process_interval=%s', self.worker_count, self.offset, self.bulk_size, self.limit, self.check_work_process_interval)
        # FIX: was an `assert`, which is silently stripped under `python -O`;
        # raise explicitly so the invariant always holds.
        if self.limit % self.bulk_size != 0:
            raise ValueError('limit must be integer multiple of bulk_size')

    def iter_batch_tasks(self, offset, limit, bulk_size):
        """Yield batches of (url, answer_id, oid) tuples from the failure
        collection until it is empty.

        NOTE(review): ``offset`` and ``limit`` are accepted but not used;
        the query always re-reads the head of the collection (skip=0) and
        relies on Worker.scrap deleting rows as they succeed. If rows keep
        failing, the same batch is retried indefinitely.
        """
        while 1:
            fail_rows = self.db[WorkContext.TABLE_NAME].find(filter={}, skip=0, limit=bulk_size, sort=[("_id", 1)])
            fail_rows = list(fail_rows)
            if len(fail_rows) == 0:
                break
            # Deduplicate by (url, answer_id, oid) before queueing.
            tasks = set()
            for row in fail_rows:
                tasks.add((row['url'], row['answer_id'], row['_id']))
            self.logger.info('in_one_iter fail_url=%s, tasks=%s', len(fail_rows), len(tasks))
            yield list(tasks)

    def add_one_worker(self):
        """Spawn one daemon Worker sharing this workshop's queue/counters."""
        one_worker = Worker(self.logger, self.db, str(time.time()), self.task_queue, self.completed_task_counter, self.req_counter)
        one_worker.daemon = True
        one_worker.start()
        self.worker_list.append(one_worker)

    def check_worker_alive(self):
        """Replace dead workers; if ALL workers died, email the last log
        lines and abort the whole process."""
        dead_thread = 0
        alive_worker_list = []
        for w in self.worker_list:
            if not w.is_alive():
                dead_thread += 1
            else:
                alive_worker_list.append(w)
        if len(alive_worker_list) == 0:
            self.logger.error('crawl exception')
            # FIX: passing a single string without shell=True makes
            # check_output try to exec a program literally named
            # "tail -n 2 <file>"; pass an argv list instead.
            last_two_line_log = subprocess.check_output(['tail', '-n', '2', self.log_file])
            self.email_logger.error(last_two_line_log)
            exit(-1)

        self.worker_list = alive_worker_list
        for _ in range(dead_thread):
            self.add_one_worker()

    def fix_fail_record(self, oid_list, col_name):
        """Delete the given failure rows (by _id) from ``col_name``."""
        self.db[col_name].delete_many({"_id": {"$in": oid_list}})

    def reset_current_batch_task_result(self, answer_id_list, col_name):
        """Wipe the archive collection for a clean retry.

        NOTE(review): ``answer_id_list`` and ``col_name`` are unused; the
        whole 'fix_fail_<TABLE_NAME>' collection is cleared.
        """
        # FIX: Collection.remove() was deprecated and is removed in
        # pymongo 4.x; delete_many({}) is the supported equivalent.
        self.db['fix_fail_' + WorkContext.TABLE_NAME].delete_many({})

    def start_work(self):
        """Run the crawl: enqueue batches, poll until each batch completes,
        keep the worker pool healthy, and log throughput."""
        start_work_time = time.time()
        for i in range(self.worker_count):
            self.add_one_worker()

        last_batch_completed_count = 0
        for batch_tasks in self.iter_batch_tasks(self.offset, self.limit, self.bulk_size):
            batch_tasks_length = len(batch_tasks)
            for url, answer_id, oid in batch_tasks:
                self.task_queue.put((url, answer_id, oid))

            st = time.time()
            req_count_one_batch = int(self.req_counter)
            while 1:
                req_count_interval = int(self.req_counter)
                try:
                    if int(self.completed_task_counter) == batch_tasks_length:
                        self.completed_count += int(self.completed_task_counter)
                        last_batch_completed_count = self.completed_count
                        cost_time = time.time() - st
                        req_count_per_batch = int(self.req_counter) - req_count_one_batch
                        self.logger.info('completed_one_batch total=%s, cost=%.2f, req_rate=%.2f/s' % (batch_tasks_length, cost_time, (req_count_per_batch / cost_time)))
                        self.completed_task_counter.reset()
                        break
                    self.check_worker_alive()
                    time.sleep(self.check_work_process_interval)
                    self.logger.info('in_time completed_tasks=%s, current_task_index=%s, total_req=%s, in_time_req_rate=%.2f/s',
                                last_batch_completed_count + int(self.completed_task_counter),
                                self.current_task_index,
                                int(self.req_counter),
                                (int(self.req_counter) - req_count_interval) / self.check_work_process_interval
                    )
                except KeyboardInterrupt:
                    self.logger.info('terminated by KeyboardInterrupt')
                    self.logger.info('terminated by KeyboardInterrupt fix_fail completed=%s, req_total=%s, req_rate=%.2f/s',
                                last_batch_completed_count + int(self.completed_task_counter),
                                int(self.req_counter),
                                int(self.req_counter) / (time.time() - start_work_time)
                    )
                    exit(0)

        # FIX: pass int(self.req_counter) for consistency with every other
        # log line (the raw counter object's %s rendering is unspecified).
        self.logger.info('fix_fail_all completed=%s, req_total=%s, req_rate=%.2f/s',
                    self.completed_count,
                    int(self.req_counter),
                    int(self.req_counter) / (time.time() - start_work_time)
        )

if __name__ == '__main__':
    # refresh_login()
    # CLI contract: <proxy_url> <comma-separated-cookie-files> <table_name>
    # e.g.  python fix_fail.py '' acct1.cookies,acct2.cookies fail_table
    offset = 0
    limit = 0  # 0 = no limit (must stay a multiple of bulk_size)
    WorkContext.PROXY_URL = sys.argv[1]  # '' -> direct connection, no proxy
    WorkContext.COOKIE_FILE = sys.argv[2].split(',')  # file names under spiders/zhihu/cookies
    WorkContext.TABLE_NAME = sys.argv[3]  # Mongo collection of failed requests to retry
    Workshop(bulk_size=10, offset=offset, limit=limit, worker_count=4).start_work()