#!/usr/bin/env python
# encoding: utf-8
'''
facebook_script — selenium/asyncio Facebook crawling script.

Created by leixu on 2017/9/20 (IDE: PyCharm).
'''
from selenium import webdriver
from luobocrawler.holder.holders import FaceBookSessionHolder
from selenium.webdriver.common.keys import Keys
import time
import string, random, traceback
import asyncio
import lxml.html
from furl import furl
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from asyncio import Lock
import pickle
import aiohttp
import arrow
from pymongo import MongoClient
from luobocrawler.selenium_script.crawler import Crawler


class FBCrawler(Crawler):
    """Facebook crawler built on selenium + asyncio.

    Drives a logged-in browser session to harvest search-result pages and
    personal profile pages, persisting the raw HTML into MongoDB
    (``Face_Book`` database) for offline parsing.
    """

    # Names already processed (class-level, shared by all instances).
    name_list = set()
    # Number of search tabs opened per batch by rule_1.
    name_num_one_time = 2

    def __init__(self):
        # BUG FIX: the original called ``super(Crawler, self).__init__()``,
        # which starts the MRO lookup *after* Crawler and therefore skips
        # Crawler.__init__ entirely. Plain super() initialises the base
        # class as intended.
        super().__init__()
        self._session_holder = FaceBookSessionHolder()

    def init_env(self):
        # Selenium driver type ("chrome" is the framework default).
        self.driver_type = "firefox"
        # Selenium driver config; the framework default applies when unset.
        from luobocrawler.holder.driver_config import custom_chrome_config

        self.driver_config = custom_chrome_config(http_proxy='192.168.2.37:1080')
        # Extra simple configuration items layered over the defaults.
        self.ref_config = {}

    async def rule_4(self):
        """Scroll keyword/creation-time filtered search pages to the bottom
        and archive the complete page HTML into ``Face_Book.html_body``."""
        # NOTE(review): hard-coded credentials — move to configuration.
        config = {'username': "15251446880", "password": "12qwaszx"}
        driver = self.driver
        driver.get("https://facebook.com")
        client = MongoClient('192.168.12.41').Face_Book

        async def check_if_already_login():
            def check_login_then_login():
                # Already logged in when the search input is present.
                try:
                    WebDriverWait(driver, 10).until(
                        EC.presence_of_element_located((By.XPATH, "//input[@data-testid='search_input' and @placeholder='搜索']")))
                    print("检测到搜索框，可以获取到数据")
                    return
                except Exception:
                    pass
                # Otherwise wait for the login form and submit credentials.
                try:
                    WebDriverWait(driver, 20).until(
                        EC.presence_of_element_located((By.ID, "loginbutton")))
                    print("检测登录框")
                    e_email = driver.find_element_by_id("email")
                    e_email.clear()
                    e_email.send_keys(config.get('username'))
                    e_pass = driver.find_element_by_id("pass")
                    e_pass.clear()
                    e_pass.send_keys(config.get('password'), Keys.RETURN)
                    print("登录")
                except Exception as e:
                    raise e

            try:
                check_login_then_login()
                await asyncio.sleep(3)
            except Exception:
                # Best-effort login: log the failure and carry on.
                traceback.print_exc()
                return

        await check_if_already_login()

        async def gen_urls():
            """Cross every keyword with every year/month creation-time
            filter and return the resulting search URLs."""
            keyword_list = ["有情却道无情", "美女", "北京", "天津", "河北", "辽宁", "上海", "江苏", "浙江", "福建", "山东", "广东", "广西", "海南", "重庆", "大连", "宁波", "厦门", "青岛", "深圳"]
            year_list = [2017, 2016]
            month_list = ["01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"]

            import urllib.parse
            url_list = []
            for _key_word in keyword_list:
                for _year in year_list:
                    for _month in month_list:
                        # filters_rp_creation_time carries a URL-encoded JSON blob.
                        url_part = urllib.parse.quote(r'{"name":"creation_time","args":"{\"start_month\":\"' + f'{_year}-{_month}' + r'\",\"end_month\":\"' + f'{_year}-{_month}' + r'\"}"}')
                        url_list.append(f'''https://www.facebook.com/search/top/?q={_key_word}&filters_rp_creation_time={url_part}''')

            return url_list

        async def get_one_keyword(_url):
            """Keep scrolling one search page; every 30 scrolls check whether
            an end-of-results marker appeared or the page stopped growing,
            then persist the HTML and stop."""
            driver.get(_url)
            count_for_check = 0
            old_height = 0
            while True:
                count_for_check += 1
                driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                await asyncio.sleep(10)
                if count_for_check == 30:
                    if len(driver.find_elements_by_xpath("//div[@id='browse_end_of_results_footer']")) != 0 or len(driver.find_elements_by_xpath("//div[@id='empty_result_error']")) != 0 or len(
                            driver.find_elements_by_xpath("//div/div/div[text()='显示完毕']")) != 0:
                        _html_body = self.driver.find_element_by_xpath("//*").get_attribute("outerHTML")
                        client.html_body.insert_one({"url": _url, "html_body": _html_body})
                        break
                    else:
                        new_height = driver.execute_script("return document.body.offsetHeight;")
                        _html_body = self.driver.find_element_by_xpath("//*").get_attribute("outerHTML")
                        if new_height == old_height:
                            # Page stopped growing: treat as finished.
                            client.html_body.insert_one({"url": _url, "html_body": _html_body})
                            break
                        else:
                            old_height = new_height
                    count_for_check = 0

        _waiting_url_list = await gen_urls()
        # Randomise crawl order to look less bot-like.
        random.shuffle(_waiting_url_list)
        for _url in _waiting_url_list:
            await get_one_keyword(_url)

    async def rule_3(self):
        """Log in via the mobile site, then fetch profile pages listed in the
        ``person`` collection with aiohttp (reusing the browser cookies) and
        store the raw HTML in ``profile_html``."""
        # NOTE(review): hard-coded credentials — move to configuration.
        config = {'username': "15251446880", "password": "12qwaszx"}
        driver = self.driver
        driver.get("https://m.facebook.com/home.php")
        client = MongoClient('192.168.12.41').Face_Book

        async def check_if_already_login():
            def check_login_then_login():
                # Already logged in when the mobile nav bar is present.
                try:
                    WebDriverWait(driver, 10).until(
                        EC.presence_of_element_located((By.ID, "mJewelNav")))
                    return
                except Exception:
                    pass
                # Otherwise wait for the login form and submit credentials.
                try:
                    WebDriverWait(driver, 20).until(
                        EC.presence_of_element_located((By.ID, "m_login_email")))
                    driver.find_element_by_id("m_login_email").send_keys(config.get('username'))
                    driver.find_element_by_id("m_login_password").send_keys(config.get('password'), Keys.RETURN)
                    driver.implicitly_wait(10)
                    driver.find_element_by_xpath("//div/button[@value='OK']").click()
                except Exception as e:
                    raise e

            try:
                check_login_then_login()
                await asyncio.sleep(3)
            except Exception:
                traceback.print_exc()
                return

        await check_if_already_login()

        async def get_profile(_page_list):
            """Fetch one batch of profile pages concurrently over aiohttp."""
            driver.get(_page_list[0])
            await asyncio.sleep(2)
            # NOTE(review): "Connections" is not a valid HTTP header name
            # (should be "Connection"); left as-is because aiohttp manages
            # keep-alive itself — confirm before changing.
            headers = ({"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
                        "accept-language": "zh-CN,zh;q=0.8",
                        "accept-encoding": "gzip, deflate, br",
                        "Host": "m.facebook.com",
                        "cache-control": "max-age=0",
                        "upgrade-insecure-requests": "1",
                        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
                        "Connections": "keep-alive"})

            # Reuse the authenticated browser session's cookies.
            _cookies = {}
            for cookie in driver.get_cookies():
                _cookies[cookie['name']] = cookie['value']
            proxy_url = 'http://192.168.2.37:1080'
            async with aiohttp.ClientSession(headers=headers, cookies=_cookies) as s:

                # BUG FIX: the inner coroutine was also named ``get_profile``,
                # shadowing the enclosing function; renamed for clarity.
                async def fetch_single_profile(profile_page, _index):
                    # Stagger the requests so the batch doesn't hit FB at once.
                    _sleep_time = random.randint(1, 30) / 10 + 3 * _index
                    await asyncio.sleep(_sleep_time)
                    async with s.get(profile_page, proxy=proxy_url) as _response:
                        import brotli
                        _now = arrow.now()
                        # BUG FIX: headers["Content-Encoding"] raised KeyError
                        # when the header was absent; use .get() instead.
                        if _response.headers.get("Content-Encoding") == "br":
                            html_text = brotli.decompress(await _response.read()).decode("utf8")
                        else:
                            html_text = await _response.text()
                        # BUG FIX: the original ``pickle.dump(_response)`` was a
                        # TypeError (missing file argument), and ClientResponse
                        # objects are not picklable anyway; store repr() instead.
                        if _response.status != 200:
                            client.error.insert_one({"url": profile_page, "_response": repr(_response), "date": str(_now)})
                        if 'id="mobile_login_bar"' in html_text:
                            # NOTE(review): arrow's ``timestamp`` is a property in
                            # old releases but a method in >=1.0 — confirm version.
                            client.error.insert_one({"url": profile_page, "timestamp": arrow.now().timestamp, "date": str(_now), "reason": "needLogin", "_response": repr(_response)})
                        client.profile_html.insert_one({"url": profile_page, "html": html_text, "date": str(_now), "timestamp": _now.timestamp})

                # Placeholders kept from the original for future extension.
                def get_friends():
                    pass

                def get_posts():
                    pass

                # BUG FIX: passing bare coroutines to asyncio.wait() is
                # deprecated (rejected in 3.11) and silently swallows
                # exceptions; gather runs them and propagates errors.
                await asyncio.gather(*(fetch_single_profile(_page, _i) for _i, _page in enumerate(_page_list)))
                print(f"all pages done,total page count：{len(_page_list)}")

        async def get_urls_from_mongo():
            """Page through ``person`` URLs (skipping ones already fetched)
            and download them in randomly sized batches of 6-10."""
            count = client.profile_html.count()
            cursor = client.person.find({}, {"url": 1}).skip(count)
            request_one_time = random.randint(6, 10)
            count_index = 0
            page_list = []
            total_loop_count = 0
            for doc in cursor:
                count_index += 1
                page_list.append(f"https://{doc['url']}")
                if count_index == request_one_time:
                    # BUG FIX: time.clock() was removed in Python 3.8;
                    # perf_counter() is the portable monotonic timer.
                    t1 = time.perf_counter()
                    await get_profile(page_list)
                    t2 = time.perf_counter()
                    print("==========================================================")
                    print("总耗时:" + str(t2 - t1))
                    print("==========================================================")
                    import math
                    t_interval = math.floor(t2 - t1)
                    sleep_time = 35 - t_interval
                    # Pad every batch out to at least 35 seconds.
                    if t_interval < 35:
                        await asyncio.sleep(35 - t_interval)
                        print("==========================================================")
                        print("辅助休眠时间:" + str(sleep_time))
                        print("==========================================================")

                    count_index = 0
                    page_list = []
                    request_one_time = random.randint(6, 10)

                    # Stop after ~31 batches per invocation.
                    if total_loop_count > 30:
                        break
                    else:
                        total_loop_count += 1

        await get_urls_from_mongo()

    # used for parsing person-list HTML files saved on disk
    async def rule_2(self):
        """Parse previously saved search-result HTML files into person
        records and bulk-insert them into ``Face_Book.person``."""
        async def parse_html():
            client = MongoClient('192.168.2.28').Face_Book
            # Serialises Mongo inserts across the concurrent parse tasks.
            lock = Lock()

            async def _scan_html_files():
                # Collect every file path under ../html.
                base_path = "../html"
                _file_path_list = []
                import os
                with os.scandir(base_path) as it:
                    for entry in it:
                        _file_path_list.append(os.path.join(base_path, entry.name))
                return _file_path_list

            async def _read_files(files_list):
                # Slurp every HTML file as UTF-8 text.
                html_string_list = []
                for _item in files_list:
                    with open(_item, "r", encoding="utf-8") as f:
                        html_string_list.append(f.read())
                print("read_done")
                return html_string_list

            async def _parse_html(files_list):
                person_list = []
                _count = 0
                _ready_for_parse_list = []
                for _html_str in await _read_files(files_list):

                    async def do(_p_html):
                        # NOTE(review): lxml.html.fromstring may not accept
                        # huge_tree directly — confirm against the lxml version.
                        html = lxml.html.fromstring(_p_html, huge_tree=True)
                        for i in html.xpath("//div/div/div[contains(@class,'item') and @data-sigil]"):
                            href = i.xpath("./a")[0].get("href")
                            try:
                                _profile_url = furl(f'm.facebook.com{href}')
                            except Exception:
                                traceback.print_exc()
                                print(href)
                                break
                            # Strip tracking query parameters.
                            _profile_url.remove(["slog", "seq", "rk", "st", "fbtype", "refid", "tsid", "fref"])
                            person_list.append({
                                "html": lxml.html.tostring(i),
                                "href": href,
                                "url": _profile_url.url,
                                "person_name": i.xpath("./a/div/div[@class='content']/div")[0].text_content()
                            })

                    # Parse in bursts of 5 documents.
                    if _count == 5:
                        await asyncio.gather(*(do(_) for _ in _ready_for_parse_list))
                        _count = 0
                        _ready_for_parse_list = []

                    _count += 1
                    _ready_for_parse_list.append(_html_str)

                # BUG FIX: guard the trailing batch — the original called
                # asyncio.wait([]) on an empty file list, which raises.
                if _ready_for_parse_list:
                    await asyncio.gather(*(do(_) for _ in _ready_for_parse_list))

                async def save_to_mongo():
                    async with lock:
                        client.person.insert_many(person_list)

                # BUG FIX: insert_many raises on an empty document list.
                if person_list:
                    await save_to_mongo()

            import numpy as np
            all_html_files_list = await _scan_html_files()
            # Split the work 30 ways, then 30 again inside each chunk.
            split_list = np.array_split(np.array(all_html_files_list), 30)
            for _item_in_list in split_list:
                await asyncio.gather(*(_parse_html(_) for _ in np.array_split(_item_in_list, 30)))
            client.close()

        await parse_html()

    async def rule_1(self):
        """Open people-search tabs for a batch of names, scroll each result
        page to its end, and dump the HTML to ./html/<name>.html."""
        def name_gen():
            # Candidate names, one per line.
            with open('1.txt', 'r+') as f:
                return f.read().splitlines()

        def remember_already_name(_name):
            # Append a finished name to the done-list.
            with open('2.txt', 'a+') as f:
                f.write(_name + '\r')

        def get_already_name():
            with open('2.txt', 'r+') as f:
                return f.read().splitlines()

        # BUG FIX: the original removed elements from wait_name_list while
        # iterating over it, which silently skips entries; filter via a set.
        already_done = set(get_already_name())
        wait_name_list = [n for n in name_gen() if n not in already_done]

        running_name_list = []
        for name in random.sample(wait_name_list, self.name_num_one_time):
            time.sleep(1)
            _str = f"window.open('https://m.facebook.com/search/peoples/?search=Search&search_source=search_bar&query={name}&=搜索')"
            print(_str)
            self.driver.execute_script(_str)
            time.sleep(random.randint(1, 3))
            running_name_list.append(name)
        # Close the original home-page tab so only search tabs remain.
        for i in range(self.name_num_one_time + 1):
            self.driver.switch_to.window(self.driver.window_handles[i])
            if self.driver.current_url == "https://m.facebook.com/":
                self.driver.close()
                break

        time.sleep(2)

        # Per-tab progress bookkeeping.
        state_list = [{"to_end": False} for i in range(self.name_num_one_time)]
        html_height_list = [0 for i in range(self.name_num_one_time)]
        try_times_list = [0 for i in range(self.name_num_one_time)]

        # Tabs were opened newest-first relative to window_handles order.
        running_name_list.reverse()

        handle_list = self.driver.window_handles

        while True:
            if all(state["to_end"] for state in state_list):
                break
            for i in range(self.name_num_one_time):
                if state_list[i]["to_end"]:
                    continue
                self.driver.switch_to.window(handle_list[i])
                time.sleep(0.5)
                if len(self.driver.find_elements_by_xpath("//div[@id='objects_container']/div[@id='objects_container']/div[@id='more_objects']")) != 0:
                    # "More results" marker still present: keep scrolling,
                    # giving up after 10 rounds without a height change.
                    if html_height_list[i] == self.driver.execute_script("return document.body.offsetHeight;"):
                        if try_times_list[i] >= 10:
                            state_list[i]["to_end"] = True
                        else:
                            try_times_list[i] += 1
                    else:
                        html_height_list[i] = self.driver.execute_script("return document.body.offsetHeight;")
                    self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

                else:
                    # Reached the end: save the page and mark the name done.
                    _html_body = self.driver.find_element_by_xpath("//*").get_attribute("outerHTML")
                    with open(f"./html/{running_name_list[i]}.html", "w+", encoding="utf8") as html_f:
                        html_f.write(_html_body)
                        state_list[i]["to_end"] = True
                        remember_already_name(running_name_list[i])
            time.sleep(random.randint(1, 3))

    def _close_session(self):
        """Release the selenium driver held by the session holder."""
        self._session_holder.close_driver()
