import json
import logging
import pickle
import re
import time
from urllib.parse import urljoin

import requests
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC

from config import local_config as config
from tasks.save_act import create_file

# Where the pickled login cookies live. The backslash is doubled: the
# original '\c' is an invalid escape sequence (SyntaxWarning on Python
# 3.12+, scheduled to become an error) even though it happens to keep the
# literal backslash. '\\' yields the exact same runtime value explicitly.
cookie_path = config.ROOT_PATH + '\\cookies1.pkl'
# Side effect at import time: make sure the output file/dir exists.
create_file()


def scrape_page(chan, condition, lecator):
    """Block until a page element satisfies *condition*.

    :param chan: crawler wrapper exposing a Selenium ``wait`` (WebDriverWait)
    :param condition: expected-condition factory, e.g.
        ``EC.presence_of_element_located``
    :param lecator: locator passed to ``condition``
    """
    try:
        chan.wait.until(condition(lecator))
    except TimeoutException:
        # BUGFIX: WebDriverWait.until raises Selenium's TimeoutException,
        # not the builtin TimeoutError the original caught — the old
        # handler never matched, so timeouts crashed the caller.
        logging.error('error occurred while scrape %s', lecator)


def save_cookie(chan):
    """Log in via Selenium and persist the resulting cookies to disk.

    :param chan: crawler wrapper passed straight through to ``get_cookie``
    """
    with open(cookie_path, 'wb') as handle:
        pickle.dump(get_cookie(chan), handle)


def get_cookie(fei):
    """Perform an interactive password login and return the session cookies.

    :param fei: crawler wrapper exposing ``browser`` (Selenium WebDriver)
        and ``wait`` (WebDriverWait)
    :return: list of cookie dicts from ``browser.get_cookies()``
    """
    # SECURITY: credentials used to be hard-coded in source. Prefer values
    # from the local config module, falling back to the legacy literals so
    # existing deployments keep working. TODO: drop the fallbacks and purge
    # the literals (and rotate the password) once config carries them.
    phone = getattr(config, 'LOGIN_PHONE', '18780847079')
    password = getattr(config, 'LOGIN_PASSWORD', '625superfuns')

    browser = fei.browser
    browser.get(config.LOGIN_URL)
    browser.implicitly_wait(10)

    # NOTE(review): find_element_by_css_selector was removed in Selenium 4;
    # this file presumably pins Selenium 3 — confirm before upgrading.
    # Open the register/login dialog.
    browser.find_element_by_css_selector(
        'body > div.fg-container > div.header.header-v3.fg-header > div > div.header-btn > div > a').click()
    # Switch from QR-code login to account/password login.
    browser.find_element_by_css_selector('#fb_login_qrcode > ul > li:nth-child(2) > a').click()
    # Fill in the credentials.
    browser.find_element_by_css_selector('#fb_login_phonepwd_tel').send_keys(phone)
    browser.find_element_by_css_selector('#fb_login_phonepwd_pwd').send_keys(password)
    # Tick the "agree to the terms" checkbox.
    browser.find_element_by_css_selector('#fb_login_phonepwd > p:nth-child(11) > div > ins').click()
    # Submit the login form.
    browser.find_element_by_css_selector('#fb_login_phonepwd_btnyes').click()
    # Wait for the redirect to the landing page before reading cookies.
    fei.wait.until(EC.url_to_be(config.FIRSTPAGE_URL))
    return browser.get_cookies()


def get_data(url):
    """Fetch *url* as JSON using the cookies saved by :func:`save_cookie`.

    :param url: absolute URL to request
    :return: pretty-printed JSON string on HTTP 200, ``None`` on any
        non-200 status or request error (logged, not raised)
    """
    with open(cookie_path, 'rb') as f:
        cookies = pickle.load(f)

    # `with` closes the Session deterministically; the original leaked it.
    with requests.Session() as session:
        for cookie in cookies:
            session.cookies.set(cookie['name'], cookie['value'])
        try:
            # Explicit timeout so a stalled server can't hang the crawler.
            response = session.get(url, timeout=10)
            response.encoding = 'utf-8'
            if response.status_code == 200:
                return json.dumps(response.json(), indent=4,
                                  ensure_ascii=False)
            logging.error('got invalid status code %s while scraping %s',
                          response.status_code, url)
        except requests.RequestException:
            logging.error('error occurred while scraping %s', url)


def get_data_by_cookies(url, cookies):
    """Fetch *url* as JSON using caller-supplied Selenium-style cookies.

    :param url: absolute URL to request
    :param cookies: iterable of cookie dicts with ``'name'``/``'value'`` keys
        (the shape ``browser.get_cookies()`` returns)
    :return: pretty-printed JSON string on HTTP 200, ``None`` on any
        non-200 status or request error (logged, not raised)
    """
    # `with` closes the Session deterministically; the original leaked it.
    with requests.Session() as session:
        for cookie in cookies:
            session.cookies.set(cookie['name'], cookie['value'])
        try:
            # Explicit timeout so a stalled server can't hang the crawler.
            response = session.get(url, timeout=10)
            # Consistency with get_data(): force UTF-8 and keep non-ASCII
            # (Chinese) payload text readable instead of \u-escaped.
            response.encoding = 'utf-8'
            if response.status_code == 200:
                return json.dumps(response.json(), indent=4,
                                  ensure_ascii=False)
            logging.error('got invalid status code %s while scraping %s',
                          response.status_code, url)
        except requests.RequestException:
            logging.error('error occurred while scraping %s', url)


if __name__ == '__main__':
    # Manual entry point: log in once and persist the session cookies
    # for later reuse by get_data().
    from tasks.init_selenium import Feigua

    save_cookie(Feigua())

