#!/usr/bin/env python 
# -*- coding: utf-8 -*-
import re

import requests
from bs4 import BeautifulSoup
from openpyxl import Workbook
from requests import Session

# Request headers attached to every session request.
# NOTE(review): 'xxx' are placeholders — fill in a real User-Agent and Cookie before running.
FAKE_HEADER = {
    'User-Agent': 'xxx',
    'Cookie': 'xxx',

}
YEAR_PATTERN = re.compile(r'Total Compensation for Fiscal Year Ending in (\d{4}):')  # regex: extracts the 4-digit fiscal year from the detail page
ENABLE_PROXY = False  # whether to route requests through the local proxy server


def block_check(resp):
    """
    Abort the whole run if the response looks like an anti-bot block page.

    :param resp: a requests Response (only .status_code and .text are read)
    :return: None when the response does not look blocked
    :raises SystemExit: when a block page is detected
    """
    # A block page is recognized by a non-200 status combined with the
    # Cloudflare-style "Sorry, you have been blocked" message in the body.
    if resp.status_code != 200 and 'Sorry, you have been blocked' in resp.text:
        print("blocked")
        # Raise SystemExit directly instead of calling the exit() builtin:
        # exit() is an interactive helper injected by the `site` module and
        # is not guaranteed to exist in every runtime. Same effect as exit(-1).
        raise SystemExit(-1)


def get_proxies():
    """
    Build the proxies mapping for requests.

    :return: a dict routing both http and https through the local proxy
             when ENABLE_PROXY is on, otherwise None (direct connection)
    """
    if not ENABLE_PROXY:
        return None
    proxy_address = '127.0.0.1:1081'
    return {
        'https': proxy_address,
        'http': proxy_address,
    }


def get_details(session, ticker):
    """
    Fetch one company's detail modal and extract the fiscal year plus the
    eight compensation figures.

    :param session: the shared requests Session (carries headers/cookies)
    :param ticker: company ticker symbol, interpolated into the detail URL
    :return: fixed-order list: [year, salary, bonus, value of stock awards,
             value of option awards, non-equity incentive plan compensation,
             change in pension value / deferred comp, all other comp, total]
    """
    url = 'https://aflcio.org/paywatch/{}?_wrapper_format=drupal_modal'.format(ticker)
    response = session.post(url, proxies=get_proxies(), verify=False)
    block_check(response)
    # The modal endpoint answers with JSON; the last element carries the HTML fragment.
    fragment = response.json()[-1]['data']
    year = YEAR_PATTERN.search(fragment).group(1)  # first capture group is the 4-digit year
    document = BeautifulSoup(fragment, 'html.parser')
    cells = document.find_all('td')  # the data cells, in document order
    # The first eight <td> cells hold the compensation columns in the fixed
    # order documented in the return description above.
    figures = [cells[i].get_text().strip() for i in range(8)]
    return [year] + figures


def russell3000_one_page(session: Session, page):
    """
    Scrape one page of the Russell 3000 pay-ratio table.

    :param session: the shared requests Session (carries headers/cookies)
    :param page: 1-based page number as shown on the site
    :return: list of rows, one per company on the page:
             [ticker, company, median worker pay, pay ratio] + detail fields
    """
    url = 'https://aflcio.org/views/ajax?_wrapper_format=drupal_ajax'
    payload = {
        'view_name': 'paywatch',
        'view_display_id': 'pay_ratio',
        'sp500': 0,
        'page': page - 1,  # the endpoint counts pages from 0
    }
    response = session.post(url, data=payload, proxies=get_proxies(), verify=False)
    block_check(response)
    # The AJAX endpoint answers with JSON; the last element carries the HTML fragment.
    fragment = response.json()[-1]['data']
    document = BeautifulSoup(fragment, 'html.parser')
    rows = []
    for table_row in document.find_all('tr')[1:]:  # skip the header row
        cells = table_row.find_all('td')
        # The first four cells are ticker, company name, median worker pay, pay ratio.
        ticker, company, median_pay, ratio = [
            cells[i].get_text().strip() for i in range(4)
        ]
        record = [ticker, company, median_pay, ratio] + get_details(session, ticker)
        print(record)  # progress indicator
        rows.append(record)

    return rows


def write_xlsx(result_list):
    """
    Write the scraped rows to an Excel workbook 'aflcio.xlsx' in the
    current directory, prepending a header row.

    :param result_list: list of data rows (each a list of cell values),
                        without the header row — it is added here
    :return: None
    """
    headers = ['Ticker', 'Company', 'Median Worker Pay', 'Pay Ratio', 'FS_Year', 'Salary', 'Bonus',
               'Value of Stock Awards', 'Value of Option Awards', 'Non-Equity Incentive Plan Compensation',
               'Change in Pension Value and Deferred Compensation Earnings', 'All Other Compensation', 'Total']
    wb = Workbook()
    ws = wb.active
    width = len(headers)
    for row in [headers] + result_list:
        # Store every cell as text and truncate each row to the header width,
        # matching the original cell-by-cell writing behavior.
        ws.append(['{0}'.format(cell) for cell in row[:width]])
    wb.save(filename='aflcio.xlsx')


if __name__ == '__main__':

    url = 'https://aflcio.org/executive-paywatch/company-pay-ratios'
    session = requests.session()
    session.headers.update(FAKE_HEADER)
    session.get(url)  # warm up the session before hitting the AJAX endpoints
    first_page, last_page = 1, 112  # manually chosen first and last page to scrape
    # first_page, last_page = 1, 1  # manually chosen first and last page (single-page debug range)
    result_list = []  # rows from every page are accumulated into this single list
    # NOTE(review): ~112 pages * 20 rows does not equal exactly 3000 companies — verify coverage.

    for p in range(first_page, last_page + 1, 1):  # pages first_page..last_page, inclusive
        print('processing pages: {}/{}'.format(p, last_page))
        one_page_result = russell3000_one_page(session, p)
        result_list.extend(one_page_result)

    print("Total: " + str(len(result_list)))
    write_xlsx(result_list)
