# -*- coding: utf-8 -*-
import sys
import multiprocessing as mp
import time
from urllib.request import urlopen, urljoin
from bs4 import BeautifulSoup
import re
import requests
import csv
from lxml import etree

# Ranked-fund query endpoint; the query string pins the date window
# (sd=2020-10-26 .. ed=2021-10-26) and page 1 with 50 items (pi=1, pn=50).
# NOTE(review): paging params are hard-coded — confirm before reusing.
base_url = 'http://fund.eastmoney.com/data/rankhandler.aspx?op=ph&dt=kf&ft=all&rs=&gs=0&sc=6yzf&st=desc&sd=2020-10-26&ed=2021-10-26&qdii=&tabSubtype=,,,,,&pi=1&pn=50&dx=1&v=0.6866648917533151'
# Presumably a throttle switch; defined but never read in this file — TODO confirm.
restricted_crawl = True


# DON'T OVER CRAWL THE WEBSITE OR YOU MAY NEVER VISIT AGAIN


def crawl(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Returns None when the request fails.  The original version printed
    the error but fell through to ``response.read()`` with ``response``
    still None, crashing with AttributeError; now it bails out cleanly.
    """
    try:
        response = urlopen(url)
    except Exception:
        # Narrowed from a bare except; still best-effort: log and give up.
        print(sys.exc_info())
        return None
    return response.read().decode('utf-8')


def parse(html):
    """Return the set of absolute fund-detail URLs found in *html*."""
    soup = BeautifulSoup(html, 'lxml')
    # Only anchors that point at a concrete fund page are of interest.
    fund_links = soup.find_all('a', {"href": re.compile('^http://fund.eastmoney.com/.+?html$')})
    return {urljoin(base_url, link['href']) for link in fund_links}


def get_request(path="jh.txt"):
    """Issue a GET for every URL listed (one per line) in *path*.

    Prints the count of distinct lines first, then one line per request:
    1-based index, the Response object, and the original line.  Errors
    are printed and the loop continues; each request is followed by a
    0.2 s sleep to avoid hammering the site.
    """
    # The file handle was previously never closed; use a context manager.
    with open(path) as f:
        lines = f.readlines()
    print(len(set(lines)))
    for i, url in enumerate(lines, start=1):
        new_url = url.replace('\n', '').replace('\r', '')
        try:
            response = requests.request("GET", new_url, headers={}, data={}, timeout=5)
            print(i, response, url)
        except Exception:  # narrowed from a bare except
            print(sys.exc_info())
        time.sleep(0.2)

def get_rank_handler():
    """Extract fund rows from the locally saved rankhandler payload.

    Reads the first line of 'rankhandler.txt' and splits it on commas;
    the fields repeat in groups of 25 where offset 0 carries the fund
    code (after the first double quote), offset 1 the fund name and
    offset 16 the date.  Each completed [code, company, name, date] row
    is appended to 'fund.csv' and collected into the returned list.

    Fixes vs. the original: the builtin name ``str`` is no longer
    shadowed, and a fresh row list is created per group — previously the
    same list object was mutated and appended repeatedly, so every entry
    of the returned list aliased the final row.
    """
    with open('rankhandler.txt', encoding='utf-8') as fund_list:
        raw = fund_list.readlines()[0]  # payload is one long line
    fields = raw.split(',')
    number_list = []
    fun = ["", "", "", ""]
    # NOTE(review): the start offset 194225 (a multiple of 25, so it lands
    # on a group boundary) resumes a previous run — confirm before reusing
    # on a fresh download.
    for i in range(194225, len(fields)):
        if i % 25 == 0:
            print(i)
            fun = ["", "", "", ""]  # start a new, independent row
            # The fund code sits after the first double quote in the field.
            number = fields[i].split('"', 1)[1]
            fun[0] = number
            url = "http://fund.eastmoney.com/{0}.html".format(number)
            fun[1] = get_company(url)
        elif i % 25 == 1:
            fun[2] = fields[i]
        elif i % 25 == 16:
            fun[3] = fields[i]
            print(fun)
            fund_csv('fund.csv', [fun])
            number_list.append(fun)
    return number_list

def get_company(url):
    """Scrape the fund page at *url* and return the fund company's name."""
    html = crawl(url)
    soup = BeautifulSoup(html, "lxml")
    company_links = soup.find_all('a', {"href": re.compile('^http://fund.eastmoney.com/company/.+?html$')})
    # The third matching anchor on the page carries the company name.
    return company_links[2].get_text()


def fund_csv(path, fund_list):
    """Append the rows in *fund_list* to the CSV file at *path*.

    Opens in append mode, so repeated calls accumulate rows; all fields
    are quoted.  Encoding is pinned to UTF-8 — the rows contain Chinese
    text, and the previous locale-dependent default could raise
    UnicodeEncodeError (e.g. GBK on Windows) or produce mixed encodings.
    """
    with open(path, 'a', newline="", encoding='utf-8') as f:
        writer = csv.writer(f, quoting=csv.QUOTE_ALL)
        writer.writerows(fund_list)

def open_csv(path):
    """Read the CSV file at *path* and return all rows as a list of lists.

    Pins encoding to UTF-8 to match fund_csv(), and passes newline=""
    as the csv module requires for correct newline handling.
    """
    with open(path, newline="", encoding='utf-8') as f:
        return list(csv.reader(f))



class Creat_data:
    """Build product-record dicts by merging rows of fund.csv into a template.

    Expects fund.csv rows shaped as
    [code, company, name, date, email, mobile, address, company2, uid, card]
    — TODO(review): confirm against the producer (get_rank_handler writes
    only the first four columns).
    """

    def __init__(self, path=""):
        # Template record; per-row fields are overwritten in creat_json().
        self.base_json = {
            "Extranet_Show_Ind": "1",
            "PD_Grp_ECD": "040",
            "isExist": "1",
            "PD_Rsk_Grd_ID": "三盏指示灯（中等风险）",
            "PD_Grp_Nm": "理财产品",
            "ASPD_Anc_Vno": "",
            "PD_Mdlty_Cd": "01",
            "ASPD_StCd": "01",
            "Fnd_Co_Nm": "中国建设银行",
            "BsPD_Nm": "理财产品",
            "PdLn_ECD": "11",
            "MULTI_TENANCY_ID": "CN000",
            "Rs_MtdCd": "不涉及",
            "IvsmPd_Cd": "ZH07042106130D138",
            "ASPD_And_CCBIns_Rel": [],
            "BsPD_ECD": "115",
            "Agnc_Sale_Ind": "自有",
            "ASPD_Nm": "乾元-开鑫享2021-113(高净客户)理财产品",
            "Mkt_Cd": "0009",
            "PdLn_Nm": "投资理财",
            "Ivstr_TpCd": "面向符合相关资质的个人客户销售",
            "ASPD_ID": "10316397",
            "Fnd_Cd_ID": "20211023003015",
            "Fee_Std": "详见说明书/协议",
            "CMtd_Cd": "详见说明书/协议",
            "e_mail": "1377202355@qq.com",
            "mobile": "18868831932",
            "address": "云南省健市黄浦吕街r座4栋6单元82室",
            "company": "凌云科技有限公司",
            "u_id": "210100198312180000",
            "bank_card": "6226635942818210",
            "url": ""
        }
        self.data = open_csv('fund.csv')

    def creat_json(self):
        """Merge each fund.csv row into the template and persist it.

        Returns the list of per-row dict snapshots.  The original
        re-initialized the list inside the loop (so only the last row
        was returned) and appended the single mutated template object,
        so every entry aliased the same dict; both are fixed by taking
        a copy per row.
        """
        my_list = []
        for row in self.data:
            self.base_json['Mkt_Cd'] = row[0]
            self.base_json['ASPD_ID'] = row[0]
            self.base_json['ASPD_Nm'] = row[2]
            self.base_json['Fnd_Co_Nm'] = row[1]
            self.base_json['Fnd_Cd_ID'] = row[3].replace("/", "") + row[0]
            self.base_json['e_mail'] = row[4]
            self.base_json['mobile'] = row[5]
            self.base_json['address'] = row[6]
            self.base_json['company'] = row[7]
            self.base_json['u_id'] = row[8]
            self.base_json['bank_card'] = row[9]

            date_parts = re.split('/', row[3])
            self.base_json['url'] = "/fund/{0}/{1}/{2}/{3}num".format(date_parts[0], date_parts[1], date_parts[2], row[0])

            record = dict(self.base_json)  # snapshot: later rows must not mutate it
            print(record)
            # NOTE(review): csv.writer applied to a dict writes its KEYS,
            # not its values — kept as-is to preserve the existing
            # json.csv file format; confirm whether values were intended.
            fund_csv('json.csv', [record])
            my_list.append(record)

        return my_list

if __name__ == "__main__":
    # Pipeline stages, run one at a time (earlier stages left disabled):
    #   1. crawl(base_url) + parse()      -> collect fund urls into jh.txt
    #   2. get_request()                  -> probe the collected urls
    #   3. get_rank_handler()             -> write fund.csv
    #   4. Creat_data().creat_json()      -> write json.csv  (active stage)
    generator = Creat_data()
    generator.creat_json()
