# -*- coding: utf-8 -*-
# @Time : 2022/3/11 15:02
# @FileName: Util.py
# @Mail: lizhigen1996@aliyun.com

__author__ = 'Zhigen.li'

import os
import re
import math
import time
import requests
from json import loads
from datetime import datetime, timedelta
from urllib.request import urlopen, Request, urlcleanup
from bs4 import BeautifulSoup
from win32comext.shell import shell
import os
import pythoncom
from Config.Config import config


# Pool of mobile-browser User-Agent strings.  Not referenced anywhere in the
# code shown in this file — presumably kept for request rotation elsewhere
# (TODO confirm before deleting).
ua_list = [
            "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
            "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
            "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
            "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
            "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
            "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
            "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
            "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
            "UCWEB7.0.2.37/28/999",
            "NOKIA5700/ UCWEB7.0.2.37/28/999",
            "Openwave/ UCWEB7.0.2.37/28/999",
            "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
            "Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
        ]

# Default request headers for the eastmoney endpoints used throughout this
# module (the Referer is required by some of their APIs).
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
    'Referer': 'http://fund.eastmoney.com/data/fundranking.html'
}

def set_shortcut(exePath):
    """Create a Windows startup shortcut (.lnk) pointing at *exePath*.

    The shortcut is written into the current user's
    ``AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Startup`` folder
    so the program is launched on login.

    :param exePath: absolute path of the executable to link to
    :raises Exception: when *exePath* does not exist
    :return: True on success
    """
    if not os.path.exists(exePath):
        raise Exception('路径不存在 {}'.format(exePath))

    exe_dir = os.path.dirname(exePath)
    # splitext keeps the whole stem even when the file name contains dots;
    # the previous split('.')[0] truncated e.g. 'my.app.exe' down to 'my'.
    filename = os.path.splitext(os.path.basename(exePath))[0]

    # Startup folder of the current user (os.path.join instead of '/'-concat
    # so separators are always correct on Windows).
    startup_dir = os.path.join(os.path.expanduser('~'), 'AppData', 'Roaming',
                               'Microsoft', 'Windows', 'Start Menu',
                               'Programs', 'Startup')
    lnkname = os.path.join(startup_dir, filename + '.lnk')

    shortcut = pythoncom.CoCreateInstance(
        shell.CLSID_ShellLink, None,
        pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink)
    shortcut.SetPath(exePath)
    # Set the working directory, otherwise the target may not find its
    # auxiliary files when launched via the shortcut.
    shortcut.SetWorkingDirectory(exe_dir)
    shortcut.QueryInterface(pythoncom.IID_IPersistFile).Save(lnkname, 0)
    return True

def extra():
    """Fire-and-forget GET of a fixed URL; failures are printed, never raised."""
    req = Request('http://www.zhigen.cool')
    try:
        urlopen(req).read().decode('utf8')
    except Exception as err:
        print(err)

def getJsonPath():
    """Return the path of the self-selection JSON file inside the sibling
    ``Config`` directory, creating that directory when it is missing.

    Uses makedirs(exist_ok=True) instead of an exists()/makedirs() pair,
    which removes the check-then-create race.
    """
    config_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'Config')
    os.makedirs(config_dir, exist_ok=True)
    return os.path.join(config_dir, 'selfSelection.json')

# First page of the fund ranking table
def fundrankingFirstPage(code, numList: list):
    """Yield the rows of page 1 (50 entries) of the eastmoney fund ranking.

    Query parameters of rankhandler.aspx:
        ft    fund category: one of [all, gp, hh, zq, zs, qdii, lof, fof]
        sc=6yzf, st=desc   rank by 6-month yield, descending
        sd / ed            ranking window: the last 12 months
        pi / pn            page index / page size

    Side effect: extends *numList* with the per-category fund counts in the
    order [all, gp, hh, zq, zs, qdii, lof, fof].  This is a generator, so
    nothing happens (and numList stays untouched) until it is iterated.

    Each yielded row: cells 0-5 verbatim, cells 6-15 suffixed with '%'
    ('---' when empty), then raw cells 16 and 20 of the record.
    """
    try:
        end_time = time.strftime('%Y-%m-%d')  # window end: today
        year = int(time.strftime('%Y')) - 1   # window start: one year ago
        start_time = str(year) + time.strftime('-%m-%d')
        phUrl = 'http://fund.eastmoney.com/data/rankhandler.aspx?op=ph&dt=kf&ft=%s&rs=&gs=0&sc=6yzf&st=desc&sd=%s&ed=%s&qdii=&tabSubtype=,,,,,&pi=%s&pn=%s&dx=1' % (
            code, start_time, end_time, '1', '50')

        data = urlopen(Request(phUrl, headers=headers)).read().decode('utf8')
        result = re.findall(r'[0-9]{6},.*?"', data)

        # Per-category fund counts embedded in the JS payload.  The greedy
        # digit match makes the trailing comma irrelevant (fofNum, the last
        # field, has none), so one pattern serves all eight keys.
        counts = [int(re.findall(key + r':([0-9]*)', data)[0])
                  for key in ('allNum', 'gpNum', 'hhNum', 'zqNum',
                              'zsNum', 'qdiiNum', 'lofNum', 'fofNum')]
        numList.extend(counts)

        for item in result:
            cells = item[:-1].split(',')  # drop the trailing quote
            targetData = cells[:6]
            for cell in cells[6:16]:
                targetData.append(cell + '%' if cell else '---')
            targetData.append(cells[16])
            targetData.append(cells[20])
            yield targetData

    except Exception as e:
        print(e)
    urlcleanup()  # clear the urllib cache

# Subsequent pages of the fund ranking table
def fundrankingNextPage(code, page):
    """Yield the 50 ranking rows of page *page* for fund category *code*.

    Ranks by 6-month yield, descending, over the last 12 months (categories:
    [all, gp, hh, zq, zs, qdii, lof, fof]).  Each yielded row holds cells
    0-5 verbatim, cells 6-15 with a '%' suffix ('---' when empty), then raw
    cells 16 and 20.
    """
    try:
        today = time.strftime('%Y-%m-%d')
        one_year_ago = str(int(time.strftime('%Y')) - 1) + time.strftime('-%m-%d')

        phUrl = 'http://fund.eastmoney.com/data/rankhandler.aspx?op=ph&dt=kf&ft=%s&rs=&gs=0&sc=6yzf&st=desc&sd=%s&ed=%s&qdii=&tabSubtype=,,,,,&pi=%s&pn=%s&dx=1' % (
            code, one_year_ago, today, str(page), '50')
        raw = urlopen(Request(phUrl, headers=headers)).read().decode('utf8')
        for record in re.findall(r'[0-9]{6},.*?"', raw):
            cells = record[:-1].split(',')  # strip the trailing quote
            row = cells[:6]
            row.extend(c + '%' if c else '---' for c in cells[6:16])
            row.append(cells[16])
            row.append(cells[20])
            yield row
    except Exception as e:
        print(e)
    urlcleanup()  # clear the urllib cache

# Real-time NAV estimation — unimplemented placeholder
def realTimeEstimation(code):
    # TODO: stub, never implemented; fundAccurate() below appears to cover
    # the same use case via fundgz.1234567.com.cn.
    pass

# Stock / bond holdings of a fund
def equityHoldings(code):
    """Scrape the fund's eastmoney page for its top holdings.

    Returns a 6-tuple:
        targetNames        holding names (internal 'gp_'/'zz_' prefixes stripped)
        urls               quote-page URLs for the stock holdings
        targetSecids       stock ticker codes ('<market>.' prefix stripped)
        rates              holding ratios as floats (percent values)
        correlationNames   names of related funds listed on the page
        correlationUrls    their links

    NOTE(review): parsing relies on the page's cell order under the
    'alignLeft' / 'alignRight' / 'alignRight10' CSS classes; any layout
    change on eastmoney's side will break it.
    """
    url = 'http://fund.eastmoney.com/{}.html'.format(code)
    html = urlopen(Request(url)).read().decode('utf8')
    soup = BeautifulSoup(html, "html.parser")
    targets = soup.find_all(attrs={'class': 'alignLeft'})

    names = []
    urls = []
    secids = []
    isTypeA = True  # True: rows belong to the stock table, False: bond table
    for target in targets:
        if '日期' in target.text:      # a date cell marks the end of the tables
            break
        if '股票名称' in target.text:  # "stock name" header row
            isTypeA = True
            continue
        if '债券名称' in target.text:  # "bond name" header row
            isTypeA = False
            continue
        if isTypeA:
            names.append('gp_' + target.a['title'])
            # secid looks like '<market>.<ticker>' with market 0/1; the dot
            # is now escaped (the old pattern's bare '.' matched any char).
            reData = re.findall(r'[0-1]+\.[0-9]+', target.a['href'])
            secids.append(reData[0])
            urls.append('http:' + target.a['href'])  # hrefs appear protocol-relative — confirm
        else:
            names.append('zz_' + target.text.strip())

    # Only the even-indexed 'alignRight' cells carry the holding ratios.
    targets = soup.find_all(attrs={'class': 'alignRight'})
    targets = [x for i, x in enumerate(targets) if i % 2 == 0]
    rates = []  # holding ratio strings, e.g. '8.88%'
    for target in targets:
        if '持仓占比' in target.text:  # skip the "holding ratio" header cell
            continue
        if '单位净值' in target.text:  # "unit NAV" marks the next section
            break
        rates.append(target.text)

    # Related-funds section (first cell is a header, hence targets[1:]).
    targets = soup.find_all(attrs={'class': 'alignRight10'})
    correlationNames = []
    correlationUrls = []
    for target in targets[1:]:
        if '日增长率' in target.text:  # "daily growth" header ends the list
            break
        correlationNames.append(target.a.text)
        correlationUrls.append(target.a['href'])

    urlcleanup()  # clear the urllib cache

    # Strip the 'gp_'/'zz_' prefixes attached above.
    targetNames = [n.split('_')[1] if '_' in n else n for n in names]
    # Keep only the ticker part of '<market>.<ticker>'.
    targetSecids = [s.split('.')[1] if '.' in s else s for s in secids]
    rates = [float(r.replace('%', '')) for r in rates]

    return targetNames, urls, targetSecids, rates, correlationNames, correlationUrls

# Stock-position (equity exposure) history of a fund
def fundFreightSpace(code):
    """Fetch the fund's historical share-position series from eastmoney.

    Parses the ``Data_fundSharesPositions`` variable out of the fund's
    pingzhongdata JS file.  Each entry's first element is a millisecond
    epoch timestamp, converted in place to 'YYYY-MM-DD'.

    Returns the converted list (the previous version only printed it;
    returning it is backward compatible since the result was unused).
    """
    url = 'http://fund.eastmoney.com/pingzhongdata/{}.js?v={}'.format(
        code, datetime.now().strftime('%Y%m%d%H%M%S'))
    raw = urlopen(Request(url, headers=headers)).read().decode('utf8')
    # Raw string literal: '\S' inside a plain string is an invalid escape
    # (DeprecationWarning since Python 3.6).
    reData = re.findall(r'Data_fundSharesPositions = (\S+);', raw)
    data = loads(reData[0])
    for d in data:
        # millisecond epoch -> 'YYYY-MM-DD'
        d[0] = time.strftime('%Y-%m-%d', time.localtime(d[0] // 1000))
    urlcleanup()  # clear the urllib cache
    return data

# Cumulative yield series of a fund (benchmarked against index 000300)
def fundYieldRate(code, typeCode):
    """Fetch the cumulative-yield chart data for fund *code*.

    typeCode: m = 1 month, q = 3 months, hy = 6 months, y = 1 year,
    twy = 2 years, try = 3 years, fiy = 5 years, sy = year-to-date,
    se = maximum range.

    Raises Exception on an API error message or when no data is returned.
    """
    url = 'http://api.fund.eastmoney.com/pinzhong/LJSYLZS?fundCode=%s&indexcode=000300&type=%s' % (str(code), typeCode)
    payload = loads(urlopen(Request(url, headers=headers)).read().decode('utf8'))

    if payload['ErrMsg']:
        raise Exception(payload['ErrMsg'])
    if not payload['Data']:
        raise Exception('未能查找到基金：{} 相关数据'.format(code))

    urlcleanup()  # clear the urllib cache
    return payload['Data']

# Real-time NAV estimate of a fund
def fundAccurate(code):
    """Fetch the real-time NAV estimate for fund *code* from fundgz.

    The response is JSONP of the form ``jsonpgz({...});``; the returned
    dict includes (among others):
        gszzl: estimated change percentage
        gsz:   estimated NAV

    Raises Exception when the fund does not support real-time estimation
    (fetch or parse failure); the original cause is chained.
    """
    url = 'http://fundgz.1234567.com.cn/js/{}.js'.format(code)
    h = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
        'Referer': 'http://fund.eastmoney.com/'
    }

    try:
        raw = urlopen(Request(url, headers=h)).read().decode('utf8')
        # Strip the JSONP wrapper before decoding.
        data = loads(raw.replace('jsonpgz(', '').replace(');', ''))
    except Exception as e:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt /
        # SystemExit; narrowed and chained for diagnosability.
        raise Exception("当前基金：{} 不支持获取实时净值".format(code)) from e
    urlcleanup()  # clear the urllib cache
    return data

# Comparison data for self-selected (watchlist) funds
def fundSelfSelectionInfo(code=None):
    """Yield comparison data for self-selected funds.

    When *code* is given, yields the data for that single fund; otherwise
    iterates every fund from the local config's watchlist.  The API's
    'Data' field is itself a JSON-encoded string, hence the double loads().

    Raises Exception naming the fund whose data could not be fetched.
    """
    url = 'http://api.fund.eastmoney.com/FundCompare/YJPJBJ?bzdm={}'
    h = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
        'Referer': 'http://fund.eastmoney.com/'
    }

    if code:
        try:
            data = loads(urlopen(Request(url.format(code), headers=h)).read().decode('utf8'))
            yield loads(data['Data'])
        except Exception as e:
            raise Exception('不能获取自选基金：{}数据，{}'.format(code, str(e)))
    else:
        funds = config.getFund()
        for k in funds:
            try:
                data = loads(urlopen(Request(url.format(k['fundcode']), headers=h)).read().decode('utf8'))
                yield loads(data['Data'])
            except Exception as e:
                # Bug fix: report the failing fund's own code; the old
                # message formatted the 'code' parameter, which is always
                # None on this branch.
                raise Exception('不能获取自选基金：{}数据，{}'.format(k['fundcode'], str(e)))
    urlcleanup()  # clear the urllib cache

# Fund search by keyword
def fundSearch(key):
    """Query the eastmoney fund-search API for *key*.

    Returns the parsed JSON response, or None when *key* is empty/falsy or
    the request fails (the error is printed, not raised).
    """
    base = 'https://fundsuggest.eastmoney.com/FundSearch/api/FundSearchAPI.ashx?m=1&key={}'
    h = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
        'Referer': 'https://fund.eastmoney.com/'
    }

    if not key or len(key) == 0:
        return None

    try:
        response = urlopen(Request(base.format(key), headers=h)).read().decode('utf8')
        return loads(response)
    except Exception as e:
        print(e)
    return None

# Per-day earnings history of a held fund
def fundHistoryRate(code='012414'):
    """Build a per-date earnings list for fund *code*.

    Pages through the fund's full NAV history (lsjz API, 20 rows per page),
    then replays each purchase recorded in the local account book against
    the daily growth rates, accumulating per-date totals.

    Returns a list of dicts {'date', 'money', 'earn'}, one per NAV date
    covered by at least one purchase.  Field names from the API:
    FSRQ = NAV date string, JZZZL = daily growth rate in percent
    (grounded by their use below); LSJZList is presumably the historical
    NAV list — confirm against the API docs.
    """
    url = 'http://api.fund.eastmoney.com/f10/lsjz?fundCode={}&pageIndex={}&pageSize=20'
    h = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36 Edg/101.0.1210.39',
        'Referer': 'http://fundf10.eastmoney.com/'
    }

    data = urlopen(Request(url.format(code, 1), headers=h)).read().decode('utf8')
    data = loads(data)
    if data['TotalCount'] == 0:
        raise Exception('获取历史增长率错误')
    historyList = data['Data']['LSJZList']
    totalCount = data['TotalCount']
    pageNum = 20
    pageCount = math.ceil(totalCount * 1.0 / pageNum)

    # Fetch the remaining pages (page 1 was fetched above).
    for i in range(2, pageCount + 1, 1):
        data = urlopen(Request(url.format(code, i), headers=h)).read().decode('utf8')
        data = loads(data)
        historyList.extend(data['Data']['LSJZList'])

    accountBook = config.getAccountBook()[code]
    buyInfos = accountBook.get('BuyInfos', [])
    shareWay = accountBook.get('ShareWay', True)  # dividend mode: True = reinvest, False = cash payout

    earnList = []
    for buyInfo in buyInfos:
        # buyTime: purchase before 3 p.m. (True) settles T+1, after (False)
        # settles T+2, hence the different day offsets.
        t, money, buyTime = buyInfo
        if buyTime:
            t = (datetime.strptime(t, '%Y-%m-%d') + timedelta(days=1)).strftime('%Y-%m-%d')
        else:
            t = (datetime.strptime(t, '%Y-%m-%d') + timedelta(days=2)).strftime('%Y-%m-%d')

        # historyList[0] is the newest NAV date; a purchase settling after
        # it has no history to replay yet.
        date0 = historyList[0]['FSRQ']
        if datetime.strptime(t, '%Y-%m-%d') > datetime.strptime(date0, '%Y-%m-%d'):
            continue

        # Find the oldest record that is still on/after the settle date;
        # historyList is ordered newest-first, so we scan until we pass it.
        idx = 0
        for idx, history in enumerate(historyList):
            if datetime.strptime(t, '%Y-%m-%d') > datetime.strptime(history['FSRQ'], '%Y-%m-%d'):
                idx -= 1
                break

        # Slice the records covering this purchase and flip to oldest-first.
        targetList = historyList[:idx + 1]
        targetList = targetList[::-1]

        smon = money  # running principal (+compounded earnings when reinvesting)
        for target in targetList:
            earn = smon * (float(target['JZZZL']) / 100.0)
            if shareWay:
                smon += earn  # reinvest: earnings compound into principal

            # Merge into the existing per-date entry, if any.
            isFund = False
            for idx, en in enumerate(earnList):
                if en['date'] == target['FSRQ']:
                    en['money'] += smon
                    en['earn'] += earn
                    earnList[idx] = en
                    isFund = True
                    break
            if not isFund:
                earnList.append({'date': target['FSRQ'], 'money': smon, 'earn': earn})
    return earnList
    # TODO: the algorithm differs slightly from actual earnings (and drifts
    # the longer a fund has been held): purchase/redemption fees, management,
    # custody and sales-service fees are not modeled; only T+1 settlement
    # is handled.

# Snapshot of the major Chinese stock indices ("market barometer")
def daPanInfos():
    """Fetch real-time quotes for the major indices from jrjimg.cn.

    The 'i=' query parameter lists the index codes and 'c=' the columns of
    each row (code, name, prices, changes, counts, ...).  Returns the
    HqData rows as a list, or None on any failure (errors are printed).
    """
    import ast  # local import: only this function needs it

    url = 'http://q.jrjimg.cn/?q=cn|i&i=000001,399001,399006,399300,399005,000905,399101,399102&n=indexHq&c=code,name,np,hp,pl,hlp,lp,hp,tm,lcp,pusc,pdsc,pms'
    h = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
        'Referer': 'http://summary.jrj.com.cn/'
    }

    try:
        data = urlopen(Request(url, headers=h)).read().decode('gbk')
        data = data.split('HqData:')[1]
        data = data.replace('};', '')
        # The payload is a JS array literal; ast.literal_eval parses it
        # without eval()'s arbitrary-code-execution risk on remote data.
        data = ast.literal_eval(data.strip())
        if not isinstance(data, list):
            raise Exception('读取大盘晴雨表错误.')
        return data
    except Exception as e:
        print(e)
    return None

# Monthly aggregation of a fund's daily NAV history
def fundMonthData(code='161725', size=365):
    """Aggregate the last *size* daily NAV records of fund *code* by month.

    Data comes from danjuanapp.com; records are processed oldest-first.

    Returns six parallel lists, one entry per month:
        mlist      'YYYY-MM' labels
        startlist  first NAV value of the month
        endlist    last NAV value of the month
        maxlist    maximum daily 'percentage' within the month
        minlist    minimum daily 'percentage' within the month
        reslist    end - start, rounded to 4 decimals

    NOTE(review): 'percentage' is presumably the daily growth rate in
    percent — confirm against the danjuan API.
    """
    url = "https://danjuanapp.com/djapi/fund/nav/history/" + \
        str(code) + "?size=" + str(size) + "&page=1"

    h = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0',
    }

    res = requests.get(url, headers=h)
    res.encoding = 'utf-8'
    items = loads(res.text)['data']['items']

    # Running per-month state, seeded from the oldest record (items[-1]).
    # Renamed from 'max'/'min' so the builtins are no longer shadowed.
    cur_month = items[-1]['date'].split('-')[1]
    month_max = float(items[-1]['percentage'])
    month_min = 1000.0  # sentinel larger than any real percentage
    start = items[-1]['value']
    end = 0

    maxlist = []
    minlist = []
    startlist = []
    endlist = []
    reslist = []
    mlist = ['-'.join(items[-1]['date'].split('-')[:2])]

    # Walk from the oldest record (last index) to the newest (index 0).
    for j in range(len(items) - 1, -1, -1):
        rec = items[j]
        parts = rec['date'].split('-')
        if parts[1] == cur_month:
            pct = float(rec['percentage'])
            if pct > month_max:
                month_max = pct
            if pct < month_min:
                month_min = pct
            # Last trading day of this month: the next (newer) record
            # belongs to a different month.
            if j != 0 and items[j - 1]['date'].split('-')[1] != cur_month:
                end = rec['value']
        else:
            # Month rollover: flush the finished month, reset the state.
            maxlist.append(month_max)
            minlist.append(month_min)
            month_max = 0.0
            month_min = 1000.0
            startlist.append(float(start))
            endlist.append(float(end))
            reslist.append(round(float(end) - float(start), 4))
            cur_month = parts[1]
            start = rec['value']
            mlist.append(parts[0] + '-' + parts[1])

        if j == 0:
            # Newest record: flush the (possibly partial) current month.
            maxlist.append(month_max)
            minlist.append(month_min)
            startlist.append(float(start))
            endlist.append(float(rec['value']))
            reslist.append(round(float(rec['value']) - float(start), 4))

    return mlist, startlist, endlist, maxlist, minlist, reslist

if __name__ == '__main__':
    # Ad-hoc manual smoke test: fetch the holdings and the stock-position
    # history of fund 161725.  Uncomment the other calls to exercise them.
    # fundYieldRate('162719', 'm')
    # print(fundAccurate('000689'))
    # for data in fundSelfSelectionInfo('000689'):
    #     print(data)
    # fundFreightSpace('161725')
    equityHoldings('161725')
    fundFreightSpace('161725')