#coding=utf-8
import codecs # To support chinese
import urllib

import easyquotation
import pandas as pd
from xml.etree.ElementTree import tostring
import re
import requests
from bs4 import BeautifulSoup
import os
import time
import csv
import lxml
from lxml import etree
import easyquotation
import os,sys,string
class money163():
    """Scraper for NetEase (163.com) stock-market pages and APIs.

    Provides:
      * ``lrb``   -- income-statement figures (利润表) scraped from the f10 pages
      * ``zcfzb`` -- balance-sheet line items (资产负债表)
      * ``xq``    -- real-time quote lookup (work in progress)
      * ``group_create`` / ``follow`` -- authenticated watch-list operations
      * ``shareCrawl`` / ``writeCSV`` -- historical daily trade data to CSV

    All network errors are reported and swallowed: scrape methods return
    ``None`` on failure instead of raising.
    """

    # Desktop browser User-Agent; 163.com rejects requests without a
    # plausible one.
    headers = {'User-Agent': 'Mozilla/5.0(Windows NT 6.1;WOW64)AppleWebKit/537.36(KHTML,like Gecko) Chrome/53.0.2785.104 Safari/537.3 Core/1.53.1708.400 QQBrowser/9.5.9635.400'}

    # Shared headers for the authenticated i.money.163.com portfolio API,
    # previously duplicated in group_create() and follow().
    # NOTE(review): the Cookie is a hard-coded personal session and will
    # expire -- these calls only work while that login is valid.
    _api_headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Host': 'i.money.163.com',
        'Origin': 'http://i.money.163.com',
        'Referer': 'http://i.money.163.com/hs/setting.html',
        'Cookie': '__bid_n=183bcad89aba13818a4207; ne_analysis_trace_id=1665450501749; _antanalysis_s_id=1665450509314; NTES_SESS=LqcgV8GPUVLOQ1DM7KpeHeei4F_AmscdLTEanEtGw3koiAPuiaN2.Hstis_Iks2bmtWBo1HMNr.ZasFLN_G2Wb7rniT4likZ.KKIdYVYKOEwNwRV1LLmCuw8JhYHi4IhdjjJ5FKc3WZE8XfQBA8l1N2OfICj4IhlAbh5bLm52zYt8afXbDpX8diA2XocWEP_rAZNdHq6mHlTthRjJaoYVWtMnavPjwUP5; S_INFO=1665451453|0|3&80##|m18668071597; P_INFO=m18668071597@163.com|1665451453|0|163|00&99|null&null&null#zhj&330100#10#0#0|186597&1||18668071597@163.com; vjuids=-15386c7067.183c4a49bf2.0.516a8833e07ac; NTES_CMT_USER_INFO=306566444%7C%E6%9C%89%E6%80%81%E5%BA%A6%E7%BD%91%E5%8F%8B0ihtkI%7Chttp%3A%2F%2Fcms-bucket.nosdn.127.net%2F2018%2F08%2F13%2F078ea9f65d954410b62a52ac773875a1.jpeg%7Cfalse%7CbTE4NjY4MDcxNTk3QDE2My5jb20%3D; pver_n_f_l_n3=a; s_n_f_l_n3=7c811a6d015e789a1665484939403; cm_newmsg=user%3Dm18668071597%40163.com%26new%3D37%26total%3D104; pgr_n_f_l_n3=7c811a6d015e789a16655376444162731; vinfo_n_f_l_n3=7c811a6d015e789a.1.6.1665317898081.1665478904734.1665537648325; vjlast=1665451466.1665537649.13',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36'}

    # Income statement (利润表)
    def lrb(self, ts_code):
        """Scrape the latest income-statement figures for *ts_code*.

        Returns a one-row DataFrame with columns ``jlr`` (net profit),
        ``lxzc`` (interest expense), ``sds`` (income tax) and ``ebit``
        (their sum), or ``None`` on any scrape failure.

        NOTE(review): the tr[41]/tr[4]/tr[39] indices are tied to the
        current quotes.money.163.com page layout -- verify if it changes.
        """
        try:
            url = 'http://quotes.money.163.com/f10/lrb_' + ts_code + '.html'
            data = requests.get(url, headers=self.headers)
            selector = etree.HTML(data.text)
            jlr = self.getVal(selector, '//*[@id="scrollTable"]/div[4]/table/tr[41]/td[1]')
            lxzc = self.getVal(selector, '//*[@id="scrollTable"]/div[4]/table/tr[4]/td[1]')
            sds = self.getVal(selector, '//*[@id="scrollTable"]/div[4]/table/tr[39]/td[1]')
            ebit = jlr + lxzc + sds
            # Build the frame directly: DataFrame.append was removed in
            # pandas 2.0, and a single-row construction is equivalent here.
            return pd.DataFrame([{'jlr': jlr, 'lxzc': lxzc, 'sds': sds, 'ebit': ebit}],
                                columns=['jlr', 'lxzc', 'sds', 'ebit'])
        except Exception as err:
            # Report the actual cause instead of swallowing it silently.
            print("Finished with error: ", err)
            return None

    # Current price via the quote feed endpoint -- work in progress.
    def xq(self, ts_code):
        """Fetch the real-time quote feed for *ts_code*.

        Incomplete: the market-value computation is not implemented yet,
        so this only prints the scraped value and returns an empty
        DataFrame (columns: ``total_mv``), or ``None`` on failure.

        NOTE(review): the feed endpoint returns JSONP, not HTML, so the
        XPath below probably never matches -- confirm before relying on it.
        """
        xq = pd.DataFrame(columns=['total_mv'])
        try:
            url = 'https://api.money.126.net/data/feed/' + ts_code + ',money.api'
            data = requests.get(url, headers=self.headers)
            selector = etree.HTML(data.text)
            # zgb = total share count (总股本)
            zgb = self.getVal(selector, '/html/body/div[2]/div[22]/div[2]/p[8]')
            print(zgb)
        except Exception as err:
            print("Finished with error: ", err)
            return None
        return xq

    def _post_api(self, url, body):
        """POST *body* to an i.money.163.com endpoint and print the JSON reply.

        Shared plumbing for group_create() and follow(); returns None.
        """
        try:
            data = requests.post(url, headers=self._api_headers, data=body)
            print(pd.DataFrame(data.json()))
        except Exception as err:
            print("Finished with error: ", err)
            return None

    def group_create(self):
        """Create a new watch-list group named '144' (hard-coded)."""
        return self._post_api('http://i.money.163.com/hs/group/create',
                              {'name': '144'})

    def follow(self):
        """Add stock 0600000 to watch-list group 7432927 (both hard-coded)."""
        return self._post_api('http://i.money.163.com/hs/position/follow.json',
                              {'groupId': '7432927', 'code': '0600000'})

    # Balance sheet (资产负债表)
    def zcfzb(self, ts_code):
        """Scrape balance-sheet line items for *ts_code*.

        Returns a one-row DataFrame with the columns listed below (most
        recent reporting period, td[1]), or ``None`` on any failure.

        NOTE(review): every row index assumes the current page layout of
        quotes.money.163.com/f10/zcfzb_*.html -- verify if it changes.
        """
        # (column name, table row index) pairs, in output-column order.
        rows = [
            ('fix_assets', 41),
            ('acct_payable', 66),
            ('adv_receipts', 67),
            ('payroll_payable', 70),
            ('taxes_payable', 71),
            ('oth_payable', 77),
            ('acc_exp', 78),
            ('deferred_inc', 86),
            ('oth_cur_liab', 89),
            ('yspj', 9),    # notes receivable (应收票据)
            ('yszk', 10),   # accounts receivable (应收账款)
            ('oth_receiv', 17),
            ('prepayment', 11),
            ('inventories', 23),
            ('lt_eqt_invest', 34),
            ('invest_real_estate', 36),
            ('minority_int', 114),
            ('st_borr', 59),
            ('lt_borr', 92),
            ('non_cur_liab_due_1y', 88),
            ('bond_payable', 93),
            ('int_payable', 72),
        ]
        try:
            url = 'http://quotes.money.163.com/f10/zcfzb_' + ts_code + '.html'
            data = requests.get(url, headers=self.headers)
            selector = etree.HTML(data.text)
            record = {
                col: self.getVal(
                    selector,
                    '//*[@id="scrollTable"]/div[4]/table/tr[%d]/td[1]' % row)
                for col, row in rows
            }
            # Direct construction replaces the pandas<2.0-only DataFrame.append.
            return pd.DataFrame([record], columns=[col for col, _ in rows])
        except Exception as err:
            print("Finished with error: ", err)
            return None

    def getVal(self, selector, xpath):
        """Return the digits of the text at *xpath* as a float.

        All non-digit characters are stripped -- this removes thousands
        separators, but also signs and decimal points, so fractional
        values are concatenated (e.g. '56.78' -> 5678.0); preserved from
        the original behaviour.  Returns 0.0 when the node is missing,
        has no text, or contains no digits (previously these raised).
        """
        nodes = selector.xpath(xpath)
        if not nodes or nodes[0].text is None:
            return 0.0
        digits = ''.join(filter(str.isdigit, nodes[0].text))
        return float(digits) if digits else 0.0

    def find_unchinese(self, file):
        """Return *file* with all CJK (Chinese) characters removed."""
        pattern = re.compile(r'[\u4e00-\u9fa5]')
        return re.sub(pattern, "", file)

    def shareCrawl(self, shareCode, year, season):
        """Fetch one quarter of historical daily trade rows for *shareCode*.

        Returns the <tr> elements of the history table.  The site lists
        newest first, so the rows are reversed into chronological order.
        """
        url = ('http://quotes.money.163.com/trade/lsjysj_' + str(shareCode)
               + '.html?year=' + str(year) + '&season=' + str(season))
        data = requests.get(url, headers=self.headers)
        soup = BeautifulSoup(data.text, 'lxml')
        table = soup.findAll('table', {'class': 'table_bg001'})[0]
        rows = table.findAll('tr')
        return rows[::-1]

    def writeCSV(self, shareCode, beginYear, endYear):
        """Crawl daily trade history for *shareCode* over beginYear..endYear
        (inclusive) and write it to ./data/<code>_<name>_<range>.csv.

        BUG FIX: the file used to be opened in binary mode ('wb'), which
        makes csv.writer raise TypeError under Python 3.  It is now opened
        in text mode with newline='' (required by the csv module) and the
        'utf-8-sig' encoding, which emits the BOM that the manual
        codecs.BOM_UTF8 write used to provide (so Excel detects UTF-8).
        """
        shareCodeStr = str(shareCode)
        url = 'http://quotes.money.163.com/trade/lsjysj_' + shareCodeStr + '.html'
        data = requests.get(url, headers=self.headers)
        soup = BeautifulSoup(data.text, 'lxml')
        # Company display name, used in the output file name.
        name = soup.select('h1.name > a')[0].get_text()

        path = ('./data/' + shareCodeStr + '_' + name + '_'
                + str(beginYear) + '-' + str(endYear) + '.csv')
        with open(path, 'w', newline='', encoding='utf-8-sig') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(('日期','开盘价','最高价','最低价','收盘价','涨跌额','涨跌幅(%)','成交量(手)','成交金额(万元)','振幅(%)','换手率(%)'))
            try:
                for i in range(beginYear, endYear + 1):
                    print(str(i) + 'is going')
                    time.sleep(4)  # throttle to avoid being blocked
                    for j in range(1, 5):  # four quarters per year
                        rows = self.shareCrawl(shareCode, i, j)
                        for row in rows:
                            cells = row.findAll('td')
                            # Strip thousands separators so numbers stay parseable.
                            csvRow = [cell.get_text().replace(',', '') for cell in cells]
                            if csvRow:
                                writer.writerow(csvRow)
                        time.sleep(3)
                        print(str(i) + '年' + str(j) + '季度is done')
            except Exception as err:
                # Narrowed from a bare except; report the cause.
                print('---opps!Crawl error,the iteration didn`t run---', err)

#writeCSV(600109,2013,2017)

if __name__ == "__main__":
    # Demo entry point: add the hard-coded stock to the watch list.
    client = money163()
    client.follow()
