import requests
import bs4
import csv
import pandas as pd
import time
import random as r
import os

# Prompt for the game id, fetch the public-report index page, and build
# the full set of report URLs (one sub-list of URLs per reporting period).
gameid = input("gameid=")
Cookies={"_session_id":"11f51789c3016bab2d8863405d5dc2be"}
headers = {
'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36'
}
url = 'http://www.ibizsim.cn/games/public_report?gameid='+gameid
res = requests.get(url,headers=headers,cookies=Cookies)
bs = bs4.BeautifulSoup(res.text, 'html.parser')
bs = bs.find('ul', class_="b_pagination")
url_list=[]
URL=[]

# Walk the pagination anchors; keep pages whose numeric label is >= 9 and
# splice each anchor's query suffix (from "&" onward) onto the base URL.
for anchor in bs.find_all("a"):
      raw = str(anchor)
      label_start = raw.find(">") + 1
      page_no = int(raw[label_start:-4])  # slice off the trailing "</a>"
      if page_no >= 9:
            href = str(anchor["href"])
            url_list.append(url + href[href.find("&"):])

# For every period page, collect the base URL plus one URL per report view.
render_names = ["","public_report_marketshare","public_report_projectorder","public_report_primary","public_report_income","public_report_cost","public_report_profit","public_report_tax","public_report_dividens","public_report_networth","public_report_score"]
for url_part in url_list:
      period_urls = []
      for render in render_names:
            if render == "":
                  url = url_part
            else:
                  url = url_part + "&render=" + str(render)
            period_urls.append(url)
      URL.append(period_urls)  # e.g. [[period-1 urls], [period-2 urls], ...]

def csv_write_three(url,name):
      """Scrape a report table whose anchor hrefs embed two Python list
      literals ("[values], [companies]") and write them to <name>.csv.

      The first anchor supplies the company column plus the first data
      column; every later anchor contributes one more numbered column.
      Uses the module-level ``headers`` and ``Cookies`` for the request.
      """
      res = requests.get(url,headers=headers,cookies=Cookies)
      bs = bs4.BeautifulSoup(res.text, 'html.parser')
      table = bs.find('table', class_="table table-bordered")
      count = 1
      for link in table.find_all("a"):
            data = link["href"]
            if count == 1:
                  position = data.find("[")
                  # NOTE(review): eval() on scraped href text executes
                  # arbitrary code if the page is hostile — consider
                  # ast.literal_eval for literal-only parsing.
                  b, a = eval(data[position:-1])  # b = values, a = company names
                  # Dict keys become the CSV column headers.
                  dataframe = pd.DataFrame({'公司':a,str(count):b})
            else:
                  start_position = data.find("[")
                  end_position = data.find("]") + 1
                  a = eval(data[start_position:end_position])
                  dataframe[str(count)] = a  # append one column per anchor
            count += 1
      dataframe.to_csv(str(name)+".csv",sep=',',index=False)

def csv_write_else(url,name):
      """Scrape a report table whose anchor hrefs embed
      "(item, ..., ([values], [companies]))" tuples and write one column
      per item to <name>.csv, with the company names as the first column.

      Values are rounded to two decimal places. Uses the module-level
      ``headers`` and ``Cookies`` for the request.
      """
      res = requests.get(url,headers=headers,cookies=Cookies)
      bs = bs4.BeautifulSoup(res.text, 'html.parser')
      table = bs.find('table', class_="table table-bordered")
      first = True
      for link in table.find_all("a"):
            data = link["href"]
            item_start = data.find("(") + 1
            first_comma = data.find(",")
            second_comma = data.find(",", first_comma + 1)
            item = data[item_start:first_comma]  # column label
            # NOTE(review): eval() on scraped href text executes arbitrary
            # code if the page is hostile — consider ast.literal_eval.
            values, companies = eval(data[second_comma+1:-1])
            values = [float('{:.2f}'.format(v)) for v in values]  # 2-dp rounding
            if first:
                  # Truncate the company list to match the value count.
                  dataframe = pd.DataFrame({'公司':companies[:len(values)],str(item):values})
                  first = False
            else:
                  dataframe[str(item)] = values
      dataframe.to_csv(str(name)+".csv",sep=',',index=False)

def csv_write_only(url,name):
      """Scrape a report table cell-by-cell and dump it to <name>.csv.

      Column names come from the <th> cells; body rows are rebuilt from a
      flat cell list in fixed chunks of 12 cells per row (assumes every
      such table has 12 columns — TODO confirm for all report types).
      """
      global headers
      global Cookies
      res = requests.get(url,headers=headers,cookies=Cookies)
      bs = bs4.BeautifulSoup(res.text, 'html.parser')
      bs = bs.find('table', class_="table table-bordered")
      # Column names: render the <th> result list to text and strip the tags.
      a=bs.find_all("th")
      a=str(a)
      a=a.replace("<th>","")
      a=a.replace("</th>","")
      a=(a[1:-1]).split(",")  # drop the "[" "]" of the list repr, split on ","

      num=1
      lst=[]
      # Flatten every cell of every data row (header row skipped) into lst,
      # taking the text between the first ">" and the following "<".
      trs = bs.find_all('tr')[1:]
      for tr in trs:
            for i in tr:
                  i=str(i)
                  if i != "\n":  # whitespace nodes between cells render as "\n"
                        start_position=i.find(">")+1
                        end_position=i.find("<",int(start_position))
                        data=i[start_position:end_position]
                        lst.append(data)
      #print(lst)
      # Re-chunk the flat cell list into rows of 12 cells each.
      n=0
      data=[]
      ls=[]
      for i in lst:
            if n<12:
                  ls.append(i)
                  n+=1
            else:
                  n-=11  # reset the counter: this cell starts a new row (count 1)
                  data.append(ls)
                  ls=[]
                  ls.append(i)
      else:
            data.append(ls)  # for-else: flush the final (possibly short) row
      #print(data)

      dataframe = pd.DataFrame(columns= a,index=[0])
      # Insert rows one at a time: always write at index 0, then shift the
      # whole index up so the next write lands in a fresh slot.
      for singlelist in data:
            #print(singlelist)
            indexsize = 0
            dataframe.loc[indexsize] =singlelist
            dataframe.index = dataframe.index + 1
            #print(dataframe)
      dataframe.to_csv(str(name)+".csv",sep=',',index=False)

# Download every report of every period into <gameid>/<period><report##>.csv.
n = 0
dir_name = gameid
os.makedirs(dir_name, exist_ok=True)  # create once; no-op if it already exists
os.chdir(dir_name)
for period_urls in URL:
      n += 1
      for m, j in enumerate(period_urls, start=1):
            time.sleep(r.random())  # polite random delay between requests
            # Zero-pad the report number: period 1, report 3 -> "103".
            name = "{}{:02d}".format(n, m)

            # Dispatch each URL to the parser matching its table layout.
            if "render" not in j:
                  print(j)
                  csv_write_three(j,name)
            elif "public_report_marketshare" in j or "public_report_primary" in j:
                  csv_write_three(j,name)
            elif "public_report_projectorder" in j:
                  csv_write_only(j,name)
            else:
                  print(j)
                  csv_write_else(j,name)
print("ok\nok\nok\nok\nok\nok\nok\nok\nok\nok\nok\nok\nok")
