# -*- coding: utf-8 -*-
"""
Scrape share-buyback announcement data from the Eastmoney data-center API
and save each market segment to a CSV file.

Originally a Spyder scratch script; `# In[...]` markers delimit runnable cells.
"""
# In[]
import requests
import json
import time
import fake_useragent as fu    # generates randomized browser User-Agent strings
ua = fu.UserAgent()
# Request header carrying a random User-Agent, to look like a normal browser.
header = {'User-Agent':ua.random }
import chardet    # web-page encoding detection (not used below in this file)
import pytesseract    # CAPTCHA OCR module (not used below in this file)
from bs4 import BeautifulSoup    # not used below in this file
import pandas as pd
import numpy as np
import os
# Work out of the local data directory so the CSVs land there.
os.chdir(r"D:\Document\doing")

# In[A股]
# Download the full A-share buyback list from the Eastmoney data-center API,
# paging through every result page, then save all rows to one CSV file.
url_1 = "http://datacenter.eastmoney.com/api/data/get?type=RPTA_WEB_GETHGLIST&sty=ALL&source=WEB&p="
page = str(1)
url_2 = "&ps=50&st=dim_date&sr=-1"
url = url_1 + page + url_2
# Fix: actually send the randomized User-Agent header built at the top of the
# file (it was constructed but never used), and bound the request with a
# timeout so a hung connection cannot stall the script forever.
response = requests.get(url, headers=header, timeout=30)
json_text = response.json()
frames = [pd.DataFrame(json_text['result']['data'])]

# Fix: read the real page count from the API response instead of the
# hard-coded 22, matching how the other market sections determine their limit.
page_limit = json_text['result']['pages'] + 1
for page in range(2, page_limit):    # plain range; numpy is not needed here
    time.sleep(5)    # throttle requests to avoid being rate-limited
    url = url_1 + str(page) + url_2
    response = requests.get(url, headers=header, timeout=30)
    json_text = response.json()
    frames.append(pd.DataFrame(json_text['result']['data']))

# Concatenate once at the end instead of re-copying the frame every iteration.
df_date = pd.concat(frames)
df_date.to_csv("东方财富A股回购数据.csv")
# In[中小板]
# Same crawl as the A-share section, restricted to the SME board via the
# API filter `market == "1"`; all pages are saved to one CSV file.
url_1 = "http://datacenter.eastmoney.com/api/data/get?type=RPTA_WEB_GETHGLIST&sty=ALL&source=WEB&p="
page = str(1)
url_2 = "&ps=50&st=dim_date&sr=-1&filter=(market=%221%22)"
url = url_1 + page + url_2
# Fix: send the randomized User-Agent header (built at the top of the file but
# previously unused) and add a timeout so a dead connection cannot hang us.
response = requests.get(url, headers=header, timeout=30)
json_text = response.json()
frames = [pd.DataFrame(json_text['result']['data'])]

page_limit = json_text['result']['pages'] + 1    # total pages reported by the API
for page in range(2, page_limit):    # plain range instead of np.arange
    time.sleep(5)    # throttle between requests
    url = url_1 + str(page) + url_2
    response = requests.get(url, headers=header, timeout=30)
    json_text = response.json()
    frames.append(pd.DataFrame(json_text['result']['data']))

# A single concat avoids quadratic re-copying of the growing DataFrame.
df_date = pd.concat(frames)
df_date.to_csv("东方财富中小板回购数据.csv")
# In[创业板]
# Same crawl again, restricted to the ChiNext board via the API filter
# `market == "2"`; all pages are saved to one CSV file.
url_1 = "http://datacenter.eastmoney.com/api/data/get?type=RPTA_WEB_GETHGLIST&sty=ALL&source=WEB&p="
page = str(1)
url_2 = "&ps=50&st=dim_date&sr=-1&filter=(market=%222%22)"
url = url_1 + page + url_2
# Fix: send the randomized User-Agent header (built at the top of the file but
# previously unused) and add a timeout so a dead connection cannot hang us.
response = requests.get(url, headers=header, timeout=30)
json_text = response.json()
frames = [pd.DataFrame(json_text['result']['data'])]

page_limit = json_text['result']['pages'] + 1    # total pages reported by the API
for page in range(2, page_limit):    # plain range instead of np.arange
    time.sleep(5)    # throttle between requests
    url = url_1 + str(page) + url_2
    response = requests.get(url, headers=header, timeout=30)
    json_text = response.json()
    frames.append(pd.DataFrame(json_text['result']['data']))

# A single concat avoids quadratic re-copying of the growing DataFrame.
df_date = pd.concat(frames)
df_date.to_csv("东方财富创业板回购数据.csv")
# In[]
# Interactive inspection of the last JSON response — run line by line in the
# console to examine the payload's structure; no effect when run as a script.
len(json_text)
json_text.keys()
json_text['result'].keys()
json_text['result']['pages']    # total number of result pages
json_text['result']['count']    # total number of records

json_text['result']['data']    # the list of buyback records




