import os
import re
import shutil     # file operations
import threading  # worker threads for unpack/status writing
import urllib
import urllib.request  # explicit: `import urllib` alone does not expose urllib.request
import zipfile

import pandas as pd
import requests
import user_agent
import xlrd
from lxml import etree
from sqlalchemy import create_engine
#  NOTE: xlrd 2.0.1 only supports .xls files, so pin xlrd 1.2.0

#%%
# 所有的页面
# 创建文件夹，专门下载
try:
    os.mkdir('./savefiles')
except:
    pass

# 设置计数变量
global_f = 0
global_g = 0
fail_folder = set()

#%%
# 建立解压目录
try:
    os.mkdir('./unpacked')
except:
    pass


#%%
# 封装解压函数
def ziprelease(projectname):
    # 拼接压缩文件路径
    i = os.path.join('./savefiles', projectname +'.zip')
    try:
        zip_file_contents = zipfile.ZipFile(i, 'r')    # 第一步，打开压缩包
    except:
        return None
    for file in zip_file_contents.namelist():
        try:
            zip_file_contents.extract(file, './unpacked/%s' % projectname)  # 第二步，解压
            filename = file.encode('cp437').decode('gbk')  # 新文件名
            file = './unpacked/{}/{}'.format(projectname, file)    # 旧文件路径
            filename = './unpacked/{}/{}'.format(projectname, filename)   # 新文件路径
            os.rename(file, filename)  # 替换文件名
        except:
            fail_folder.add(projectname)
            print('解压失败:' + projectname)

            continue
    t2 = threading.Thread(target = write_status, args = ([projectname]))
    t2.setDaemon(False)
    t2.start()

# 封装写状态函数
def write_status(projectname):
    if re.match('.(\w){4}', projectname):
        status = re.match('(\w){4}',projectname).group()
        with open('./unpacked/%s/status.txt'%(projectname), 'w') as f:
            f.write(status)


#%%


for j in range(1, 3):
    url = "http://ecp.sgcc.com.cn/ecp1.0/project_list.jsp?site=global&column_code=014001001&project_type=1&company_id=&status=&project_name=&pageNo=%d"%j
    response = requests.get(url, headers={'User-Agent': user_agent.get_user_agent_pc()})
    res = etree.HTML(response.text).xpath('//tr[@align = "left"]')
    del res[0]
    local_a = 0
    local_b = 0

    # 每一个标书页面开始循环
    for i in res:
        try:
            projectname = i.xpath('./td[@class = "black40"]/a/@title')[0]
            link = i.xpath('./td[@class = "black40"]/a/@onclick')[0]
            num = re.search('showProjectDetail\((.*?),\'(.*?)\'\);', link)  # \(  \) \' 转义字符， （.*？）两个坑
            num = num.group(2)  # 第二个坑
            url2 = "http://ecp.sgcc.com.cn/ecp1.0/html/project/014001001/" + num + ".html"
            response2 = requests.get(url2, headers={'User-Agent': user_agent.get_user_agent_pc()})
            project_status = etree.HTML(response2.text).xpath('//tr/td/text()')[1].strip()  # alternative: //td[2]/text()[1]
            projectname = project_status + projectname
            # re.search('(\S+.*?)', t).group(1)  \S 表示除了空白字符外所有字符
            # print(projectname, project_status)
            # 下载地址, 需要拼接
            url3 = etree.HTML(response2.text).xpath('//td[2]/a/@href')[0]
            root = 'http://ecp.sgcc.com.cn'
            downloadurl = root + url3  ## 在Network里clearall, 点网页上点下载连接，会出现一个network
        except:
            # 可能的网络问题失败， 下载一次标书失败
            print('本页面有问题',url2)
            continue


        # 用代码下载文件
        try:
            urllib.request.urlretrieve(url=downloadurl, filename='./savefiles/%s.zip' %projectname)
            # 开启多线程
            t1 = threading.Thread(target = ziprelease, args = ([projectname]))   # target 需要传递函数名
            t1.setDaemon(False)
            t1.start()
            global_f += 1
            local_a +=1
        except:
            print('{}页{}下载失败'.format(j, projectname))
            global_g += 1
            continue

    print('第%d页全部下载成功%d份标书'%(j, local_a))
    # mark一下, 防止下载中止，之前的都白下了
    print('page-%d is done'%j)
    print('截止此刻下载成功%d份标书'%global_f)

#%%

print('共下载成功文件数量',global_f)
print('共下载失败文件数量',global_g)
print('共解压失败标书数量', len(fail_folder))
print(fail_folder)

#%%
# deal with failure
for i in fail_folder:
    path = './unpacked/%s'%i
    for file in os.listdir(path):
        if file.startswith('.') or file.startswith('s'):
            continue
        for j in os.listdir('./unpacked/%s/%s'%(i, file)):
            newname = j.encode('cp437').decode('gbk')
            newpath = './unpacked/{}/{}'.format(i, newname)
            oldpath = './unpacked/{}/{}/{}'.format(i, file, j)
            os.rename(oldpath, newpath)

#%%
# 存储每个标文件夹的名称
listdir = []
for file in os.listdir('./unpacked'):
    if not file.startswith('.'):
        listdir.append(file)

#%%

dataframe = []
for i in listdir:
    i = os.path.join('./unpacked', i)   # 拼接文件路径
    for j in os.listdir(i):
        # 任何包括'数据清单'里任何一个汉字的excel文件
        if re.search('[货物清单].*?.xls', j) and not j.startswith('.'):
            #if not j.startswith('.'):
                # excel = re.search('[货物清单].*?.xls', j).group()
                # j 是文件名，需要找到他的路径
            excel = os.path.join(i, j)  # 文件路径
            # print(excel)
            sheetall = pd.ExcelFile(excel)
            # 读取状态
            with open('%s/status.txt'%i, 'r') as f:
                sta = f.read()
            # each sheet has a name
            for sheetname in sheetall.sheet_names:
            # 一个 sheet就是一个dataframe
                df = sheetall.parse(sheet_name = sheetname, skiprows = 1)   # 读取数据sheet
                df['项目状态'] = sta
                dataframe.append(df)

#%%
# 竖着拼接dataframe
df = pd.concat(dataframe)

# 选择需要的列
col = ['包号', '网省采购申请行号', '项目单位', '需求单位', '项目名称', '工程电压等级', '物资名称', '物资描述','单位', '数量', '交货日期', '交货地点', '备注', '技术规范ID', '项目状态']
df1 = df[col]

#%%
# delete NaN
df1 = df1.dropna(subset = ['包号'])
df1.info()

#%%
# 写入数据库
# 1. MySQL 建立数据库
'''
create database guojiadianwang;
use guojiadianwang; 
alter database guojiadianwang character set utf8mb4;
'''
#%%
import sqlalchemy
import pymysql

engine = create_engine("mysql+pymysql://{}:{}@{}/{}?charset={}"
                       .format('root',
                               'dajipang666',
                               'localhost',
                               'guojiadianwang',
                               'utf8mb4')
                      )
#%%
df1.to_sql('df1', con = engine, if_exists = 'replace', index = 'True' )

