from main_copy import crawl

if __name__ == '__main__':
    # --- Earlier one-off entry points, kept commented for reference ---
    # crawl_xlsx('./xlsx/补.xlsx', index=16)

    # pages = [
    #     ['通知公告', 'http://www.wuyang.gov.cn/wuyangdongtai/tongzhigonggao/'],
    #     ['公告公示', 'http://www.wuyang.gov.cn/zhengwugongkai/gonggaogongshi/'],
    #     ['政府文件', 'http://www.wuyang.gov.cn/zhengwugongkai/zhengfuwenjian/'],
    #     ['政策解读', 'http://www.wuyang.gov.cn/zhengwugongkai/zhengcejiedu/'],
    #     ['政府采购', 'http://www.wuyang.gov.cn/zhongdianlingyuxinxigongkai/caizhengzijin/zhengfucaigou/'],
    #     ['发展规划', 'http://www.wuyang.gov.cn/zhengwugongkai/fazhanguihua/'],
    # ]
    # data = format_data('4101482', pages)
    # print(len(data))
    # page_point_upload(data)

    # Select every enabled org (`status` = 1) in city 410100 that has a URL,
    # then hand the query to the crawler starting at the first row.
    query = 'SELECT * FROM sys_org WHERE `status` = 1 and city_code = 410100 and url is not null'
    crawl(query, index=0)

# Remaining city codes (presumably still to be crawled — confirm with maintainer):
# 411300
# 411400
# 411500
# 411600
# 411700
# 411900