# -*- coding: utf-8 -*-
"""
侵删，本代码用于爬取http://www.landchina.com/default.aspx?tabid=263，网站的中国土地交易数据
大部分参考了知乎的代码https://www.zhihu.com/question/36722967/answer/69408380，适用于python3
与源代码的区别是增加了我想下载的城市的代码，改造自知乎源代码，侵删

"""

#import json
import re
import xlwt
import requests
from bs4 import BeautifulSoup
import time
import random
import sys

def write(sheet, data):
    """Write one scraped record into the worksheet.

    ``data`` maps a unique record key (the supervision number) to a
    dict of field name -> value.  Column positions are assigned
    lazily: the first time a field name appears it is given the next
    free column and a header cell is written in row 0.

    Uses module-level state: ``colid`` (next free column index),
    ``row`` (next free row index) and ``fields`` (field name ->
    column index mapping).
    """
    global colid, row, fields
    for key, value in data.items():
        # Column 0 always holds the record key.
        sheet.write(row, 0, key)
        for k, v in value.items():
            if k not in fields:
                # First occurrence of this field: allocate a column
                # and write its header.
                fields[k] = colid
                sheet.write(0, colid, k)
                colid += 1
            sheet.write(row, fields[k], v)
        row += 1

def get_post_data(url, headers):
    """Fetch the search page once and collect the hidden form fields
    needed for the subsequent POST queries.

    Returns a dict of post data (ASP.NET ``__VIEWSTATE`` /
    ``__EVENTVALIDATION`` tokens plus the ``TAB_*`` query-condition
    values).  Exits the process on any network or parsing failure.
    """
    data = {
        'TAB_QuerySubmitSortData': '',
        'TAB_RowButtonActionControl': '',
    }

    try:
        req = requests.get(url, headers=headers)
    except Exception as ex:
        # Show the actual reason instead of failing silently.
        print('get baseurl failed, try again!', ex)
        sys.exit(1)
    try:
        soup = BeautifulSoup(req.text, "html.parser")

        # Hidden inputs whose values are copied verbatim into the post
        # data under the same key (was eight copy-pasted stanzas).
        for input_id in (
            'TAB_QueryConditionItem270',
            'TAB_queryTblEnumItem_256',
            'TAB_queryTblEnumItem_212',
            'TAB_queryTblEnumItem_215',
            '__EVENTVALIDATION',
            '__VIEWSTATE',
        ):
            data[input_id] = soup.find('input', id=input_id).get('value')

        # Fixed GUIDs identifying the query-condition widgets on the
        # search form.
        data['TAB_QueryConditionItem256'] = '42ad98ae-c46a-40aa-aacc-c0884036eeaf'
        data['TAB_QueryConditionItem212'] = 'ec9f9d83-914e-4c57-8c8d-2c57185e912a'
        data['TAB_QueryConditionItem215'] = '8fd0232c-aff0-45d1-a726-63fc4c3d8ea9'
        data['TAB_QueryConditionItem218'] = '20f50617-f7d0-4d6c-b0df-7c24fcc5eed6'
        data['TAB_QueryConditionItem269'] = '566b2f6d-5ef5-4ccf-8683-53492916fd2f'
        data['TAB_QueryConditionItem284'] = 'df7c2cd2-7afc-4f50-b52c-3c7ac512f5a6'

        # The default sort value is posted under two different keys.
        sort_value = soup.find('input', id="TAB_QuerySort0").get('value')
        data['TAB_QuerySortItemList'] = sort_value
        data['TAB_QuerySubmitOrderData'] = sort_value
    except Exception as ex:
        print('get post data failed, try again!', ex)
        sys.exit(1)

    return data


def get_info(url, headers):
    """Scrape one parcel detail page.

    Returns ``{record_id: {field_name: value}}`` where the record id
    is the unique electronic supervision number shown on the page.
    """
    table_id = "mainModuleContainer_1855_1856_ctl00_ctl00_p1_f1"

    req = requests.get(url, headers=headers)
    soup = BeautifulSoup(req.text, "html.parser")
    items = soup.find('table', id=table_id)

    # The site sometimes serves a page without the detail table
    # (anti-bot / transient failure); retry until it appears.
    # NOTE(review): this loops forever if the page never loads --
    # consider a retry cap.
    attempt = 1
    while items is None:
        print('爬取失败，正在重复第{0}次'.format(attempt))
        req = requests.get(url, headers=headers)
        soup = BeautifulSoup(req.text, "html.parser")
        items = soup.find('table', id=table_id)
        attempt += 1

    # Output field name -> span-id suffix within the detail table.
    span_ids = {
        'XingZhengQu': 'r1_c2_ctrl',       # administrative division
        'XiangMuWeiZhi': 'r16_c2_ctrl',    # project location
        'MianJi': 'r2_c2_ctrl',            # area (hectares)
        'TuDiYongTu': 'r3_c2_ctrl',        # land use
        'GongDiFangShi': 'r3_c4_ctrl',     # supply mode
        'ChengJiaoJiaGe': 'r20_c4_ctrl',   # transaction price (10k CNY)
        'Qiandingriqi': 'r14_c4_ctrl',     # contract signing date
        'ShiYongQuanRen': 'r9_c2_ctrl',    # land-use-right holder
    }

    # get_text() already returns str in Python 3, so the original
    # encode('utf-8')/str(..., encoding='utf-8') round trip was a
    # no-op and is dropped.
    info = {
        name: items.find(
            'span', id='{0}_{1}'.format(table_id, suffix)).get_text()
        for name, suffix in span_ids.items()
    }

    # Key the record by its unique supervision number (cell r1_c4).
    key_id = items.find(
        'span', id='{0}_r1_c4_ctrl'.format(table_id)).get_text()
    return {key_id: info}


def get_pages(baseurl, headers, post_data, date):
    """Walk every result page of the query and append each parcel's
    info dict to the global ``dt`` list.

    ``date`` is a range like ``'2015-1-1~2018-4-30'`` and is inserted
    into the submit-condition string (the original hard-coded the
    range and ignored this parameter).  Edit the region / supply-mode
    parts of the condition string to scrape a different city.
    """
    print('date', date)
    # Full query condition: date range | region (320684~海门市) |
    # supply mode (2~招拍挂出让).
    post_data['TAB_QuerySubmitConditionData'] = (
        '9f2c3acd-0256-4da2-a659-6949c4671a2a:' + date +
        '|42ad98ae-c46a-40aa-aacc-c0884036eeaf:320684~海门市'
        '|8fd0232c-aff0-45d1-a726-63fc4c3d8ea9:2~招拍挂出让'
    )

    # First request only discovers the total page count.
    time.sleep(random.random() * 3)
    post_data['TAB_QuerySubmitPagerData'] = '1'
    req = requests.post(baseurl, data=post_data, headers=headers)
    soup = BeautifulSoup(req.text, "html.parser")
    # Accept any page count: the original regex ([1-9][0-9]) only
    # matched two-digit totals and broke for <10 or >99 pages.
    total = int(re.findall(r"(\d+)页", soup.find_all(align="right")[-1].text)[0])
    print('总共{0}页'.format(total))

    # range(1, total + 1) includes the last page; the original
    # `while page < total` never scraped it (off-by-one).
    for page in range(1, total + 1):
        print('     page {0}'.format(page))
        # Random pause so the crawler looks less like a bot.
        time.sleep(random.random() * 3)
        post_data['TAB_QuerySubmitPagerData'] = str(page)
        req = requests.post(baseurl, data=post_data, headers=headers)
        soup = BeautifulSoup(req.text, "html.parser")
        rows = soup.find('table', id="TAB_contentTable").find_all(
            'tr', onmouseover=True)
        for tr in rows:
            print(tr.find('td').get_text())
            link = tr.find('a').get('href')
            if link:
                print(tr.find('a').text)
                url = 'http://www.landchina.com/' + link
                dt.append(get_info(url, headers))
            else:
                # An empty link marks the end of the result set.
                print('no content, this ten days over')
                return
     
        
        
if __name__ == "__main__":
    baseurl = 'http://www.landchina.com/default.aspx?tabid=263'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36',
        'Host': 'www.landchina.com'
    }

    # Scraped records accumulate here (get_pages appends to it).
    dt = []
    post_data = get_post_data(baseurl, headers)

    date = '2015-1-1~2018-4-30'  # date range to scrape; edit as needed
    get_pages(baseurl, headers, post_data, date)

    # Shared state consumed by write(): next free column index, next
    # free row index, and the field-name -> column mapping.
    colid = 1
    row = 1
    fields = {}
    workbook = xlwt.Workbook(encoding='utf-8')
    worksheet = workbook.add_sheet("sheet")
    for record in dt:
        write(worksheet, record)

    workbook.save("test.xls")
    print('写入文件成功!')