{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import ee\n",
    "ee.Initialize()\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import threading\n",
    "from concurrent import futures\n",
    "import os\n",
    "from osgeo import gdal\n",
    "from osgeo import osr\n",
    "import time\n",
    "from keras.models import Model\n",
    "from keras.layers import *\n",
    "from keras import optimizers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def summer_img(l8, st_year, ed_year, path, row):\n",
    "    \"\"\"Select summer (June-August) Landsat scenes for one WRS-2 tile.\n",
    "\n",
    "        inputs: l8 - ee.ImageCollection to filter\n",
    "                st_year, ed_year - first and last year of the range (inclusive)\n",
    "                path, row - WRS-2 path/row identifying the tile\n",
    "        outputs: ee.ImageCollection sorted by ascending CLOUD_COVER\n",
    "    \"\"\"\n",
    "    landsat = l8.filter(ee.Filter.eq('WRS_PATH', int(path)))\\\n",
    "            .filter(ee.Filter.eq('WRS_ROW', int(row)))\\\n",
    "            .filter(ee.Filter.calendarRange(int(st_year),int(ed_year),'year'))\\\n",
    "            .filter(ee.Filter.calendarRange(6,8,'month'))\\\n",
    "            .sort('CLOUD_COVER')\n",
    "    return landsat\n",
    "def generate_grid(xmin, ymin, xmax, ymax, dx, dy):\n",
    "    \"\"\"Build a FeatureCollection of dx-by-dy rectangles covering a bounding box.\n",
    "\n",
    "        inputs: xmin, xmax - longitude range; ymin, ymax - latitude range\n",
    "                dx - cell width (longitude step); dy - cell height (latitude step)\n",
    "        outputs: ee.FeatureCollection of rectangular grid cells\n",
    "    \"\"\"\n",
    "    xx = np.arange(xmin, xmax, dx)\n",
    "    yy = np.arange(ymin, ymax, dy)\n",
    "    rect = []\n",
    "    for i in yy:\n",
    "        for j in xx:\n",
    "            y1 = i\n",
    "            y2 = i + dy  # BUGFIX: was i+dx -- the latitude step must use dy (worked only because dx == dy)\n",
    "            x1 = j\n",
    "            x2 = j + dx  # BUGFIX: was j+dy -- the longitude step must use dx\n",
    "            coords = ee.List([x1, y1, x2, y2])\n",
    "            rect.append(ee.Algorithms.GeometryConstructors.Rectangle(coords))\n",
    "    return ee.FeatureCollection(rect)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def project(path, row):\n",
    "    \"\"\"Process one Landsat WRS-2 tile end to end.\n",
    "\n",
    "        Builds a cloud-free summer composite for (path, row), splits it into\n",
    "        0.135-degree grid cells, and for every cell writes the image, the QA\n",
    "        water mask, and a CNN-refined water label as GeoTIFF files.\n",
    "\n",
    "        inputs: path, row - WRS-2 path/row of the tile\n",
    "        outputs: None (store masks, images and labels on disk)\n",
    "    \"\"\"\n",
    "    # NOTE(review): predict is never called in this notebook, and `model` is\n",
    "    # not defined in this scope, so calling it as-is would raise NameError.\n",
    "    # np.argmin over the 2-class softmax also selects the *least* probable\n",
    "    # class -- presumably argmax was intended; confirm before reusing.\n",
    "    def predict(x):\n",
    "        pre = np.argmin(model.predict(x[np.newaxis,:])[0],axis=-1)\n",
    "        if pre.sum() <= 10:\n",
    "            return 0\n",
    "        else:\n",
    "            return pre\n",
    "    def get_array_from_image(image,grids,LDC = False):\n",
    "        \"\"\"Download one grid cell of `image` as a numpy array.\n",
    "\n",
    "            inputs: image - ee.Image to sample\n",
    "                    grids - list of GeoJSON grid features; only grids[0] is used\n",
    "                    LDC - False: fetch SR_B2-SR_B6 plus the SR_B98 (NDWI) and\n",
    "                          SR_B99 (edge-enhancement) bands added by cloud_free,\n",
    "                          and elevation; True: fetch only the QA_PIXEL band\n",
    "            outputs: dict {grid_id: {'geo': feature, 'value': array, 'dim': shape}},\n",
    "                     or 0 when the download failed (falsy sentinel for callers)\n",
    "        \"\"\"\n",
    "\n",
    "        def acquire_img(i):\n",
    "            results = {}\n",
    "            try:\n",
    "                # sampleRectangle pulls the pixel block covering the cell polygon\n",
    "                band_arrs =image.sampleRectangle(region=ee.Geometry(ee.Geometry.Polygon(i['geometry']['coordinates'])))\n",
    "                if not LDC:\n",
    "                    band_arr = band_arrs.get('SR_B2')\n",
    "                else:\n",
    "                    band_arr = band_arrs.get('QA_PIXEL')\n",
    "                np_arr = np.array(band_arr.getInfo())[:,:,np.newaxis]\n",
    "                # NOTE(review): crs_transform is computed but never used\n",
    "                crs_transform = image.clip(ee.Geometry.Polygon(i['geometry']['coordinates'])).getInfo()['bands'][0]['crs_transform']\n",
    "                if LDC==False:\n",
    "                    # stack SR_B3..SR_B6 plus the derived SR_B98/SR_B99 bands\n",
    "                    for j in [3,4,5,6,98,99]:#range(2,8):\n",
    "                        band_arr = band_arrs.get('SR_B'+str(j))\n",
    "                        np_arr = np.concatenate([np_arr,np.array(band_arr.getInfo())[:,:,np.newaxis]],axis=-1)\n",
    "                    band_arr = band_arrs.get('elevation')\n",
    "                    np_arr = np.concatenate([np_arr,np.array(band_arr.getInfo())[:,:,np.newaxis]],axis=-1)\n",
    "                else:\n",
    "                    pass\n",
    "                results[i['id']] = {}\n",
    "                results[i['id']]['geo']=i\n",
    "                results[i['id']]['value']=np_arr\n",
    "                results[i['id']]['dim'] = np_arr.shape\n",
    "            except:\n",
    "                # NOTE(review): bare except hides every failure mode (network,\n",
    "                # EE quota, missing band) behind the same empty result\n",
    "                results = {}\n",
    "            return results\n",
    "        results = acquire_img(grids[0])\n",
    "        if results == {}:\n",
    "            return 0\n",
    "        return results\n",
    "    def write_tiff(im,pre):\n",
    "        \"\"\"Write an array to a GeoTIFF georeferenced from its grid-cell polygon.\n",
    "\n",
    "            inputs: im - dict with 'geo' (GeoJSON feature) and 'dim' (array shape)\n",
    "                    pre - (rows, cols, bands) image, (rows, cols, 1) mask, or\n",
    "                          (rows, cols) mask\n",
    "            outputs: path of the written file, or None for unsupported shapes\n",
    "\n",
    "            Multi-band data is written to the im folder, single-band masks to\n",
    "            the mask folder; the [folder] placeholder must be replaced with a\n",
    "            real output directory.\n",
    "        \"\"\"\n",
    "        target = osr.SpatialReference()\n",
    "        target.ImportFromEPSG(4326)  # WGS84 lon/lat\n",
    "        originX = min(im['geo']['geometry']['coordinates'][0])[0]  # west edge (lon)\n",
    "        originY = max(im['geo']['geometry']['coordinates'][0])[1]  # north edge (lat)\n",
    "        rows = im['dim'][0]\n",
    "        cols = im['dim'][1]\n",
    "        c = np.array(im['geo']['geometry']['coordinates'][0])\n",
    "        pixelWidth = (c[:,0].max()-c[:,0].min())/cols   # degrees lon per pixel\n",
    "        pixelHeight = (c[:,1].min()-c[:,1].max())/rows  # negative: north-up raster\n",
    "        transform = (originX, pixelWidth, 0, originY, 0, pixelHeight)\n",
    "\n",
    "        def _write(fn, data, bands, gdal_type):\n",
    "            # helper: create the raster, georeference it, and write every band\n",
    "            driver = gdal.GetDriverByName('Gtiff')\n",
    "            outRaster = driver.Create(fn, cols, rows, bands, gdal_type)\n",
    "            outRaster.SetGeoTransform(transform)\n",
    "            outRaster.SetProjection(target.ExportToWkt())\n",
    "            for b in range(bands):\n",
    "                outband = outRaster.GetRasterBand(b+1)\n",
    "                outband.WriteArray(data[:,:,b] if data.ndim == 3 else data)\n",
    "            outRaster.FlushCache()  # BUGFIX: dataset was never flushed/closed\n",
    "            return fn\n",
    "\n",
    "        fn = None  # BUGFIX: 'return fn' raised NameError for unsupported shapes\n",
    "        shape = pre.shape\n",
    "        if len(shape) == 3 and shape[2] > 1:\n",
    "            # multi-band image, Float32 pixels (gdal type code 6)\n",
    "            fn = _write(r'[folder]'+'\\\\im\\\\im%.4f' % originX + '%.4f' % originY + '.tif', pre, shape[2], 6)\n",
    "        elif len(shape) in (2, 3):\n",
    "            # single-band mask, Byte pixels (gdal type code 1)\n",
    "            fn = _write(r'[folder]'+'\\\\mask\\\\mask%.4f' % originX + '%.4f' % originY + '.tif', pre, 1, 1)\n",
    "        return fn\n",
    "\n",
    "\n",
    "    def read_tiff(fn):\n",
    "        \"\"\"Load a GeoTIFF into a numpy array.\n",
    "\n",
    "            inputs: fn - path of the tiff file\n",
    "            outputs: (rows, cols) array for a single-band file,\n",
    "                     (rows, cols, bands) array otherwise\n",
    "        \"\"\"\n",
    "        dataset = gdal.Open(fn, gdal.GA_ReadOnly)\n",
    "        width = dataset.RasterXSize   # column count\n",
    "        height = dataset.RasterYSize  # row count\n",
    "        data = dataset.ReadAsArray(0, 0, width, height)\n",
    "        # GDAL returns (bands, rows, cols); move the band axis last\n",
    "        if data.ndim == 2:\n",
    "            return data\n",
    "        return data.transpose([1, 2, 0])\n",
    "    def CNN():\n",
    "        \"\"\"Build the small patch-classification network.\n",
    "\n",
    "            inputs: None\n",
    "            outputs: uncompiled keras Model taking a (7, 7, 7) feature patch\n",
    "                     and emitting a (1, 1, 2) softmax over two classes\n",
    "        \"\"\"\n",
    "        inputs = Input((7,7,7))\n",
    "        net = Conv2D(16, (3, 3), activation='relu')(inputs)\n",
    "        net = Conv2D(32, (3, 3), activation='relu')(net)\n",
    "        # skip connection: crop the raw patch to match the shrunken feature map\n",
    "        net = concatenate([net, inputs[:, 2:-2, 2:-2, :]])\n",
    "        net = Conv2D(64, (3, 3), activation='relu')(net)\n",
    "        net = concatenate([net, inputs[:, 3:-3, 3:-3, :]])\n",
    "        net = Conv2D(128, (1, 1), activation='relu')(net)\n",
    "        outputs = Conv2D(2, (1, 1), activation='softmax')(net)\n",
    "        return Model(inputs=inputs, outputs=outputs)\n",
    "    def noise_correct(mask,image,threshold):\n",
    "        \"\"\"\n",
    "            inputs: 预测掩膜图像，特征影像\n",
    "            outputs: 纠正后的图像\n",
    "        \"\"\"\n",
    "        water_index = np.where(mask[:,:,0]>threshold)\n",
    "        water_pixels = image[water_index[0],water_index[1],:]\n",
    "        mean = water_pixels.mean(axis=0)\n",
    "        std = water_pixels.std(ddof=1,axis=0)\n",
    "        min_r = mean-3*std\n",
    "        max_r = mean+3*std\n",
    "        water_mk = np.zeros(image.shape[:2])\n",
    "        for i,band in enumerate(image.transpose([2,0,1])):\n",
    "            water_mk+=(band>min_r[i]) * (band< max_r[i])\n",
    "        return water_mk[3:-3,3:-3]*(mask[:,:,0]>threshold)\n",
    "    def features(dt):\n",
    "        \"\"\"Assemble the 7-channel feature stack used for CNN training.\n",
    "\n",
    "            inputs: dt - (rows, cols, 8) array from get_array_from_image with\n",
    "                    channels [SR_B2, SR_B3, SR_B4, SR_B5, SR_B6, SR_B98(NDWI),\n",
    "                    SR_B99(edge), elevation]\n",
    "            outputs: (rows, cols, 7) stack [Blue, Green, Red, NIR, ndvi, mndwi, dem]\n",
    "\n",
    "            NOTE(review): with the channel order above, index 1 is SR_B3 (green)\n",
    "            rather than blue, and ndvi/mndwi mix in the derived 98/99 channels;\n",
    "            the indices look shifted by one (as if channel 0 were SR_B1).  The\n",
    "            trained model depends on these exact features, so they are left\n",
    "            unchanged -- verify against the original intent.  Zero denominators\n",
    "            in ndvi/mndwi propagate as inf/NaN.\n",
    "        \"\"\"        \n",
    "        Blue = dt[:,:,1].reshape(dt.shape[:2]+(1,))\n",
    "        Green = dt[:,:,2].reshape(dt.shape[:2]+(1,))\n",
    "        Red = dt[:,:,3].reshape(dt.shape[:2]+(1,))\n",
    "        NIR = dt[:,:,4].reshape(dt.shape[:2]+(1,))\n",
    "        ndvi = ((dt[:,:,4]-dt[:,:,3])/(dt[:,:,4]+dt[:,:,3])).reshape(dt.shape[:2]+(1,))\n",
    "        mndwi = ((dt[:,:,2]-dt[:,:,5])/(dt[:,:,2]+dt[:,:,5])).reshape(dt.shape[:2]+(1,))\n",
    "        dem = dt[:,:,7].reshape(dt.shape[:2]+(1,))\n",
    "        return np.concatenate([Blue,Green,Red,NIR,ndvi,mndwi,dem],axis=-1)\n",
    "    def weather_in_normal(im,mk):\n",
    "        \"\"\"\n",
    "            inputs: image, mask\n",
    "            oututs: Confidence interval left and right thresholds\n",
    "        \"\"\"\n",
    "        m = np.array([i.mean() for i in (im.transpose([2,0,1])*mk)])\n",
    "        s = np.array([i.std(ddof=1) for i in (im.transpose([2,0,1])*mk)])\n",
    "        left = m-3*s\n",
    "        right = m+3*s\n",
    "        return left,right\n",
    "    def write_std_mask(mask,im):\n",
    "        \"\"\"Write the corrected water mask as a Byte GeoTIFF (labels folder).\n",
    "\n",
    "            inputs: mask - (rows-6, cols-6) corrected mask from noise correction\n",
    "                    im - dict with 'geo' (GeoJSON feature) and 'value' (array)\n",
    "            outputs: None\n",
    "\n",
    "            The mask is padded back to the full tile size (3-pixel zero border)\n",
    "            so it aligns with the image written by write_tiff.  The [folder]\n",
    "            placeholder must be replaced with a real output directory.\n",
    "        \"\"\"\n",
    "        target = osr.SpatialReference()\n",
    "        target.ImportFromEPSG(4326)  # WGS84 lon/lat\n",
    "        originX = min(im['geo']['geometry']['coordinates'][0])[0]  # west edge (lon)\n",
    "        originY = max(im['geo']['geometry']['coordinates'][0])[1]  # north edge (lat)\n",
    "        shape = im['value'].shape\n",
    "        rows = shape[0]\n",
    "        cols = shape[1]\n",
    "        pre = np.zeros(shape[:2])\n",
    "        pre[3:-3,3:-3] = mask  # re-embed the cropped mask in the full tile\n",
    "        c = np.array(im['geo']['geometry']['coordinates'][0])\n",
    "        pixelWidth = (c[:,0].max()-c[:,0].min())/cols   # degrees lon per pixel\n",
    "        pixelHeight = (c[:,1].min()-c[:,1].max())/rows  # negative: north-up raster\n",
    "        transform = (originX, pixelWidth, 0, originY, 0, pixelHeight)\n",
    "        driver=gdal.GetDriverByName('Gtiff')\n",
    "        outRaster = driver.Create(r'[folder]'+'\\\\labels\\\\%.4f' % originX + '%.4f' % originY + '.tif',cols,rows,1,1)\n",
    "        outRaster.SetGeoTransform(transform)\n",
    "        outRaster.SetProjection(target.ExportToWkt())\n",
    "        outband=outRaster.GetRasterBand(1)\n",
    "        outband.WriteArray(pre)\n",
    "        outRaster.FlushCache()  # BUGFIX: flush so the file is complete on disk\n",
    "    def cloud_free(image):\n",
    "        \"\"\"Mask clouds out of one scene and append water-index bands.\n",
    "\n",
    "            inputs: image in ee format (one Landsat C2 L2 scene)\n",
    "            outputs: cloud-masked, reflectance-scaled image with an NDWI band\n",
    "                     (SR_B98) and a directional edge-enhancement band (SR_B99)\n",
    "        \"\"\"\n",
    "        # weights1..weights4 are 5x5 convolution kernels for linear enhancement:\n",
    "        # each compares the centre pixel (weight 2) against a pair of opposite\n",
    "        # neighbours (vertical, the two diagonals, horizontal)\n",
    "        weights1 = [[0,0,-1,0,0],\n",
    "                   [0,0, 0,0,0],\n",
    "                   [0,0, 2,0,0],\n",
    "                   [0,0, 0,0,0],\n",
    "                   [0,0, -1,0,0]]\n",
    "        weights2 = [[-1,0,0,0,0],\n",
    "                    [0,0, 0,0,0],\n",
    "                    [0,0, 2,0,0],\n",
    "                    [0,0, 0,0,0],\n",
    "                    [0,0,0,0, -1]]\n",
    "        weights3 = [[0,0,0,0,-1],\n",
    "                   [0,0, 0,0,0],\n",
    "                   [0,0, 2,0,0],\n",
    "                   [0,0, 0,0,0],\n",
    "                   [-1,0, 0,0,0]]\n",
    "        weights4 = [[0,0,0,0,0],\n",
    "                   [0,0, 0,0,0],\n",
    "                   [-1,0, 2,0,-1],\n",
    "                   [0,0, 0,0,0],\n",
    "                   [0,0, 0,0,0]]\n",
    "        # QA_PIXEL bits 1-4 (shift right 1, keep 4 bits): non-zero means some\n",
    "        # cloud/shadow flag is set -- bit meanings assumed from the Landsat C2\n",
    "        # QA_PIXEL convention, confirm\n",
    "        cloud = image.select('QA_PIXEL').rightShift(1).bitwiseAnd(15)#0b1111)\n",
    "        # apply the surface-reflectance scale (x0.0000275 - 0.2) and append\n",
    "        # QA_PIXEL bit 7 (presumably the water flag -- confirm) as an extra\n",
    "        # band that keeps the name QA_PIXEL\n",
    "        image = image.select('SR_B.').multiply(0.0000275).add(-0.2).addBands(image.select('QA_PIXEL').rightShift(7).bitwiseAnd(1))\n",
    "        # keep only pixels whose cloud bits are all zero\n",
    "        image_freecloud = image.updateMask(cloud.expression('b==0',{'b':cloud}))\n",
    "        # normalized difference of SR_B3 and SR_B6\n",
    "        ndwi = image_freecloud.normalizedDifference(['SR_B3','SR_B6'])\n",
    "        smooth1 = ndwi.convolve(ee.Kernel.fixed(weights=ee.List(weights1)))\n",
    "        smooth2 = ndwi.convolve(ee.Kernel.fixed(weights=ee.List(weights2)))\n",
    "        smooth3 = ndwi.convolve(ee.Kernel.fixed(weights=ee.List(weights3)))\n",
    "        smooth4 = ndwi.convolve(ee.Kernel.fixed(weights=ee.List(weights4)))\n",
    "        # strongest response over the four directions, appended as SR_B99\n",
    "        smooth = ee.Image([smooth1,smooth2,smooth3,smooth4]).reduce(ee.Reducer.max()).rename(['SR_B99'])\n",
    "        out_y = image_freecloud.addBands(ndwi.rename(['SR_B98'])).addBands(smooth)\n",
    "        return out_y\n",
    "    def write_grid_image(grid):\n",
    "        \"\"\"Download, refine, and save the water mask for one grid cell.\n",
    "\n",
    "            inputs: grid - one GeoJSON grid feature\n",
    "            outputs: None (GeoTIFFs and log lines are written to disk)\n",
    "\n",
    "            Cells with >100 QA water pixels get the raw image and QA mask\n",
    "            written via write_tiff, then an iteratively self-trained CNN whose\n",
    "            prediction (after 3-sigma noise correction) is saved as the label.\n",
    "        \"\"\"\n",
    "        mask = get_array_from_image(water,[grid],LDC=True)\n",
    "        im = get_array_from_image(image,[grid])\n",
    "        # NOTE(review): only `im` is checked; if the `mask` download failed it\n",
    "        # is 0 and mask.popitem() below raises AttributeError in this thread\n",
    "        if im:\n",
    "            tmp_mk= mask.popitem()\n",
    "            tmp_im = im.popitem()\n",
    "            if (tmp_mk[1]['value'] > 0).sum()>100:\n",
    "                with open(r'[filename]','a+') as f:\n",
    "                    # log filename should be given ([filename] is a placeholder)\n",
    "                    f.write(str(path)+' '+str(row)+' '+grid['id']+' start!\\n')\n",
    "                mask_file = write_tiff(tmp_mk[1],(tmp_mk[1]['value']>0)*255)\n",
    "                im_file = write_tiff(tmp_im[1],tmp_im[1]['value'])\n",
    "                \n",
    "                #noise correction\n",
    "                dt = tmp_im[1]['value']\n",
    "                dt = features(dt)\n",
    "                # QA_PIXEL here holds the extracted water bit (0/1) medianed\n",
    "                # over the summer stack, so mk==1 marks persistent QA water\n",
    "                mk = tmp_mk[1]['value'][:,:,0]\n",
    "                # crop 3 pixels so t aligns with the CNN output (7x7 -> 1x1)\n",
    "                t = (mk[3:-3,3:-3]==1).copy()*1\n",
    "                water_pixel_num = (t==1).sum()\n",
    "                threshold = 0.5  # NOTE(review): unused -- noise_correct gets 0.95\n",
    "                # NOTE(review): this model is rebuilt inside the loop below, so\n",
    "                # this first instance (and its compile) is never used\n",
    "                model = CNN()\n",
    "                # NOTE(review): `lr=` is the legacy keyword; newer keras expects\n",
    "                # learning_rate= -- confirm the installed version\n",
    "                model.compile(optimizer=optimizers.Adam(lr=1e-5), loss='categorical_crossentropy',metrics=['acc'])\n",
    "                left,right = weather_in_normal(dt,mk)  # NOTE(review): unused\n",
    "\n",
    "                # self-training: water pixels (t==1) are positives, an equal-\n",
    "                # sized random sample of the rest (t==2) are negatives\n",
    "                for times in range(3):\n",
    "                    water_pixel_num = (t==1).sum()\n",
    "                    loc_x = np.where(t!=1)[0]\n",
    "                    loc_y = np.where(t!=1)[1]\n",
    "                    if water_pixel_num < ((t.shape[0]*t.shape[1])/2):\n",
    "                        loc_length = len(loc_x)\n",
    "                        loc_choice = np.random.choice(np.arange(loc_length),water_pixel_num,replace=False)\n",
    "                        loc_x = loc_x[loc_choice]\n",
    "                        loc_y = loc_y[loc_choice]\n",
    "                    t[loc_x,loc_y]=2\n",
    "                    nums = (t!=0).sum()\n",
    "                    im_train = np.zeros((nums, 7,7,7))\n",
    "                    mk_train = np.zeros((nums,1,1,2))\n",
    "                    mk_copy = np.zeros(mk.shape)\n",
    "                    mk_copy[3:-3,3:-3]=t.copy()\n",
    "                    count=0  # NOTE(review): unused\n",
    "                    for idx,loc_idx in enumerate(zip(np.where(t!=0)[0],np.where(t!=0)[1])):\n",
    "                        # 7x7 patch centred on the labelled pixel (t is offset by 3)\n",
    "                        im_train[idx] = dt[loc_idx[0]:loc_idx[0]+7,loc_idx[1]:loc_idx[1]+7]\n",
    "                        # keep the label only where the last feature channel\n",
    "                        # (scaled elevation) is below 0.35 -- presumably a\n",
    "                        # lowland constraint; confirm the cutoff\n",
    "                        tmp = mk_copy[loc_idx[0]+3,loc_idx[1]+3]*(im_train[idx,3,3,-1].__lt__(0.35))\n",
    "                        #\n",
    "                        if tmp == 1:\n",
    "                            mk_train[idx,:,:,0] = 1\n",
    "                        elif tmp == 2:\n",
    "                            mk_train[idx,:,:,1] = 1\n",
    "                    model = CNN()\n",
    "                    model.compile(optimizer=optimizers.Adam(lr=1e-5), loss='categorical_crossentropy',metrics=['acc'])\n",
    "                    if times == 0:\n",
    "                        epochs = 30\n",
    "                        batch = 16\n",
    "                    else:\n",
    "                        epochs = 5\n",
    "                        batch = 16\n",
    "                    model.fit(im_train, mk_train, batch_size=batch,epochs = epochs,shuffle = True)#32 30\n",
    "                    p = model.predict(dt[np.newaxis,:])[0]\n",
    "                    # NOTE(review): times only reaches 2 in range(3), so this\n",
    "                    # branch is dead and noise_correct always runs\n",
    "                    if times == 10:\n",
    "                        t = (p[:,:,0]>p[:,:,1])*1\n",
    "                    else:\n",
    "                        t = (noise_correct(p,dt,0.95)>5)*1\n",
    "                write_std_mask((p[:,:,0]>p[:,:,1])*255,tmp_mk[1])\n",
    "            elif (tmp_mk[1]['value'] > 0).sum()>0:\n",
    "                # NOTE(review): hardcoded log path, inconsistent with the\n",
    "                # [filename] placeholder used elsewhere\n",
    "                with open(r'D:\\selenge_dem1\\jl.txt','a+') as f:\n",
    "                    f.write(str(path)+' '+str(row)+' '+grid['id']+' pass\\n')\n",
    "                pass\n",
    "\n",
    "        else:\n",
    "            pass\n",
    "    with open(r'[filename]','a+') as f:\n",
    "        # log filename should be given ([filename] is a placeholder)\n",
    "        f.write(str(path)+' '+str(row)+' start！\\n')\n",
    "\n",
    "    # build the cloud-free summer composite and its processing grid\n",
    "    imagec = summer_img(l8, st_year, ed_year, path, row)\n",
    "    geo = imagec.first().geometry()\n",
    "    DEM_0 = ee.Image('CGIAR/SRTM90_V4').reproject(crs = 'EPSG:4326',scale=30)\n",
    "    DEM = DEM_0.divide(4000)  # scale elevation down by 4000 (roughly to [0, 1])\n",
    "    coor = np.array(geo.getInfo()['coordinates'][0])\n",
    "    dx = 0.135  # grid-cell size in degrees (lon)\n",
    "    dy = 0.135  # grid-cell size in degrees (lat)\n",
    "    xmin = coor[:,0].min()\n",
    "    xmax = coor[:,0].max()\n",
    "    ymin = coor[:,1].min()\n",
    "    ymax = coor[:,1].max()\n",
    "    grid = generate_grid(xmin, ymin, xmax, ymax, dx, dy)\n",
    "    grid = grid.filterBounds(geo)\n",
    "    grids = grid.getInfo()['features']\n",
    "    image_freecloud = imagec.map(cloud_free).median().reproject(crs = 'EPSG:4326',scale=30)\n",
    "    image = image_freecloud.select('SR_B.*').addBands(DEM.clip(geo)).reproject(crs = 'EPSG:4326',scale=30)\n",
    "    image = image.setDefaultProjection(image.projection())\n",
    "    water = image_freecloud.select('QA_PIXEL')\n",
    "\n",
    "    # process the grid cells in (at most) times_num batches of worker threads\n",
    "    st = time.time()\n",
    "    times_num = 100\n",
    "    # BUGFIX: use ceiling division for the batch size.  The original used\n",
    "    # interval = int(len(grids)/times_num) and grew times_num by one when\n",
    "    # len(grids)//times_num was truthy, which skipped the tail of large grids\n",
    "    # (e.g. 250 cells -> only 202 processed) and processed nothing at all\n",
    "    # when len(grids) < times_num (interval == 0).\n",
    "    interval = -(-len(grids) // times_num)\n",
    "    for times in range(times_num):\n",
    "        i0 = int(interval*times)\n",
    "        i1 = int(min(interval*(times+1),len(grids)))\n",
    "        grid_threads = []\n",
    "        for idx,i in enumerate(grids[i0:i1]):\n",
    "            grid_t = threading.Thread(target=write_grid_image, args=[i])\n",
    "            grid_t.start()\n",
    "            grid_threads.append(grid_t)\n",
    "        for grid_thread in grid_threads:\n",
    "            grid_thread.join()\n",
    "        print('%.2f' % (times/times_num*100)+'%')\n",
    "    long = time.time()-st\n",
    "    hour = int(long/3600)\n",
    "    minute = int((long-3600*hour)/60)\n",
    "    second = long-60*int(long/60)\n",
    "    print(str(hour)+':'+str(minute)+':'+str(second))\n",
    "    with open(r'[filename]','a+') as f:\n",
    "        # log filename should be given ([filename] is a placeholder)\n",
    "        f.write(str(path)+' '+str(row)+' write done！ Time consuming: %.4f\\n' % long)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "path_row = pd.read_csv(r'[pathrow]')\n",
    "# [pathrow] is a placeholder: point it at the CSV holding the WRS-2\n",
    "# PATH/ROW pairs to process.\n",
    "path_row = path_row[(path_row['PATH']==131) | (path_row['PATH']==135)]\n",
    "##################################################\n",
    "# we use a file to store path and row, and the\n",
    "# images of path==131 and path==135 are our\n",
    "# training samples; you can modify the path/row\n",
    "# selection yourself\n",
    "##################################################\n",
    "path = np.array(path_row['PATH'])\n",
    "row = np.array(path_row['ROW'])\n",
    "# Landsat 8 Collection 2 Tier 1 Level-2 (surface reflectance) collection\n",
    "l8 = ee.ImageCollection('LANDSAT/LC08/C02/T1_L2')\n",
    "# year range passed to summer_img (a single year here)\n",
    "st_year = 2020\n",
    "ed_year = 2020"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Launch one worker thread per (path, row) tile; the [4:8] slice selects\n",
    "# which tiles of the list to process in this run.\n",
    "project_threads = []\n",
    "for p, r in zip(path[4:8], row[4:8]):\n",
    "    worker = threading.Thread(target=project, args=[p, r])\n",
    "    worker.start()\n",
    "    project_threads.append(worker)\n",
    "# block until every tile has finished processing\n",
    "for worker in project_threads:\n",
    "    worker.join()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
