File size: 9,496 Bytes
1997c01
 
 
 
 
d189e4c
1997c01
edc370b
3b30238
c1b14f2
 
3b30238
 
 
 
c1b14f2
3b30238
c1b14f2
cb65773
c1b14f2
cb65773
c1b14f2
cad05f7
6dffaaa
fc47506
b3ce4c2
5afb200
c1b14f2
 
7260f6e
cb65773
79b6783
28d21d7
fc47506
 
 
79b6783
 
 
28d21d7
79b6783
 
fc47506
 
 
79b6783
28d21d7
79b6783
 
 
 
 
 
 
 
 
28d21d7
cb65773
79b6783
 
 
 
 
 
 
 
 
fc47506
3b30238
 
 
d22b489
 
 
 
 
 
 
d1715e3
83bc4a4
d1715e3
83bc4a4
552f08c
f252c3c
315dd1d
aa33587
 
 
d4d777b
 
4e4e55b
d4d777b
552f08c
e3665b6
552f08c
 
 
 
 
 
 
 
aa33587
be8fbbb
e3665b6
cb65773
7260f6e
 
552f08c
 
 
 
 
 
 
51b1074
552f08c
 
 
 
 
edc370b
1997c01
 
 
 
 
 
 
6866b1f
7260f6e
51b1074
83cd13d
fa181bd
 
ea91bd2
fa181bd
 
0939b73
51b1074
 
 
 
0939b73
d77dbe7
 
a4d5793
d8e3d53
0b6419d
d8e3d53
0b6419d
d8e3d53
c024d74
e517d5e
8a12b0f
 
a4d5793
7c424e1
 
fdcef48
8a12b0f
7c424e1
 
fdcef48
 
7c424e1
 
 
 
98e3569
 
 
 
 
 
8a12b0f
0391643
be8fbbb
10608aa
 
 
 
 
4dcad40
10608aa
 
 
 
0391643
7260f6e
0391643
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7260f6e
0391643
 
 
 
 
 
 
 
c23380f
fbce734
1729426
 
 
980735d
1729426
be8fbbb
3ea2200
fbce734
3ea2200
7260f6e
 
980735d
be8fbbb
e688670
157cf08
51b1074
e688670
1729426
f83432c
7a4a991
 
 
be8fbbb
7260f6e
c6a5618
7260f6e
 
98e3569
157cf08
c6a5618
1997c01
 
c6a5618
9c17797
fc47506
0fc8c61
7c424e1
a8e8fec
994e9a3
1997c01
 
83cd13d
19ae57e
1997c01
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
import gradio as gr
import pandas as pd
import numpy as np
import json
from io import StringIO
from collections import OrderedDict



import os

# ---------------------- Accessing data from Notion ---------------------- #


from notion_client import Client as client_notion

# Fail fast at startup if the Notion API token is not configured.
notionToken = os.getenv('notionToken')
if notionToken is None:
    # Use a specific exception type (RuntimeError is a subclass of Exception,
    # so existing broad handlers still catch it) instead of bare Exception.
    raise RuntimeError("Notion token not found. Please check the environment variables.")
else:
    print("Notion token found successfully!")

from config import landuseDatabaseId , subdomainAttributesDatabaseId 
from imports_utils import fetch_all_database_pages
from imports_utils import get_property_value
from imports_utils import notion

# Pull every page from the two Notion databases (network calls at import time).
landuse_attributes  = fetch_all_database_pages(notion, landuseDatabaseId)
livability_attributes  = fetch_all_database_pages(notion, subdomainAttributesDatabaseId)


# fetch the dictionary with landuse - domain pairs:
# maps each LANDUSE value to its SUBDOMAIN_LIVEABILITY value.
landuseMapperDict ={}
subdomains_unique = []

for page in landuse_attributes:
    value_landuse = get_property_value(page, "LANDUSE")
    value_subdomain = get_property_value(page, "SUBDOMAIN_LIVEABILITY")
    if value_subdomain and value_landuse:
        landuseMapperDict[value_landuse] = value_subdomain
    # BUGFIX: the original `!= ""` check was also True for None, so missing
    # subdomain properties leaked into subdomains_unique; require truthiness.
    if value_subdomain:
        subdomains_unique.append(value_subdomain)


# fetch the dictionary with subdomain attribute data:
# maps each SUBDOMAIN_UNIQUE name to its livability parameters.
attributeMapperDict ={}
domains_unique = []

for page in livability_attributes:
    subdomain = get_property_value(page, "SUBDOMAIN_UNIQUE")
    sqm_per_employee = get_property_value(page, "SQM PER EMPL")
    thresholds = get_property_value(page, "MANHATTAN THRESHOLD")
    max_points = get_property_value(page, "LIVABILITY MAX POINT")
    domain = get_property_value(page, "DOMAIN")
    # Only rows with a threshold are usable downstream.
    if  thresholds:   
        attributeMapperDict[subdomain] = {
        # Empty properties are replaced with a 0 placeholder; the domain
        # splitting loop inside test() relies on this exact sentinel
        # (it skips items equal to 0), so do not change it.
        'sqmPerEmpl': sqm_per_employee if sqm_per_employee != "" else 0,
        'thresholds': thresholds,
        'max_points': max_points,
        # Wrapped in a list; comma-separated values are split later in test().
        'domain': [domain if domain != "" else 0]
        }
    # NOTE(review): `!= ""` is also True for None — presumably
    # get_property_value can return None for missing properties; verify.
    if domain != "":
        domains_unique.append(domain)



# ---------------------- Accessing data from Speckle ---------------------- #


from specklepy.api.client import SpeckleClient
from specklepy.api.credentials import get_default_account, get_local_accounts
from specklepy.transports.server import ServerTransport
from specklepy.api import operations
from specklepy.objects.geometry import Polyline, Point
from specklepy.objects import Base

import imports_utils
import speckle_utils
import data_utils

from config import landuseDatabaseId , streamId,  dmBranchName, dmCommitId, luBranchName, luCommitId
from imports_utils import speckleToken
from imports_utils import fetchDistanceMatrices
from config import distanceMatrixActivityNodes
from config import distanceMatrixTransportStops

# Authenticate against the public Speckle server with the token from config.
CLIENT = SpeckleClient(host="https://speckle.xyz/")
account = get_default_account()
CLIENT.authenticate_with_token(token=speckleToken)

# Fetch the distance-matrix commit and unpack it into a dict of DataFrames.
streamDistanceMatrices = speckle_utils.getSpeckleStream(streamId,dmBranchName,CLIENT, dmCommitId)
matrices = fetchDistanceMatrices (streamDistanceMatrices)

# Fetch the land-use commit and convert its "@Data" payload to a DataFrame.
streamLanduses = speckle_utils.getSpeckleStream(streamId,luBranchName,CLIENT, luCommitId)
streamData = streamLanduses["@Data"]["@{0}"]
df_speckle_lu = speckle_utils.get_dataframe(streamData, return_original_df=False)
df_lu = df_speckle_lu.copy()
# set index column (kept as a regular column too via drop=False)
df_lu =  df_lu.set_index("ids", drop=False)

# Distance matrices used throughout test(): activity nodes and transport stops.
df_dm = matrices[distanceMatrixActivityNodes]
df_dm_transport = matrices[distanceMatrixTransportStops]
dm_dictionary = df_dm.to_dict('index')
df_dm_transport_dictionary = df_dm_transport.to_dict('index')


# filter activity nodes attributes:
# keep only land-use columns (prefixed "lu+") for nodes present in the
# distance matrix, then strip the prefix.
mask_connected = df_dm.index.tolist()
lu_columns = []
for name in df_lu.columns:
  if name.startswith("lu+"):
    lu_columns.append(name)
df_lu_filtered = df_lu[lu_columns].loc[mask_connected]
df_lu_filtered.columns = [col.replace('lu+', '') for col in df_lu_filtered.columns]

# Dict form of the filtered land-use table, returned verbatim by test().
df_lu_filtered_dict = df_lu_filtered.to_dict('index')



def test(input_json):
    """Gradio endpoint: compute accessibility and livability indicators.

    Parses the JSON payload sent from Grasshopper, combines it with the
    land-use and distance-matrix data fetched from Speckle/Notion at module
    load, and returns the resulting tables as a JSON string.

    Parameters
    ----------
    input_json : str
        JSON string with an ``input`` object holding matrices, the
        landuse/attribute mapper dicts and the ``alpha`` / ``threshold``
        decay parameters.

    Returns
    -------
    str
        JSON-encoded dictionary of accessibility, livability and mapper
        tables.
    """
    print("Received input")
    # Parse the input JSON string; fall back to swapping single quotes for
    # double quotes, since some clients send Python-repr-style pseudo-JSON.
    try:
        inputs = json.loads(input_json)
    except json.JSONDecodeError:
        inputs = json.loads(input_json.replace("'", '"'))


    # ------------------------- Accessing input data from Grasshopper ------------------------- #

    # Matrices sent by the client; the module-level Speckle matrices take
    # precedence below, these are kept for the documented payload contract.
    matrix = inputs['input']["matrix"]
    matrix_transport = inputs['input']["transportMatrix"]

    # Prefer the land-use table fetched from Speckle at module load; fall
    # back to the one supplied in the request payload.
    if df_lu_filtered is None:
        landuses = inputs['input']["landuse_areas"]
    else:
        landuses = df_lu_filtered

    # Mapper dicts from the request are currently unused (Notion data wins),
    # but reading them keeps the expected payload shape explicit.
    attributeMapperDict_gh = inputs['input']["attributeMapperDict"]
    landuseMapperDict_gh = inputs['input']["landuseMapperDict"]

    # Decay parameters for the accessibility computation.
    alpha = float(inputs['input']["alpha"])
    threshold = float(inputs['input']["threshold"])

    # Matrices come in column-major from the client: transpose, then round
    # to integers.
    df_matrix = pd.DataFrame(matrix).T
    df_matrix = df_matrix.round(0).astype(int)

    df_landuses = pd.DataFrame(landuses).T
    df_landuses = df_landuses.round(0).astype(int)


    from imports_utils import splitDictByStrFragmentInColumnName

    # List containing the substrings to check against
    tranportModes = ["DRT", "GMT", "HSR"]

    # Split the transport distance matrix into one dict per transport mode.
    result_dicts = splitDictByStrFragmentInColumnName(df_dm_transport_dictionary, tranportModes)

    # Accessing each dictionary (DRT stops are labelled "ART" downstream).
    art_dict = result_dicts["DRT"]
    gmt_dict = result_dicts["GMT"]

    df_art_matrix = pd.DataFrame(art_dict).T
    df_art_matrix = df_art_matrix.round(0).astype(int)
    df_gmt_matrix = pd.DataFrame(gmt_dict).T
    # BUGFIX: this line previously re-rounded df_art_matrix, silently
    # replacing the GMT matrix with a copy of the ART matrix.
    df_gmt_matrix = df_gmt_matrix.round(0).astype(int)


    # create a mask based on the matrix size and ids, crop activity nodes to the mask
    mask_connected = df_dm.index.tolist()

    valid_indexes = [idx for idx in mask_connected if idx in df_landuses.index]
    # Identify and report missing indexes
    missing_indexes = set(mask_connected) - set(valid_indexes)
    if missing_indexes:
        print(f"Error: The following indexes were not found in the DataFrame: {missing_indexes}, length: {len(missing_indexes)}")

    # Apply the filtered mask
    df_landuses_filtered = df_landuses.loc[valid_indexes]


    # find a set of unique domains, to which subdomains are aggregated.
    # NOTE: this mutates the module-level attributeMapperDict in place —
    # comma-separated domain strings are split into lists; the 0 placeholder
    # (set where the Notion DOMAIN property was empty) is skipped.
    temp = []
    for key, values in attributeMapperDict.items():
      domain = attributeMapperDict[key]['domain']
      for item in domain:
        if ',' in item:
          domain_list = item.split(',')
          attributeMapperDict[key]['domain'] = domain_list
          for domain in domain_list:
            temp.append(domain)
        else:
          if item != 0:
              temp.append(item)

    domainsUnique = list(set(temp))


    # find a list of unique subdomains, to which land uses are aggregated
    temp = []
    for key, values in landuseMapperDict.items():
      subdomain = str(landuseMapperDict[key])
      # BUGFIX: the original compared the str() result against int 0, which
      # is always True; compare against the "0" placeholder string instead.
      if subdomain != "0":
        temp.append(subdomain)

    subdomainsUnique = list(set(temp))


    from imports_utils import landusesToSubdomains
    from imports_utils import FindWorkplacesNumber
    from imports_utils import computeAccessibility
    from imports_utils import computeAccessibility_pointOfInterest
    from imports_utils import remap
    from imports_utils import accessibilityToLivability

    # Aggregate land-use areas into livability subdomains, then derive the
    # workplace counts used as the "jobs" weight column.
    LivabilitySubdomainsWeights = landusesToSubdomains(df_dm,df_landuses_filtered,landuseMapperDict,subdomainsUnique)

    WorkplacesNumber = FindWorkplacesNumber(df_dm,attributeMapperDict,LivabilitySubdomainsWeights,subdomainsUnique)

    # prepare an input weights dataframe for the parameter LivabilitySubdomainsInputs
    LivabilitySubdomainsInputs =pd.concat([LivabilitySubdomainsWeights, WorkplacesNumber], axis=1)

    # Accessibility per subdomain plus two point-of-interest columns.
    subdomainsAccessibility = computeAccessibility(df_dm,LivabilitySubdomainsInputs,alpha,threshold)
    artAccessibility = computeAccessibility_pointOfInterest(df_art_matrix,'ART',alpha,threshold)
    gmtAccessibility = computeAccessibility_pointOfInterest(df_gmt_matrix,'GMT+HSR',alpha,threshold)

    AccessibilityInputs = pd.concat([subdomainsAccessibility, artAccessibility,gmtAccessibility], axis=1)


    if 'jobs' not in subdomainsAccessibility.columns:
        print("Error: Column 'jobs' does not exist in the subdomainsAccessibility.")

    # Map accessibility scores onto livability points per domain.
    livability = accessibilityToLivability(df_dm,AccessibilityInputs,attributeMapperDict,domainsUnique)


    livability_dictionary = livability.to_dict('index')
    LivabilitySubdomainsInputs_dictionary = LivabilitySubdomainsInputs.to_dict('index')
    subdomainsAccessibility_dictionary = AccessibilityInputs.to_dict('index')
    artmatrix = df_art_matrix.to_dict('index')

    # Prepare the output
    output = {
        "subdomainsAccessibility_dictionary": subdomainsAccessibility_dictionary,
        "livability_dictionary": livability_dictionary,
        "subdomainsWeights_dictionary": LivabilitySubdomainsInputs_dictionary,
        "luDomainMapper": landuseMapperDict,
        "attributeMapper": attributeMapperDict,
        "fetchDm": dm_dictionary,
        "landuses": df_lu_filtered_dict
    }


    return json.dumps(output)

    # Define the Gradio interface with a single JSON input
iface = gr.Interface(
    fn=test,
    inputs=gr.Textbox(label="Input JSON", lines=20, placeholder="Enter JSON with all parameters here..."),
    outputs=gr.JSON(label="Output JSON"),
    title="testspace"
)

iface.launch()