Part 1 is here; Part 3 is here. These are University of Helsinki teaching materials. We will summarize the answers and supplements for Week 3 and Week 4.
Week3
The goal is to find the number of residents living within 1.5 km of a large shopping center in Helsinki — that is, the population of the trade area. You have to search the net yourself to find the address of each mall. This exercise introduces geocoding, which converts addresses to coordinates. (In Japan, the CSV address-matching service provided by the University of Tokyo is well known for this.) There will also be some familiar QGIS-style operations such as buffering and spatial joins.
import glob
import os
import re

import geojson
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from geopandas.tools import geocode
from pyproj import CRS
from shapely.geometry import LineString, MultiPoint, Point, Polygon
from shapely.ops import nearest_points
# --- Week 3, part 1: trade-area population around each shopping centre ---

# Read the shopping-centre list (semicolon-separated lines: name;address).
data = pd.read_table('shopping_centers.txt', sep=';', header=None)
data.index.name = 'id'
data.columns = ['name', 'addr']

# Geocode the addresses into point geometries via Nominatim.
# NOTE: `geocode` lives in geopandas.tools and must be imported explicitly
# (the original script used it without any import).
geo = geocode(data['addr'], provider='nominatim', user_agent='autogis_xx', timeout=4)

# Reproject to ETRS-GK25 (EPSG:3879, metric) so buffer distances are in
# metres, then join the name/address attributes back onto the points.
geo = geo.to_crs(CRS.from_epsg(3879))
geodata = geo.join(data)

# 1.5 km buffer around every centre; the buffer becomes the active geometry.
geodata['buffer'] = geodata['geometry'].buffer(distance=1500)
geodata['geometry'] = geodata['buffer']

# Fetch the HSY population grid (2018, 250 m cells) over WFS as GeoJSON.
url = 'https://kartta.hsy.fi/geoserver/wfs'
params = dict(service='WFS', version='2.0.0', request='GetFeature',
              typeName='asuminen_ja_maankaytto:Vaestotietoruudukko_2018',
              outputFormat='json')
r = requests.get(url, params=params)
pop = gpd.GeoDataFrame.from_features(geojson.loads(r.content))

# Keep only geometry + inhabitant count ('asukkaita' = inhabitants) and
# make sure both layers share the same CRS before the spatial join.
pop = pop[['geometry', 'asukkaita']]
pop.crs = CRS.from_epsg(3879).to_wkt()
geodata = geodata.to_crs(pop.crs)

# Spatial join: every population cell that intersects a buffer.
# `predicate=` replaces the deprecated `op=` keyword (geopandas >= 0.10).
join = gpd.sjoin(geodata, pop, how="inner", predicate="intersects")

# Trade-area population = sum of inhabitants per shopping centre.
grouped = join.groupby('name')
for key, group in grouped:
    print('store: ', key, "\n", 'population:', group['asukkaita'].sum())
Find the shopping center nearest to your home and to your office. Unless you live in Finland, use a suitable Helsinki location as your starting address. (Library imports are omitted below.)
# --- Week 3, part 2: nearest shopping centre from home / office ---

# Read the origin addresses (home/office) and the shopping-centre list.
home = pd.read_table('activity_locations.txt', sep=';', header=None)
home.index.name = 'id'
home.columns = ['name', 'addr']
shop = pd.read_table('shopping_centers.txt', sep=';', header=None)
shop.index.name = 'id'
shop.columns = ['name', 'addr']

# Geocode both address lists into point geometries (Nominatim).
# `geocode` comes from geopandas.tools.
geo_home = geocode(home['addr'], provider='nominatim', user_agent='autogis_xx', timeout=4)
geo_shop = geocode(shop['addr'], provider='nominatim', user_agent='autogis_xx', timeout=4)

# Collect the candidate destinations into one MultiPoint so shapely's
# nearest_points can search them in a single call.
destinations = MultiPoint(list(geo_shop['geometry']))

# For every origin point, nearest_points returns the pair
# (origin, nearest destination); element [1] is the nearest centre.
# The loop variable is named `origin` — the original used `home`, which
# shadowed and clobbered the `home` DataFrame defined above.
for origin in geo_home['geometry']:
    nearest_geoms = nearest_points(origin, destinations)
    print(nearest_geoms[1])
Week4
Visualize accessibility by combining travel time data with subway network data.
# --- Week 4, part 1: travel times to Itis by public transport vs car ---
import numpy as np
import mapclassify

# Read the YKR grid polygons and the travel-time matrix for Itis,
# keeping only the columns the analysis needs.
grid = gpd.read_file('data/MetropAccess_YKR_grid_EurefFIN.shp')
data = pd.read_table('data/TravelTimes_to_5944003_Itis.txt', sep=';')
data = data[["pt_r_t", "car_r_t", "from_id", "to_id"]]

# Attach travel times to the grid cells (YKR_ID <-> from_id).
data_geo = grid.merge(data, left_on='YKR_ID', right_on='from_id')

# -1 marks "no data" in the travel-time files: convert to NaN and drop.
data_geo = data_geo.replace(-1, np.nan).dropna()

# Classify both travel-time columns into fixed 15-minute bins.
bins = [15, 30, 45, 60, 75, 90, 105, 120, 135, 150, 165, 180]
classifier = mapclassify.UserDefined.make(bins=bins)
for col in ('pt_r_t', 'car_r_t'):
    data_geo[col + '_cl'] = data_geo[[col]].apply(classifier)

# Two maps side by side: public transport (left) and private car (right).
fig = plt.figure(figsize=(10, 10))
ax1 = fig.add_subplot(1, 2, 1)
data_geo.plot(ax=ax1, column='pt_r_t_cl')
ax1.set_title("Itis-Travel times by PT")
ax2 = fig.add_subplot(1, 2, 2)
data_geo.plot(ax=ax2, column='car_r_t_cl')
ax2.set_title("Itis-Travel times by Car")
plt.tight_layout()
plt.show()
fig.savefig('itis_accessibility.png')
It will be displayed like this.
Based on the accessibility data obtained in 4-1, we aim to visualize each mall's sphere of influence by finding the nearest shopping mall for each grid cell.
# --- Week 4, part 2: nearest shopping centre ("dominant service") per cell ---

# Read the base grid FIRST so the per-centre travel-time columns can be
# merged onto it inside the loop. (In the original the read_file call came
# AFTER the loop, which overwrote and discarded every merge.)
grid = gpd.read_file('data/MetropAccess_YKR_grid_EurefFIN.shp')

# One travel-time file per shopping centre.
filepaths = glob.glob('data/TravelTimes*.txt')
for path in filepaths:
    data = pd.read_table(path, sep=';')
    data = data[['from_id', 'pt_r_t']]
    data = data.rename(columns={'from_id': 'YKR_ID'})
    # Derive the store name from the file name and rename the travel-time
    # column to 'pt_r_t_<store>', e.g.
    # data/TravelTimes_to_5944003_Itis.txt -> pt_r_t_Itis
    newname = path.replace('data/TravelTimes_to_', '')
    newname = newname.replace('.txt', '')
    newname = re.sub(r'\d{7}_', '', newname)  # strip the 7-digit YKR id
    data = data.rename(columns={'pt_r_t': 'pt_r_t_' + newname})
    grid = grid.merge(data)  # joins on the shared YKR_ID column

# -1 marks "no data": convert to NaN and drop those cells.
grid = grid.replace(-1, np.nan)
grid = grid.dropna()

# For each cell, record the minimum travel time across all centres and
# which centre provides it (the "dominant service").
columns = ['pt_r_t_Ruoholahti', 'pt_r_t_Myyrmanni', 'pt_r_t_Itis',
           'pt_r_t_Jumbo', 'pt_r_t_IsoOmena', 'pt_r_t_Dixi', 'pt_r_t_Forum']
grid['min_t'] = grid.apply(lambda row: row[columns].min(), axis=1)
grid['dominant_service'] = grid.apply(
    lambda row: row[columns].astype(float).idxmin(), axis=1)
The standard approach is to aggregate the grid cells with a dissolve and then find the population of each sphere of influence via an intersection. The part that overlaps with 4-2 is omitted.
# --- Week 4, part 3: population inside each dominance area ---
# Carries over `grid` (with 'dominant_service') from 4-2 and the HSY
# population grid `pop` from Week 3; assumes both are already in the
# same CRS — TODO confirm before running.

# Merge all cells sharing a dominant centre into one polygon per centre.
dissolved = grid.dissolve(by='dominant_service')
pop = pop[['geometry', 'asukkaita']]  # keep only the required columns

# Intersect the dissolved dominance areas with the population grid.
# The original intersected the raw `grid` and left `dissolved` unused;
# reset_index() turns the dissolve key back into a column so the
# groupby below can see it.
intersection = gpd.overlay(dissolved.reset_index(), pop, how='intersection')

# Population per sphere of influence.
grouped = intersection.groupby('dominant_service')
for key, group in grouped:
    print(key, ':', group['asukkaita'].sum())
Recommended Posts