Connecting IBM Watson Studio to IBM DB2 for an ArcGIS Database

import pandas as pd  # library for data analysis
import ibm_db
from opencage.geocoder import OpenCageGeocode

key = '<key>'  # OpenCage API key
geocoder = OpenCageGeocode(key)
query = 'Jakarta, Indonesia'
results = geocoder.geocode(query)

nIn = 0  # rows inserted into DB2 so far
nf = 0   # running row counter used to build the FID column across files
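A quick way to confirm the API key works before looping over the whole dataset is to look at the first match OpenCage returns; the sketch below only assumes the standard OpenCage response layout (a list of matches, each carrying a 'geometry' dict).
# Inspect the top geocoding match (assumes the standard OpenCage response structure)
if results:
    top = results[0]
    print(top['formatted'])                                # human-readable place name
    print(top['geometry']['lat'], top['geometry']['lng'])  # coordinates used later on
else:
    print('No geocoding result for', query)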
  • Syntax from Watson Studio on Cloud
import types
import pandas as pd
from botocore.client import Config
import ibm_boto3
def __iter__(self): return 0

# @hidden_cell
# The following code accesses a file in your IBM Cloud Object Storage. It includes your credentials.
# You might want to remove those credentials before you share the notebook.
client_d5ced05bc5a549ff90097ff09dfed91c = ibm_boto3.client(service_name='s3',
    ibm_api_key_id='<ibm_api_key_id>',
    ibm_auth_endpoint="https://iam.cloud.ibm.com/oidc/token",
    config=Config(signature_version='oauth'),
    endpoint_url='<endpoint_url>')
body = client_d5ced05bc5a549ff90097ff09dfed91c.get_object(Bucket='project1-donotdelete-pr-itsbl4harfajoy',Key='StandarKelurahanDataCorona_28MEI2020Pukul09.00.csv')['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
df_cases = pd.read_csv(body)
df_cases.head()
  • Syntax from Watson Studio Desktop
# Read in the COVID-19 cases per district data (28 May 2020)
df_cases = pd.read_csv("<path to data>")
# View the top rows of the dataset
df_cases
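Before geocoding and loading the data, it can help to confirm the CSV actually exposes the column names referenced further down; the list below is copied from the INSERT binding code later in this section.
# Optional sanity check: columns referenced later in this notebook
expected_cols = ['Tanggal', 'ID_KEL', 'Nama_provinsi', 'nama_kota', 'nama_kecamatan',
                 'nama_kelurahan', 'ODP', 'Proses_Pemantauan', 'Selesai_Pemantauan',
                 'PDP', 'Masih_Dirawat', 'Pulang_dan_Sehat', 'POSITIF', 'Dirawat',
                 'Sembuh', 'Meninggal', 'Self_Isolation']
missing = [c for c in expected_cols if c not in df_cases.columns]
print('Missing columns:', missing)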
# Assign a sequential FID to every row of the cases dataframe
FID_cases = []
for index, row in df_cases.iterrows():
    nf = nf + 1
    cases = nf
    FID_cases.append(cases)
print(FID_cases)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267]
df_cases.insert(0,"FID",FID_cases)
df_cases
# Get latitude and longitude of all districts
list_lat = []   # create empty list for latitude
list_long = []  # create empty list for longitude
for index, row in df_cases.iterrows():  # iterate over rows in dataframe
    District = row['nama_kelurahan']
    query = str(District) + ', Jakarta'
    results = geocoder.geocode(query)
    lat = results[0]['geometry']['lat']
    long = results[0]['geometry']['lng']
    list_lat.append(lat)
    list_long.append(long)
# create new columns from lists
df_cases['Latitude'] = list_lat
df_cases['Longitude'] = list_long
df_cases
print(nIn)
print(nf)
0
267
conn = ibm_db.connect("DATABASE=<DATABASE>;HOSTNAME=<HOSTNAME>;PORT=<port>;PROTOCOL=TCPIP;UID=<UID>;PWD=<Password>", '', '')
connState = ibm_db.active(conn)
print(connState)
True
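ibm_db.connect() raises an exception when the credentials or host are wrong; a minimal sketch of wrapping the same call to surface the driver's error message (same placeholder credentials as above).
# Optional: catch connection failures and print the DB2 driver error text
try:
    conn = ibm_db.connect("DATABASE=<DATABASE>;HOSTNAME=<HOSTNAME>;PORT=<port>;"
                          "PROTOCOL=TCPIP;UID=<UID>;PWD=<Password>", '', '')
except Exception:
    print('Connection failed:', ibm_db.conn_errormsg())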
clientinfo = ibm_db.client_info(conn)
if clientinfo:
    print("DRIVER_NAME: string(%d) \"%s\"" % (len(clientinfo.DRIVER_NAME), clientinfo.DRIVER_NAME))
    print("DRIVER_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_VER), clientinfo.DRIVER_VER))
    print("DATA_SOURCE_NAME: string(%d) \"%s\"" % (len(clientinfo.DATA_SOURCE_NAME), clientinfo.DATA_SOURCE_NAME))
    print("DRIVER_ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_ODBC_VER), clientinfo.DRIVER_ODBC_VER))
    print("ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.ODBC_VER), clientinfo.ODBC_VER))
    print("ODBC_SQL_CONFORMANCE: string(%d) \"%s\"" % (len(clientinfo.ODBC_SQL_CONFORMANCE), clientinfo.ODBC_SQL_CONFORMANCE))
    print("APPL_CODEPAGE: int(%s)" % clientinfo.APPL_CODEPAGE)
    print("CONN_CODEPAGE: int(%s)" % clientinfo.CONN_CODEPAGE)
DRIVER_NAME: string(8) "libdb2.a"
DRIVER_VER: string(10) "11.05.0400"
DATA_SOURCE_NAME: string(5) "BLUDB"
DRIVER_ODBC_VER: string(5) "03.51"
ODBC_VER: string(10) "03.01.0000"
ODBC_SQL_CONFORMANCE: string(8) "EXTENDED"
APPL_CODEPAGE: int(1208)
CONN_CODEPAGE: int(1208)
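The prepared INSERT below binds 20 values per row, so the target table <schema>.COVIDMAY must already exist with 20 matching columns. Its definition is not part of this notebook; the sketch below is only a hypothetical DDL, with column names taken from the dataframe and the data types guessed.
# Hypothetical DDL for the target table; column names follow the dataframe, types are assumptions
create_stmt = """
CREATE TABLE <schema>.COVIDMAY (
    FID                 INTEGER NOT NULL PRIMARY KEY,
    TANGGAL             VARCHAR(32),
    ID_KEL              VARCHAR(16),
    NAMA_PROVINSI       VARCHAR(64),
    NAMA_KOTA           VARCHAR(64),
    NAMA_KECAMATAN      VARCHAR(64),
    NAMA_KELURAHAN      VARCHAR(64),
    ODP                 INTEGER,
    PROSES_PEMANTAUAN   INTEGER,
    SELESAI_PEMANTAUAN  INTEGER,
    PDP                 INTEGER,
    MASIH_DIRAWAT       INTEGER,
    PULANG_DAN_SEHAT    INTEGER,
    POSITIF             INTEGER,
    DIRAWAT             INTEGER,
    SEMBUH              INTEGER,
    MENINGGAL           INTEGER,
    SELF_ISOLATION      INTEGER,
    LATITUDE            DOUBLE,
    LONGITUDE           DOUBLE
)
"""
# ibm_db.exec_immediate(conn, create_stmt)  # run once if the table does not exist yet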
sql_stmt = "INSERT INTO <skema>.COVIDMAY VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);"
stmt = ibm_db.prepare(conn, sql_stmt)
dict_of_df = df_cases.to_dict()
#print(len(dict_of_df['FID']))
#print(type(dict_of_df['FID'][key]))
for key in range(len(dict_of_df['FID'])):
    ibm_db.bind_param(stmt, 1, dict_of_df['FID'][key])
    ibm_db.bind_param(stmt, 2, dict_of_df['Tanggal'][key])
    ibm_db.bind_param(stmt, 3, str(dict_of_df['ID_KEL'][key]))
    ibm_db.bind_param(stmt, 4, dict_of_df['Nama_provinsi'][key])
    ibm_db.bind_param(stmt, 5, dict_of_df['nama_kota'][key])
    ibm_db.bind_param(stmt, 6, dict_of_df['nama_kecamatan'][key])
    ibm_db.bind_param(stmt, 7, dict_of_df['nama_kelurahan'][key])
    ibm_db.bind_param(stmt, 8, dict_of_df['ODP'][key])
    ibm_db.bind_param(stmt, 9, dict_of_df['Proses_Pemantauan'][key])
    ibm_db.bind_param(stmt, 10, dict_of_df['Selesai_Pemantauan'][key])
    ibm_db.bind_param(stmt, 11, dict_of_df['PDP'][key])
    ibm_db.bind_param(stmt, 12, dict_of_df['Masih_Dirawat'][key])
    ibm_db.bind_param(stmt, 13, dict_of_df['Pulang_dan_Sehat'][key])
    ibm_db.bind_param(stmt, 14, dict_of_df['POSITIF'][key])
    ibm_db.bind_param(stmt, 15, dict_of_df['Dirawat'][key])
    ibm_db.bind_param(stmt, 16, dict_of_df['Sembuh'][key])
    ibm_db.bind_param(stmt, 17, dict_of_df['Meninggal'][key])
    ibm_db.bind_param(stmt, 18, dict_of_df['Self_Isolation'][key])
    ibm_db.bind_param(stmt, 19, dict_of_df['Latitude'][key])
    ibm_db.bind_param(stmt, 20, dict_of_df['Longitude'][key])
    ibm_db.execute(stmt)
print('success')
success
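A quick way to confirm the load went through is to count the rows now sitting in DB2 (a minimal sketch; it uses the same <schema> placeholder as above).
# Verify the number of rows inserted into the COVIDMAY table
check_stmt = ibm_db.exec_immediate(conn, "SELECT COUNT(*) FROM <schema>.COVIDMAY")
row = ibm_db.fetch_tuple(check_stmt)
print('Rows in COVIDMAY:', row[0])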
nIn = 267  # rows already inserted into DB2
nf = 267   # continue the FID sequence from here for the next file
  • Syntax from Watson Studio on Cloud
body = client_bc74604e908a4c55969cf7bc90b3ec9c.get_object(Bucket='ibmsolutionssummit2020-donotdelete-pr-6zhytmdqrk5avi',Key='StandarKelurahanDataCorona_29MEI2020Pukul09.00.csv')['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
df_cases = pd.read_csv(body)
df_cases.head()
  • Syntax from Watson Studio Desktop
# Read in the COVID-19 cases per district data (29 May 2020)
df_cases = pd.read_csv("<path to data>")
# View the top rows of the dataset
df_cases
# Continue the sequential FID numbering for the 29 May file
FID_cases = []
for index, row in df_cases.iterrows():
    nf = nf + 1
    cases = nf
    FID_cases.append(cases)
print(FID_cases)
[268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534]
df_cases.insert(0,"FID",FID_cases)
df_cases
# Get latitude and longitude of all districts
list_lat = []   # create empty list for latitude
list_long = []  # create empty list for longitude
for index, row in df_cases.iterrows():  # iterate over rows in dataframe
    District = row['nama_kelurahan']
    query = str(District) + ', Jakarta'
    results = geocoder.geocode(query)
    lat = results[0]['geometry']['lat']
    long = results[0]['geometry']['lng']
    list_lat.append(lat)
    list_long.append(long)
# create new columns from lists
df_cases['Latitude'] = list_lat
df_cases['Longitude'] = list_long
df_cases
print(nIn)
print(nf)
267
534
clientinfo = ibm_db.client_info(conn)
if clientinfo:
    print("DRIVER_NAME: string(%d) \"%s\"" % (len(clientinfo.DRIVER_NAME), clientinfo.DRIVER_NAME))
    print("DRIVER_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_VER), clientinfo.DRIVER_VER))
    print("DATA_SOURCE_NAME: string(%d) \"%s\"" % (len(clientinfo.DATA_SOURCE_NAME), clientinfo.DATA_SOURCE_NAME))
    print("DRIVER_ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_ODBC_VER), clientinfo.DRIVER_ODBC_VER))
    print("ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.ODBC_VER), clientinfo.ODBC_VER))
    print("ODBC_SQL_CONFORMANCE: string(%d) \"%s\"" % (len(clientinfo.ODBC_SQL_CONFORMANCE), clientinfo.ODBC_SQL_CONFORMANCE))
    print("APPL_CODEPAGE: int(%s)" % clientinfo.APPL_CODEPAGE)
    print("CONN_CODEPAGE: int(%s)" % clientinfo.CONN_CODEPAGE)
DRIVER_NAME: string(8) "libdb2.a"
DRIVER_VER: string(10) "11.05.0400"
DATA_SOURCE_NAME: string(5) "BLUDB"
DRIVER_ODBC_VER: string(5) "03.51"
ODBC_VER: string(10) "03.01.0000"
ODBC_SQL_CONFORMANCE: string(8) "EXTENDED"
APPL_CODEPAGE: int(1208)
CONN_CODEPAGE: int(1208)
sql_stmt = "INSERT INTO <skema>.COVIDMAY VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);"
stmt = ibm_db.prepare(conn, sql_stmt)
dict_of_df = df_cases.to_dict()
for key in range(len(dict_of_df['FID'])):
    ibm_db.bind_param(stmt, 1, dict_of_df['FID'][key])
    ibm_db.bind_param(stmt, 2, dict_of_df['Tanggal'][key])
    ibm_db.bind_param(stmt, 3, str(dict_of_df['ID_KEL'][key]))
    ibm_db.bind_param(stmt, 4, dict_of_df['Nama_provinsi'][key])
    ibm_db.bind_param(stmt, 5, dict_of_df['nama_kota'][key])
    ibm_db.bind_param(stmt, 6, dict_of_df['nama_kecamatan'][key])
    ibm_db.bind_param(stmt, 7, dict_of_df['nama_kelurahan'][key])
    ibm_db.bind_param(stmt, 8, dict_of_df['ODP'][key])
    ibm_db.bind_param(stmt, 9, dict_of_df['Proses_Pemantauan'][key])
    ibm_db.bind_param(stmt, 10, dict_of_df['Selesai_Pemantauan'][key])
    ibm_db.bind_param(stmt, 11, dict_of_df['PDP'][key])
    ibm_db.bind_param(stmt, 12, dict_of_df['Masih_Dirawat'][key])
    ibm_db.bind_param(stmt, 13, dict_of_df['Pulang_dan_Sehat'][key])
    ibm_db.bind_param(stmt, 14, dict_of_df['POSITIF'][key])
    ibm_db.bind_param(stmt, 15, dict_of_df['Dirawat'][key])
    ibm_db.bind_param(stmt, 16, dict_of_df['Sembuh'][key])
    ibm_db.bind_param(stmt, 17, dict_of_df['Meninggal'][key])
    ibm_db.bind_param(stmt, 18, dict_of_df['Self_Isolation'][key])
    ibm_db.bind_param(stmt, 19, dict_of_df['Latitude'][key])
    ibm_db.bind_param(stmt, 20, dict_of_df['Longitude'][key])
    ibm_db.execute(stmt)
print('success')
success
  • Syntax from Watson Studio on Cloud
body = client_bc74604e908a4c55969cf7bc90b3ec9c.get_object(Bucket='ibmsolutionssummit2020-donotdelete-pr-6zhytmdqrk5avi',Key='Hospital_for_treatment_covid19.csv')['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
df_hospital = pd.read_csv(body)
df_hospital
  • Syntax from Watson Studio Desktop
# Read in the hospitals for COVID-19 treatment data
df_hospital = pd.read_csv("<path to data>")
# View the top rows of the dataset
df_hospital
list2_lat = []   # create empty list for latitude
list2_long = []  # create empty list for longitude
for index, row in df_hospital.iterrows():  # iterate over rows in dataframe
    hosp = row['Hospital']
    distr = row['District']
    query = str(hosp) + ', ' + str(distr) + ', Jakarta'
    results = geocoder.geocode(query)
    lat = results[0]['geometry']['lat']
    long = results[0]['geometry']['lng']
    list2_lat.append(lat)
    list2_long.append(long)
# create new columns from lists
df_hospital['Latitude'] = list2_lat
df_hospital['Longitude'] = list2_long
df_hospital
FID_value = []
n = 0
for index, row in df_hospital.iterrows():
    # FID = row['FID']
    n = n + 1
    fid = n
    FID_value.append(fid)
print(FID_value)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
df_hospital.insert(0,"FID",FID_value)
df_hospital
clientinfo = ibm_db.client_info(conn)
if clientinfo:
    print("DRIVER_NAME: string(%d) \"%s\"" % (len(clientinfo.DRIVER_NAME), clientinfo.DRIVER_NAME))
    print("DRIVER_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_VER), clientinfo.DRIVER_VER))
    print("DATA_SOURCE_NAME: string(%d) \"%s\"" % (len(clientinfo.DATA_SOURCE_NAME), clientinfo.DATA_SOURCE_NAME))
    print("DRIVER_ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_ODBC_VER), clientinfo.DRIVER_ODBC_VER))
    print("ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.ODBC_VER), clientinfo.ODBC_VER))
    print("ODBC_SQL_CONFORMANCE: string(%d) \"%s\"" % (len(clientinfo.ODBC_SQL_CONFORMANCE), clientinfo.ODBC_SQL_CONFORMANCE))
    print("APPL_CODEPAGE: int(%s)" % clientinfo.APPL_CODEPAGE)
    print("CONN_CODEPAGE: int(%s)" % clientinfo.CONN_CODEPAGE)
DRIVER_NAME: string(8) "libdb2.a"
DRIVER_VER: string(10) "11.05.0400"
DATA_SOURCE_NAME: string(5) "BLUDB"
DRIVER_ODBC_VER: string(5) "03.51"
ODBC_VER: string(10) "03.01.0000"
ODBC_SQL_CONFORMANCE: string(8) "EXTENDED"
APPL_CODEPAGE: int(1208)
CONN_CODEPAGE: int(1208)
sql_stmt = "INSERT INTO <skema>.HOSPITALTB VALUES(?,?,?,?,?,?);"
stmt = ibm_db.prepare(conn, sql_stmt)
dict_of_df = df_hospital.to_dict()
for key in range(len(dict_of_df['FID'])):
    ibm_db.bind_param(stmt, 1, dict_of_df['FID'][key])
    ibm_db.bind_param(stmt, 2, dict_of_df['Hospital'][key])
    ibm_db.bind_param(stmt, 3, dict_of_df['City'][key])
    ibm_db.bind_param(stmt, 4, dict_of_df['District'][key])
    ibm_db.bind_param(stmt, 5, dict_of_df['Latitude'][key])
    ibm_db.bind_param(stmt, 6, dict_of_df['Longitude'][key])
    ibm_db.execute(stmt)
print('success')
success
  • Syntax from Watson Studio on Cloud
body = client_bc74604e908a4c55969cf7bc90b3ec9c.get_object(Bucket='ibmsolutionssummit2020-donotdelete-pr-6zhytmdqrk5avi',Key='population2020_DKI_Jakarta.csv')['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
df_population = pd.read_csv(body)
df_population
  • Syntax from Watson Studio Desktop
df_population = pd.read_csv("<path to data>")
# View the top rows of the dataset
df_population
Index_value = []
In = 0
for index, row in df_population.iterrows():
    # FID = row['FID']
    In = In + 1
    Index = In
    Index_value.append(Index)
print(Index_value)
[1, 2, 3, 4, 5, 6]
df_population.insert(0,"Index",Index_value)
df_population
clientinfo = ibm_db.client_info(conn)
if clientinfo:
    print("DRIVER_NAME: string(%d) \"%s\"" % (len(clientinfo.DRIVER_NAME), clientinfo.DRIVER_NAME))
    print("DRIVER_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_VER), clientinfo.DRIVER_VER))
    print("DATA_SOURCE_NAME: string(%d) \"%s\"" % (len(clientinfo.DATA_SOURCE_NAME), clientinfo.DATA_SOURCE_NAME))
    print("DRIVER_ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_ODBC_VER), clientinfo.DRIVER_ODBC_VER))
    print("ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.ODBC_VER), clientinfo.ODBC_VER))
    print("ODBC_SQL_CONFORMANCE: string(%d) \"%s\"" % (len(clientinfo.ODBC_SQL_CONFORMANCE), clientinfo.ODBC_SQL_CONFORMANCE))
    print("APPL_CODEPAGE: int(%s)" % clientinfo.APPL_CODEPAGE)
    print("CONN_CODEPAGE: int(%s)" % clientinfo.CONN_CODEPAGE)
DRIVER_NAME: string(8) "libdb2.a"
DRIVER_VER: string(10) "11.05.0400"
DATA_SOURCE_NAME: string(5) "BLUDB"
DRIVER_ODBC_VER: string(5) "03.51"
ODBC_VER: string(10) "03.01.0000"
ODBC_SQL_CONFORMANCE: string(8) "EXTENDED"
APPL_CODEPAGE: int(1208)
CONN_CODEPAGE: int(1208)
sql_stmt = "INSERT INTO <skema>.POPULATIONTB VALUES(?,?,?);"
stmt = ibm_db.prepare(conn, sql_stmt)
dict_of_df = df_population.to_dict()
for key in range(len(dict_of_df['Index'])):
    ibm_db.bind_param(stmt, 1, dict_of_df['Index'][key])
    ibm_db.bind_param(stmt, 2, dict_of_df['City'][key])
    ibm_db.bind_param(stmt, 3, dict_of_df['Total_population_2020_people_per_km2'][key])
    ibm_db.execute(stmt)
print('success')
success
  • Syntax from Watson Studio on Cloud
body = client_bc74604e908a4c55969cf7bc90b3ec9c.get_object(Bucket='ibmsolutionssummit2020-donotdelete-pr-6zhytmdqrk5avi',Key='10_kelurahan terpadat_DKI_Jakarta.csv')['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
df_most_population = pd.read_csv(body)
df_most_population.head()
  • Syntax from Watson Studio Desktop
df_most_population = pd.read_csv("<path to data>")
# View the top rows of the dataset
df_most_population
Index_value = []
In = 0
for index, row in df_most_population.iterrows():
    # FID = row['FID']
    In = In + 1
    Index = In
    Index_value.append(Index)
print(Index_value)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
df_most_population.insert(0,"Index",Index_value)
df_most_population
clientinfo = ibm_db.client_info(conn)
if clientinfo:
    print("DRIVER_NAME: string(%d) \"%s\"" % (len(clientinfo.DRIVER_NAME), clientinfo.DRIVER_NAME))
    print("DRIVER_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_VER), clientinfo.DRIVER_VER))
    print("DATA_SOURCE_NAME: string(%d) \"%s\"" % (len(clientinfo.DATA_SOURCE_NAME), clientinfo.DATA_SOURCE_NAME))
    print("DRIVER_ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_ODBC_VER), clientinfo.DRIVER_ODBC_VER))
    print("ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.ODBC_VER), clientinfo.ODBC_VER))
    print("ODBC_SQL_CONFORMANCE: string(%d) \"%s\"" % (len(clientinfo.ODBC_SQL_CONFORMANCE), clientinfo.ODBC_SQL_CONFORMANCE))
    print("APPL_CODEPAGE: int(%s)" % clientinfo.APPL_CODEPAGE)
    print("CONN_CODEPAGE: int(%s)" % clientinfo.CONN_CODEPAGE)
DRIVER_NAME: string(8) "libdb2.a"
DRIVER_VER: string(10) "11.05.0400"
DATA_SOURCE_NAME: string(5) "BLUDB"
DRIVER_ODBC_VER: string(5) "03.51"
ODBC_VER: string(10) "03.01.0000"
ODBC_SQL_CONFORMANCE: string(8) "EXTENDED"
APPL_CODEPAGE: int(1208)
CONN_CODEPAGE: int(1208)
sql_stmt = "INSERT INTO <skema>.MOSTPOPULATIONTB VALUES(?,?,?);"
stmt = ibm_db.prepare(conn, sql_stmt)
dict_of_df = df_most_population.to_dict()
for key in range(len(dict_of_df['Index'])):
    ibm_db.bind_param(stmt, 1, dict_of_df['Index'][key])
    ibm_db.bind_param(stmt, 2, dict_of_df['district'][key])
    ibm_db.bind_param(stmt, 3, dict_of_df['Total_population_2020_people_per_km2'][key])
    ibm_db.execute(stmt)
print('success')
success
  • Syntax from Watson Studio on Cloud
body = client_bc74604e908a4c55969cf7bc90b3ec9c.get_object(Bucket='ibmsolutionssummit2020-donotdelete-pr-6zhytmdqrk5avi',Key='venues.csv')['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
df_venue = pd.read_csv(body)
df_venue.head()
  • Syntax from Watson Studio Desktop
df_venue = pd.read_csv("<path to data>")
# View the top rows of the dataset
df_venue
clientinfo = ibm_db.client_info(conn)
if clientinfo:
    print("DRIVER_NAME: string(%d) \"%s\"" % (len(clientinfo.DRIVER_NAME), clientinfo.DRIVER_NAME))
    print("DRIVER_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_VER), clientinfo.DRIVER_VER))
    print("DATA_SOURCE_NAME: string(%d) \"%s\"" % (len(clientinfo.DATA_SOURCE_NAME), clientinfo.DATA_SOURCE_NAME))
    print("DRIVER_ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.DRIVER_ODBC_VER), clientinfo.DRIVER_ODBC_VER))
    print("ODBC_VER: string(%d) \"%s\"" % (len(clientinfo.ODBC_VER), clientinfo.ODBC_VER))
    print("ODBC_SQL_CONFORMANCE: string(%d) \"%s\"" % (len(clientinfo.ODBC_SQL_CONFORMANCE), clientinfo.ODBC_SQL_CONFORMANCE))
    print("APPL_CODEPAGE: int(%s)" % clientinfo.APPL_CODEPAGE)
    print("CONN_CODEPAGE: int(%s)" % clientinfo.CONN_CODEPAGE)
DRIVER_NAME: string(8) "libdb2.a"
DRIVER_VER: string(10) "11.05.0400"
DATA_SOURCE_NAME: string(5) "BLUDB"
DRIVER_ODBC_VER: string(5) "03.51"
ODBC_VER: string(10) "03.01.0000"
ODBC_SQL_CONFORMANCE: string(8) "EXTENDED"
APPL_CODEPAGE: int(1208)
CONN_CODEPAGE: int(1208)
sql_stmt = "INSERT INTO <skema>.VANUETB VALUES(?,?,?,?,?);"
stmt = ibm_db.prepare(conn, sql_stmt)
dict_of_df = df_venue.to_dict()
for key in range(len(dict_of_df['FID'])):
    ibm_db.bind_param(stmt, 1, dict_of_df['FID'][key])
    ibm_db.bind_param(stmt, 2, dict_of_df['name'][key])
    ibm_db.bind_param(stmt, 3, dict_of_df['categories'][key])
    ibm_db.bind_param(stmt, 4, dict_of_df['Latitude'][key])
    ibm_db.bind_param(stmt, 5, dict_of_df['Longitude'][key])
    ibm_db.execute(stmt)
print('success')
success
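Once all five tables are loaded, the DB2 connection can be released; ibm_db runs with autocommit enabled by default, so no explicit commit should be needed. A minimal closing sketch:
# Release the DB2 connection after all inserts are done
if ibm_db.active(conn):
    ibm_db.close(conn)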
  • The IBM DB2 view in IBM DB2 Desktop
  • The IBM DB2 view in IBM DB2 on Cloud
Map of the spread of positive COVID-19 cases as of 28 May 2020, together with the nearby hospital and venue locations.
  • ArcGIS configuration using IBM DB2 Desktop
  • ArcGIS configuration using IBM DB2 on Cloud
