# igdb/code/database/parser.py
from iso3166 import countries as co
from pandas import DataFrame, concat, read_csv
from csv import QUOTE_NONNUMERIC
from constants import ADMIN_PW
def country_conversion(political_unit) -> str:
    """Convert an ISO 3166 political-unit code to its full country name.

    Used as a pandas ``read_csv`` converter for the POLITICAL_UNIT column.
    ``iso3166.countries.get`` raises ``KeyError`` for codes it does not
    know; previously a single unrecognized code aborted the whole CSV
    import.  Fall back to the raw value instead so one bad row cannot
    crash the parse.
    """
    try:
        return co.get(political_unit).name
    except KeyError:
        # Unknown or retired code: keep the original value verbatim.
        return political_unit
def select_columns(
    min_year: int = 2010,
    path: str = "../data/WGMS-FoG-2019-12-D-CHANGE.csv",
) -> DataFrame:
    """Load the WGMS Fluctuations-of-Glaciers change table, filtered by year.

    Parameters
    ----------
    min_year : int
        Only rows with ``YEAR`` strictly greater than this survive
        (previously hard-coded to 2010).
    path : str
        Location of the WGMS CSV (previously hard-coded).

    Returns
    -------
    DataFrame
        The selected columns, with POLITICAL_UNIT converted from an
        ISO 3166 code to a full country name.
    """
    fields = [
        "POLITICAL_UNIT",
        "NAME",
        "WGMS_ID",
        "YEAR",
        "AREA_SURVEY_YEAR",
        "AREA_CHANGE",
        "THICKNESS_CHG",
        "VOLUME_CHANGE",
    ]
    # Read in chunks so the year filter is applied before the whole
    # file is materialized in memory.
    iter_csv = read_csv(
        path,
        skipinitialspace=True,
        usecols=fields,
        iterator=True,
        chunksize=100,
        converters={"POLITICAL_UNIT": country_conversion},
    )
    return concat([chunk[chunk["YEAR"] > min_year] for chunk in iter_csv])
def create_databases(df):
    """Split the change dataframe into normalized CSV "tables".

    Writes four files under ``../data``: ``glacier`` (one row per
    glacier), ``annual_data``, ``annual_change`` (per-year measurements
    keyed on WGMS_ID), and ``users`` (a seeded admin account).  All
    non-numeric fields are quoted so the files round-trip cleanly.
    """
    admin_account = DataFrame(
        {"UID": [7843], "USERNAME": ["admin"], "PASSWORD": [ADMIN_PW]}
    )
    tables = (
        ("glacier", df[["POLITICAL_UNIT", "NAME", "WGMS_ID"]].drop_duplicates()),
        ("annual_data", df[["WGMS_ID", "YEAR", "AREA_SURVEY_YEAR"]]),
        (
            "annual_change",
            df[["WGMS_ID", "YEAR", "AREA_CHANGE", "THICKNESS_CHG", "VOLUME_CHANGE"]],
        ),
        ("users", admin_account),
    )
    for table_name, table in tables:
        table.to_csv(
            f"../data/{table_name}.csv", index=False, quoting=QUOTE_NONNUMERIC
        )
def main():
    """Entry point: build the filtered dataframe and write the CSV tables."""
    create_databases(select_columns())


if __name__ == "__main__":
    main()