Rename code folder and add filters to parser
parent 7c4ba27dc5
commit c2d6c2363b
@@ -1,14 +1,16 @@
-from subprocess import call
+from subprocess import run
 from sys import argv
 from flask_sqlalchemy import SQLAlchemy
 from flask import Flask
 from fields import Glacier, Annual_Data, Annual_Change, User

 global db


 def create_database(db_name, db_user, db_pw):
     script = "mariadb_setup.sh"
-    output = call([script + db_name + db_user + db_pw])
-    if output != 0:
+    output = run([script + db_name + db_user + db_pw])
+    if output.returncode != 0:
         print("Error: couldn't create database")
         exit()
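A quick note on the `call` → `run` switch above: `subprocess.call` returns the exit status as a plain integer, while `subprocess.run` returns a `CompletedProcess`, which is why the check moves to `output.returncode`. The committed line still concatenates the script name and the three credentials into one string inside the list; the list form normally passes each argument as its own element. A minimal sketch of that variant, assuming `mariadb_setup.sh` is an executable script in the working directory:

```python
from subprocess import run


def create_database(db_name, db_user, db_pw):
    # Each list element reaches the script as a separate argument
    # ($1, $2, $3) instead of one concatenated command name.
    result = run(["./mariadb_setup.sh", db_name, db_user, db_pw])
    if result.returncode != 0:
        print("Error: couldn't create database")
```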
@@ -31,9 +33,7 @@ def create_tables(db):

 def main():
     if len(argv) != 3:
-        print(
-            "Usage: " + argv[0] + " <database name> <database user> <database password>"
-        )
+        print("Usage: " + argv[0] + " <db name> <db user> <db password>")
         exit()

     db_name = str(argv[1])
@@ -1,16 +1,30 @@
 from pandas import read_csv, concat, DataFrame
-from iso3166 import countries
+from iso3166 import countries as co


-def select_columns() -> [DataFrame]:
+def country_conversion(political_unit) -> str:
+    codes = co.get(political_unit)
+    return codes.name
+
+
+def select_columns() -> DataFrame:
     min_year = 2010
-    fields = ["POLITICAL_UNIT", "WGMS_ID", "YEAR"]
+    fields = [
+        "POLITICAL_UNIT",
+        "WGMS_ID",
+        "YEAR",
+        "AREA_SURVEY_YEAR",
+        "AREA_CHANGE",
+        "THICKNESS CHANGE",
+        "VOLUME_CHANGE",
+    ]
     iter_csv = read_csv(
-        "../../Assets/WGMS-FoG-2019-12-D-CHANGE.csv",
+        "../../data/WGMS-FoG-2019-12-D-CHANGE.csv",
         skipinitialspace=True,
         usecols=fields,
         iterator=True,
         chunksize=100,
+        converters={"YEAR": country_conversion},
     )
     data = concat([chunk[chunk["YEAR"] > min_year] for chunk in iter_csv])
     return data
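Worth flagging in the parser hunk above: the new converter is registered on the `YEAR` column, yet `country_conversion` resolves an ISO 3166 code via `iso3166.countries.get(...).name`, and the very next line still compares `YEAR` numerically against `min_year`. Presumably the conversion is meant for `POLITICAL_UNIT`. A minimal sketch of that reading, reusing the column names and CSV path from the diff (everything else here is an assumption, not the project's code):

```python
from iso3166 import countries as co
from pandas import concat, read_csv


def country_conversion(political_unit) -> str:
    # e.g. co.get("CH").name == "Switzerland"
    return co.get(political_unit).name


iter_csv = read_csv(
    "../../data/WGMS-FoG-2019-12-D-CHANGE.csv",
    skipinitialspace=True,
    usecols=["POLITICAL_UNIT", "WGMS_ID", "YEAR"],
    iterator=True,
    chunksize=100,
    converters={"POLITICAL_UNIT": country_conversion},  # not "YEAR"
)
# YEAR stays numeric, so the year filter still works after the conversion.
data = concat([chunk[chunk["YEAR"] > 2010] for chunk in iter_csv])
```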
@@ -51,9 +51,9 @@ CLOSED: [2020-01-03 Fri 00:44]
 - [X] Connection
 - [X] Creation from script
 - [X] Creation of tables via class
-**** TODO Parser [0/4] [0%]
-- [ ] Select useful fields
-- [ ] Convert PU to Country (ISO 3166)
+**** TODO Parser [2/4] [50%]
+- [X] Select useful fields
+- [X] Convert PU to Country (ISO 3166)
 - [ ] Arithmetic operations for yearly changes
 - [ ] Insert into database
 **** NEXT Flask framework
@@ -0,0 +1,49 @@
+from subprocess import call
+from sys import argv
+from flask_sqlalchemy import SQLAlchemy
+from flask import Flask
+from fields import Glacier, Annual_Data, Annual_Change, User
+
+global db
+
+
+def create_database(db_name, db_user, db_pw):
+    script = "mariadb_setup.sh"
+    output = call([script + db_name + db_user + db_pw])
+    if output != 0:
+        print("Error: couldn't create database")
+        exit()
+
+
+def create_connection(db_name, db_user, db_pw):
+    host = "localhost:3306"
+    connection_uri = "mysql+pymysql://{user}:{pw}@{url}/{db}".format(
+        user=db_user, pw=db_pw, url=host, db=db_name
+    )
+    app = Flask(__name__)
+    app.config["SQLALCHEMY_DATABASE_URI"] = connection_uri
+    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
+    global db
+    db = SQLAlchemy(app)
+
+
+def create_tables(db):
+    db.create_all()
+
+
+def main():
+    if len(argv) != 3:
+        print("Usage: " + argv[0] + " <db name> <db user> <db password>")
+        exit()
+
+    db_name = str(argv[1])
+    db_user = str(argv[2])
+    db_password = str(argv[3])
+
+    create_database(db_name, db_user, db_password)
+    create_connection(db_name, db_user, db_password)
+    create_tables(db)
+
+
+if __name__ == "__main__":
+    main()
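One small thing in the added file: `main()` reads `argv[1]` through `argv[3]`, but the guard checks `len(argv) != 3`. Since `argv[0]` is the script name, three user-supplied arguments give `len(argv) == 4`, so the guard as committed rejects the intended invocation. A hedged sketch of the check (the script filename below is hypothetical):

```python
# Expected invocation: python create_database.py <db name> <db user> <db password>
from sys import argv

if len(argv) != 4:  # script name plus three arguments
    print("Usage: " + argv[0] + " <db name> <db user> <db password>")
    exit(1)
```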
@@ -0,0 +1,16 @@
+from pandas import read_csv, concat, DataFrame
+from iso3166 import countries
+
+
+def select_columns() -> [DataFrame]:
+    min_year = 2010
+    fields = ["POLITICAL_UNIT", "WGMS_ID", "YEAR"]
+    iter_csv = read_csv(
+        "../../Assets/WGMS-FoG-2019-12-D-CHANGE.csv",
+        skipinitialspace=True,
+        usecols=fields,
+        iterator=True,
+        chunksize=100,
+    )
+    data = concat([chunk[chunk["YEAR"] > min_year] for chunk in iter_csv])
+    return data
@@ -19,7 +19,7 @@ relevant data for studies on climate change, narrowing these down to

 1. *RD1*: Glacier data
    - Country - /String of at most 30 characters/
-   - Glacier name - /String of at most 30 characters/
+   - Glacier name - /String of at most 60 characters/
    - Glacier ID (WGMS-compatible) - /5-digit integer/

 2. *RD2*: Annual data for a glacier
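The changed requirement above (glacier name widened from 30 to 60 characters) would ultimately land in the `Glacier` model imported from `fields` in the setup script. A minimal sketch of how a declarative Flask-SQLAlchemy model could encode RD1; the column names and types are assumptions for illustration, not the project's actual `fields.py`:

```python
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()


class Glacier(db.Model):
    # RD1: glacier data (illustrative columns, not the real fields.py)
    wgms_id = db.Column(db.Integer, primary_key=True)  # WGMS-compatible 5-digit ID
    country = db.Column(db.String(30))                 # country, at most 30 characters
    name = db.Column(db.String(60))                    # glacier name, widened from 30 to 60
```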