Commit 06404d2: adding sampleids properly
1 parent ebd5bd6

File tree: 7 files changed, +127 -155 lines

neotomaUploader/insert_collunit.py

Lines changed: 3 additions & 3 deletions
@@ -47,9 +47,9 @@ def insert_collunit(cur, yml_dict, csv_template, uploader):
                              _gpslongitude := %(ew)s)""",
             {'handle': collname[:10], # Must be smaller than 10 chars
              'collname': collname,
-             'siteid' : 4,#uploader.get('siteid'), Change
-             'colltypeid': 3,
-             'depenvtid': 19,
+             'siteid' : uploader.get('siteid'),
+             'colltypeid': 3, # to do: put it as input
+             'depenvtid': 19, # to do: put it as input
              'newdate': inputs['colldate'][0],
              'location': inputs['location'][0],
              'ns': coords[0],
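The substantive fix here: the collection unit now uses the siteid produced by the earlier insert_site step instead of the hard-coded 4. A minimal standalone sketch of the accumulator pattern the uploader follows, assuming only that uploader is a plain dict threaded through the steps (values are illustrative, not the repo's API):

    # Each insert step writes the ID it creates into a shared dict,
    # and later steps read from it, so nothing stays hard-coded.
    uploader = {}
    uploader['siteid'] = 1234          # would come from nu.insert_site(...)

    siteid = uploader.get('siteid')    # None if insert_site never ran
    if siteid is None:
        raise RuntimeError('insert_site must run before insert_collunit')

Using .get() instead of a literal also means a skipped or failed prior step surfaces as an explicit error rather than silently attaching the collection unit to site 4.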

neotomaUploader/insert_data.py

Lines changed: 1 addition & 3 deletions
@@ -11,7 +11,6 @@ def insert_data(cur, yml_dict, csv_template, uploader):
     inputs = pull_params(params, yml_dict, csv_template, 'ndb.data')

     data_points = []
-    counter = 0
     for i, val_dict in enumerate(inputs):
         val_dict['value'] = [None if item == 'NA' else item for item in val_dict['value']]
         for j, val in enumerate(val_dict['unitcolumn']):
@@ -20,12 +19,11 @@ def insert_data(cur, yml_dict, csv_template, uploader):
             cur.execute(get_varid, {'units': val_dict['unitcolumn'][j]})
             varid = cur.fetchone()[0]

-            cur.execute(data_query, {'sampleid': int(uploader['samples'][counter]),
+            cur.execute(data_query, {'sampleid': int(uploader['samples'][i]),
                                      'variableid': int(varid),
                                      'value': val_dict['value'][i]})

             result = cur.fetchone()[0]
-            counter +=1
             data_points.append(result)

     return data_points
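The deleted counter duplicated the index that enumerate already yields, and it was incremented inside the inner unit-column loop, so it could run ahead of uploader['samples']. Indexing with the outer row index i keeps each value row paired with the sample created from the same template row. A minimal standalone sketch of the idiom, with hypothetical IDs:

    # enumerate() supplies the index in lockstep with the item; a manual
    # counter incremented in a nested loop can drift out of sync.
    samples = [101, 102, 103]                        # hypothetical sample IDs
    rows = [{'value': 5.0}, {'value': 7.2}, {'value': None}]

    for i, row in enumerate(rows):
        print(samples[i], row['value'])              # indices stay aligned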

neotomaUploader/insert_dataset_database.py

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ def insert_dataset_database(cur, yml_dict, uploader):
     SELECT ts.insertdatasetdatabase(_datasetid := %(datasetid)s,
                                     _databaseid := %(databaseid)s)
     """
-
+    # Put it in the XLXs
     databaseid = yml_dict['databaseid']

     cur.execute(db_query, {'datasetid': int(uploader['datasetid']),
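The new comment (presumably "put it in the XLSX") flags that databaseid is still read from the YAML template rather than the spreadsheet. Until it moves, a hedged sketch of a friendlier guard than the bare KeyError a missing key would raise:

    # Fail with a clear message if the template omits databaseid.
    databaseid = yml_dict.get('databaseid')
    if databaseid is None:
        raise ValueError("yml template must define 'databaseid' (until it moves to the XLSX)")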

neotomaUploader/insert_dataset_repository.py

Lines changed: 3 additions & 2 deletions
@@ -5,9 +5,10 @@
 def insert_dataset_repository(cur, yml_dict, csv_template, uploader):
     params = ['contactid']
     inputs = pull_params(params, yml_dict, csv_template, 'ndb.sampleanalysts')
-    repo_query = """SELECT ts.insertrepositoryinstitution(_acronym:= %(acronym)s,
+    repo_query = """SELECT ts.insertdatasetrepository(_acronym:= %(acronym)s,
                                                       _repository := %(repository)s,
                                                       _notes := %(notes)s);"""
-
+    #_datasetid integer, _repositoryid integer, _notes character varying DEFAULT NULL::character varying)
+

     return None
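Note the mismatch this hunk records: the new comment gives the target signature of ts.insertdatasetrepository as (_datasetid, _repositoryid, _notes), while repo_query still passes _acronym and _repository, and the function returns None without executing anything. If it were wired up against the commented signature, it would follow the named-placeholder pattern used elsewhere in the repo; a hedged sketch (the IDs are hypothetical):

    # psycopg2-style named placeholders: the driver substitutes %(key)s
    # entries from the dict, keeping SQL and parameter values separate.
    repo_query = """SELECT ts.insertdatasetrepository(_datasetid := %(datasetid)s,
                                                      _repositoryid := %(repositoryid)s,
                                                      _notes := %(notes)s);"""
    cur.execute(repo_query, {'datasetid': int(uploader['datasetid']),
                             'repositoryid': 42,     # hypothetical repository ID
                             'notes': None})
    result = cur.fetchone()[0]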

neotomaUploader/insert_sample.py

Lines changed: 23 additions & 51 deletions
@@ -1,5 +1,6 @@
 import logging
 from .pull_params import pull_params
+import numpy as np
 import datetime

 def insert_sample(cur, yml_dict, csv_template, uploader):
@@ -22,63 +23,34 @@ def insert_sample(cur, yml_dict, csv_template, uploader):
     params = ['value']
     val_inputs = pull_params(params, yml_dict, csv_template, 'ndb.data')

-    params2 = ['lab_number', 'sampledate', 'analysisdate', 'labnumber', 'prepmethod', 'notes']
-    inputs2 = pull_params(params2, yml_dict, csv_template, 'ndb.data')
+    params2 = ['lab_number', 'sampledate', 'analysisdate', 'labnumber', 'prepmethod', 'notes', 'taxonname', 'samplename']
+    inputs2 = pull_params(params2, yml_dict, csv_template, 'ndb.samples')
     inputs2 = dict(map(lambda item: (item[0], None if all([i is None for i in item[1]]) else item[1]),
                        inputs2.items()))

     # There might be several loops so I might need a for loop here
     samples = []

-    for j, val in enumerate(val_inputs):
-        for i, value in enumerate(uploader['anunits']):
-            get_taxonid = """SELECT * FROM ndb.taxa WHERE taxonname %% %(taxonname)s;"""
-            cur.execute(get_taxonid, {'taxonname': val_inputs[j]['taxonname']})
-            taxonid = cur.fetchone()
+    # Assert aunits and samples are same in length
+    for j, val in enumerate(uploader['anunits']):
+        get_taxonid = """SELECT * FROM ndb.taxa WHERE taxonname %% %(taxonname)s;"""
+        cur.execute(get_taxonid, {'taxonname': inputs2['taxonname']})
+        taxonid = cur.fetchone()
+        if taxonid != None:
+            taxonid = int(taxonid[0])
+        else:
+            taxonid = None

-            if taxonid is None:
-                # Inserts taxonid in taxonname if it didn't exist ???
-                # How does this behave with Tilia
-                assigntaxonID = """
-                    SELECT ts.inserttaxon(_code := %(code)s,
-                                          _name := %(name)s,
-                                          _extinct := %(extinct)s,
-                                          _groupid := %(groupid)s,
-                                          _author := %(author)s,
-                                          _valid := %(valid)s,
-                                          _higherid := %(higherid)s,
-                                          _pubid := %(pubid)s,
-                                          _validatorid := %(validatorid)s,
-                                          _validatedate := %(validatedate)s,
-                                          _notes := %(notes)s)
-                """
-                #cur.execute(assigntaxonID, {code: code,
-                #                            name: name,
-                #                            extinct: extinct,
-                #                            groupid: groupid,
-                #                            author: author,
-                #                            valid: valid,
-                #                            higherid: higherid,
-                #                            pubid: pubid,
-                #                            validatorid: validatorid,
-                #                            validatedate: validatedate,
-                #                            notes: notes})
-                # taxonid = cur.fetchone()[0]
-                taxonid = 5
-            else:
-                taxonid = taxonid[0]
-
-            cur.execute(sample_query, {'analysisunitid': int(uploader['anunits'][i]),
-                                       'datasetid': int(uploader['datasetid']),
-                                       'samplename': val_inputs[j]['taxonname'],
-                                       'sampledate': inputs2['sampledate'], # datetime.datetime.today().date(),
-                                       'analysisdate': inputs2['analysisdate'],
-                                       #'taxonid': int(val_inputs[i]['taxonid']),
-                                       'taxonid': int(taxonid),
-                                       'labnumber': inputs2['lab_number'],
-                                       'prepmethod': inputs2['prepmethod'],
-                                       'notes': inputs2['notes']})
-            sampleid = cur.fetchone()[0]
-            samples.append(sampleid)
+        cur.execute(sample_query, {'analysisunitid': int(uploader['anunits'][j]),
+                                   'datasetid': int(uploader['datasetid']),
+                                   'samplename': inputs2['samplename'],
+                                   'sampledate': inputs2['sampledate'], # datetime.datetime.today().date(),
+                                   'analysisdate': inputs2['analysisdate'],
+                                   'taxonid': taxonid,
+                                   'labnumber': inputs2['lab_number'],
+                                   'prepmethod': inputs2['prepmethod'],
+                                   'notes': inputs2['notes']})
+        sampleid = cur.fetchone()[0]
+        samples.append(sampleid)

     return samples
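The rewrite collapses the nested loops into a single pass over uploader['anunits'], so exactly one sample is created per analysis unit; that one-to-one correspondence is the invariant insert_data now relies on when it indexes uploader['samples'][i]. (Two small nits in the new hunk: PEP 8 prefers taxonid is not None over != None, and the added numpy import is unused here.) A hedged sketch of the length check the "Assert aunits and samples are same in length" comment calls for, placed after the loop:

    # Catch a broken unit/sample correspondence at the source instead of
    # as a misaligned index later in insert_data.
    assert len(samples) == len(uploader['anunits']), (
        f"{len(samples)} samples created for "
        f"{len(uploader['anunits'])} analysis units")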

neotomaUploader/valid_taxa.py

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ def valid_taxa(cur, csv_template, yml_dict):
     cur.execute(nameQuery, {'taxonname': taxonname})
     result = {'name': taxonname, 'match': cur.fetchall() or []}
     taxamatch.append(result)
-
+
     matches = []
     for taxon in taxamatch:
         if len(taxon['match']) ==0:

(The changed line is blank on both sides; this is a whitespace-only touch-up.)

template_upload.py

Lines changed: 95 additions & 94 deletions
@@ -38,123 +38,124 @@
     # Verify that the CSV columns and the YML keys match
     csv_valid = nu.csv_validator(filename = filename,
                                  yml_data = yml_data)
-    try:
-        logfile.append('=== Inserting new Site ===')
-        uploader['siteid'] = nu.insert_site(cur = cur,
-                                            yml_dict = yml_dict,
-                                            csv_template = csv_template)
-        logfile.append(f"siteid: {uploader['siteid']}")
-
-        # logfile.append('=== Inserting Site Geopol ===')
-        # uploader['geopolid'] = nu.insert_geopol(cur = cur,
-        #                                         yml_dict = yml_dict,
-        #                                         csv_template = csv_template,
-        #                                         uploader = uploader)
-        # logfile.append(f"Geopolitical Unit: {uploader['geopolid']}")
-
-        logfile.append('=== Inserting Collection Units ===')
-        uploader['collunitid'] = nu.insert_collunit(cur = cur,
+    #try:
+    logfile.append('=== Inserting new Site ===')
+    uploader['siteid'] = nu.insert_site(cur = cur,
+                                        yml_dict = yml_dict,
+                                        csv_template = csv_template)
+    logfile.append(f"siteid: {uploader['siteid']}")
+
+    # logfile.append('=== Inserting Site Geopol ===')
+    # uploader['geopolid'] = nu.insert_geopol(cur = cur,
+    #                                         yml_dict = yml_dict,
+    #                                         csv_template = csv_template,
+    #                                         uploader = uploader)
+    # logfile.append(f"Geopolitical Unit: {uploader['geopolid']}")
+
+    logfile.append('=== Inserting Collection Units ===')
+    uploader['collunitid'] = nu.insert_collunit(cur = cur,
+                                                yml_dict = yml_dict,
+                                                csv_template = csv_template,
+                                                uploader = uploader)
+    logfile.append(f"collunitid: {uploader['collunitid']}")
+
+    logfile.append('=== Inserting Analysis Units ===')
+    uploader['anunits'] = nu.insert_analysisunit(cur = cur,
                                                  yml_dict = yml_dict,
                                                  csv_template = csv_template,
                                                  uploader = uploader)
-        logfile.append(f"collunitid: {uploader['collunitid']}")
+    logfile.append(f"anunits: {uploader['anunits']}")

-        logfile.append('=== Inserting Analysis Units ===')
-        uploader['anunits'] = nu.insert_analysisunit(cur = cur,
-                                                     yml_dict = yml_dict,
-                                                     csv_template = csv_template,
-                                                     uploader = uploader)
-        logfile.append(f"anunits: {uploader['anunits']}")
+    logfile.append('=== Inserting Chronology ===')
+    uploader['chronology'] = nu.insert_chronology(cur = cur,
+                                                  yml_dict = yml_dict,
+                                                  csv_template = csv_template,
+                                                  uploader = uploader)
+    logfile.append(f"chronology: {uploader['chronology']}")

-        logfile.append('=== Inserting Chronology ===')
-        uploader['chronology'] = nu.insert_chronology(cur = cur,
+    logfile.append('=== Inserting Chroncontrol ===')
+    uploader['chroncontrol'] = nu.insert_chron_control(cur = cur,
                                                        yml_dict = yml_dict,
                                                        csv_template = csv_template,
                                                        uploader = uploader)
-        logfile.append(f"chronology: {uploader['chronology']}")
+    logfile.append(f"chroncontrol: {uploader['chroncontrol']}")

-        logfile.append('=== Inserting Chroncontrol ===')
-        uploader['chroncontrol'] = nu.insert_chron_control(cur = cur,
-                                                           yml_dict = yml_dict,
-                                                           csv_template = csv_template,
-                                                           uploader = uploader)
-        logfile.append(f"chroncontrol: {uploader['chroncontrol']}")
+    logfile.append('=== Inserting Dataset ===')
+    uploader['datasetid'] = nu.insert_dataset(cur = cur,
+                                              yml_dict = yml_dict,
+                                              csv_template = csv_template,
+                                              uploader = uploader)
+    logfile.append(f"datasetid: {uploader['datasetid']}")

-        logfile.append('=== Inserting Dataset ===')
-        uploader['datasetid'] = nu.insert_dataset(cur = cur,
+    logfile.append('=== Inserting Dataset PI ===')
+    uploader['datasetpi'] = nu.insert_dataset_pi(cur = cur,
                                                  yml_dict = yml_dict,
                                                  csv_template = csv_template,
                                                  uploader = uploader)
-        logfile.append(f"datasetid: {uploader['datasetid']}")
+    logfile.append(f"datasetPI: {uploader['datasetpi']}")

-        logfile.append('=== Inserting Dataset PI ===')
-        uploader['datasetpi'] = nu.insert_dataset_pi(cur = cur,
+    logfile.append('=== Inserting Data Processor ===')
+    uploader['processor'] = nu.insert_data_processor(cur = cur,
                                                      yml_dict = yml_dict,
                                                      csv_template = csv_template,
                                                      uploader = uploader)
-        logfile.append(f"datasetPI: {uploader['datasetpi']}")
-
-        logfile.append('=== Inserting Data Processor ===')
-        uploader['processor'] = nu.insert_data_processor(cur = cur,
-                                                         yml_dict = yml_dict,
-                                                         csv_template = csv_template,
-                                                         uploader = uploader)
-        logfile.append(f"dataset Processor: {uploader['processor']}")
-
-        # Not sure where to get this information from
-        # logfile.append('=== Inserting Repository ===')
-        # uploader['repository'] = nu.insert_dataset_repository(cur = cur,
-        #                                                       yml_dict = yml_dict,
-        #                                                       csv_template = csv_template,
-        #                                                       uploader = uploader)
-        # logfile.append(f"dataset Processor: {uploader['repository']}")
-
-        logfile.append('=== Inserting Dataset Database ===')
-        uploader['database'] = nu.insert_dataset_database(cur = cur,
-                                                          yml_dict = yml_dict,
-                                                          uploader = uploader)
-        logfile.append(f"Dataset Database: {uploader['database']}")
-
-        logfile.append('=== Inserting Samples ===')
-        uploader['samples'] = nu.insert_sample(cur,
-                                               yml_dict = yml_dict,
-                                               csv_template = csv_template,
-                                               uploader = uploader)
-        logfile.append(f"Dataset Samples: {uploader['samples']}")
+    logfile.append(f"dataset Processor: {uploader['processor']}")
+
+    # Not sure where to get this information from
+    # logfile.append('=== Inserting Repository ===')
+    # uploader['repository'] = nu.insert_dataset_repository(cur = cur,
+    #                                                       yml_dict = yml_dict,
+    #                                                       csv_template = csv_template,
+    #                                                       uploader = uploader)
+    # logfile.append(f"dataset Processor: {uploader['repository']}")
+
+    logfile.append('=== Inserting Dataset Database ===')
+    uploader['database'] = nu.insert_dataset_database(cur = cur,
+                                                      yml_dict = yml_dict,
+                                                      uploader = uploader)
+    logfile.append(f"Dataset Database: {uploader['database']}")

-        logfile.append('=== Inserting Sample Analyst ===')
-        uploader['sampleAnalyst'] = nu.insert_sample_analyst(cur,
-                                                             yml_dict = yml_dict,
-                                                             csv_template = csv_template,
-                                                             uploader = uploader)
-        logfile.append(f"Sample Analyst: {uploader['sampleAnalyst']}")
+    logfile.append('=== Inserting Samples ===')
+    uploader['samples'] = nu.insert_sample(cur,
+                                           yml_dict = yml_dict,
+                                           csv_template = csv_template,
+                                           uploader = uploader)
+    logfile.append(f"Dataset Samples: {uploader['samples']}")
+    print(uploader['samples'])

-        logfile.append('=== Inserting Sample Age ===')
-        uploader['sampleAge'] = nu.insert_sample_age(cur,
-                                                     yml_dict = yml_dict,
-                                                     csv_template = csv_template,
-                                                     uploader = uploader)
-        logfile.append(f"Sample Age: {uploader['sampleAge']}")
+    logfile.append('=== Inserting Sample Analyst ===')
+    uploader['sampleAnalyst'] = nu.insert_sample_analyst(cur,
+                                                         yml_dict = yml_dict,
+                                                         csv_template = csv_template,
+                                                         uploader = uploader)
+    logfile.append(f"Sample Analyst: {uploader['sampleAnalyst']}")

-        logfile.append('=== Inserting Data ===')
-        uploader['data'] = nu.insert_data(cur,
+    logfile.append('=== Inserting Sample Age ===')
+    uploader['sampleAge'] = nu.insert_sample_age(cur,
                                                  yml_dict = yml_dict,
                                                  csv_template = csv_template,
                                                  uploader = uploader)
-        logfile.append(f"Data: {uploader['data']}")
-
-        with open(filename + '.upload.log', 'w', encoding = "utf-8") as writer:
-            for i in logfile:
-                writer.write(i)
-                writer.write('\n')
-
-    except Exception as e:
-        logfile.append(f"File: {filename} could not be uploaded. Review logs.")
-        logfile.append(f"An exception occurred: {str(e)}")
-        with open(filename + '.upload.log', 'w', encoding = "utf-8") as writer:
-            for i in logfile:
-                writer.write(i)
-                writer.write('\n')
+    logfile.append(f"Sample Age: {uploader['sampleAge']}")
+
+    logfile.append('=== Inserting Data ===')
+    uploader['data'] = nu.insert_data(cur,
+                                      yml_dict = yml_dict,
+                                      csv_template = csv_template,
+                                      uploader = uploader)
+    logfile.append(f"Data: {uploader['data']}")
+
+    with open(filename + '.upload.log', 'w', encoding = "utf-8") as writer:
+        for i in logfile:
+            writer.write(i)
+            writer.write('\n')
+
+    #except Exception as e:
+    #    logfile.append(f"File: {filename} could not be uploaded. Review logs.")
+    #    logfile.append(f"An exception occurred: {str(e)}")
+    #    with open(filename + '.upload.log', 'w', encoding = "utf-8") as writer:
+    #        for i in logfile:
+    #            writer.write(i)
+    #            writer.write('\n')


     # conn.commit()
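This hunk comments out the try/except wrapper and dedents the pipeline one level, so any failing insert now propagates with a full traceback instead of being condensed into the log; the trade-off is that a crashed run no longer writes its partial log at all. A hedged sketch (run_insert_steps is a hypothetical wrapper for the step sequence above, not the repo's API) of a try/finally that would keep both:

    # try/finally re-raises the exception untouched (nothing is swallowed)
    # but still flushes whatever the logfile captured before the failure.
    try:
        run_insert_steps(uploader, logfile)
    finally:
        with open(filename + '.upload.log', 'w', encoding="utf-8") as writer:
            for line in logfile:
                writer.write(line)
                writer.write('\n')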
