src/python/T0/RunConfig/RunConfigAPI.py (85 additions, 78 deletions)
@@ -219,8 +219,7 @@ def configureRunStream(tier0Config, run, stream, specDirectory, dqmUploadProxy):
     #
     # for PhEDEx subscription settings
     #
-    subscriptions = { 'Express' : [],
-                      'Bulk' : [] }
+    subscriptions = []
 
     # some hardcoded PhEDEx defaults
     expressPhEDExInjectNode = "T2_CH_CERN"
@@ -278,16 +277,15 @@ def configureRunStream(tier0Config, run, stream, specDirectory, dqmUploadProxy):

         bindsPhEDExConfig.append( { 'RUN' : run,
                                     'PRIMDS' : specialDataset,
-                                    'NODE' : expressPhEDExSubscribeNode,
-                                    'CUSTODIAL' : 1,
-                                    'REQ_ONLY' : "n",
-                                    'PRIO' : "high" } )
+                                    'ARCHIVAL_NODE' : None,
+                                    'TAPE_NODE' : None,
+                                    'DISK_NODE' : expressPhEDExSubscribeNode } )
 
-        subscriptions['Express'].append( { 'custodialSites' : [],
-                                           'nonCustodialSites' : [expressPhEDExSubscribeNode],
-                                           'autoApproveSites' : [expressPhEDExSubscribeNode],
-                                           'priority' : "high",
-                                           'primaryDataset' : specialDataset } )
+        subscriptions.append( { 'custodialSites' : [],
+                                'nonCustodialSites' : [ expressPhEDExSubscribeNode ],
+                                'autoApproveSites' : [ expressPhEDExSubscribeNode ],
+                                'priority' : "high",
+                                'primaryDataset' : specialDataset } )
 
     alcaSkim = None
     if "ALCARECO" in streamConfig.Express.DataTiers:
@@ -374,49 +372,32 @@ def configureRunStream(tier0Config, run, stream, specDirectory, dqmUploadProxy):
                                         'selectEvents' : selectEvents,
                                         'primaryDataset' : dataset } )
 
+            bindsPhEDExConfig.append( { 'RUN' : run,
+                                        'PRIMDS' : dataset,
+                                        'ARCHIVAL_NODE' : datasetConfig.ArchivalNode,
+                                        'TAPE_NODE' : datasetConfig.TapeNode,
+                                        'DISK_NODE' : datasetConfig.DiskNode } )
+
             custodialSites = []
-            nonCustodialSites = []
             autoApproveSites = []
 
-            if datasetConfig.CustodialNode != None:
-
-                custodialSites.append(datasetConfig.CustodialNode)
-
-                requestOnly = "y"
-                if datasetConfig.CustodialAutoApprove:
-                    requestOnly = "n"
-                    autoApproveSites.append(datasetConfig.CustodialNode)
-
-                bindsStorageNode.append( { 'NODE' : datasetConfig.CustodialNode } )
-
-                bindsPhEDExConfig.append( { 'RUN' : run,
-                                            'PRIMDS' : dataset,
-                                            'NODE' : datasetConfig.CustodialNode,
-                                            'CUSTODIAL' : 1,
-                                            'REQ_ONLY' : requestOnly,
-                                            'PRIO' : datasetConfig.CustodialPriority } )
-
             if datasetConfig.ArchivalNode != None:
-
                 bindsStorageNode.append( { 'NODE' : datasetConfig.ArchivalNode } )
+                custodialSites.append(datasetConfig.ArchivalNode)
+                autoApproveSites.append(datasetConfig.ArchivalNode)
+            if datasetConfig.TapeNode != None:
+                bindsStorageNode.append( { 'NODE' : datasetConfig.TapeNode } )
+                custodialSites.append(datasetConfig.TapeNode)
+            if datasetConfig.DiskNode != None:
+                bindsStorageNode.append( { 'NODE' : datasetConfig.DiskNode } )
 
-                bindsPhEDExConfig.append( { 'RUN' : run,
-                                            'PRIMDS' : dataset,
-                                            'NODE' : datasetConfig.ArchivalNode,
-                                            'CUSTODIAL' : 1,
-                                            'REQ_ONLY' : "n",
-                                            'PRIO' : datasetConfig.CustodialPriority } )
-
-            if len(custodialSites) + len(nonCustodialSites) > 0:
-                subscriptions['Bulk'].append( { 'custodialSites' : custodialSites,
-                                                'custodialSubType' : "Replica",
-                                                'nonCustodialSites' : nonCustodialSites,
-                                                'autoApproveSites' : autoApproveSites,
-                                                'priority' : datasetConfig.CustodialPriority,
-                                                'primaryDataset' : dataset } )
+            if len(custodialSites) > 0:
+                subscriptions.append( { 'custodialSites' : custodialSites,
+                                        'custodialSubType' : "Replica",
+                                        'nonCustodialSites' : [],
+                                        'autoApproveSites' : autoApproveSites,
+                                        'priority' : "high",
+                                        'primaryDataset' : dataset,
+                                        'dataTier' : "RAW" } )
 
     elif streamConfig.ProcessingStyle == "Express":

@@ -430,16 +411,15 @@ def configureRunStream(tier0Config, run, stream, specDirectory, dqmUploadProxy):

             bindsPhEDExConfig.append( { 'RUN' : run,
                                         'PRIMDS' : dataset,
-                                        'NODE' : expressPhEDExSubscribeNode,
-                                        'CUSTODIAL' : 1,
-                                        'REQ_ONLY' : "n",
-                                        'PRIO' : "high" } )
+                                        'ARCHIVAL_NODE' : None,
+                                        'TAPE_NODE' : None,
+                                        'DISK_NODE' : expressPhEDExSubscribeNode } )
 
-            subscriptions['Express'].append( { 'custodialSites' : [],
-                                               'nonCustodialSites' : [expressPhEDExSubscribeNode],
-                                               'autoApproveSites' : [expressPhEDExSubscribeNode],
-                                               'priority' : "high",
-                                               'primaryDataset' : dataset } )
+            subscriptions.append( { 'custodialSites' : [],
+                                    'nonCustodialSites' : [ expressPhEDExSubscribeNode ],
+                                    'autoApproveSites' : [ expressPhEDExSubscribeNode ],
+                                    'priority' : "high",
+                                    'primaryDataset' : dataset } )
 
     #
     # finally create WMSpec
@@ -539,13 +519,13 @@ def configureRunStream(tier0Config, run, stream, specDirectory, dqmUploadProxy):
         factory = RepackWorkloadFactory()
         wmSpec = factory.factoryWorkloadConstruction(workflowName, specArguments)
         wmSpec.setPhEDExInjectionOverride(runInfo['bulk_data_loc'])
-        for subscription in subscriptions['Bulk']:
+        for subscription in subscriptions:
             wmSpec.setSubscriptionInformation(**subscription)
     elif streamConfig.ProcessingStyle == "Express":
         factory = ExpressWorkloadFactory()
         wmSpec = factory.factoryWorkloadConstruction(workflowName, specArguments)
         wmSpec.setPhEDExInjectionOverride(expressPhEDExInjectNode)
-        for subscription in subscriptions['Express']:
+        for subscription in subscriptions:
             wmSpec.setSubscriptionInformation(**subscription)
 
     if streamConfig.ProcessingStyle in [ 'Bulk', 'Express' ]:
@@ -720,28 +700,55 @@ def releasePromptReco(tier0Config, specDirectory, dqmUploadProxy):
                                     'SCRAM_ARCH' : datasetConfig.ScramArch,
                                     'GLOBAL_TAG' : datasetConfig.GlobalTag } )
 
-        phedexConfig = phedexConfigs.get(dataset, {})
-
-        custodialSites = []
-        nonCustodialSites = []
-        autoApproveSites = []
+        phedexConfig = phedexConfigs[dataset]
 
-        for node, config in phedexConfig.items():
+        if datasetConfig.WriteAOD:
 
-            if config['custodial'] == 1:
-                custodialSites.append(node)
-            else:
-                nonCustodialSites.append(node)
+            custodialSites = []
+            nonCustodialSites = []
+            autoApproveSites = []
 
-            if config['request_only'] == "n":
-                autoApproveSites.append(node)
+            if phedexConfig['tape_node'] != None:
+                custodialSites.append(phedexConfig['tape_node'])
+            if phedexConfig['disk_node'] != None:
+                nonCustodialSites.append(phedexConfig['disk_node'])
+                autoApproveSites.append(phedexConfig['disk_node'])
 
-        if len(custodialSites) + len(nonCustodialSites) > 0:
-            subscriptions.append( { 'custodialSites' : custodialSites,
-                                    'nonCustodialSites' : nonCustodialSites,
-                                    'autoApproveSites' : autoApproveSites,
-                                    'priority' : config['priority'],
-                                    'primaryDataset' : dataset } )
+            if len(custodialSites) + len(nonCustodialSites) > 0:
+                subscriptions.append( { 'custodialSites' : custodialSites,
+                                        'custodialSubType' : "Replica",
+                                        'nonCustodialSites' : nonCustodialSites,
+                                        'autoApproveSites' : autoApproveSites,
+                                        'priority' : "high",
+                                        'primaryDataset' : dataset,
+                                        'dataTier' : "AOD" } )
+
+        if len(datasetConfig.AlcaSkims) > 0:
+            if phedexConfig['tape_node'] != None:
+                subscriptions.append( { 'custodialSites' : [phedexConfig['tape_node']],
+                                        'custodialSubType' : "Replica",
+                                        'nonCustodialSites' : [],
+                                        'autoApproveSites' : [],
+                                        'priority' : "high",
+                                        'primaryDataset' : dataset,
+                                        'dataTier' : "ALCARECO" } )
+
+        if datasetConfig.WriteDQM:
+            if phedexConfig['tape_node'] != None:
+                subscriptions.append( { 'custodialSites' : [phedexConfig['tape_node']],
+                                        'custodialSubType' : "Replica",
+                                        'nonCustodialSites' : [],
+                                        'autoApproveSites' : [],
+                                        'priority' : "high",
+                                        'primaryDataset' : dataset,
+                                        'dataTier' : "DQM" } )
+
+        if datasetConfig.WriteRECO:
+            if phedexConfig['disk_node'] != None:
+                subscriptions.append( { 'custodialSites' : [],
+                                        'nonCustodialSites' : [phedexConfig['disk_node']],
+                                        'autoApproveSites' : [phedexConfig['disk_node']],
+                                        'priority' : "high",
+                                        'primaryDataset' : dataset,
+                                        'dataTier' : "RECO" } )
 
         writeTiers = []
         if datasetConfig.WriteRECO:
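
Note: Express and Bulk subscription entries now go into one flat subscriptions list, and each entry is consumed unchanged by wmSpec.setSubscriptionInformation(**subscription). A minimal sketch of a Bulk RAW entry and the consuming loop; the node and dataset names are made-up examples, not values from any real configuration:

    # Sketch only -- node and dataset names below are hypothetical.
    subscriptions = []
    subscriptions.append( { 'custodialSites' : [ "T1_US_FNAL_MSS" ],
                            'custodialSubType' : "Replica",
                            'nonCustodialSites' : [],
                            'autoApproveSites' : [ "T1_US_FNAL_MSS" ],
                            'priority' : "high",
                            'primaryDataset' : "MinimumBias",
                            'dataTier' : "RAW" } )

    for subscription in subscriptions:
        # in RunConfigAPI this call is wmSpec.setSubscriptionInformation(**subscription)
        print subscription['primaryDataset'], subscription['dataTier']
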
src/python/T0/RunConfig/Tier0Config.py (10 additions, 11 deletions)
@@ -160,14 +160,11 @@
 |--> RecoDelayOffset - Time before PromptReco is released for which
 |                      settings are locked in and reco looks released
 |
-|--> CustodialNode - The custodial PhEDEx storage node for this dataset
-|
-|--> ArchivalNode - The archival PhEDEx node (should always be CERN T0)
-|
-|--> CustodialPriority - The priority of the custodial subscription
+|--> TapeNode - The tape PhEDEx node (should be T1 _MSS)
 |
-|--> CustodialAutoApprove - Determine whether or not the custodial
-|                           subscription will be auto approved.
+|--> DiskNode - The disk PhEDEx node (should be T1 _Disk)
 |
 |--> ProcessingVersion - Used for all output from PromptReco
 |
@@ -371,14 +368,14 @@ def addDataset(config, datasetName, **settings):
         msg = "Tier0Config.addDataset : no write_dqm defined for dataset %s or Default" % datasetName
         raise RuntimeError, msg
 
-    #
-    # some optional parameters, Default rules are still used
-    #
-    if hasattr(datasetConfig, "ArchivalNode"):
-        datasetConfig.ArchivalNode = settings.get('archival_node', datasetConfig.ArchivalNode)
-    else:
-        datasetConfig.ArchivalNode = settings.get('archival_node', None)
-
+    #
+    # optional parameter, Default rule is still used
+    #
     if hasattr(datasetConfig, "BlockCloseDelay"):
         datasetConfig.BlockCloseDelay = settings.get("blockCloseDelay", datasetConfig.BlockCloseDelay)
     else:
@@ -390,9 +387,8 @@ def addDataset(config, datasetName, **settings):
     datasetConfig.AlcaSkims = settings.get("alca_producers", [])
     datasetConfig.DqmSequences = settings.get("dqm_sequences", [])
 
-    datasetConfig.CustodialNode = settings.get("custodial_node", None)
-    datasetConfig.CustodialPriority = settings.get("custodial_priority", "high")
-    datasetConfig.CustodialAutoApprove = settings.get("custodial_auto_approve", False)
+    datasetConfig.TapeNode = settings.get("tape_node", None)
+    datasetConfig.DiskNode = settings.get("disk_node", None)
 
     return

@@ -571,6 +567,9 @@ def addRepackConfig(config, streamName, **options):
     else:
         streamConfig.Repack.BlockCloseDelay = options.get("blockCloseDelay", 24 * 3600)
 
+    streamConfig.Repack.TapeNodes = options.get("tapeNodes", [])
+    streamConfig.Repack.DiskNodes = options.get("diskNodes", [])
+
     return
 
 def addExpressConfig(config, streamName, **options):
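
The new settings map directly onto the configuration calls. A hedged sketch of how a Tier0 configuration might now declare node placement; the dataset name, node names, and list values are hypothetical examples, and config is assumed to be an existing Tier0 configuration object created elsewhere:

    # Sketch only -- dataset and node names are hypothetical examples.
    from T0.RunConfig.Tier0Config import addDataset, addRepackConfig

    addDataset(config, "Cosmics",
               tape_node = "T1_US_FNAL_MSS",     # replaces custodial_node & friends
               disk_node = "T1_US_FNAL_Disk")

    addRepackConfig(config, "Default",
                    tapeNodes = [ "T0_CH_CERN_MSS" ],  # new list-valued options
                    diskNodes = [ "T2_CH_CERN" ])
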
src/python/T0/WMBS/Oracle/Create.py (20 additions, 9 deletions)
@@ -326,13 +326,12 @@ def __init__(self, logger = None, dbi = None, params = None):

         self.create[len(self.create)] = \
             """CREATE TABLE phedex_config (
-                 run_id int not null,
-                 primds_id int not null,
-                 node_id int not null,
-                 custodial int not null,
-                 request_only char(1) not null,
-                 priority varchar2(10) not null,
-                 primary key (run_id, primds_id, node_id)
+                 run_id int not null,
+                 primds_id int not null,
+                 archival_node_id int,
+                 tape_node_id int,
+                 disk_node_id int,
+                 primary key (run_id, primds_id)
                ) ORGANIZATION INDEX"""
 
         self.create[len(self.create)] = \
@@ -762,8 +761,20 @@ def __init__(self, logger = None, dbi = None, params = None):

         self.constraints[len(self.constraints)] = \
             """ALTER TABLE phedex_config
-                 ADD CONSTRAINT phe_con_nod_id_fk
-                 FOREIGN KEY (node_id)
+                 ADD CONSTRAINT phe_con_arc_nod_id_fk
+                 FOREIGN KEY (archival_node_id)
                  REFERENCES storage_node(id)"""
 
+        self.constraints[len(self.constraints)] = \
+            """ALTER TABLE phedex_config
+                 ADD CONSTRAINT phe_con_tap_nod_id_fk
+                 FOREIGN KEY (tape_node_id)
+                 REFERENCES storage_node(id)"""
+
+        self.constraints[len(self.constraints)] = \
+            """ALTER TABLE phedex_config
+                 ADD CONSTRAINT phe_con_dis_nod_id_fk
+                 FOREIGN KEY (disk_node_id)
+                 REFERENCES storage_node(id)"""
+
         self.constraints[len(self.constraints)] = \
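
With this schema, phedex_config keys on (run_id, primds_id) alone and carries one nullable column per storage role, so RunConfigAPI now builds exactly one bind dict per run/dataset pair. A sketch of such a bind, with illustrative values (the run number and node names are made up; any role may be None and is stored as NULL):

    # Sketch only -- run number and node names are hypothetical.
    bindPhEDExConfig = { 'RUN' : 123456,
                         'PRIMDS' : "MinimumBias",
                         'ARCHIVAL_NODE' : None,            # role not configured
                         'TAPE_NODE' : "T1_US_FNAL_MSS",
                         'DISK_NODE' : "T1_US_FNAL_Disk" }
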
src/python/T0/WMBS/Oracle/RunConfig/GetPhEDExConfig.py (14 additions, 16 deletions)
@@ -14,19 +14,19 @@ class GetPhEDExConfig(DBFormatter):
     def execute(self, run, conn = None, transaction = False):
 
         sql = """SELECT primary_dataset.name,
-                        storage_node.name,
-                        phedex_config.custodial,
-                        phedex_config.request_only,
-                        phedex_config.priority
-                 FROM run_primds_stream_assoc
-                 INNER JOIN phedex_config ON
-                   phedex_config.run_id = run_primds_stream_assoc.run_id AND
-                   phedex_config.primds_id = run_primds_stream_assoc.primds_id
+                        archival_node.name,
+                        tape_node.name,
+                        disk_node.name
+                 FROM phedex_config
                  INNER JOIN primary_dataset ON
                    primary_dataset.id = phedex_config.primds_id
-                 INNER JOIN storage_node ON
-                   storage_node.id = phedex_config.node_id
-                 WHERE run_primds_stream_assoc.run_id = :RUN
+                 LEFT OUTER JOIN storage_node archival_node ON
+                   archival_node.id = phedex_config.archival_node_id
+                 LEFT OUTER JOIN storage_node tape_node ON
+                   tape_node.id = phedex_config.tape_node_id
+                 LEFT OUTER JOIN storage_node disk_node ON
+                   disk_node.id = phedex_config.disk_node_id
+                 WHERE phedex_config.run_id = :RUN
                  """
 
         binds = { 'RUN' : run }
Expand All @@ -38,14 +38,12 @@ def execute(self, run, conn = None, transaction = False):
         for result in results:
 
             primds = result[0]
-            node = result[1]
 
             if not resultDict.has_key(primds):
                 resultDict[primds] = {}
 
-            resultDict[primds][node] = {}
-            resultDict[primds][node]['custodial'] = result[2]
-            resultDict[primds][node]['request_only'] = result[3]
-            resultDict[primds][node]['priority'] = result[4]
+            resultDict[primds]['archival_node'] = result[1]
+            resultDict[primds]['tape_node'] = result[2]
+            resultDict[primds]['disk_node'] = result[3]
 
         return resultDict
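
The reshaped query returns one row per primary dataset, and the formatter flattens it to a single dict per dataset; a node not configured for a role comes back as None through the LEFT OUTER JOIN. A sketch of the returned structure for one run (dataset and node names are made-up examples):

    # Sketch only -- dataset and node names are hypothetical.
    resultDict = { "MinimumBias" : { 'archival_node' : None,
                                     'tape_node' : "T1_US_FNAL_MSS",
                                     'disk_node' : "T1_US_FNAL_Disk" } }

    # releasePromptReco then reads, e.g., resultDict["MinimumBias"]['tape_node'].
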