@@ -121,7 +121,7 @@ def get_kinesis_streams(filter='.*', pool={}, env=None):
                pool[arn] = stream
                stream.shards = get_kinesis_shards(stream_details=details, env=env)
                result.append(stream)
-    except socket.error:
+    except Exception:
        pass
    return result
@@ -153,7 +153,7 @@ def get_sqs_queues(filter='.*', pool={}, env=None):
            if re.match(filter, name):
                queue = SqsQueue(arn)
                result.append(queue)
-    except socket.error:
+    except Exception:
        pass
    return result
@@ -237,7 +237,7 @@ def handle(func):
        out = cmd_lambda('list-functions', env)
        out = json.loads(out)
        parallelize(handle, out['Functions'])
-    except socket.error:
+    except Exception:
        pass
    return result
@@ -320,7 +320,7 @@ def handle(domain):
                result.append(es)
                pool[arn] = es
        parallelize(handle, out['DomainNames'])
-    except socket.error:
+    except Exception:
        pass

    return result
@@ -344,7 +344,7 @@ def handle(table):
                result.append(db)
                pool[arn] = db
        parallelize(handle, out['TableNames'])
-    except socket.error:
+    except Exception:
        pass
    return result
@@ -377,7 +377,7 @@ def handle(bucket):
        out = cmd_s3api('list-buckets', env)
        out = json.loads(out)
        parallelize(handle, out['Buckets'])
-    except socket.error:
+    except Exception:
        pass
    return result
@@ -399,7 +399,7 @@ def get_firehose_streams(filter='.*', pool={}, env=None):
                    bucket = EventSource.get(dest_s3, pool=pool)
                    s.destinations.append(bucket)
                result.append(s)
-    except socket.error:
+    except Exception:
        pass
    return result
@@ -413,87 +413,84 @@ def read_kinesis_iterator(shard_iterator, max_results=10, env=None):


def get_kinesis_events(stream_name, shard_id, max_results=10, env=None):
-    env = aws_stack.get_environment(env)
-    records = aws_stack.kinesis_get_latest_records(stream_name, shard_id, count=max_results, env=env)
-    for r in records:
-        r['ApproximateArrivalTimestamp'] = mktime(r['ApproximateArrivalTimestamp'])
-    result = {
-        'events': records
-    }
+    records = []
+    try:
+        env = aws_stack.get_environment(env)
+        records = aws_stack.kinesis_get_latest_records(stream_name, shard_id, count=max_results, env=env)
+        for r in records:
+            r['ApproximateArrivalTimestamp'] = mktime(r['ApproximateArrivalTimestamp'])
+    except Exception:
+        pass
+    result = {'events': records}
    return result


-def get_graph(name_filter='.*', env=None):
+def get_graph(name_filter='.*', env=None, **kwargs):
    result = {
        'nodes': [],
        'edges': []
    }

    pool = {}
-
-    if True:
-        result = {
-            'nodes': [],
-            'edges': []
-        }
-        node_ids = {}
-        # Make sure we load components in the right order:
-        # (ES,DynamoDB,S3) -> (Kinesis,Lambda)
-        domains = get_elasticsearch_domains(name_filter, pool=pool, env=env)
-        dbs = get_dynamo_dbs(name_filter, pool=pool, env=env)
-        buckets = get_s3_buckets(name_filter, details=True, pool=pool, env=env)
-        streams = get_kinesis_streams(name_filter, pool=pool, env=env)
-        firehoses = get_firehose_streams(name_filter, pool=pool, env=env)
-        lambdas = get_lambda_functions(name_filter, details=True, pool=pool, env=env)
-        queues = get_sqs_queues(name_filter, pool=pool, env=env)
-
-        for es in domains:
-            uid = short_uid()
-            node_ids[es.id] = uid
-            result['nodes'].append({'id': uid, 'arn': es.id, 'name': es.name(), 'type': 'es'})
-        for b in buckets:
-            uid = short_uid()
-            node_ids[b.id] = uid
-            result['nodes'].append({'id': uid, 'arn': b.id, 'name': b.name(), 'type': 's3'})
-        for db in dbs:
-            uid = short_uid()
-            node_ids[db.id] = uid
-            result['nodes'].append({'id': uid, 'arn': db.id, 'name': db.name(), 'type': 'dynamodb'})
-        for s in streams:
-            uid = short_uid()
-            node_ids[s.id] = uid
-            result['nodes'].append({'id': uid, 'arn': s.id, 'name': s.name(), 'type': 'kinesis'})
-            for shard in s.shards:
-                uid1 = short_uid()
-                name = re.sub(r'shardId-0*', '', shard.id) or '0'
-                result['nodes'].append({'id': uid1, 'arn': shard.id, 'name': name,
-                    'type': 'kinesis_shard', 'streamName': s.name(), 'parent': uid})
-        for f in firehoses:
-            uid = short_uid()
-            node_ids[f.id] = uid
-            result['nodes'].append({'id': uid, 'arn': f.id, 'name': f.name(), 'type': 'firehose'})
-            for d in f.destinations:
-                result['edges'].append({'source': uid, 'target': node_ids[d.id]})
-        for q in queues:
-            uid = short_uid()
-            node_ids[q.id] = uid
-            result['nodes'].append({'id': uid, 'arn': q.id, 'name': q.name(), 'type': 'sqs'})
-        for lda in lambdas:
-            uid = short_uid()
-            node_ids[lda.id] = uid
-            result['nodes'].append({'id': uid, 'arn': lda.id, 'name': lda.name(), 'type': 'lambda'})
-            for s in lda.event_sources:
-                lookup_id = s.id
-                if isinstance(s, DynamoDBStream):
-                    lookup_id = s.table.id
-                result['edges'].append({'source': node_ids.get(lookup_id), 'target': uid})
-            for t in lda.targets:
-                lookup_id = t.id
-                result['edges'].append({'source': uid, 'target': node_ids.get(lookup_id)})
-        for b in buckets:
-            for n in b.notifications:
-                src_uid = node_ids[b.id]
-                tgt_uid = node_ids[n.target.id]
-                result['edges'].append({'source': src_uid, 'target': tgt_uid})
+    node_ids = {}
+
+    # Make sure we load components in the right order:
+    # (ES,DynamoDB,S3) -> (Kinesis,Lambda)
+    domains = get_elasticsearch_domains(name_filter, pool=pool, env=env)
+    dbs = get_dynamo_dbs(name_filter, pool=pool, env=env)
+    buckets = get_s3_buckets(name_filter, details=True, pool=pool, env=env)
+    streams = get_kinesis_streams(name_filter, pool=pool, env=env)
+    firehoses = get_firehose_streams(name_filter, pool=pool, env=env)
+    lambdas = get_lambda_functions(name_filter, details=True, pool=pool, env=env)
+    queues = get_sqs_queues(name_filter, pool=pool, env=env)
+
+    for es in domains:
+        uid = short_uid()
+        node_ids[es.id] = uid
+        result['nodes'].append({'id': uid, 'arn': es.id, 'name': es.name(), 'type': 'es'})
+    for b in buckets:
+        uid = short_uid()
+        node_ids[b.id] = uid
+        result['nodes'].append({'id': uid, 'arn': b.id, 'name': b.name(), 'type': 's3'})
+    for db in dbs:
+        uid = short_uid()
+        node_ids[db.id] = uid
+        result['nodes'].append({'id': uid, 'arn': db.id, 'name': db.name(), 'type': 'dynamodb'})
+    for s in streams:
+        uid = short_uid()
+        node_ids[s.id] = uid
+        result['nodes'].append({'id': uid, 'arn': s.id, 'name': s.name(), 'type': 'kinesis'})
+        for shard in s.shards:
+            uid1 = short_uid()
+            name = re.sub(r'shardId-0*', '', shard.id) or '0'
+            result['nodes'].append({'id': uid1, 'arn': shard.id, 'name': name,
+                'type': 'kinesis_shard', 'streamName': s.name(), 'parent': uid})
+    for f in firehoses:
+        uid = short_uid()
+        node_ids[f.id] = uid
+        result['nodes'].append({'id': uid, 'arn': f.id, 'name': f.name(), 'type': 'firehose'})
+        for d in f.destinations:
+            result['edges'].append({'source': uid, 'target': node_ids[d.id]})
+    for q in queues:
+        uid = short_uid()
+        node_ids[q.id] = uid
+        result['nodes'].append({'id': uid, 'arn': q.id, 'name': q.name(), 'type': 'sqs'})
+    for lda in lambdas:
+        uid = short_uid()
+        node_ids[lda.id] = uid
+        result['nodes'].append({'id': uid, 'arn': lda.id, 'name': lda.name(), 'type': 'lambda'})
+        for s in lda.event_sources:
+            lookup_id = s.id
+            if isinstance(s, DynamoDBStream):
+                lookup_id = s.table.id
+            result['edges'].append({'source': node_ids.get(lookup_id), 'target': uid})
+        for t in lda.targets:
+            lookup_id = t.id
+            result['edges'].append({'source': uid, 'target': node_ids.get(lookup_id)})
+    for b in buckets:
+        for n in b.notifications:
+            src_uid = node_ids[b.id]
+            tgt_uid = node_ids[n.target.id]
+            result['edges'].append({'source': src_uid, 'target': tgt_uid})

    return result
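
The hunks above all apply the same defensive pattern: each resource-listing helper catches any Exception (not just socket.error) and returns whatever partial results it managed to collect, so the graph can still render when a single service call fails. A minimal standalone sketch of that pattern (illustrative only; list_resources_safely and the sample fetchers are hypothetical, not code from this patch):

def list_resources_safely(fetchers):
    # Collect whatever can be fetched; swallow any per-resource failure,
    # mirroring the broadened `except Exception: pass` handling in the patch.
    result = []
    for fetch in fetchers:
        try:
            result.append(fetch())
        except Exception:
            pass
    return result

print(list_resources_safely([lambda: 'kinesis', lambda: 1 / 0, lambda: 's3']))
# -> ['kinesis', 's3']  (the failing fetcher is skipped, the rest still contribute)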