@@ -5,15 +5,15 @@ layer {
   top: "source_data"
   top: "source_label"
   image_data_param {
-    source: "./data/office/webcam_list.txt"
+    source: "./data/office/amazon_list.txt"
     batch_size: 64
     new_height: 256
     new_width: 256
     shuffle: true
   }
   transform_param {
     crop_size: 227
-    mirror: true
+    mirror: true
     mean_file: "./data/ilsvrc12/imagenet_mean.binaryproto"
   }
   include: { phase: TRAIN }
@@ -24,7 +24,7 @@ layer {
   top: "target_data"
   top: "target_label"
   image_data_param {
-    source: "./data/office/amazon_list.txt"
+    source: "./data/office/webcam_list.txt"
     batch_size: 64
     new_height: 256
     new_width: 256
@@ -49,7 +49,7 @@ layer {
   top: "data"
   top: "label"
   image_data_param {
-    source: "./data/office/amazon_list.txt"
+    source: "./data/office/webcam_list.txt"
     batch_size: 1
     new_height: 256
     new_width: 256
@@ -381,7 +381,7 @@ layer {
   type: "InnerProduct"
   bottom: "fc7"
   top: "fc8"
-  # blobs_lr is set to higher than for other layers, because this layer is starting from random while the others are already trained
+  # blobs_lr is set higher than for the other layers, since this layer starts from random weights while the others are fine-tuned
   param {
     lr_mult: 1
     decay_mult: 1
@@ -421,12 +421,6 @@ layer {
   }
   include: { phase: TRAIN }
 }
-layer {
-  name: "silence"
-  type: "Silence"
-  bottom: "fc8_target"
-  include: { phase: TRAIN }
-}
 layer {
   name: "softmax_loss"
   type: "SoftmaxWithLoss"
@@ -435,24 +429,19 @@ layer {
   top: "softmax_loss"
   include: { phase: TRAIN }
 }
+layer {
+  name: "silence_fc8_target"
+  type: "Silence"
+  bottom: "fc8_target"
+  include: { phase: TRAIN }
+}
 layer {
   name: "fc8_softmax"
   type: "Softmax"
   bottom: "fc8"
   top: "fc8_softmax"
   include: { phase: TRAIN }
 }
-layer {
-  name: "slice_fc7"
-  type: "Slice"
-  bottom: "fc7"
-  top: "fc7_source"
-  top: "fc7_target"
-  slice_param {
-    slice_dim: 0
-  }
-  include: { phase: TRAIN }
-}
 layer {
   name: "slice_softmax"
   type: "Slice"
@@ -467,18 +456,46 @@ layer {
 layer {
   name: "jmmd_loss"
   type: "JMMDLoss"
-  bottom: "fc7_source"
-  bottom: "fc7_target"
   bottom: "source_softmax"
   bottom: "target_softmax"
-  loss_weight: 0.3 #best 0.3 for our tasks, can be tuned within [0.1, 1.0]
+  bottom: "source_softmax"
+  bottom: "target_softmax"
+  loss_weight: 0.3 # best 0.3 for our tasks, can be tuned within [0.1, 1.0]
   top: "jmmd_loss"
   include: { phase: TRAIN }
 }
 layer {
-  name: "silence_loss_value"
+  name: "silence_jmmd_loss"
   type: "Silence"
   bottom: "jmmd_loss"
   include: { phase: TRAIN }
-  # we don't calculate jmmd loss value in our code
 }
+# The following three layers could be added for better performance
+#layer {
+#  name: "slice_fc7"
+#  type: "Slice"
+#  bottom: "fc7"
+#  top: "fc7_source"
+#  top: "fc7_target"
+#  slice_param {
+#    slice_dim: 0
+#  }
+#  include: { phase: TRAIN }
+#}
+#layer {
+#  name: "jmmd_loss_fc7"
+#  type: "JMMDLoss"
+#  bottom: "fc7_source"
+#  bottom: "fc7_target"
+#  bottom: "source_softmax"
+#  bottom: "target_softmax"
+#  loss_weight: 0.3 # best 0.3 for our tasks, can be tuned within [0.1, 1.0]
+#  top: "jmmd_loss_fc7"
+#  include: { phase: TRAIN }
+#}
+#layer {
+#  name: "silence_jmmd_loss_fc7"
+#  type: "Silence"
+#  bottom: "jmmd_loss_fc7"
+#  include: { phase: TRAIN }
+#}
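
For reference, enabling the optional block above amounts to deleting the leading # marks. The result would read as follows (layer names, bottoms, and tops are taken verbatim from the comments, so this is a sketch of the intended configuration, not a tested one):

layer {
  name: "slice_fc7"
  type: "Slice"
  bottom: "fc7"
  top: "fc7_source"
  top: "fc7_target"
  slice_param {
    slice_dim: 0   # split along the batch axis
  }
  include: { phase: TRAIN }
}
layer {
  name: "jmmd_loss_fc7"
  type: "JMMDLoss"
  bottom: "fc7_source"
  bottom: "fc7_target"
  bottom: "source_softmax"
  bottom: "target_softmax"
  loss_weight: 0.3 # best 0.3 for our tasks, can be tuned within [0.1, 1.0]
  top: "jmmd_loss_fc7"
  include: { phase: TRAIN }
}
layer {
  name: "silence_jmmd_loss_fc7"
  type: "Silence"
  bottom: "jmmd_loss_fc7"
  include: { phase: TRAIN }
}

Here slice_dim: 0 splits the fc7 batch into its source and target halves (presumably with the source minibatch stacked first), the second JMMDLoss aligns fc7 features jointly with the softmax outputs, and the Silence layer discards the loss top that nothing else consumes.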
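As background, and as an assumption on my part since the JMMDLoss implementation is not part of this diff: if the layer follows the joint MMD of Joint Adaptation Networks (Long et al., 2017), each source/target pair of bottoms supplies the activations z^{sl} and z^{tl} of one layer l, and the empirical loss multiplies one kernel per layer:

\hat{D}_{\mathcal{L}}(P,Q)
  = \frac{1}{n_s^2} \sum_{i,j=1}^{n_s} \prod_{l \in \mathcal{L}} k^l(z_i^{sl}, z_j^{sl})
  + \frac{1}{n_t^2} \sum_{i,j=1}^{n_t} \prod_{l \in \mathcal{L}} k^l(z_i^{tl}, z_j^{tl})
  - \frac{2}{n_s n_t} \sum_{i=1}^{n_s} \sum_{j=1}^{n_t} \prod_{l \in \mathcal{L}} k^l(z_i^{sl}, z_j^{tl})

Under that reading, feeding the softmax bottoms twice in the jmmd_loss layer squares the softmax kernel, while the optional fc7 variant pairs an fc7 kernel with the softmax kernel. In either case Caffe adds loss_weight * jmmd_loss (0.3 here) to softmax_loss to form the total training objective.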