@@ -54,44 +54,44 @@ def xception(inputs,

     #===========ENTRY FLOW==============
     #Block 1
-    net = slim.conv2d(inputs, 32, [3,3], stride=2, padding='valid', biases_initializer=None, scope='block1_conv1')
+    net = slim.conv2d(inputs, 32, [3,3], stride=2, padding='valid', scope='block1_conv1')
     net = slim.batch_norm(net, scope='block1_bn1')
     net = tf.nn.relu(net, name='block1_relu1')
-    net = slim.conv2d(net, 64, [3,3], padding='valid', biases_initializer=None, scope='block1_conv2')
+    net = slim.conv2d(net, 64, [3,3], padding='valid', scope='block1_conv2')
     net = slim.batch_norm(net, scope='block1_bn2')
     net = tf.nn.relu(net, name='block1_relu2')
-    residual = slim.conv2d(net, 128, [1,1], stride=2, biases_initializer=None, scope='block1_res_conv')
+    residual = slim.conv2d(net, 128, [1,1], stride=2, scope='block1_res_conv')
     residual = slim.batch_norm(residual, scope='block1_res_bn')

     #Block 2
-    net = slim.separable_conv2d(net, 128, [3,3], biases_initializer=None, scope='block2_dws_conv1')
+    net = slim.separable_conv2d(net, 128, [3,3], scope='block2_dws_conv1')
     net = slim.batch_norm(net, scope='block2_bn1')
     net = tf.nn.relu(net, name='block2_relu1')
-    net = slim.separable_conv2d(net, 128, [3,3], biases_initializer=None, scope='block2_dws_conv2')
+    net = slim.separable_conv2d(net, 128, [3,3], scope='block2_dws_conv2')
     net = slim.batch_norm(net, scope='block2_bn2')
     net = slim.max_pool2d(net, [3,3], stride=2, padding='same', scope='block2_max_pool')
     net = tf.add(net, residual, name='block2_add')
-    residual = slim.conv2d(net, 256, [1,1], stride=2, biases_initializer=None, scope='block2_res_conv')
+    residual = slim.conv2d(net, 256, [1,1], stride=2, scope='block2_res_conv')
     residual = slim.batch_norm(residual, scope='block2_res_bn')

     #Block 3
     net = tf.nn.relu(net, name='block3_relu1')
-    net = slim.separable_conv2d(net, 256, [3,3], biases_initializer=None, scope='block3_dws_conv1')
+    net = slim.separable_conv2d(net, 256, [3,3], scope='block3_dws_conv1')
     net = slim.batch_norm(net, scope='block3_bn1')
     net = tf.nn.relu(net, name='block3_relu2')
-    net = slim.separable_conv2d(net, 256, [3,3], biases_initializer=None, scope='block3_dws_conv2')
+    net = slim.separable_conv2d(net, 256, [3,3], scope='block3_dws_conv2')
     net = slim.batch_norm(net, scope='block3_bn2')
     net = slim.max_pool2d(net, [3,3], stride=2, padding='same', scope='block3_max_pool')
     net = tf.add(net, residual, name='block3_add')
-    residual = slim.conv2d(net, 728, [1,1], stride=2, biases_initializer=None, scope='block3_res_conv')
+    residual = slim.conv2d(net, 728, [1,1], stride=2, scope='block3_res_conv')
     residual = slim.batch_norm(residual, scope='block3_res_bn')

     #Block 4
     net = tf.nn.relu(net, name='block4_relu1')
-    net = slim.separable_conv2d(net, 728, [3,3], biases_initializer=None, scope='block4_dws_conv1')
+    net = slim.separable_conv2d(net, 728, [3,3], scope='block4_dws_conv1')
     net = slim.batch_norm(net, scope='block4_bn1')
     net = tf.nn.relu(net, name='block4_relu2')
-    net = slim.separable_conv2d(net, 728, [3,3], biases_initializer=None, scope='block4_dws_conv2')
+    net = slim.separable_conv2d(net, 728, [3,3], scope='block4_dws_conv2')
     net = slim.batch_norm(net, scope='block4_bn2')
     net = slim.max_pool2d(net, [3,3], stride=2, padding='same', scope='block4_max_pool')
     net = tf.add(net, residual, name='block4_add')
@@ -102,33 +102,33 @@ def xception(inputs,

     residual = net
     net = tf.nn.relu(net, name=block_prefix+'relu1')
-    net = slim.separable_conv2d(net, 728, [3,3], biases_initializer=None, scope=block_prefix+'dws_conv1')
+    net = slim.separable_conv2d(net, 728, [3,3], scope=block_prefix+'dws_conv1')
     net = slim.batch_norm(net, scope=block_prefix+'bn1')
     net = tf.nn.relu(net, name=block_prefix+'relu2')
-    net = slim.separable_conv2d(net, 728, [3,3], biases_initializer=None, scope=block_prefix+'dws_conv2')
+    net = slim.separable_conv2d(net, 728, [3,3], scope=block_prefix+'dws_conv2')
     net = slim.batch_norm(net, scope=block_prefix+'bn2')
     net = tf.nn.relu(net, name=block_prefix+'relu3')
-    net = slim.separable_conv2d(net, 728, [3,3], biases_initializer=None, scope=block_prefix+'dws_conv3')
+    net = slim.separable_conv2d(net, 728, [3,3], scope=block_prefix+'dws_conv3')
     net = slim.batch_norm(net, scope=block_prefix+'bn3')
     net = tf.add(net, residual, name=block_prefix+'add')


     #========EXIT FLOW============
-    residual = slim.conv2d(net, 1024, [1,1], stride=2, biases_initializer=None, scope='block12_res_conv')
+    residual = slim.conv2d(net, 1024, [1,1], stride=2, scope='block12_res_conv')
     residual = slim.batch_norm(residual, scope='block12_res_bn')
     net = tf.nn.relu(net, name='block13_relu1')
-    net = slim.separable_conv2d(net, 728, [3,3], biases_initializer=None, scope='block13_dws_conv1')
+    net = slim.separable_conv2d(net, 728, [3,3], scope='block13_dws_conv1')
     net = slim.batch_norm(net, scope='block13_bn1')
     net = tf.nn.relu(net, name='block13_relu2')
-    net = slim.separable_conv2d(net, 1024, [3,3], biases_initializer=None, scope='block13_dws_conv2')
+    net = slim.separable_conv2d(net, 1024, [3,3], scope='block13_dws_conv2')
     net = slim.batch_norm(net, scope='block13_bn2')
     net = slim.max_pool2d(net, [3,3], stride=2, padding='same', scope='block13_max_pool')
     net = tf.add(net, residual, name='block13_add')

-    net = slim.separable_conv2d(net, 1536, [3,3], biases_initializer=None, scope='block14_dws_conv1')
+    net = slim.separable_conv2d(net, 1536, [3,3], scope='block14_dws_conv1')
     net = slim.batch_norm(net, scope='block14_bn1')
     net = tf.nn.relu(net, name='block14_relu1')
-    net = slim.separable_conv2d(net, 2048, [3,3], biases_initializer=None, scope='block14_dws_conv2')
+    net = slim.separable_conv2d(net, 2048, [3,3], scope='block14_dws_conv2')
     net = slim.batch_norm(net, scope='block14_bn2')
     net = tf.nn.relu(net, name='block14_relu2')

@@ -162,7 +162,7 @@ def xception_arg_scope(weight_decay=0.00001,
     # Set weight_decay for weights in conv2d and separable_conv2d layers.
     with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                         weights_regularizer=slim.l2_regularizer(weight_decay),
-                        biases_regularizer=slim.l2_regularizer(weight_decay)):
+                        biases_initializer=None):

         # Set parameters for batch_norm. Note: Do not set activation function as it's preset to None already.
         with slim.arg_scope([slim.batch_norm],
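
Net effect of this commit: biases_initializer=None is no longer passed to every conv2d/separable_conv2d call inside xception(); instead it is set once in xception_arg_scope(), where it replaces the previous biases_regularizer. Since every convolution in this network is immediately followed by slim.batch_norm, a per-conv bias is redundant (batch norm's beta offset plays the same role), so no bias variables are created at all. Below is a minimal sketch, not part of this repository, of how slim.arg_scope propagates the default to every wrapped call; tiny_block and its layer scopes are placeholder names.

import tensorflow as tf
slim = tf.contrib.slim

def tiny_block(inputs):
    # Setting biases_initializer=None once in arg_scope applies it to every
    # conv2d and separable_conv2d call in the block, so the convolutions are
    # built without bias variables (batch_norm's beta provides the offset).
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        biases_initializer=None):
        net = slim.conv2d(inputs, 32, [3, 3], scope='conv1')
        net = slim.batch_norm(net, scope='bn1')
        net = tf.nn.relu(net)
        net = slim.separable_conv2d(net, 64, [3, 3], depth_multiplier=1,
                                    scope='dws_conv1')
        net = slim.batch_norm(net, scope='bn2')
        net = tf.nn.relu(net)
    return net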