@@ -28,8 +28,7 @@ public class LayersApi
/// <summary>
/// Creates a batch-normalization layer configured from the given arguments.
/// </summary>
/// <param name="axis">Axis to normalize; forwarded to <c>BatchNormalizationArgs.Axis</c>.</param>
/// <param name="momentum">Forwarded to <c>BatchNormalizationArgs.Momentum</c>.</param>
/// <param name="epsilon">Forwarded to <c>BatchNormalizationArgs.Epsilon</c>.</param>
/// <param name="center">Forwarded to <c>BatchNormalizationArgs.Center</c>.</param>
/// <param name="scale">Forwarded to <c>BatchNormalizationArgs.Scale</c>.</param>
/// <param name="beta_initializer">Beta initializer; null falls back to <c>tf.zeros_initializer</c>.</param>
/// <param name="gamma_initializer">Gamma initializer; null falls back to <c>tf.ones_initializer</c>.</param>
/// <param name="moving_mean_initializer">Moving-mean initializer; null falls back to <c>tf.zeros_initializer</c>.</param>
/// <param name="moving_variance_initializer">Moving-variance initializer; null falls back to <c>tf.ones_initializer</c>.</param>
/// <param name="trainable">Forwarded to <c>BatchNormalizationArgs.Trainable</c>.</param>
/// <param name="name">Optional layer name.</param>
/// <param name="renorm">Forwarded to <c>BatchNormalizationArgs.Renorm</c>.</param>
/// <param name="renorm_momentum">Forwarded to <c>BatchNormalizationArgs.RenormMomentum</c>.</param>
/// <returns>A configured <c>BatchNormalization</c> layer (not yet applied to any input).</returns>
// NOTE(review): the `scale` and `beta_initializer` parameter lines were hidden by a
// hunk boundary in the reviewed diff; the defaults below (true / null) follow the
// surrounding parameter pattern — confirm against the full file.
public BatchNormalization BatchNormalization(int axis = -1,
    float momentum = 0.99f,
    float epsilon = 0.001f,
    bool center = true,
    bool scale = true,
    IInitializer beta_initializer = null,
    IInitializer gamma_initializer = null,
    IInitializer moving_mean_initializer = null,
    IInitializer moving_variance_initializer = null,
    bool trainable = true,
    string name = null,
    bool renorm = false,
    float renorm_momentum = 0.99f)
{
    // Package every argument into the strongly-typed args object the layer consumes.
    var args = new BatchNormalizationArgs
    {
        Axis = axis,
        Momentum = momentum,
        Epsilon = epsilon,
        Center = center,
        Scale = scale,
        // Null initializers fall back to the conventional zeros/ones defaults.
        BetaInitializer = beta_initializer ?? tf.zeros_initializer,
        GammaInitializer = gamma_initializer ?? tf.ones_initializer,
        MovingMeanInitializer = moving_mean_initializer ?? tf.zeros_initializer,
        MovingVarianceInitializer = moving_variance_initializer ?? tf.ones_initializer,
        Renorm = renorm,
        RenormMomentum = renorm_momentum,
        Trainable = trainable,
        Name = name
    };
    return new BatchNormalization(args);
}
6660
6761 /// <summary>
6862 ///
@@ -115,53 +109,64 @@ public Conv2D Conv2D(int filters,
115109 Activation = activation ?? keras . activations . Linear
116110 } ) ;
117111
/// <summary>
/// Creates a rank-2 convolution layer, resolving initializer and activation
/// arguments from their Keras-style string names.
/// </summary>
/// <param name="filters">Number of output filters in the convolution.</param>
/// <param name="kernel_size">Convolution window shape; forwarded as-is (no substitute when null).</param>
/// <param name="strides">Stride shape; null falls back to (1, 1).</param>
/// <param name="padding">Padding mode string, "valid" by default.</param>
/// <param name="data_format">Data-format string; forwarded as-is (may be null).</param>
/// <param name="dilation_rate">Dilation shape; null falls back to (1, 1).</param>
/// <param name="groups">Forwarded to <c>Conv2DArgs.Groups</c>.</param>
/// <param name="activation">Activation name resolved via <c>GetActivationByName</c>.</param>
/// <param name="use_bias">Forwarded to <c>Conv2DArgs.UseBias</c>.</param>
/// <param name="kernel_initializer">Initializer name resolved via <c>GetInitializerByName</c>.</param>
/// <param name="bias_initializer">Initializer name resolved via <c>GetInitializerByName</c>.</param>
/// <param name="kernel_regularizer">Accepted for API parity but currently unused.</param>
/// <param name="bias_regularizer">Accepted for API parity but currently unused.</param>
/// <param name="activity_regularizer">Accepted for API parity but currently unused.</param>
/// <returns>A configured <c>Conv2D</c> layer.</returns>
// NOTE(review): the three regularizer parameters are never forwarded to Conv2DArgs —
// confirm whether that omission is intentional or Conv2DArgs lacks the fields.
public Conv2D Conv2D(int filters,
    TensorShape kernel_size = null,
    TensorShape strides = null,
    string padding = "valid",
    string data_format = null,
    TensorShape dilation_rate = null,
    int groups = 1,
    string activation = null,
    bool use_bias = true,
    string kernel_initializer = "glorot_uniform",
    string bias_initializer = "zeros",
    string kernel_regularizer = null,
    string bias_regularizer = null,
    string activity_regularizer = null)
{
    var args = new Conv2DArgs
    {
        Rank = 2,
        Filters = filters,
        KernelSize = kernel_size,
        // (1, 1) is the conventional default for both strides and dilation.
        Strides = strides == null ? (1, 1) : strides,
        Padding = padding,
        DataFormat = data_format,
        DilationRate = dilation_rate == null ? (1, 1) : dilation_rate,
        Groups = groups,
        UseBias = use_bias,
        KernelInitializer = GetInitializerByName(kernel_initializer),
        BiasInitializer = GetInitializerByName(bias_initializer),
        Activation = GetActivationByName(activation)
    };
    return new Conv2D(args);
}
141+
/// <summary>
/// Creates a densely-connected layer from strongly-typed arguments.
/// Null arguments fall back to a linear activation, a glorot-uniform kernel
/// initializer and a zeros bias initializer.
/// </summary>
/// <param name="units">Dimensionality of the output space.</param>
/// <param name="activation">Activation instance; null falls back to <c>keras.activations.Linear</c>.</param>
/// <param name="kernel_initializer">Kernel initializer; null falls back to <c>tf.glorot_uniform_initializer</c>.</param>
/// <param name="bias_initializer">Bias initializer; null falls back to <c>tf.zeros_initializer</c>.</param>
/// <param name="input_shape">Optional input shape, forwarded as-is.</param>
/// <returns>A configured <c>Dense</c> layer.</returns>
public Dense Dense(int units,
    Activation activation = null,
    IInitializer kernel_initializer = null,
    IInitializer bias_initializer = null,
    TensorShape input_shape = null)
{
    var args = new DenseArgs
    {
        Units = units,
        Activation = activation ?? keras.activations.Linear,
        KernelInitializer = kernel_initializer ?? tf.glorot_uniform_initializer,
        BiasInitializer = bias_initializer ?? tf.zeros_initializer,
        InputShape = input_shape
    };
    return new Dense(args);
}
154155
155- return layer . Apply ( inputs ) ;
156- }
/// <summary>
/// Creates a dense layer with the default (linear) activation.
/// </summary>
/// <param name="units">Dimensionality of the output space.</param>
/// <returns>A configured <c>Dense</c> layer.</returns>
public Dense Dense(int units)
{
    var args = new DenseArgs
    {
        Units = units,
        Activation = GetActivationByName("linear")
    };
    return new Dense(args);
}
157162
/// <summary>
/// Creates a dense layer, resolving the activation from its Keras string name
/// via <c>GetActivationByName</c>.
/// </summary>
/// <param name="units">Dimensionality of the output space.</param>
/// <param name="activation">Activation name (e.g. "tanh"); unknown or null names resolve to linear.</param>
/// <param name="input_shape">Optional input shape, forwarded as-is.</param>
/// <returns>A configured <c>Dense</c> layer.</returns>
public Dense Dense(int units,
    string activation = null,
    TensorShape input_shape = null)
{
    var args = new DenseArgs
    {
        Units = units,
        Activation = GetActivationByName(activation),
        InputShape = input_shape
    };
    return new Dense(args);
}
167172
@@ -367,6 +372,12 @@ public ZeroPadding2D ZeroPadding2D(NDArray padding)
367372 Padding = padding
368373 } ) ;
369374
/// <summary>
/// Functional helper that merges the given tensors through an <c>Add</c> layer.
/// </summary>
/// <param name="inputs">Tensors to be added together.</param>
/// <returns>The tensor produced by applying the merge layer to <paramref name="inputs"/>.</returns>
public Tensor add(params Tensor[] inputs)
{
    var layer = new Add(new MergeArgs { Inputs = inputs });
    return layer.Apply(inputs);
}
377+
/// <summary>
/// Creates a global average pooling layer for 2D spatial data using
/// default pooling arguments.
/// </summary>
/// <returns>A configured <c>GlobalAveragePooling2D</c> layer.</returns>
public GlobalAveragePooling2D GlobalAveragePooling2D()
    => new GlobalAveragePooling2D(new Pooling2DArgs());
380+
370381 Activation GetActivationByName ( string name )
371382 => name switch
372383 {
@@ -376,5 +387,14 @@ Activation GetActivationByName(string name)
376387 "tanh" => keras . activations . Tanh ,
377388 _ => keras . activations . Linear
378389 } ;
390+
/// <summary>
/// Resolves a Keras-style initializer name to a concrete initializer instance.
/// Unrecognized names (including null) silently fall back to glorot-uniform.
/// </summary>
/// <param name="name">Initializer name: "glorot_uniform", "zeros" or "ones".</param>
/// <returns>The matching initializer, or <c>tf.glorot_uniform_initializer</c> as fallback.</returns>
IInitializer GetInitializerByName(string name)
{
    switch (name)
    {
        case "zeros":
            return tf.zeros_initializer;
        case "ones":
            return tf.ones_initializer;
        case "glorot_uniform":
        default:
            return tf.glorot_uniform_initializer;
    }
}
379399 }
380400}
0 commit comments