Python* API Reference for Intel® Data Analytics Acceleration Library 2020 Update 1

batch_norm_layer_dense_batch.py

# file: batch_norm_layer_dense_batch.py
#===============================================================================
# Copyright 2014-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================

#
# ! Content:
# ! Python example of forward and backward batch normalization layer usage
# !
# !*****************************************************************************

import os
import sys

from daal.algorithms.neural_networks import layers
from daal.data_management import HomogenTensor, TensorIface

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printTensor, readTensorFromCSV

# Input data set name
datasetFileName = os.path.join("..", "data", "batch", "layer.csv")
dimension = 1

if __name__ == "__main__":

    # Read datasetFileName from a file and create a tensor to store input data
    data = readTensorFromCSV(datasetFileName)

    printTensor(data, "Forward batch normalization layer input (first 5 rows):", 5)

    # Get collection of dimension sizes of the input data tensor
    dataDims = data.getDimensions()
    dimensionSize = dataDims[dimension]

    # Create a collection of dimension sizes of input weights, biases, population mean and variance tensors
    dimensionSizes = [dimensionSize]

    # Create input weights, biases, population mean and population variance tensors
    weights = HomogenTensor(dimensionSizes, TensorIface.doAllocate, 1.0)
    biases = HomogenTensor(dimensionSizes, TensorIface.doAllocate, 2.0)
    populationMean = HomogenTensor(dimensionSizes, TensorIface.doAllocate, 0.0)
    populationVariance = HomogenTensor(dimensionSizes, TensorIface.doAllocate, 0.0)

    # Create an algorithm to compute forward batch normalization layer results using default method
    forwardLayer = layers.batch_normalization.forward.Batch()
    forwardLayer.parameter.dimension = dimension
    forwardLayer.input.setInput(layers.forward.data, data)
    forwardLayer.input.setInput(layers.forward.weights, weights)
    forwardLayer.input.setInput(layers.forward.biases, biases)
    forwardLayer.input.setInputLayerData(layers.batch_normalization.forward.populationMean, populationMean)
    forwardLayer.input.setInputLayerData(layers.batch_normalization.forward.populationVariance, populationVariance)

    # Compute forward batch normalization layer results
    forwardResult = forwardLayer.compute()

    printTensor(forwardResult.getResult(layers.forward.value), "Forward batch normalization layer result (first 5 rows):", 5)
    printTensor(forwardResult.getLayerData(layers.batch_normalization.auxMean), "Mini-batch mean (first 5 values):", 5)
    printTensor(forwardResult.getLayerData(layers.batch_normalization.auxStandardDeviation), "Mini-batch standard deviation (first 5 values):", 5)
    printTensor(forwardResult.getLayerData(layers.batch_normalization.auxPopulationMean), "Population mean (first 5 values):", 5)
    printTensor(forwardResult.getLayerData(layers.batch_normalization.auxPopulationVariance), "Population variance (first 5 values):", 5)

    # Create input gradient tensor for backward batch normalization layer
    inputGradientTensor = HomogenTensor(dataDims, TensorIface.doAllocate, 10.0)

    # Create an algorithm to compute backward batch normalization layer results using default method
    backwardLayer = layers.batch_normalization.backward.Batch()
    backwardLayer.parameter.dimension = dimension
    backwardLayer.input.setInput(layers.backward.inputGradient, inputGradientTensor)
    backwardLayer.input.setInputLayerData(layers.backward.inputFromForward, forwardResult.getResultLayerData(layers.forward.resultForBackward))

    # Compute backward batch normalization layer results
    backwardResult = backwardLayer.compute()

    printTensor(backwardResult.getResult(layers.backward.gradient), "Backward batch normalization layer result (first 5 rows):", 5)
    printTensor(backwardResult.getResult(layers.backward.weightDerivatives), "Weight derivatives (first 5 values):", 5)
    printTensor(backwardResult.getResult(layers.backward.biasDerivatives), "Bias derivatives (first 5 values):", 5)

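The forward layer applies the standard batch normalization transform along the dimension selected by the dimension parameter: the data are normalized with the mini-batch mean and standard deviation, then scaled by the weights and shifted by the biases. The NumPy sketch below only illustrates the quantities the example prints for a 2-D tensor with dimension = 1 (per-column statistics); the epsilon value and the biased variance estimate are assumptions, not values taken from the library.

# Illustrative NumPy sketch of the forward batch normalization transform.
# Assumptions (not taken from the library): eps = 1e-5, biased variance estimate.
import numpy as np

def batch_norm_forward_sketch(x, weights, biases, eps=1e-5):
    # x is a 2-D array; with dimension = 1 the statistics are computed per column
    mean = x.mean(axis=0)                         # mini-batch mean (auxMean)
    std = np.sqrt(x.var(axis=0) + eps)            # mini-batch standard deviation (auxStandardDeviation)
    x_hat = (x - mean) / std                      # normalized data
    return weights * x_hat + biases, mean, std    # scaled and shifted result

x = np.random.rand(10, 4)
value, mean, std = batch_norm_forward_sketch(x, np.ones(4), 2.0 * np.ones(4))
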
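Similarly, the backward layer's outputs correspond to the textbook batch normalization gradients: the bias derivatives sum the incoming gradient over the mini-batch, the weight derivatives sum it against the normalized data, and the input gradient combines both corrections. The sketch below uses these standard formulas; it is meant only to illustrate the printed quantities, not to reproduce the library's kernel.

# Illustrative NumPy sketch of the standard batch normalization gradients.
# g plays the role of inputGradient in the example above; eps is an assumption.
import numpy as np

def batch_norm_backward_sketch(x, g, weights, eps=1e-5):
    mean = x.mean(axis=0)
    std = np.sqrt(x.var(axis=0) + eps)
    x_hat = (x - mean) / std

    bias_derivatives = g.sum(axis=0)                 # biasDerivatives
    weight_derivatives = (g * x_hat).sum(axis=0)     # weightDerivatives
    # standard input gradient of batch normalization
    gradient = (weights / std) * (g - g.mean(axis=0) - x_hat * (g * x_hat).mean(axis=0))
    return gradient, weight_derivatives, bias_derivatives

x = np.random.rand(10, 4)
g = 10.0 * np.ones_like(x)    # mirrors the constant inputGradientTensor above
grad, dW, db = batch_norm_backward_sketch(x, g, np.ones(4))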