Python* API Reference for Intel® Data Analytics Acceleration Library 2020 Update 1

initializers_dense_batch.py

1 # file: initializers_dense_batch.py
2 #===============================================================================
3 # Copyright 2014-2020 Intel Corporation
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #===============================================================================
17 
18 #
19 # ! Content:
20 # ! Python example of initializers
21 # !
22 # !*****************************************************************************
23 
24 #
25 
26 
27 #
28 
29 import os
30 import sys
31 
32 from daal.algorithms.neural_networks import layers
33 from daal.algorithms.neural_networks import initializers
34 from daal.data_management import HomogenTensor, TensorIface
35 
# Make the examples' shared ``utils`` helper module (one directory up from
# this script) importable regardless of the current working directory.
_examples_root = os.path.dirname(os.path.dirname(__file__))
utils_folder = os.path.realpath(os.path.abspath(_examples_root))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printTensor
40 
def _apply_initializer(algorithm, tensor, caption):
    # Run one initializer: attach *tensor* as the algorithm input, compute
    # (which fills the tensor in place), then print the result with *caption*.
    algorithm.input.set(initializers.data, tensor)
    algorithm.compute()
    printTensor(tensor, caption)


if __name__ == "__main__":
    # Dimension sizes of the input data tensor
    data_dims = [2, 1, 3, 4]
    data_tensor = HomogenTensor(data_dims, TensorIface.doAllocate)

    # Fill the same tensor three times, once per distribution, printing the
    # contents after each pass.
    _apply_initializer(initializers.truncated_gaussian.Batch(0.0, 1.0),
                       data_tensor,
                       "Data with truncated gaussian distribution:")
    _apply_initializer(initializers.gaussian.Batch(1.0, 0.5),
                       data_tensor,
                       "Data with gaussian distribution:")
    _apply_initializer(initializers.uniform.Batch(-5.0, 5.0),
                       data_tensor,
                       "Data with uniform distribution:")

    # The xavier initializer is exercised through a forward fully-connected
    # layer: assigning it to the layer's weightsInitializer parameter makes
    # compute() fill the layer weights with xavier-distributed values.
    fc_forward = layers.fullyconnected.forward.Batch(5)
    fc_forward.input.setInput(layers.forward.data, data_tensor)
    fc_forward.parameter.weightsInitializer = initializers.xavier.Batch()
    fc_forward.compute()

    # Print the weights that the xavier initializer produced.
    printTensor(fc_forward.input.getInput(layers.forward.weights),
                "Weights filled by xavier initializer:")

For more complete information about compiler optimizations, see our Optimization Notice.