Python* API Reference for Intel® Data Analytics Acceleration Library 2020 Update 1

prelu_layer_dense_batch.py

1 # file: prelu_layer_dense_batch.py
2 #===============================================================================
3 # Copyright 2014-2020 Intel Corporation
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #===============================================================================
17 
18 #
19 # ! Content:
20 # ! Python example of forward and backward parametric rectified linear unit (prelu) layer usage
21 # !
22 # !*****************************************************************************
23 
24 #
25 
26 
27 #
28 
import os
import sys

from daal.algorithms.neural_networks import layers
from daal.data_management import HomogenTensor, Tensor

# The shared example helpers (printTensor, readTensorFromCSV) live one
# directory above this script; make that folder importable before the import.
_parent_dir = os.path.dirname(os.path.dirname(__file__))
utils_folder = os.path.realpath(os.path.abspath(_parent_dir))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printTensor, readTensorFromCSV
39 
# Input data set parameters: both the layer input and the prelu weights are
# read from the same sample CSV shipped with the examples.
datasetName = os.path.join("..", "data", "batch", "layer.csv")
weightsName = os.path.join("..", "data", "batch", "layer.csv")

# prelu parameters: index of the starting data dimension the weights apply to,
# and the number of weight dimensions.
dataDimension = 0
weightsDimension = 2

if __name__ == "__main__":

    # Load the input data and the weights into tensors.
    inputTensor = readTensorFromCSV(datasetName)
    weightsTensor = readTensorFromCSV(weightsName)

    # Forward prelu layer (default method); weights are supplied explicitly,
    # so tell the layer not to initialize them itself.
    forwardLayer = layers.prelu.forward.Batch()
    forwardLayer.parameter.dataDimension = dataDimension
    forwardLayer.parameter.weightsDimension = weightsDimension
    forwardLayer.parameter.weightsAndBiasesInitialized = True

    # Attach the input objects and run the forward pass.
    forwardLayer.input.setInput(layers.forward.data, inputTensor)
    forwardLayer.input.setInput(layers.forward.weights, weightsTensor)
    forwardResult = forwardLayer.compute()

    printTensor(forwardResult.getResult(layers.forward.value),
                "Forward prelu layer result (first 5 rows):", 5)

    # Fabricate an input gradient with the same shape as the forward output,
    # filled with the constant 0.01.
    gradientDims = forwardResult.getResult(layers.forward.value).getDimensions()
    gradientTensor = HomogenTensor(gradientDims, Tensor.doAllocate, 0.01)

    # Backward prelu layer (default method), configured consistently with the
    # forward layer.
    backwardLayer = layers.prelu.backward.Batch()
    backwardLayer.parameter.dataDimension = dataDimension
    backwardLayer.parameter.weightsDimension = weightsDimension

    # The backward pass needs the incoming gradient plus the data the forward
    # pass stashed for it.
    backwardLayer.input.setInput(layers.backward.inputGradient, gradientTensor)
    backwardLayer.input.setInputLayerData(
        layers.backward.inputFromForward,
        forwardResult.getResultLayerData(layers.forward.resultForBackward))
    backwardResult = backwardLayer.compute()

    printTensor(backwardResult.getResult(layers.backward.gradient),
                "Backward prelu layer result (first 5 rows):", 5)
    printTensor(backwardResult.getResult(layers.backward.weightDerivatives),
                "Weights derivative (first 5 rows):", 5)

For more complete information about compiler optimizations, see our Optimization Notice.