Python* API Reference for Intel® Data Analytics Acceleration Library 2020 Update 1

df_cls_dense_batch.py

# file: df_cls_dense_batch.py
#===============================================================================
# Copyright 2014-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================

import os
import sys

from daal.algorithms import decision_forest
from daal.algorithms.decision_forest.classification import prediction, training
from daal.algorithms import classifier
from daal.data_management import (
    FileDataSource, DataSourceIface, NumericTableIface, HomogenNumericTable,
    MergedNumericTable, features
)

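# Make the shared example utilities (printNumericTable, printNumericTables) importable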
utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printNumericTable, printNumericTables

DAAL_PREFIX = os.path.join('..', 'data')

# Input data set parameters
trainDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'df_classification_train.csv')
testDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'df_classification_test.csv')

nFeatures = 3
nClasses = 5

# Decision forest parameters
nTrees = 10
minObservationsInLeafNode = 8

# Model object for the decision forest classification algorithm
model = None
predictionResult = None
testGroundTruth = None


def trainModel():
    global model

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the input data from a .csv file
    trainDataSource = FileDataSource(
        trainDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for training data and labels
    trainData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    trainGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(trainData, trainGroundTruth)
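    # Merging the tables lets loadDataBlock() fill the feature columns and the label column in one pass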

    # Retrieve the data from the input file
    trainDataSource.loadDataBlock(mergedData)

    # Get the dictionary and update it with additional information about data
    dict = trainData.getDictionary()

    # Add a feature type to the dictionary
    dict[0].featureType = features.DAAL_CONTINUOUS
    dict[1].featureType = features.DAAL_CONTINUOUS
    dict[2].featureType = features.DAAL_CATEGORICAL

    # Create an algorithm object to train the decision forest classification model
    algorithm = training.Batch(nClasses)
    algorithm.parameter.nTrees = nTrees
    algorithm.parameter.minObservationsInLeafNode = minObservationsInLeafNode
    algorithm.parameter.featuresPerNode = nFeatures
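    # Request MDI (Mean Decrease Impurity) variable importance and the out-of-bag error estimate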
    algorithm.parameter.varImportance = decision_forest.training.MDI
    algorithm.parameter.resultsToCompute = decision_forest.training.computeOutOfBagError

    # Pass the training data set and dependent values to the algorithm
    algorithm.input.set(classifier.training.data, trainData)
    algorithm.input.set(classifier.training.labels, trainGroundTruth)

    # Train the decision forest classification model and retrieve the results of the training algorithm
    trainingResult = algorithm.compute()
    model = trainingResult.get(classifier.training.model)
    printNumericTable(trainingResult.getTable(training.variableImportance), "Variable importance results: ")
    printNumericTable(trainingResult.getTable(training.outOfBagError), "OOB error: ")


def testModel():
    global testGroundTruth, predictionResult

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the test data from a .csv file
    testDataSource = FileDataSource(
        testDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for testing data and labels
    testData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    testGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(testData, testGroundTruth)

    # Retrieve the data from the input file
    testDataSource.loadDataBlock(mergedData)

    # Get the dictionary and update it with additional information about data
    dict = testData.getDictionary()

    # Add a feature type to the dictionary
    dict[0].featureType = features.DAAL_CONTINUOUS
    dict[1].featureType = features.DAAL_CONTINUOUS
    dict[2].featureType = features.DAAL_CATEGORICAL

    # Create an algorithm object for decision forest classification prediction with the default method
    algorithm = prediction.Batch(nClasses)

    # Pass the testing data set and trained model to the algorithm
    algorithm.input.setTable(classifier.prediction.data, testData)
    algorithm.input.setModel(classifier.prediction.model, model)

    # Compute prediction results and retrieve algorithm results
    # (Result class from classifier.prediction)
    predictionResult = algorithm.compute()


def printResults():
    printNumericTable(predictionResult.get(classifier.prediction.prediction), "Decision forest prediction results (first 10 rows):", 10)
    printNumericTable(testGroundTruth, "Ground truth (first 10 rows):", 10)


if __name__ == "__main__":
    trainModel()
    testModel()
    printResults()
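
The example prints its output with the printNumericTable helper. If you want to post-process the predictions instead, the sketch below (not part of the original example) copies a NumericTable into a NumPy array; it assumes the BlockDescriptor and readOnly helpers exported by daal.data_management and the global names defined above.

import numpy as np
from daal.data_management import BlockDescriptor, readOnly

def tableToArray(table):
    # Copy all rows of a DAAL NumericTable into a NumPy array
    # (assumes the generic BlockDescriptor wrapper; a typed variant such as
    # BlockDescriptor_Float64 could be used instead)
    block = BlockDescriptor()
    table.getBlockOfRows(0, table.getNumberOfRows(), readOnly, block)
    array = np.array(block.getArray(), copy=True)
    table.releaseBlockOfRows(block)
    return array

# Hypothetical usage after testModel() has run:
# predicted = tableToArray(predictionResult.get(classifier.prediction.prediction))
# actual = tableToArray(testGroundTruth)
# accuracy = float((predicted == actual).mean())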
