I don’t see any differences in my function — it follows the same approach. Maybe you can spot the differences in these functions:
def runLearning(numRecords):
learning_time = time()
with open("test3.csv", "r") as fin:
reader = csv.reader(fin)
headers = reader.next()
reader.next()
reader.next()
for count, record in enumerate(reader):
if count >= numRecords: break
# Convert data string into Python date object.
#dateString = datetime.datetime.strptime(record[0], "%m/%d/%y %H:%M")
# Convert data value string into float.
event_value = float(record[6]) # device 5
event_value_3 = float(record[4]) # device 3
event_value_2 = float(record[3]) #device 2
event_value_7 = float(record[8]) # device 7
bezline_all = float(record[10])
flow_value = float(record[0])
# To encode, we need to provide zero-filled numpy arrays for the encoders
# to populate.
eventBits = numpy.zeros(eventEncoder.getWidth())
eventBits_2 = numpy.zeros(eventEncoder2.getWidth())
eventBits_3 = numpy.zeros(eventEncoder1.getWidth())
eventBits_7 = numpy.zeros(eventEncoder7.getWidth())
baseline_Bits = numpy.zeros(baselineEncoder.getWidth())
flowBits = numpy.zeros(flowEncoder.getWidth())
# Now we call the encoders to create bit representations for each value.
eventEncoder.encodeIntoArray(event_value, eventBits)
eventEncoder1.encodeIntoArray(event_value_3,eventBits_3)
eventEncoder2.encodeIntoArray(event_value_2,eventBits_2)
eventEncoder7.encodeIntoArray(event_value_7,eventBits_7)
baselineEncoder.encodeIntoArray(bezline_all,baseline_Bits)
flowEncoder.encodeIntoArray(flow_value, flowBits)
# Concatenate all these encodings into one large encoding for Spatial
# Pooling.
encoding = numpy.concatenate(
[eventBits,flowBits,baseline_Bits,eventBits_2,flowBits,baseline_Bits,eventBits_3,flowBits,baseline_Bits,eventBits_7,flowBits,baseline_Bits]
)
# Create an array to represent active columns, all initially zero. This
# will be populated by the compute method below. It must have the same
# dimensions as the Spatial Pooler.
activeColumns = numpy.zeros(spParams["columnCount"])
# activeColumns1 = numpy.zeros(spParams["columnCount"])
# Execute Spatial Pooling algorithm over input space.
sp.compute(encoding,True,activeColumns)
# sp.compute(encoding1, True, activeColumns)
activeColumnIndices = numpy.nonzero(activeColumns)[0]
# Execute Temporal Memory algorithm over active mini-columns.
tm.compute(activeColumnIndices, learn=True)
activeCells = tm.getActiveCells()
# Get the bucket info for this input value for classification.
bucketIdx = eventEncoder.getBucketIndices(event_value)[0]
bucketIdx_2 = eventEncoder2.getBucketIndices(event_value_2)[0]
bucketIdx_3 = eventEncoder1.getBucketIndices(event_value_3)[0]
bucketIdx_7 = eventEncoder7.getBucketIndices(event_value_7)[0]
print "BucketIdx_3:",bucketIdx_3
print "BucketIdx:",bucketIdx
# Run classifier to translate active cells back to scalar value.
classifierResult = classifier.compute(
recordNum=count,
patternNZ=activeCells,
classification={
"bucketIdx": bucketIdx,
"actValue": event_value
},
learn=True,
infer=False
)
classifierResult1 = classifier1.compute(
recordNum=count,
patternNZ=activeCells,
classification={
"bucketIdx": bucketIdx_3,
"actValue": event_value_3
},
learn=True,
infer=False
)
classifierResult7 = classifier7.compute(
recordNum=count,
patternNZ=activeCells,
classification={
"bucketIdx": bucketIdx_7,
"actValue": event_value_7
},
learn=True,
infer=False
)
classifierResult2 = classifier2.compute(
recordNum=count,
patternNZ=activeCells,
classification={
"bucketIdx": bucketIdx_2,
"actValue": event_value_2
},
learn=True,
infer=False
)
learning_time_end = time()
print "Time",(learning_time - learning_time_end)
with open("out_sp.tmp", "wb") as f1:
sp.writeToFile(f1)
with open("out_tm.tmp", "wb") as f:
tm.writeToFile(f)
return result