This is my encoder configuration:
# One ScalarEncoder per input field. The event fields are binary flags
# (minval 0, maxval 1); baseline, pressure and flow span their observed ranges.
eventEncoder = ScalarEncoder(name="event", w=7, n=14, minval=0, maxval=1, forced=True)
eventEncoder1 = ScalarEncoder(name="event1", w=7, n=14, minval=0, maxval=1, forced=True)
eventEncoder7 = ScalarEncoder(name="event7", w=7, n=14, minval=0, maxval=1, forced=True)
eventEncoder2 = ScalarEncoder(name="event2", w=7, n=14, minval=0, maxval=1, forced=True)
baselineEncoder = ScalarEncoder(name="baseline", w=21, n=315, minval=49, maxval=64, forced=True)
pressEncoder = ScalarEncoder(name="pressure", w=21, n=462, minval=44, maxval=66, forced=True)
flowEncoder = ScalarEncoder(name="flow", w=11, n=143, minval=0, maxval=13, forced=True)

# Total input width for the Spatial Pooler: the widths are summed in the same
# order the bit arrays are concatenated (event/flow/baseline, once per device).
encodingWidth = sum(enc.getWidth() for enc in (
    eventEncoder, flowEncoder, baselineEncoder,
    eventEncoder1, flowEncoder, baselineEncoder,
    eventEncoder2, flowEncoder, baselineEncoder,
))
This is my model:
# Stream the CSV, encode every field into SDR bit arrays, run Spatial
# Pooling + Temporal Memory with learning enabled, and train one
# classifier per predicted device field.
with open("test3.csv", "r") as fin:
    reader = csv.reader(fin)
    headers = next(reader)
    # Skip the two NuPIC-style metadata rows that follow the header.
    next(reader)
    next(reader)

    for count, record in enumerate(reader):
        print("Count %d" % count)
        if count >= numRecords:
            break

        # Parse the raw CSV strings into floats for the encoders.
        event_value = float(record[2])    # device 1
        event_value_3 = float(record[4])  # device 3
        event_value_2 = float(record[3])  # device 2
        # event_value_7 = float(record[8])  # device 7 (currently unused)
        bezline_all = float(record[10])
        pres_data = float(record[11])
        flow_value = float(record[0])

        # Zero-filled numpy buffers for the encoders to populate.
        eventBits = numpy.zeros(eventEncoder.getWidth())
        eventBits_2 = numpy.zeros(eventEncoder2.getWidth())
        eventBits_3 = numpy.zeros(eventEncoder1.getWidth())
        presBits = numpy.zeros(pressEncoder.getWidth())
        baseline_Bits = numpy.zeros(baselineEncoder.getWidth())
        flowBits = numpy.zeros(flowEncoder.getWidth())

        # Encode each scalar into its bit representation.
        eventEncoder.encodeIntoArray(event_value, eventBits)
        eventEncoder1.encodeIntoArray(event_value_3, eventBits_3)
        eventEncoder2.encodeIntoArray(event_value_2, eventBits_2)
        pressEncoder.encodeIntoArray(pres_data, presBits)
        baselineEncoder.encodeIntoArray(bezline_all, baseline_Bits)
        flowEncoder.encodeIntoArray(flow_value, flowBits)

        # Concatenate everything into one large encoding for Spatial Pooling.
        # NOTE(review): presBits is encoded above but never concatenated, so
        # the pressure field never reaches the model — confirm this is intended.
        encoding = numpy.concatenate([
            eventBits, flowBits, baseline_Bits,
            eventBits_2, flowBits, baseline_Bits,
            eventBits_3, flowBits, baseline_Bits,
        ])

        # Output buffer for active columns; must match the Spatial Pooler's
        # column dimensions. Populated in place by sp.compute().
        activeColumns = numpy.zeros(spParams["columnCount"])

        # Spatial Pooling over the input encoding (learning enabled).
        sp.compute(encoding, True, activeColumns)
        activeColumnIndices = numpy.nonzero(activeColumns)[0]

        # Temporal Memory over the active mini-columns (learning enabled).
        tm.compute(activeColumnIndices, learn=True)
        activeCells = tm.getActiveCells()

        # Bucket indices map each raw value onto its classifier bucket.
        bucketIdx = eventEncoder.getBucketIndices(event_value)[0]
        bucketIdx_2 = eventEncoder2.getBucketIndices(event_value_2)[0]
        bucketIdx_3 = eventEncoder1.getBucketIndices(event_value_3)[0]

        # Train the classifiers (learn only, no inference during this pass).
        classifierResult = classifier.compute(
            recordNum=count,
            patternNZ=activeCells,
            classification={"bucketIdx": bucketIdx, "actValue": event_value},
            learn=True,
            infer=False,
        )
        classifierResult1 = classifier1.compute(
            recordNum=count,
            patternNZ=activeCells,
            classification={"bucketIdx": bucketIdx_3, "actValue": event_value_3},
            learn=True,
            infer=False,
        )
        classifierResult2 = classifier2.compute(
            recordNum=count,
            patternNZ=activeCells,
            classification={"bucketIdx": bucketIdx_2, "actValue": event_value_2},
            learn=True,
            infer=False,
        )

# Report elapsed training time. BUG FIX: the original printed
# (learning_time - learning_time_end), i.e. start minus end, which is
# always negative; elapsed time is end minus start.
learning_time_end = time()
print("Time %s" % (learning_time_end - learning_time))
# Persist the trained Spatial Pooler and Temporal Memory to disk so a
# later run can reload them instead of retraining from scratch.
with open("out_sp.tmp", "wb") as sp_out:
    sp.writeToFile(sp_out)
with open("out_tm.tmp", "wb") as tm_out:
    tm.writeToFile(tm_out)
My data looks like columns of digits; maybe I don't understand your question about the data. Sorry.