ClassName= LearningParam
// Increase the importance of SOV and decrease importance of
// encoding cost (compared to default)
NetInitCenterWeight = 0 // don't worry about centering weights
NetInitRangeWeight = 0 // don't scale weights for desired variance on sums
NetCenterDecay = 1.0
NetRangeDecay = 1.0
NetResetAfter = 40
QRBestCostMult=1.0
QRBestQMult = 1.0
QRBestSOVMult = 0.5
NetActWrongWeight = 1.0 // weight multiplier for correcting wrong answers
NetActChangeCorrectWeight = 1.0 // weight multiplier for transitions
    // where the correct answer changes
ActPartialKluge = 0 // 0 is off, 1 is weight proportional to prob
LayBaseTimesSeq = 999999. // Use LayMaxBaseRate---not dependent on # seqs
LayMaxBaseRate = 0.0005 // THIS RATE SEEMS TO BE TOO HIGH.
LayWeightOverBase = 1.00
LayWeightOverBaseForMult = 1.5
LayPseudoOverBase = 1.0 // was 0.4
LayPseudoOverBaseForMult =0.1
LayBiasOverBase = 0 // don't learn biases
LayBiasOverBaseForMult = 0.05
LayGainOverBase = 0.0 // don't learn gain
LayGainOverBaseForMult = 0.01
LayDesiredSqOverNumWeights = 0.06
LayGainExtConst = 0 // no weight extinction
LayBiasExtConst = 0
LayPseudoExtConst = 0
LayGainExtConstForMult = 0
LayBiasExtConstForMult = 0
LayPseudoExtConstForMult = 0
LayWindowShapeExp = 0.0 // no shaping to window
LayMaxWeight=25.00
LayMaxWeightChange=0.05
LayMaxPseudo = 10.0
LayMaxPseudoChange = 2.0
LayMaxBias=40.0 // old code limited bias to window size,
    // which may be better
LayMaxBiasChange=0.1
LayMaxGain=2.0 // keep gain in [1/MaxGain, MaxGain]
LayMaxGainChange = 1.5 // scale by [1/GainChange, GainChange]
LayMainObjWeight= 1.0
LayMainObjWeightForMult=1.0
LayMainObjPseudo= 1.0
LayMainObjPseudoForMult = 1.0
LayMainObjBias=0.010
LayMainObjBiasForMult=0.005
LayMainObjGain = 1.0
LayMainObjGainForMult =1.0
LayMaxWeightRateFactor = 2.0
LayMaxPseudoRateFactor = 2.0
LayMaxBiasRateFactor = 1.1
LayMaxGainRateFactor = 1.1
LayWeightRateDecay= 1.0
LayPseudoRateDecay = 1.0
LayBiasRateDecayForLowerCost = 1.0
LayBiasRateDecayForHigherCost= 1.0
LayGainRateDecayForLowerCost = 1.0
LayGainRateDecayForHigherCost= 1.0
LayRangeMult = 0.6
LayRangeDenom = 0.4
LayRangeMinForMult = 0.1
LayTotalInitialPseudo = 0.1
EndClassName= LearningParam