ClassName = LearningParam
// Increase the importance of SOV and decrease importance of
// encoding cost (compared to default)
// Low weight for everything except main objective
NetInitCenterWeight = 1.0e-9	// keep sums centered on 0
NetInitRangeWeight = 1.0e-9	// scale weights for desired variance on sums
NetCenterDecay = 0.95
NetRangeDecay = 0.99
NetResetAfter = 15
NetResetWhenWorseBy = 1.3	// useless parameter
NetFractionWrongExponent = 0.5	// weight chains by sqrt(1-Q)
QRBestCostMult = 1.0
QRBestQMult = 0
QRBestSOVMult = 0
NetActWrongWeight = 1.5	// extra weight for correcting wrong answers
NetActChangeCorrectWeight = 1.80	// extra weight for transitions
	// where the correct answer changes
ActPartialKluge = 0.1
LayBaseTimesSeq = 0.01	// very slow learning
LayMaxBaseRate = 0.09	// for single training example
LayWeightOverBase = 1.00
LayWeightOverBaseForMult = 1.5
LayPseudoOverBase = 0.01
LayPseudoOverBaseForMult = 0.1
LayBiasOverBase = 0.03
LayBiasOverBaseForMult = 0.05
LayGainOverBase = 0.01
LayGainOverBaseForMult = 0.01
LayDesiredSqOverNumWeights = 0.07
LayGainExtConst = 1.5e-5
LayBiasExtConst = 4.0e-6
LayPseudoExtConst = 8.0e-6
LayGainExtConstForMult = 2.0e-5
LayBiasExtConstForMult = 4.0e-5
LayPseudoExtConstForMult = 4.0e-5
LayWindowShapeExp = 0.1
LayMaxWeight = 10.00
LayMaxWeightChange = 0.03	// changed from 0.01
LayMaxPseudo = 10.0
LayMaxPseudoChange = 2.0
LayMaxBias = 10.0	// old code limited bias to window size,
	// which may be better
LayMaxBiasChange = 0.1
LayMaxGain = 2.0	// keep gain in [1/MaxGain, MaxGain]
LayMaxGainChange = 1.5	// scale by [1/GainChange, GainChange]
LayMainObjWeight = 1.0
LayMainObjWeightForMult = 1.0
LayMainObjPseudo = 1.0
LayMainObjPseudoForMult = 1.0
LayMainObjBias = 1.0
LayMainObjBiasForMult = 0.5
LayMainObjGain = 1.0
LayMainObjGainForMult = 1.0
LayMaxWeightRateFactor = 2.0
LayMaxPseudoRateFactor = 2.0
LayMaxBiasRateFactor = 1.1
LayMaxGainRateFactor = 1.1
LayWeightRateDecay = 0.995
LayPseudoRateDecay = 0.99
LayBiasRateDecayForLowerCost = 1.01
LayBiasRateDecayForHigherCost = 0.88
LayGainRateDecayForLowerCost = 1.01
LayGainRateDecayForHigherCost = 0.7
LayRangeMult = 1
LayRangeDenom = 10
LayRangeMinForMult = 0.1
LayTotalInitialPseudo = 0.1
EndClassName = LearningParam