梯度下降法
使用 AdaGrad 拟合 y=wx+b
直线
<script src="https://cdn.bootcss.com/tensorflow/0.14.1/tf.min.js"></script>
// Generate training data set: y = 3x + 1 plus uniform noise in [-0.1, 0.1].
function TargetFx(x) {
  return x.map((v) => 3 * v + 1 + 0.1 * (2 * Math.random() - 1));
}
// 200 evenly spaced samples: 0.00, 0.01, ..., 1.99 (rounded to 2 decimals).
const train_x = Array.from({ length: 200 }, (_, i) => +(0.01 * i).toFixed(2));
const train_y = TargetFx(train_x);
const tensor_x = tf.tensor(train_x, [train_x.length, 1]);
const tensor_y = tf.tensor(train_y, [train_y.length, 1]);
// Trainable variables, randomly initialized.
const w = tf.variable(tf.scalar(Math.random()));
const b = tf.variable(tf.scalar(Math.random()));
// Model: fx = w*x + b. tf.tidy disposes the intermediate tensors
// created by mul/add so they don't accumulate across iterations.
const ModelFx = (x) => tf.tidy(() => w.mul(x).add(b));
const iterations = 200;
const learningRate = 1;
// https://js.tensorflow.org/api/0.14.1/#train.adagrad
const optimizer = tf.train.adagrad(learningRate);
// Loss = mean((pred - label)^2), i.e. mean squared error.
const loss = (pred, label) => pred.sub(label).square().mean();
for (let iter = 0; iter < iterations; iter++) {
  // returnCost=true makes minimize() return the loss tensor; dispose it
  // after printing, otherwise one scalar tensor leaks per iteration.
  const cost = optimizer.minimize(() => loss(ModelFx(tensor_x), tensor_y), true);
  cost.print();
  cost.dispose();
}
使用 sgd 拟合 y=ax²+bx+c
曲线
// Generate training data set: y = x^2 + 3x + 1 plus uniform noise in [-0.1, 0.1].
function TargetFx(x) {
  return x.map((v) => Math.pow(v, 2) + 3 * v + 1 + 0.1 * (2 * Math.random() - 1));
}
// 200 evenly spaced samples: 0.00, 0.01, ..., 1.99 (rounded to 2 decimals).
const train_x = Array.from({ length: 200 }, (_, i) => +(0.01 * i).toFixed(2));
const train_y = TargetFx(train_x);
const tensor_x = tf.tensor(train_x, [train_x.length, 1]);
const tensor_y = tf.tensor(train_y, [train_y.length, 1]);
// Trainable variables, randomly initialized.
const a = tf.variable(tf.scalar(Math.random()));
const b = tf.variable(tf.scalar(Math.random()));
const c = tf.variable(tf.scalar(Math.random()));
// Model: fx = a*x^2 + b*x + c. tf.tidy reclaims the intermediates.
const ModelFx = (x) => tf.tidy(() => a.mul(x.square()).add(b.mul(x)).add(c));
const iterations = 200;
const learningRate = 0.1;
// https://js.tensorflow.org/api/0.14.1/#train.sgd
const optimizer = tf.train.sgd(learningRate);
// Loss = mean((pred - label)^2), i.e. mean squared error.
const loss = (pred, label) => pred.sub(label).square().mean();
for (let iter = 0; iter < iterations; iter++) {
  // returnCost=true makes minimize() return the loss tensor; dispose it
  // after printing, otherwise one scalar tensor leaks per iteration.
  const cost = optimizer.minimize(() => loss(ModelFx(tensor_x), tensor_y), true);
  cost.print();
  cost.dispose();
}
// Evaluate the fitted curve at a few points outside the training grid step.
const test_x = tf.tensor([0.5, 1, 1.5], [3, 1]);
ModelFx(test_x).print();
使用tf.model拟合曲线
// Generate training data set: y = 3x + 1 plus uniform noise in [-0.1, 0.1].
function TargetFx(x) {
  return x.map((v) => 3 * v + 1 + 0.1 * (2 * Math.random() - 1));
}
// 200 evenly spaced samples: 0.00, 0.01, ..., 1.99 (rounded to 2 decimals).
const train_x = Array.from({ length: 200 }, (_, i) => +(0.01 * i).toFixed(2));
const train_y = TargetFx(train_x);
const tensor_x = tf.tensor(train_x, [train_x.length, 1]);
const tensor_y = tf.tensor(train_y, [train_y.length, 1]);
// Fit the same line with the high-level Layers API instead of a hand-written
// training loop: a single dense unit with 1-d input IS linear regression.
async function trainLinear() {
  const model = tf.sequential();
  model.add(tf.layers.dense({
    units: 1,
    inputShape: [1],
  }));
  model.compile({
    loss: 'meanSquaredError',
    optimizer: 'sgd',
  });
  await model.fit(tensor_x, tensor_y, {
    epochs: 100,
  });
  model.predict(tf.tensor2d([1, 2, 3, 4], [4, 1])).print();
}
// Attach a rejection handler so async training failures are surfaced
// instead of leaving a floating promise.
trainLinear().catch(console.error);
/*
For input x = [1, 2, 3, 4] the output is roughly:
Tensor
    [[4.0171099 ],
     [6.9510436 ],
     [9.8849773 ],
     [12.8189116]]
Compared with y = 3x + 1, the line is fitted quite well.
*/