[[["容易理解","easyToUnderstand","thumb-up"],["確實解決了我的問題","solvedMyProblem","thumb-up"],["其他","otherUp","thumb-up"]],[["缺少我需要的資訊","missingTheInformationINeed","thumb-down"],["過於複雜/步驟過多","tooComplicatedTooManySteps","thumb-down"],["過時","outOfDate","thumb-down"],["翻譯問題","translationIssue","thumb-down"],["示例/程式碼問題","samplesCodeIssue","thumb-down"],["其他","otherDown","thumb-down"]],["上次更新時間:2024-11-08 (世界標準時間)。"],[[["Backpropagation is the primary training algorithm for neural networks, enabling gradient descent for multi-layer networks and often handled automatically by machine learning libraries."],["Vanishing gradients occur when gradients in lower layers become very small, hindering their training, and can be mitigated by using ReLU activation function."],["Exploding gradients happen when large weights cause excessively large gradients, disrupting convergence, and can be addressed with batch normalization or lowering the learning rate."],["Dead ReLU units emerge when a ReLU unit's output gets stuck at 0, halting gradient flow, and can be avoided by lowering the learning rate or using ReLU variants like LeakyReLU."],["Dropout regularization is a technique to prevent overfitting by randomly dropping unit activations during training, with higher dropout rates indicating stronger regularization."]]],[]]