diff --git a/assets/scss/custom.scss b/assets/scss/custom.scss index fd6a4ce6..e90e37b4 100644 --- a/assets/scss/custom.scss +++ b/assets/scss/custom.scss @@ -1,6 +1,13 @@ // Override this file to add your own SCSS styling. @use "sass:color"; +body { + line-height: 1.6; +} + +.sidebar, .sidebar a { color: #333333; } +.dark .sidebar, .dark .sidebar a { color: #e0e0e0; } + .container { max-width: 90%; } @@ -196,7 +203,12 @@ html { } .hljs { - background-color: #f9f9f9; + .hljs-title, + .hljs-title.class_, + .hljs-title.class_.inherited__, + .hljs-title.function_ { + color: #7a3e9d; +  } } #TableOfContents li a, diff --git a/content/notes/pytorch-hpc/overview.md b/content/notes/pytorch-hpc/overview.md index 17fc4af2..970c4f03 100644 --- a/content/notes/pytorch-hpc/overview.md +++ b/content/notes/pytorch-hpc/overview.md @@ -96,18 +96,18 @@ Activation functions introduce non-linearity into neural networks, enabling them
- {{< figure src="/courses/pytorch-hpc/img/sigmoid.png" caption="Sigmoid" width="400px" >}} + {{< figure src="/courses/pytorch-hpc/img/sigmoid.png" caption="Sigmoid" alt="Line graph of the Sigmoid activation function showing an S-shaped curve with output ranging from 0 to 1" width="400px" >}}
- {{< figure src="/courses/pytorch-hpc/img/tanh.png" caption="Tanh" width="400px" >}} + {{< figure src="/courses/pytorch-hpc/img/tanh.png" caption="Tanh" alt="Line graph of the Tanh activation function showing an S-shaped curve with output ranging from -1 to 1" width="400px" >}}
- {{< figure src="/courses/pytorch-hpc/img/relu.png" caption="ReLU" width="400px" >}} + {{< figure src="/courses/pytorch-hpc/img/relu.png" caption="ReLU" alt="Line graph of the ReLU activation function showing zero output for negative inputs and a linear increase for positive inputs" width="400px" >}}
- {{< figure src="/courses/pytorch-hpc/img/leakyrelu.png" caption="Leaky ReLU" width="400px" >}} + {{< figure src="/courses/pytorch-hpc/img/leakyrelu.png" caption="Leaky ReLU" alt="Line graph of the Leaky ReLU activation function showing a small negative slope for negative inputs and a linear increase for positive inputs" width="400px" >}}
diff --git a/content/notes/pytorch-hpc/simple_nn.md b/content/notes/pytorch-hpc/simple_nn.md index 418f1598..4729c1eb 100644 --- a/content/notes/pytorch-hpc/simple_nn.md +++ b/content/notes/pytorch-hpc/simple_nn.md @@ -13,7 +13,7 @@ menu: ### **Neural Network Construction** A neural network consists of multiple layers, each performing specific transformations on the input data. -{{< figure src="/courses/pytorch-hpc/img/nn.png" caption="An Artificial Neural Network" width="500px" >}} +{{< figure src="/courses/pytorch-hpc/img/nn.png" caption="An Artificial Neural Network" alt="Diagram of an artificial neural network showing 4 input layer nodes in blue, 5 hidden layer nodes in orange, and 3 output layer nodes in green, with lines connecting each layer" width="500px" >}} Frequently used Layers in PyTorch: ```python diff --git a/layouts/partials/book_sidebar.html b/layouts/partials/book_sidebar.html index aca58d54..83e326fd 100644 --- a/layouts/partials/book_sidebar.html +++ b/layouts/partials/book_sidebar.html @@ -1,4 +1,4 @@ -