initial commit
Some checks failed
Build, Test & Publish / Build and Publish Container Image (push) Has been cancelled
Build, Test & Publish / Deploy to Infrastructure (push) Has been cancelled
Build, Test & Publish / Build (push) Has been cancelled

This commit is contained in:
Liam Pietralla 2024-09-05 13:54:08 +10:00
commit 8ad5845efc
57 changed files with 6046 additions and 0 deletions

1
.dockerignore Normal file
View File

@ -0,0 +1 @@
node_modules

96
.github/workflows/pipeline.yml vendored Normal file
View File

@ -0,0 +1,96 @@
# CI/CD pipeline: build the VitePress site on every push/PR to main, then
# (push to main only) publish a container image to DockerHub and deploy it
# to the infrastructure via Ansible.
name: Build, Test & Publish
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
  workflow_dispatch:
jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      # checkout bumped v3 -> v4 for consistency with setup-node@v4
      # (v3 runs on the deprecated Node 16 runtime)
      - uses: actions/checkout@v4
      - name: Enable corepack
        run: corepack enable
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'
      - name: Install dependencies
        run: yarn install --frozen-lockfile
      - name: Build
        run: yarn build
  publish:
    # Only publish images for commits landing on main (not PRs).
    if: github.ref == 'refs/heads/main' && github.event_name == 'push'
    name: Build and Publish Container Image
    runs-on: ubuntu-latest
    needs:
      - build
    steps:
      - uses: actions/checkout@v4
      - name: Setup Docker Metadata
        id: meta
        # v4 -> v5: v4 runs on the deprecated Node 16 runtime
        uses: docker/metadata-action@v5
        with:
          images: liamp1/code
          tags: |
            type=raw,value=latest
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_TOKEN }}
      - name: Build and Push Docker Image to DockerHub
        # v4 -> v5: v4 runs on the deprecated Node 16 runtime
        uses: docker/build-push-action@v5
        with:
          # Build from the checked-out workspace. Without an explicit
          # context the action re-fetches the repo via the Git context,
          # silently ignoring the checkout above.
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
  deploy:
    # Only deploy for commits landing on main (not PRs).
    if: github.ref == 'refs/heads/main' && github.event_name == 'push'
    name: Deploy to Infrastructure
    runs-on: ubuntu-latest
    needs:
      - publish
    steps:
      - uses: actions/checkout@v4
      - name: Write GitHub SSH Key to File
        env:
          SSH_PRIVATE_KEY: ${{ secrets.SSH_PRIVATE_KEY }}
        run: |
          echo "$SSH_PRIVATE_KEY" > private.key
          sudo chmod 400 private.key
      - name: Write Ansible Inventory To File
        env:
          APP_HOST: ${{ secrets.APP_HOST }}
        run: |
          echo "[app]" > hosts.ini
          echo "$APP_HOST" >> hosts.ini
      - name: Run Ansible Playbook to Configure Servers
        run: |
          export ANSIBLE_HOST_KEY_CHECKING=False # Disable host key checking
          ansible-playbook infra/ansible/deploy-playbook.yml --private-key private.key --inventory hosts.ini
        env:
          # NOTE(review): APP_HOST appears unused by this step — the target
          # host comes from hosts.ini written above; confirm before removing.
          APP_HOST: ${{ secrets.APP_HOST }}

3
.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
node_modules/
.vitepress/dist
.vitepress/cache

157
.vitepress/config.mts Normal file
View File

@ -0,0 +1,157 @@
import { defineConfig } from 'vitepress'

// https://vitepress.dev/reference/site-config
// Site-wide VitePress configuration: site metadata, sidebar navigation,
// built-in local search, and Google Analytics (gtag.js) head injection.
export default defineConfig({
  title: "Snippets",
  description: "Snippets and musings gathered from different mistakes and experience.",
  // Markdown sources live under docs/ rather than the repository root.
  srcDir: 'docs',
  sitemap: {
    hostname: 'https://code.liampietralla.com'
  },
  themeConfig: {
    // https://vitepress.dev/reference/default-theme-config
    nav: [
      { text: 'Home', link: '/' },
    ],
    // Client-side search index — no external search service required.
    search: {
      provider: 'local'
    },
    // One collapsed sidebar group per topic; each group's `link` points at
    // that topic's index page, and `items` at the individual snippet pages.
    sidebar: [
      {
        text: '.NET',
        link: '/dotnet/',
        collapsed: true,
        items: [
          { text: 'Blazor with an API', link: '/dotnet/blazor-with-api' },
          { text: 'Database Seeding', link: '/dotnet/database-seed' },
          { text: 'Dockerising Blazor', link: '/dotnet/dockerising-blazor' },
          { text: 'OWIN Logging', link: '/dotnet/owin-logging' },
          { text: 'System.NET Logging', link: '/dotnet/system-net-logging' },
          { text: 'Unit Of Work Template', link: '/dotnet/unit-of-work-template' },
          { text: 'JWT Authentication', link: '/dotnet/jwt-authentication' },
          { text: 'JWT Authentication in Cookie', link: '/dotnet/jwt-authentication-cookie' },
          { text: 'Google Sign in without Identity', link: '/dotnet/google-sign-in-without-identity' },
          { text: 'Service Testing', link: '/dotnet/service-testing' },
          { text: 'Controller Testing', link: '/dotnet/controller-testing' },
        ]
      },
      {
        text: 'Ansible',
        link: '/ansible/',
        collapsed: true,
        items: [
          { text: 'Installing Docker', link: '/ansible/installing-docker' },
          { text: 'Configure SSL', link: '/ansible/certbot-ssl' },
          { text: 'Waiting For Servers', link: '/ansible/server-wait' },
        ]
      },
      {
        text: 'CSS',
        link: '/css/',
        collapsed: true,
        items: [
          { text: 'Text Width HR', link: '/css/text-width-hr' },
        ]
      },
      {
        text: 'EF Core',
        link: '/ef-core/',
        collapsed: true,
        items: [
          { text: 'Stored Procedure Migration', link: '/ef-core/stp-migration' },
        ]
      },
      {
        text: 'Git',
        link: '/git/',
        collapsed: true,
        items: [
          { text: 'SSH Config', link: '/git/ssh-config' },
        ]
      },
      {
        text: 'Github Actions',
        link: '/github-actions/',
        collapsed: true,
        items: [
          { text: 'Build and Test .NET', link: '/github-actions/build-test-dotnet' },
          { text: 'Build and Publish Container', link: '/github-actions/build-publish-container' },
          { text: 'Run EF Core Migrations', link: '/github-actions/run-ef-core-migrations' },
        ]
      },
      {
        text: 'Nginx',
        link: '/nginx/',
        collapsed: true,
        items: [
          { text: 'Easy Reverse Proxy Config', link: '/nginx/easy-reverse-proxy-config' },
          { text: 'Adding Nginx Site', link: '/nginx/adding-nginx-site' }
        ]
      },
      {
        text: 'Nuxt',
        link: '/nuxt/',
        collapsed: true,
        items: [
          { text: 'Custom Fetch', link: '/nuxt/custom-fetch' },
        ]
      },
      {
        text: 'PowerShell',
        link: '/powershell/',
        collapsed: true,
        items: [
          { text: 'Basic Template', link: '/powershell/basic-template' },
        ]
      },
      {
        text: 'React',
        link: '/react/',
        collapsed: true,
        items: [
          { text: 'Context with Custom Hook', link: '/react/context-with-custom-hook' },
          { text: 'Config and Docker', link: '/react/reading-env-vars-docker' }
        ]
      },
      {
        text: 'React Native',
        link: '/react-native/',
        collapsed: true,
        items: [
          { text: 'Generic Box', link: '/react-native/generic-box' },
          { text: 'Generic Stacks', link: '/react-native/generic-stacks' },
          { text: 'Generic Text', link: '/react-native/generic-text' },
        ]
      },
      {
        text: 'Terraform',
        link: '/terraform/',
        collapsed: true,
        items: [
          { text: 'Ansible Inventory Generation', link: '/terraform/ansible-inventory-generation' },
        ]
      },
    ],
    socialLinks: [
      { icon: 'github', link: 'https://github.com/LiamPietralla' }
    ]
  },
  // Google Analytics: load gtag.js asynchronously, then initialise the
  // dataLayer inline. Injected into <head> on every page.
  head: [
    [
      'script',
      { async: '', src: 'https://www.googletagmanager.com/gtag/js?id=G-PHMPD0HWEF' }
    ],
    [
      'script',
      {},
      `window.dataLayer = window.dataLayer || [];
      function gtag(){dataLayer.push(arguments);}
      gtag('js', new Date());
      gtag('config', 'G-PHMPD0HWEF');`
    ]
  ]
})

17
.vitepress/theme/index.ts Normal file
View File

@ -0,0 +1,17 @@
// https://vitepress.dev/guide/custom-theme
import { h } from 'vue'
import type { Theme } from 'vitepress'
import DefaultTheme from 'vitepress/theme'
import './style.css'

// Custom theme entry: extends the default VitePress theme with the CSS
// variable overrides in style.css. The Layout wrapper and enhanceApp hook
// are currently stubs, kept as extension points for layout slots and
// app-level plugins/components.
export default {
  extends: DefaultTheme,
  Layout: () => {
    return h(DefaultTheme.Layout, null, {
      // https://vitepress.dev/guide/extending-default-theme#layout-slots
    })
  },
  enhanceApp({ app, router, siteData }) {
    // ...
  }
} satisfies Theme

139
.vitepress/theme/style.css Normal file
View File

@ -0,0 +1,139 @@
/**
 * Customize default theme styling by overriding CSS variables:
 * https://github.com/vuejs/vitepress/blob/main/src/client/theme-default/styles/vars.css
 */

/**
 * Colors
 *
 * Each color has the exact same color scale system with 3 levels of solid
 * colors with different brightness, and 1 soft color.
 *
 * - `XXX-1`: The most solid color used mainly for colored text. It must
 *   satisfy the contrast ratio against `XXX-soft` when used on top of it.
 *
 * - `XXX-2`: The color used mainly for hover state of the button.
 *
 * - `XXX-3`: The color for solid background, such as bg color of the button.
 *   It must satisfy the contrast ratio with pure white (#ffffff) text on
 *   top of it.
 *
 * - `XXX-soft`: The color used for subtle background such as custom container
 *   or badges. It must satisfy the contrast ratio when putting `XXX-1` colors
 *   on top of it.
 *
 *   The soft color must be semi transparent alpha channel. This is crucial
 *   because it allows adding multiple "soft" colors on top of each other
 *   to create an accent, such as when having inline code block inside
 *   custom containers.
 *
 * - `default`: The color used purely for subtle indication without any
 *   special meanings attached to it such as bg color for menu hover state.
 *
 * - `brand`: Used for primary brand colors, such as link text, button with
 *   brand theme, etc.
 *
 * - `tip`: Used to indicate useful information. The default theme uses the
 *   brand color for this by default.
 *
 * - `warning`: Used to indicate warning to the users. Used in custom
 *   container, badges, etc.
 *
 * - `danger`: Used to show error, or dangerous message to the users. Used
 *   in custom container, badges, etc.
 * -------------------------------------------------------------------------- */
:root {
  --vp-c-default-1: var(--vp-c-gray-1);
  --vp-c-default-2: var(--vp-c-gray-2);
  --vp-c-default-3: var(--vp-c-gray-3);
  --vp-c-default-soft: var(--vp-c-gray-soft);
  --vp-c-brand-1: var(--vp-c-indigo-1);
  --vp-c-brand-2: var(--vp-c-indigo-2);
  --vp-c-brand-3: var(--vp-c-indigo-3);
  --vp-c-brand-soft: var(--vp-c-indigo-soft);
  --vp-c-tip-1: var(--vp-c-brand-1);
  --vp-c-tip-2: var(--vp-c-brand-2);
  --vp-c-tip-3: var(--vp-c-brand-3);
  --vp-c-tip-soft: var(--vp-c-brand-soft);
  --vp-c-warning-1: var(--vp-c-yellow-1);
  --vp-c-warning-2: var(--vp-c-yellow-2);
  --vp-c-warning-3: var(--vp-c-yellow-3);
  --vp-c-warning-soft: var(--vp-c-yellow-soft);
  --vp-c-danger-1: var(--vp-c-red-1);
  --vp-c-danger-2: var(--vp-c-red-2);
  --vp-c-danger-3: var(--vp-c-red-3);
  --vp-c-danger-soft: var(--vp-c-red-soft);
}

/**
 * Component: Button
 * -------------------------------------------------------------------------- */
:root {
  --vp-button-brand-border: transparent;
  --vp-button-brand-text: var(--vp-c-white);
  --vp-button-brand-bg: var(--vp-c-brand-3);
  --vp-button-brand-hover-border: transparent;
  --vp-button-brand-hover-text: var(--vp-c-white);
  --vp-button-brand-hover-bg: var(--vp-c-brand-2);
  --vp-button-brand-active-border: transparent;
  --vp-button-brand-active-text: var(--vp-c-white);
  --vp-button-brand-active-bg: var(--vp-c-brand-1);
}

/**
 * Component: Home
 * -------------------------------------------------------------------------- */
:root {
  --vp-home-hero-name-color: transparent;
  --vp-home-hero-name-background: -webkit-linear-gradient(
    120deg,
    #bd34fe 30%,
    #41d1ff
  );
  --vp-home-hero-image-background-image: linear-gradient(
    -45deg,
    #bd34fe 50%,
    #47caff 50%
  );
  --vp-home-hero-image-filter: blur(44px);
}

/* Larger hero image blur on wider viewports. */
@media (min-width: 640px) {
  :root {
    --vp-home-hero-image-filter: blur(56px);
  }
}

@media (min-width: 960px) {
  :root {
    --vp-home-hero-image-filter: blur(68px);
  }
}

/**
 * Component: Custom Block
 * -------------------------------------------------------------------------- */
:root {
  --vp-custom-block-tip-border: transparent;
  --vp-custom-block-tip-text: var(--vp-c-text-1);
  --vp-custom-block-tip-bg: var(--vp-c-brand-soft);
  --vp-custom-block-tip-code-bg: var(--vp-c-brand-soft);
}

/**
 * Component: Algolia
 * -------------------------------------------------------------------------- */
.DocSearch {
  --docsearch-primary-color: var(--vp-c-brand-1) !important;
}

23
Dockerfile Normal file
View File

@ -0,0 +1,23 @@
# Multi-stage build: compile the VitePress site with Node 20, then serve the
# static output with nginx.
FROM nginx:alpine AS base
EXPOSE 80
WORKDIR /app

# 'as' uppercased to 'AS': mixed casing triggers Docker's FromAsCasing warning.
FROM node:20 AS build
WORKDIR /src

# Copy package.json and package-lock.json
# NOTE(review): this stage uses npm (package-lock.json + npm ci) while the
# README and CI pipeline use yarn — confirm which lockfile the repo tracks.
COPY package.json .
COPY package-lock.json .

# Install dependencies
RUN npm ci

# Copy the app
COPY . .

# Build the app
RUN npm run build

# Final image: nginx serving the built static site from its default web root.
FROM base AS final
WORKDIR /usr/share/nginx/html
COPY --from=build /src/.vitepress/dist .

25
README.md Normal file
View File

@ -0,0 +1,25 @@
# LMP Snippets Store
A collection of handy code snippets and techniques that I have found useful so far in my career.
## Live Site
The live site can be found at [https://code.liampietralla.com/](https://code.liampietralla.com/).
### Running locally
To run the site locally, you will need to have [Node.js](https://nodejs.org/en/) installed. Once you have Node.js installed, you can run the following commands to get the site running locally:
```bash
# Install dependencies
yarn
# Run the dev server
yarn dev
```
### Deployment
The site is deployed using a custom infrastructure setup. Ansible configuration can be found in the `infra` directory which is used to deploy the latest version of the site to a DigitalOcean droplet. The site is then served using Nginx.
Deployment occurs automatically when a new commit is pushed to the `main` branch. This is done using GitHub Actions.

View File

@ -0,0 +1,59 @@
# Configuring Certbot SSL with Ansible (and Nginx)
## Overview
This is a simple playbook to setup and configure certbot SSL certificates on a server. This is really useful for getting SSL certificates installed on servers before running a workload.
## Pre-requisites
You will need Ansible already installed on your machine and an Ansible inventory file; check out the sample below:
```yml
playbook-hosts:
hosts:
host1:
ansible_host: <ip-address>
```
This playbook also assumes that you have a user with sudo privileges on the remote machine, and you can use ssh keys to authenticate.
## The playbook
The playbook to install and then run certbot is below:
```yml
---
- name: Install and Run Certbot
hosts: playbook-hosts
remote_user: root # or whatever user you have
become: yes # sudo
  vars:
    ssl_email: <email-address>
    domain_list: <domain-name> # comma separated list of domains (e.g. example.com,www.example.com)
  tasks:
- name: Install certbot
apt:
pkg:
- certbot
- python3-certbot-nginx
state: latest
update_cache: true
- name: Run certbot to get SSL certificate
shell: certbot --nginx --non-interactive --agree-tos --email {{ ssl_email }} --domains {{ domain_list }}
- name: Restart nginx
service:
name: nginx
state: restarted
enabled: yes
```
## Running the playbook
To run the playbook, you can use the following command:
```bash
ansible-playbook -i <inventory-file> <playbook-name> --private-key <ssh-key>
```

5
docs/ansible/index.md Normal file
View File

@ -0,0 +1,5 @@
# Ansible Snippets and Musings
#### [Installing Docker](./installing-docker.md)
#### [Install and Configure Certbot for SSL](./certbot-ssl.md)
#### [Waiting for Servers to be Provisioned and Ready](./server-wait.md)

View File

@ -0,0 +1,83 @@
# Installing Docker on Servers With Ansible
## Overview
This is a simple playbook to install Docker on a server. This is really useful for getting docker installed on servers before running a dockerised workload.
## Pre-requisites
You will need Ansible already installed on your machine and an Ansible inventory file; check out the sample below:
```yml
playbook-hosts:
hosts:
host1:
ansible_host: <ip-address>
```
This playbook also assumes that you have a user with sudo privileges on the remote machine, and you can use ssh keys to authenticate.
## The playbook
The playbook to install docker is below:
```yml
---
- name: Install Docker
hosts: playbook-hosts
remote_user: root # or whatever user you have
become: yes # sudo
tasks:
- name: Install aptitude
apt:
name: aptitude
state: latest
update_cache: true
- name: Install required system packages
apt:
pkg:
- apt-transport-https
- ca-certificates
- curl
- software-properties-common
- python3-pip
- virtualenv
- python3-setuptools
state: latest
update_cache: true
- name: Add Docker GPG apt Key
apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
state: present
- name: Add Docker Repository
apt_repository:
repo: deb https://download.docker.com/linux/ubuntu jammy stable
state: present
- name: Update apt and install docker-ce
apt:
name: docker-ce
state: latest
update_cache: true
- name: Install Docker Module for Python
pip:
name: docker
```
::: tip
Aptitude is used to install the packages as it is more reliable than apt-get, and is also preferred by Ansible.
:::
## Running the playbook
To run the playbook, you can use the following command:
```bash
ansible-playbook -i <inventory-file> <playbook-name> --private-key <ssh-key>
```

View File

@ -0,0 +1,42 @@
# Waiting for Servers to be Provisioned and Ready
## Overview
Often when working with automated deployments you will want to wait for your servers to be provisioned and ready before running your Ansible playbook. This is a simple bash script you can run before your Ansible playbook to wait for your servers to be ready.
## The Script
The script below assumes a few things, namely:
* You have a private key file called `private.key` in the root directory of your project
* You have a terraform directory in the root of your project
* You have two IP address outputs in your terraform called `server-1-ip-address` and `server-2-ip-address`
```bash [wait-for-servers.sh]
# Simple script to wait till all the servers are up and running (e.g. not setting up)
echo 'Trying SSH to new instances, checking cloud-init status... (It will say "Connection refused" until it is ready.)'
# 6 retries x 5 seconds each = maximum approx 30 seconds to wait for SSH, then bail.
check_instances() {
echo now checking...
ssh root@"$server_1_ip" -o StrictHostKeyChecking=no -i private.key cloud-init status -w
ssh root@"$server_2_ip" -o StrictHostKeyChecking=no -i private.key cloud-init status -w
}
echo getting hostnames...
# Navigate to terraform directory
cd terraform
server_1_ip="$(terraform output -raw server-1-ip-address)"
server_2_ip="$(terraform output -raw server-2-ip-address)"
# Navigate back to root directory
cd ..
timeout=6
stopwatch=0
until check_instances; do
stopwatch=$((stopwatch+1))
  if [[ $stopwatch -ge $timeout ]]; then echo Error: Timed out waiting for instance; exit 1; fi
sleep 5
done
```

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

3
docs/css/index.md Normal file
View File

@ -0,0 +1,3 @@
# CSS Snippets and Musings
#### [Text Width Horizontal Rule](./text-width-hr.md)

34
docs/css/text-width-hr.md Normal file
View File

@ -0,0 +1,34 @@
# Text Width Horizontal Rule
Often times, you want to create a horizontal rule that spans the width of the text. This is a simple way to do that.
```css
.hr {
display: inline-block;
}
.hr::after {
content: '';
display: block;
border-top: 2px solid black;
margin-top: .1rem;
}
```
In the above snippet we create a horizontal rule that spans the width of the text. We do this by creating a block element with `display: inline-block` and then adding a pseudo element with `display: block` and a border. The `margin-top` is used to create some space between the text and the horizontal rule.
This is how it looks:
```html
<p>
This is some normal text
</p>
<h2 class="hr">This is a heading</h2>
<p>
This is some more normal text
</p>
```
Which will render as:
<img src="./images/text-width-hr.png" alt="Text Width Horizontal Rule Example" style="max-width: 100%;">

View File

@ -0,0 +1,314 @@
# Blazor App with an inbuilt API
Using the new Blazor interactive auto mode we can combine the benefits of Blazor WASM with Blazor Server.
In order to create a unified experience the recommended way is to create an API that is exposed from the Server application.
The Server application will have a service class that will encapsulate all the actual logic, leaving the API controller to just call the service class.
The Blazor WASM application will then call the API controller to get the data or perform actions.
## Create the Blazor Application
The best way to create a new Blazor application is to use Visual Studio 2022.
1. Open Visual Studio 2022
2. Click on `Create a new project`
3. Search for `Blazor Web App` and select the `Blazor Web App` template
4. Ensure that the `Interactive Server Mode` is set to `Auto (Blazor Server and WebAssembly)`
5. Ensure that `Interactivity Mode` is set to `Per page/component`
6. Untick `Include sample pages`
## Create a model class in the client
Get started by creating a model class in the client application. This model class will be used to represent the data that is returned from the API.
```csharp [BlazorApp1.Client/Models/Movie.cs]
namespace BlazorApp1.Client.Models
{
public class Movie
{
public int Id { get; set; }
public string Title { get; set; } = string.Empty;
public string Genre { get; set; } = string.Empty;
public int Year { get; set; }
}
}
```
## Create a service interface in the client
Next we will create a service interface in the client application. This service interface will define the methods that will be used to interact with the API. For this guide only a GET method is defined, but more methods can be added as needed.
```csharp [BlazorApp1.Client/Services/IMovieService.cs]
using BlazorApp1.Client.Models;
namespace BlazorApp1.Client.Services
{
public interface IMovieService
{
        public Task<List<Movie>> GetMoviesAsync();
}
}
```
## Create a client service
Now that we have our service contract we can create our client service. The service will look like the below:
```csharp [BlazorApp1.Client/Services/ClientMovieService.cs]
using BlazorApp1.Client.Models;
using System.Net.Http.Json;
namespace BlazorApp1.Client.Services
{
public class ClientMovieService(HttpClient httpClient) : IMovieService
{
private readonly HttpClient _httpClient = httpClient;
public async Task<List<Movie>> GetMoviesAsync()
{
return await _httpClient.GetFromJsonAsync<List<Movie>>("api/movie") ?? [];
}
}
}
```
The client service calls the API endpoint `api/movie` and deserializes the response into a list of `Movie` objects. We will also need to register this HTTP service and the Movie service in the `Program.cs` file.
```csharp [BlazorApp1/Client/Program.cs]
builder.Services.AddScoped<IMovieService, ClientMovieService>();
builder.Services.AddScoped(sp => new HttpClient { BaseAddress = new Uri(builder.Configuration["FrontendUrl"] ?? "https://localhost:5002") });
```
At this point also update the appsettings.json for both the client and server application to include the `FrontendUrl` key.
::: code-group
```json [BlazorApp1.Client/wwwroot/appsettings.json]
{
"FrontendUrl": "https://localhost:5002"
}
```
```json [BlazorApp1/appsettings.json]
{
"FrontendUrl": "https://localhost:5002"
}
```
:::
::: tip
For the client application the appsettings.json will need to be placed in the wwwroot folder, which can be created if it does not exist.
:::
## Create the CSR (Client Side Rendered) movie page
Now that we have our service we can create a page that will call the service and display the data.
```razor [BlazorApp1.Client/Pages/MoviesCSR.razor]
@page "/movies-csr";
/* NOTE: InteractiveAuto is used to specify the interactivity mode
for the page. This is set to InteractiveAuto so that the page
can be rendered on the client side.
Technically this is not doing anything at this point, as there is no
need for client interactivity, but if we add a button or some other
interactive element this will be useful.
*/
@rendermode InteractiveAuto
@using BlazorApp1.Client.Models
@using BlazorApp1.Client.Services
@inject IMovieService MovieService
<h3>MoviesCSR</h3>
@if (moviesList.Count == 0)
{
<h5>No movies found</h5>
} else
{
<table class="table">
<thead>
<tr>
<th>Id</th>
<th>Title</th>
<th>Genre</th>
<th>Year</th>
</tr>
</thead>
<tbody>
@foreach (Movie movie in moviesList)
{
<tr>
<td>@movie.Id</td>
<td>@movie.Title</td>
<td>@movie.Genre</td>
<td>@movie.Year</td>
</tr>
}
</tbody>
</table>
}
@code {
private List<Movie> moviesList = [];
protected override async Task OnInitializedAsync()
{
await GetMovies();
}
private async Task GetMovies()
{
moviesList = await MovieService.GetMoviesAsync();
}
}
```
Note the following:
* The `@page` directive specifies the route for the page.
* The `@inject` directive is used to inject the `IMovieService` into the page.
* The `@code` block contains the code for the page. In the block we have a `moviesList` variable that will hold the list of movies.
* The `OnInitializedAsync` method is called when the page is initialized. In this method we call the `GetMovies` method.
* The `GetMovies` method calls the `GetMoviesAsync` method on the `MovieService` and assigns the result to the `moviesList` variable.
## Create the server service
Now that the frontend part is done we can move on to the server part. Get started by creating a ServerMovieService class in the server application:
```csharp [BlazorApp1/Services/ServerMovieService.cs]
using BlazorApp1.Client.Models;
using BlazorApp1.Client.Services;
namespace BlazorApp1.Services
{
public class ServerMovieService : IMovieService
{
public Task<List<Movie>> GetMoviesAsync()
{
return Task.FromResult(new List<Movie>
{
new() { Title = "The Shawshank Redemption", Year = 1994 },
new() { Title = "The Godfather", Year = 1972 },
new() { Title = "The Dark Knight", Year = 2008 },
new() { Title = "Pulp Fiction", Year = 1994 },
new() { Title = "The Lord of the Rings: The Return of the King", Year = 2003 },
new() { Title = "Schindler's List", Year = 1993 }
});
}
}
}
```
## Create the API Controller
Now that our service is ready we can create the API controller. The controller will call the service and return the data.
```csharp [BlazorApp1/Controllers/MovieController.cs]
using BlazorApp1.Client.Services;
using Microsoft.AspNetCore.Mvc;
namespace BlazorApp1.Controllers
{
[ApiController]
[Route("api/[controller]")]
public class MovieController(IMovieService movieService) : ControllerBase
{
private readonly IMovieService _movieService = movieService;
public async Task<IActionResult> GetMoviesAsync()
{
var movies = await _movieService.GetMoviesAsync();
return Ok(movies);
}
}
}
```
This will require us to do two things in the `Program.cs` file. First we need to register the movie service in the file:
```csharp [BlazorApp1/Program.cs]
builder.Services.AddScoped<IMovieService, ServerMovieService>();
```
And second we also need to register the API controller in the file:
```csharp [BlazorApp1/Program.cs]
// Before builder.Build();
builder.Services.AddControllers();
// Before app.Run();
app.MapControllers();
```
## Add the server side movie page
Now that the API is ready we can create a server side page that will call the service directly and display the data.
```razor [BlazorApp1/Pages/MoviesSSR.razor]
@page "/movies-ssr";
@using BlazorApp1.Client.Models
@using BlazorApp1.Client.Services
@inject IMovieService MovieService
<h3>MoviesSSR</h3>
@if (moviesList.Count == 0)
{
<h5>No movies found</h5>
} else
{
<table class="table">
<thead>
<tr>
<th>Id</th>
<th>Title</th>
<th>Genre</th>
<th>Year</th>
</tr>
</thead>
<tbody>
@foreach (Movie movie in moviesList)
{
<tr>
<td>@movie.Id</td>
<td>@movie.Title</td>
<td>@movie.Genre</td>
<td>@movie.Year</td>
</tr>
}
</tbody>
</table>
}
@code {
private List<Movie> moviesList = [];
protected override async Task OnInitializedAsync()
{
await GetMovies();
}
private async Task GetMovies()
{
moviesList = await MovieService.GetMoviesAsync();
}
}
```
As you can see the code is almost identical to the CSR page. The only difference is that the data is fetched from the server side service.
## Run the application
At this point you can run the application and navigate to the `/movies-csr` and `/movies-ssr` pages to see the data being displayed.
### Conclusion
In this guide we have seen how to create a Blazor application that uses an inbuilt API. We have created a client service that calls the API and a server service that returns the data. We have also created a client side rendered page and a server side rendered page that display the data. This is a good way to create a unified experience for the user.
While simple in this example this can be extended so that the Blazor app stores data in a database and in this way we can build a full stack application.

View File

@ -0,0 +1,488 @@
# Testing Controller
Testing controllers can be an important part of your application testing strategy. Controllers are the entry point for your application and are responsible for handling requests and responses. In this guide we will cover how to test controllers in a Web API application.
Testing controllers is a little more tricky but the best method is to actually run a test version of your app, and interact with it using HTTP requests. This style of testing is known as integration testing and is a great way to test the entire application stack, even though we will often use an in-memory database to avoid the need for a real database.
.NET provides the `WebApplicationFactory` class to help with this. This class allows you to create a test server that can be used to send HTTP requests to your application.
# Example Application
In this example we will create a simple Web API to maintain a user list. To get started create a new Web API project titled `UserApp` and enabled controller support.
## User Model and DB Context
Create a new `Data` folder and insert the following files:
::: code-group
```csharp [User.cs]
namespace UserApp.Data
{
public class User
{
public int Id { get; set; }
public string Name { get; set; } = string.Empty;
public string Email { get; set; } = string.Empty;
public string FirstName { get; set; } = string.Empty;
public string LastName { get; set; } = string.Empty;
}
}
```
:::
::: code-group
```csharp [UserAppContext.cs]
using Microsoft.EntityFrameworkCore;
namespace UserApp.Data
{
public class UserAppContext : DbContext
{
public UserAppContext(DbContextOptions<UserAppContext> options) : base(options) { }
public DbSet<User> Users { get; set; }
}
}
```
:::
::: code-group
```csharp [UserViewModel.cs]
namespace UserApp.Models
{
public class UserViewModel(string email, string firstName, string lastName)
{
public string Email { get; set; } = email;
public string FirstName { get; set; } = firstName;
public string LastName { get; set; } = lastName;
}
}
```
:::
As part of this ensure the following NuGet packages are installed:
* Microsoft.EntityFrameworkCore
## Service Interface and Implementation
First create a entity not found exception:
::: code-group
```csharp [EntityNotFoundException.cs]
namespace UserApp.Infrastructure.Exceptions
{
public class EntityNotFoundException(string message) : Exception(message) { }
}
```
:::
Then we can create the service interface and implementation:
::: code-group
```csharp [IUserService.cs]
using UserApp.Data;
namespace UserApp.Services
{
public interface IUserService
{
public Task<IEnumerable<User>> GetUsersAsync();
public Task<User> GetUserAsync(int id);
public Task<User> AddUserAsync(string email, string firstName, string lastName);
public Task<User> UpdateUserAsync(int id, string email, string firstName, string lastName);
public Task<User> DeleteUserAsync(int id);
}
}
```
```csharp [UserService.cs]
using Microsoft.EntityFrameworkCore;
using UserApp.Data;
using UserApp.Infrastructure.Exceptions;
namespace UserApp.Services
{
public class UserService(UserAppContext context) : IUserService
{
private readonly UserAppContext _context = context;
public async Task<User> AddUserAsync(string email, string firstName, string lastName)
{
User user = new()
{
Email = email,
FirstName = firstName,
LastName = lastName
};
await _context.Users.AddAsync(user);
await _context.SaveChangesAsync();
return user;
}
public async Task<User> DeleteUserAsync(int id)
{
User? user = await _context.Users.FindAsync(id) ?? throw new EntityNotFoundException("User not found");
_context.Users.Remove(user);
await _context.SaveChangesAsync();
return user;
}
public async Task<User> GetUserAsync(int id)
{
User user = await _context.Users.FindAsync(id) ?? throw new EntityNotFoundException("User not found");
return user;
}
public async Task<IEnumerable<User>> GetUsersAsync()
{
return await _context.Users.ToListAsync();
}
public async Task<User> UpdateUserAsync(int id, string email, string firstName, string lastName)
{
User user = await _context.Users.FindAsync(id) ?? throw new EntityNotFoundException("User not found");
user.Email = email;
user.FirstName = firstName;
user.LastName = lastName;
await _context.SaveChangesAsync();
return user;
}
}
}
```
:::
## Setup Dependency Injection
We now need to add our DI config to the `Program.cs` file. Add the following to the service section, you will also need to install the following NuGet packages:
* Microsoft.EntityFrameworkCore.Sqlite
* Microsoft.EntityFrameworkCore.Design
* Microsoft.EntityFrameworkCore.Tools
::: code-group
```csharp [Program.cs]
builder.Services.AddDbContext<UserAppContext>(options =>
{
options.UseSqlite("UserApp");
});
builder.Services.AddScoped<IUserService, UserService>();
```
:::
Once this is done we can generate the database migrations that we need:
::: code-group
```bash [dotnet console]
dotnet ef migrations add InitialCreate
```
```bash [package manager console]
Add-Migration InitialCreate
```
:::
## Controller
Finally we can create a controller to interact with the service:
::: code-group
```csharp [UsersController.cs]
using Microsoft.AspNetCore.Mvc;
using UserApp.Data;
using UserApp.Infrastructure.Exceptions;
using UserApp.Models;
using UserApp.Services;
namespace UserApp.Controllers
{
[Route("api/[controller]")]
public class UserController(IUserService userService, ILogger<UserController> logger) : ControllerBase
{
private readonly ILogger<UserController> _logger = logger;
private readonly IUserService _userService = userService;
[HttpGet]
public async Task<IActionResult> GetUsersAsync()
{
try
{
return Ok(await _userService.GetUsersAsync());
}
catch (Exception exception)
{
_logger.LogError(exception, "An error occurred while getting the users");
return StatusCode(500);
}
}
[HttpGet("{id}")]
public async Task<IActionResult> GetUserAsync(int id)
{
try
{
return Ok(await _userService.GetUserAsync(id));
}
catch (EntityNotFoundException exception)
{
_logger.LogError(exception, "User not found");
return NotFound();
}
catch (Exception exception)
{
_logger.LogError(exception, "An error occurred while getting the user");
return StatusCode(500);
}
}
[HttpPost]
public async Task<IActionResult> AddUserAsync([FromBody] UserViewModel user)
{
try
{
User createdUser = await _userService.AddUserAsync(user.Email, user.FirstName, user.LastName);
return CreatedAtAction("GetUser", new { id = createdUser.Id }, createdUser);
}
catch (Exception exception)
{
_logger.LogError(exception, "An error occurred while adding the user");
return StatusCode(500);
}
}
[HttpPut("{id}")]
public async Task<IActionResult> UpdateUserAsync(int id, [FromBody] UserViewModel user)
{
try
{
return Ok(await _userService.UpdateUserAsync(id, user.Email, user.FirstName, user.LastName));
}
catch (EntityNotFoundException exception)
{
_logger.LogError(exception, "User not found");
return NotFound();
}
catch (Exception exception)
{
_logger.LogError(exception, "An error occurred while updating the user");
return StatusCode(500);
}
}
[HttpDelete("{id}")]
public async Task<IActionResult> DeleteUserAsync(int id)
{
try
{
return Ok(await _userService.DeleteUserAsync(id));
}
catch (EntityNotFoundException exception)
{
_logger.LogError(exception, "User not found");
return NotFound();
}
catch (Exception exception)
{
_logger.LogError(exception, "An error occurred while deleting the user");
return StatusCode(500);
}
}
}
}
```
:::
# Controller Testing
To test the controller we will create a new MSTest Test project called `UserApp.Tests`. Add the following NuGet packages:
* Microsoft.EntityFrameworkCore.Sqlite
* Microsoft.AspNetCore.Mvc.Testing
* Npgsql.EntityFrameworkCore.PostgreSQL
## Test Setup
For this example we are going to be using a PostgreSQL database for testing the application. This is not needed for this particular example, as the code is already using an in-memory database, but it is a good example showing how to override the database connection for testing.
In an actual system you would want to use a real database for testing, such as one spun up in a docker container in the CI.
To setup the test project add the following to the `Program.cs` file:
::: code-group
```csharp [Program.cs]
// ...
public partial class Program { }
```
:::
Now we want to create a new factory to generate the test server:
::: code-group
```csharp [Factories/UserAppApplicationFactory.cs]
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Mvc.Testing;
using Microsoft.EntityFrameworkCore;
using Microsoft.Extensions.DependencyInjection;
using UserApp.Data;
namespace UserApp.Tests.Factories
{
public class UserAppApplicationFactory<TProgram>(object[] existingEntites = null!) : WebApplicationFactory<TProgram> where TProgram : class
{
private readonly object[] _existingEntites = existingEntites ?? [];
protected override void ConfigureWebHost(IWebHostBuilder builder)
{
builder.ConfigureServices(services =>
{
var descriptor = services.SingleOrDefault(d => d.ServiceType == typeof(DbContextOptions<UserAppContext>));
if (descriptor != null)
{
services.Remove(descriptor);
}
services.AddDbContext<UserAppContext>((container, options) =>
{
options.UseNpgsql("Server=localhost;Port=5432;Database=postgres;User Id=postgres;Password=postgres;");
});
// After the DbContext is registered, we need to create the database
using var scope = services.BuildServiceProvider().CreateScope();
var context = scope.ServiceProvider.GetRequiredService<UserAppContext>();
context.Database.ExecuteSqlRaw(DB_CLEAR);
context.Database.Migrate();
// Add any existing entities to the context
foreach (var entity in _existingEntites)
{
context.Add(entity);
}
if (_existingEntites.Length != 0)
{
context.SaveChanges();
}
});
}
public string DB_CLEAR = """
DROP TABLE IF EXISTS "Users";
DROP TABLE IF EXISTS "__EFMigrationsHistory";
""";
}
}
```
:::
::: danger
NOTE: The above code will delete any existing data in the database, so be careful when using this in a real application. Always make sure the connection string is pointing to a test database.
:::
This setup requires that a Postgres database is running, the best way to do this is to use a docker container (which is what will occur in the CI as well). A compose is below:
```yaml
services:
postgres:
image: postgres:16
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: postgres
ports:
- "5432:5432"
```
# Writing Controller Tests
Now we can write some tests for the controller. Create a new file called `UsersControllerTests.cs` and add the following:
::: code-group
```csharp [UsersControllerTests.cs]
using Newtonsoft.Json;
using UserApp.Data;
using UserApp.Tests.Factories;
namespace UserApp.Tests
{
[TestClass]
public class UserControllerTests
{
private HttpClient _client = null!;
private UserAppApplicationFactory<Program> _factory = null!;
[TestCleanup]
public void Cleanup()
{
_client.Dispose();
_factory.Dispose();
}
[TestMethod]
public async Task GetUsers_ForNoUsers_ReturnsSuccessAndNoUsers()
{
// Arrange
_factory = new UserAppApplicationFactory<Program>();
_client = _factory.CreateClient();
var request = new HttpRequestMessage(HttpMethod.Get, "/api/user");
// Act
var response = await _client.SendAsync(request);
// Assert
response.EnsureSuccessStatusCode();
var content = await response.Content.ReadAsStringAsync();
Assert.AreEqual("[]", content);
}
[TestMethod]
public async Task GetUsers_ForUsers_ReturnsSuccessAndUsers()
{
// Arrange
User user1 = new()
{
Id = 1,
FirstName = "John",
LastName = "Doe",
Email = "john.doe@test.com"
};
User user2 = new()
{
Id = 2,
FirstName = "Jane",
LastName = "Doe",
Email = "jane.doe@test.com"
};
_factory = new UserAppApplicationFactory<Program>([user1, user2]);
_client = _factory.CreateClient();
// Act
var response = await _client.GetAsync("/api/user");
// Assert
response.EnsureSuccessStatusCode();
var content = await response.Content.ReadAsStringAsync();
var users = JsonConvert.DeserializeObject<User[]>(content);
Assert.AreEqual(2, users!.Length);
Assert.AreEqual("john.doe@test.com", users[0].Email);
Assert.AreEqual("jane.doe@test.com", users[1].Email);
}
}
}
```
:::

View File

@ -0,0 +1,58 @@
# Database Seeding in .NET
Often in development and testing, you need to seed your database with some data. This can be done manually, but it's a tedious process. In this article, we'll see how to seed a database in .NET.
The best way to seed the database in .NET is to first check that the application is running in development mode.
Most of the time such a check will already exist in your `Program.cs`:
```csharp [Program.cs]
// ...
if (builder.Environment.IsDevelopment()) {
// ...
}
// ...
```
To seed your database first add a SeedData file (personally I usually place this in a helpers folder):
```csharp [Helpers/SeedData.cs]
using Microsoft.EntityFrameworkCore;
using MySampleApp.Models;
namespace MySampleApp.Helpers;
public class SeedData
{
public static async Task InitializeAsync(IServiceProvider serviceProvider)
{
using var context = new AppContext(serviceProvider.GetRequiredService<DbContextOptions<AppContext>>());
context.Add(new Movie { Name = "Batman Begins", Genre = "Action" });
context.Add(new Movie { Name = "The Dark Knight", Genre = "Action" });
context.Add(new Movie { Name = "The Dark Knight Rises", Genre = "Action" });
await context.SaveChangesAsync();
}
}
```
In the `SeedData` class, we have a static method `InitializeAsync` that takes an `IServiceProvider` as a parameter. This method initializes the database with some sample data.
Next, we need to call this method in the `Program.cs` file:
```csharp [Program.cs]
// ...
if (builder.Environment.IsDevelopment()) {
// Seed the database
await using var scope = app.Services.CreateAsyncScope();
await SeedData.InitializeAsync(scope.ServiceProvider);
// ...
}
// ...
```

View File

@ -0,0 +1,35 @@
# Dockerising a Blazor Web App
Dockerising a blazor web app is a simple process. The first step is to create a Dockerfile in the root of the project. The Dockerfile should contain the following:
```Dockerfile
FROM mcr.microsoft.com/dotnet/aspnet:8.0 AS base
USER app
WORKDIR /app
EXPOSE 8080
FROM mcr.microsoft.com/dotnet/sdk:8.0 AS build
ARG BUILD_CONFIGURATION=Release
WORKDIR /src
COPY ["BlazorApp1/BlazorApp1.csproj", "BlazorApp1/"]
COPY ["BlazorApp1.Client/BlazorApp1.Client.csproj", "BlazorApp1.Client/"]
RUN dotnet restore "./BlazorApp1/BlazorApp1.csproj"
COPY . .
WORKDIR "/src/BlazorApp1"
RUN dotnet build "./BlazorApp1.csproj" -c $BUILD_CONFIGURATION -o /app/build
FROM build AS publish
ARG BUILD_CONFIGURATION=Release
RUN dotnet publish "./BlazorApp1.csproj" -c $BUILD_CONFIGURATION -o /app/publish /p:UseAppHost=false
FROM base AS final
WORKDIR /app
COPY --from=publish /app/publish .
ENTRYPOINT ["dotnet", "BlazorApp1.dll"]
```
::: tip
The dockerfile has the HTTPS port disabled, so you can run the app on HTTP. If you want to enable HTTPS add `EXPOSE 8081`, however a reverse proxy like Nginx is recommended for production.
:::

View File

@ -0,0 +1,230 @@
# Google Sign-In Without Identity
This guide will show you how to implement Google Sign-In without using the Identity framework. While Identity is a great framework, it can be overkill for some applications. Often we just want to use a third-party provider to authenticate a user and use default cookie or JWT authentication for the rest of the application.
## Prerequisites
* A Google account
* .NET 8
* Visual Studio 2022
Note: This article assumes you are using a Windows development environment. If you are using a Mac or Linux, you will need to use the appropriate tools for your environment.
## Implementation
### Create a new .NET Core MVC Application
Open Visual Studio and create a new .NET Core MVC application. You can use the default template. I have called my project `GoogleAuthentication`.
::: warning
Ensure that when you create the project the 'None' option is selected for authentication. (Otherwise, you will need to remove the Identity framework later.)
:::
### Create a new Account Controller
Create a new blank MVC controller called `AccountController.cs`. This controller will be used to handle the Google Sign-In process.
Inject the configuration service into the controller so that we can access the Google Client ID from the `appsettings.json` file later.
After creating the controller and injecting the service the code should look like this:
```csharp
using Microsoft.AspNetCore.Mvc;
namespace GoogleAuthentication.Controllers
{
public class AccountController(IConfiguration configuration) : Controller
{
private readonly IConfiguration _configuration = configuration;
public IActionResult Index()
{
return View();
}
}
}
```
### Create a Login View
We will now add a `Login` method to the `AccountController`. This method will be used to redirect the user to the Google Sign-In page.
Start by creating a `LoginViewModel` in the `Models` folder. This view model will be used to pass the Google Client ID to the view.
```csharp
public class LoginViewModel
{
public string GoogleClientId { get; set; } = null!;
}
```
Then add the following code to the `AccountController` class:
```csharp
public IActionResult Login()
{
LoginViewModel model = new()
{
GoogleClientId = _configuration["GoogleClientId"]!
};
return View(model);
}
```
This code will create a new `LoginViewModel` and pass the Google Client ID from the configuration to the view. At this stage we can also remove the default `Index` method from the controller as we will not be using it.
Add a new view called `Login.cshtml` to the `Views/Account` folder. This view will be used to display the Google Sign-In button.
```razor
@model LoginViewModel
@{
ViewData["Title"] = "Login";
}
<div class="row">
<div class="column text-center">
<h3>Please sign in with your Google Account</h3>
<hr />
<div id="buttonDiv" class="has-text-centered"></div>
</div>
</div>
<form id="login-form" asp-controller="Account" asp-action="Callback" method="post">
<input type="hidden" name="jwt" />
</form>
@section Scripts {
<script src="https://accounts.google.com/gsi/client" async defer></script>
<script>
function handleCredentialResponse(response) {
if (response.credential) {
let form = document.getElementById('login-form');
let jwt = document.getElementsByName('jwt')[0];
jwt.value = response.credential;
form.submit();
}
}
window.onload = function () {
google.accounts.id.initialize({
client_id: "@Model.GoogleClientId",
callback: handleCredentialResponse
});
google.accounts.id.renderButton(
document.getElementById("buttonDiv"),
{ theme: "outline", size: "large" } // customization attributes
);
google.accounts.id.prompt(); // also display the One Tap dialog
}
</script>
}
```
The above code will render the Google Sign-In button and handle the response from Google. When the user signs into google, the `handleCredentialResponse` function will be called and the authentication response from Google will be submitted as the data. This function will then submit the form with the JWT token (called credential in the response) as a hidden field.
### Create a Callback Method
Start by creating a model to represent the submitted form data.
```csharp
public class LoginRequestViewModel
{
public string Jwt { get; set; } = string.Empty;
}
```
We will now create a `Callback` method in the `AccountController`. This method will be used to handle the response from Google and then perform the authentication step that our application requires.
Add the following code to the `AccountController` class:
```csharp{19-20}
[HttpPost]
public async Task<IActionResult> Callback(LoginRequestViewModel login)
{
// If the jwt token is empty then user is not authorized
if (string.IsNullOrEmpty(login.Jwt))
{
throw new Exception("The jwt token is empty.");
}
// Otherwise we can verify the token with google and get the user's email address
Payload payload = await GoogleJsonWebSignature.ValidateAsync(login.Jwt);
// If the payload is not null and is valid then get the user by email address
if (payload != null)
{
string userEmail = payload.Email!;
// Perform necessary logic to sign in the user here
// e.g. create a cookie, or a JWT token, etc.
// Return the user to the home page
return RedirectToAction("Index", "Home");
}
else
{
// If the payload is null then the user is not authorized
throw new Exception("The payload is null.");
}
}
```
The above code will retrieve the JWT token and then validate it with Google, using the GoogleJsonWebSignature class. This class is provided as part of the nuget package `Google.Apis.Auth`. If the token is valid, we can then perform any necessary logic to sign the user in. In this example we will just redirect the user to the home page, however in your own application you could create a sign-in cookie, create a JWT or start a session.
In the above example if the sign in fails, an exception will be thrown. This will cause the user to be redirected to the error page. In a production application you would want to handle this error more gracefully.
### Add the Google Client ID to the Configuration
We will now add the Google Client ID to the configuration. This will allow us to access the value from the `appsettings.json` file.
Add the following code to the `appsettings.json` file:
```json
{
"GoogleClientId": "YOUR_GOOGLE_CLIENT_ID"
}
```
To generate a Google Client ID, follow the steps below:
1. Go to the [Google Cloud Console](https://console.cloud.google.com/)
2. Create a new project
3. Go to the [Credentials Page](https://console.cloud.google.com/apis/credentials)
4. Click the `Create Credentials` button and select `OAuth client ID`
5. Select `Web application` as the application type
6. Enter a name for the application
7. Add the following URL to the `Authorized JavaScript origins` section: `https://localhost:5001` (replace with your own URL)
8. Add the following URL to the `Authorized redirect URIs` section: `https://localhost:5001/account/callback` (replace with your own URL)
9. Click the `Create` button
10. Copy the Client ID and paste it into the `appsettings.json` file
You will also need to setup the consent screen for your application. To do this, follow the steps below:
1. Go to the [OAuth consent screen](https://console.cloud.google.com/apis/credentials/consent)
2. Select `External` as the user type
3. Click the `Create` button
4. Enter a name for the application
5. Add the following URL to the `Authorized domains` section: `localhost`
6. Click the `Save and continue` button
7. Click the `Add or remove scopes` button
8. Select the `email` and `profile` scopes
9. Click the `Update` button
10. Click the `Save and continue` button
### Test the Authentication
We will now test the authentication using the Google Sign-In button.
Start the application and then manually update the url to go to the `/Account/Login` route. This will redirect you to the Google Sign-In page. Either click the Google Sign-In button or use the one tap feature to sign in with your Google account. You should then be redirected to the home page.
## Conclusion
We have now learnt how to implement Google Sign-In without using the Identity framework. This is a great way to add authentication to your application without the overhead of the Identity framework.
This method works well when combined with cookie or JWT authentication. You can use the Google Sign-In to authenticate the user and then use the cookie or JWT to authenticate the user for future requests.

13
docs/dotnet/index.md Normal file
View File

@ -0,0 +1,13 @@
# .NET Snippets and Musings
#### [Blazor with an inbuilt API](./blazor-with-api.md)
#### [Database Seeding](./database-seed.md)
#### [Dockerising a Blazor Web App](./dockerising-blazor.md)
#### [OWIN Logging](./owin-logging.md)
#### [System.NET Logging](./system-net-logging.md)
#### [Unit of Work Template](./unit-of-work-template.md)
#### [JWT Authentication](./jwt-authentication.md)
#### [JWT Authentication with Cookie](./jwt-authentication-cookie.md)
#### [Google Sign in Without Identity](./google-sign-in-without-identity.md)
#### [Service Testing](./service-testing.md)
#### [Controller Testing](./controller-testing.md)

View File

@ -0,0 +1,125 @@
# JWT Authentication Stored in a Cookie
## Overview
Best practice for storing JWT tokens is to store them in a cookie. This is because cookies are automatically sent with every request to the server. This means that we do not need to manually add the token to the request header for every request. It also means that (assuming the cookie is set to HttpOnly) the token cannot be accessed by JavaScript. This is important as it prevents malicious JavaScript from accessing the token and sending it to a third party.
## Prerequisites
* Previous article completed: [JWT Authentication](./jwt-authentication.md)
Note: This article assumes you are using a Windows development environment. If you are using a Mac or Linux, you will need to use the appropriate tools for your environment.
## Implementation
### Open the JWT Authentication Project
We will start by opening the project we created in the previous article. This JWT Authentication project will be modified to store the JWT token in a cookie.
### Remove Swagger Bearer Authentication
We will start by removing the Swagger Bearer authentication. This is because we will be using cookies for authentication instead of a Bearer token and thus we no longer need to manually paste our token into the Swagger UI.
Simply open the `Program.cs` file and replace the `builder.Services.AddSwaggerGen` section with the below:
```csharp
builder.Services.AddSwaggerGen();
```
This will remove the Bearer authentication from Swagger UI (i.e. the default setup).
### Update JWT Authentication to check cookies for JWT
We now need to update our JWT authentication to check for the JWT token in the cookies. We will do this by modifying the `AddJwtBearer` call in the `Program.cs` file. See changes below:
```csharp{15-26}
builder.Services.AddAuthentication(JwtBearerDefaults.AuthenticationScheme)
.AddJwtBearer(o =>
{
o.TokenValidationParameters = new TokenValidationParameters
{
ValidIssuer = builder.Configuration["Jwt:Issuer"],
ValidAudience = builder.Configuration["Jwt:Audience"],
IssuerSigningKey = new SymmetricSecurityKey(Encoding.UTF8.GetBytes(builder.Configuration["Jwt:Key"]!)),
ValidateIssuer = true,
ValidateAudience = true,
ValidateLifetime = true,
ValidateIssuerSigningKey = true
};
o.Events = new JwtBearerEvents
{
OnMessageReceived = context =>
{
if (context.Request.Cookies.TryGetValue(WeatherForecastController.ACCESS_TOKEN_NAME, out var accessToken))
{
context.Token = accessToken;
}
return Task.CompletedTask;
}
};
});
```
This will configure JWT authentication to check for the JWT token in the `WeatherForecastController.ACCESS_TOKEN_NAME` cookie. If the cookie is found, it will be used as the JWT token, otherwise no token will be used, and the request will be rejected as per normal.
### Add Cookie Authentication to the Controller
Open the `WeatherForecastController.cs` file and add the following code to the top of the class:
```csharp
public const string ACCESS_TOKEN_NAME = "X-Access-Token";
```
This will create a constant that we can use to reference the name of the cookie that will store our JWT token.
In the same file, replace the `Ok(token)` line with the following:
```csharp
Response.Cookies.Append(ACCESS_TOKEN_NAME, token, new CookieOptions
{
HttpOnly = true,
Secure = true,
SameSite = SameSiteMode.Strict,
Expires = DateTime.UtcNow.AddMinutes(expireMinutes)
});
return Ok();
```
Here we are adding a HTTP only cookie to the response. This cookie will be used to store our JWT token. We are also setting the cookie to expire after 20 minutes (as per our configuration). This way when the JWT token expires, the cookie will also expire and the user will need to login again.
### Logout Method
We will now add a logout method to the controller. This method will be used to remove the JWT token cookie from the response. This will effectively log the user out of the application.
Add the following method to the `WeatherForecastController` class:
```csharp
[AllowAnonymous]
[HttpGet("logout")]
public IActionResult Logout()
{
Response.Cookies.Delete(ACCESS_TOKEN_NAME);
return Ok();
}
```
### Test the Authentication
We will now test the authentication using Swagger UI.
Start the application and then make the following requests:
* GET /weatherforecast/auth - This should return a 401 Unauthorized response.
* GET /weatherforecast - This should return a 200 OK response.
* POST /weatherforecast/login - Try this with both valid and invalid credentials, you should get a 200 OK response with when using valid credentials.
* GET /weatherforecast/auth - This should return a 200 OK response.
* GET /weatherforecast/logout - This should return a 200 OK response.
* GET /weatherforecast/auth - This should return a 401 Unauthorized response.
## Conclusion
We have now learnt how to update our JWT authentication to store the JWT token in a cookie. This is a more secure way of using JWTs with single page applications and also simplifies the process of authenticating requests as we no longer need to manually add the JWT token to the request header.

View File

@ -0,0 +1,187 @@
# JWT Authentication
## Overview
JWT authentication is a common authentication mechanism for web applications. It generally works well when using with APIs and SPAs. This article will cover how to implement JWT authentication in a .NET Core web application.
## Prerequisites
* .NET Core 8
* Visual Studio 2022
Note: This article assumes you are using a Windows development environment. If you are using a Mac or Linux, you will need to use the appropriate tools for your environment.
## Implementation
### Create a new .NET Core Web API Application
Open Visual Studio and create a new .NET Core Web API application. You can use the default template.
### Create an Authenticated Route
We will get started by creating a new authenticated route that will return the default weather forecast data. This will be the only route that requires authentication. We will be able to use this route to ensure that our authentication is working properly.
Simply add the following code to the `WeatherForecastController` class. You will need to add the `using Microsoft.AspNetCore.Authorization;` directive.
```csharp
[Authorize]
[HttpGet("auth")]
public IEnumerable<WeatherForecast> GetAuthenicated()
{
return Enumerable.Range(1, 5).Select(index => new WeatherForecast
{
Date = DateOnly.FromDateTime(DateTime.Now.AddDays(index)),
TemperatureC = Random.Shared.Next(-20, 55),
Summary = Summaries[Random.Shared.Next(Summaries.Length)]
})
.ToArray();
}
```
### Add JWT Authentication
We will now add JWT authentication to our application. We will be using the `Microsoft.AspNetCore.Authentication.JwtBearer` package to handle the authentication. This package will need to be installed via NuGet.
Open the Program.cs file and add the following code before the `builder.Build()` call.
```csharp
builder.Services.AddAuthentication(JwtBearerDefaults.AuthenticationScheme)
.AddJwtBearer(o =>
{
o.TokenValidationParameters = new TokenValidationParameters
{
ValidIssuer = builder.Configuration["Jwt:Issuer"],
ValidAudience = builder.Configuration["Jwt:Audience"],
IssuerSigningKey = new SymmetricSecurityKey(Encoding.UTF8.GetBytes(builder.Configuration["Jwt:Key"]!)),
ValidateIssuer = true,
ValidateAudience = true,
ValidateLifetime = true,
ValidateIssuerSigningKey = true
};
});
```
In the above we are adding a default JWT authentication scheme and configuring it to use the values pulled from our configuration sources. In this case we will add them to the appsettings.json, however in a production application you would want to use a more secure method of storing these values.
::: tip
You can use the [Secret Manager](https://docs.microsoft.com/en-us/aspnet/core/security/app-secrets?view=aspnetcore-6.0&tabs=windows) to store your secrets locally.
:::
```json
{
"Jwt": {
"Key": "ThisIsMySuperSecretKeyForTheDevEnvironment",
"Issuer": "https://localhost:5001",
"Audience": "https://localhost:5001",
"ExpireMinutes": 20
}
}
```
### Create a Login Method
We will now create a login method that will return a JWT token. This token will be used to authenticate the user for future requests.
Add the following code to the `WeatherForecastController` class.
First we will need to inject an IConfiguration instance into the controller. This will allow us to access the configuration values we added earlier.
```csharp
private readonly IConfiguration _configuration;
public WeatherForecastController(ILogger<WeatherForecastController> logger, IConfiguration configuration)
{
_logger = logger;
_configuration = configuration;
}
```
We can then fill out the login method as below:
::: warning
This is a very basic implementation of a login method. In a production application you would want to use a more secure method of storing and retrieving user credentials.
:::
```csharp
[AllowAnonymous]
[HttpPost("login")]
public IActionResult Login([FromBody] string username, [FromBody] string password)
{
if (username == "username" && password == "password")
{
var issuer = _configuration["Jwt:Issuer"];
var audience = _configuration["Jwt:Audience"];
var expireMinutes = Convert.ToInt32(_configuration["Jwt:ExpireMinutes"]);
var key = new SymmetricSecurityKey(Encoding.UTF8.GetBytes(_configuration["Jwt:Key"]!));
var tokenDescriptor = new SecurityTokenDescriptor
{
Subject = new ClaimsIdentity(new Claim[]
{
new(ClaimTypes.Name, username)
}),
Expires = DateTime.UtcNow.AddMinutes(expireMinutes),
Issuer = issuer,
Audience = audience,
SigningCredentials = new SigningCredentials(key, SecurityAlgorithms.HmacSha256Signature)
};
var tokenHandler = new JwtSecurityTokenHandler();
var createdToken = tokenHandler.CreateToken(tokenDescriptor);
var token = tokenHandler.WriteToken(createdToken);
return Ok(token);
}
return Unauthorized();
}
```
At this stage we can do some testing to ensure our methods are working as expected. Start the API project and then use the default Swagger UI to test the following:
* GET /weatherforecast/auth - This should return a 401 Unauthorized response.
* GET /weatherforecast - This should return a 200 OK response.
* POST /weatherforecast/login - Try this with both valid and invalid credentials, you should get a 200 OK response with a token when using valid credentials.
### Setting up Swagger UI to allow authentication
Finally, for us to test if our authentication is working we will need to configure Swagger UI to allow us to pass the token in the request header. Simply replace the `builder.Services.AddSwaggerGen()` call in the Program.cs file with the following:
```csharp
builder.Services.AddSwaggerGen(options =>
{
options.AddSecurityDefinition("Bearer", new OpenApiSecurityScheme
{
Description = "JWT Authorization header using the Bearer scheme.",
Type = SecuritySchemeType.Http,
Scheme = "bearer"
});
options.AddSecurityRequirement(new OpenApiSecurityRequirement
{
{
new OpenApiSecurityScheme
{
Reference = new OpenApiReference
{
Id = "Bearer",
Type = ReferenceType.SecurityScheme
}
},
Array.Empty<string>()
}
});
});
```
To test this out, start the API project and then use the login method to get a token. You can then click the Authorize button in the Swagger UI and enter the token.
You should now be able to access the authenticated route. If you try to access the route without the token you should get a 401 Unauthorized response.
## Conclusion
In this article we have covered how to implement JWT authentication in a .NET Core web application. We have also covered how to test the authentication using Swagger UI.

View File

@ -0,0 +1,29 @@
# Enable OWIN Logging in .NET Framework
The following code snippet shows how to enable Microsoft.OWIN logging in ASP.NET. OWIN logging is mostly useful for debugging issues relating to user sign in / flow, especially when using external identity providers such as Google, Facebook, etc.
To enable OWIN logging, add the following to your `web.config` file inside the `<configuration>` element:
```xml
<system.diagnostics>
<switches>
<add name="Microsoft.Owin" value="Verbose" />
</switches>
<trace autoflush="true" />
<sources>
<source name="Microsoft.Owin">
<listeners>
<add name="console" />
</listeners>
</source>
<source name="Microsoft.Owin">
<listeners>
<add name="file"
type="System.Diagnostics.TextWriterTraceListener"
initializeData="traces-Owin.log"
/>
</listeners>
</source>
</sources>
</system.diagnostics>
```

View File

@ -0,0 +1,489 @@
# Testing Services
Testing services is an important part of .NET development as it helps to ensure that the services are working as expected and also helps to catch bugs early in the development process. It's most likely for a service test to be a unit test, and it's important to test the service in isolation. This means that the service should be tested without any dependencies on external services or databases.
For this reason it's important to use dependency injection to inject mock services into the service being tested. This allows the service to be tested in isolation and ensures that the test is repeatable and reliable. This will also require that interfaces are used for the services so that the mock services can be injected into the service being tested.
# Example Application
In this example we will create a simple Web API to maintain a user list. To get started create a new Web API project titled `UserApp` and enable controller support.
## User Model and DB Context
Create a new `Data` folder and insert the following files:
::: code-group
```csharp [User.cs]
namespace UserApp.Data
{
public class User
{
public int Id { get; set; }
public string Name { get; set; } = string.Empty;
public string Email { get; set; } = string.Empty;
public string FirstName { get; set; } = string.Empty;
public string LastName { get; set; } = string.Empty;
}
}
```
:::
::: code-group
```csharp [UserAppContext.cs]
using Microsoft.EntityFrameworkCore;
namespace UserApp.Data
{
public class UserAppContext : DbContext
{
public UserAppContext(DbContextOptions<UserAppContext> options) : base(options) { }
public DbSet<User> Users { get; set; }
}
}
```
:::
::: code-group
```csharp [UserViewModel.cs]
namespace UserApp.Models
{
public class UserViewModel(string email, string firstName, string lastName)
{
public string Email { get; set; } = email;
public string FirstName { get; set; } = firstName;
public string LastName { get; set; } = lastName;
}
}
```
:::
As part of this ensure the following NuGet packages are installed:
* Microsoft.EntityFrameworkCore
## Service Interface and Implementation
First create an entity-not-found exception:
::: code-group
```csharp [EntityNotFoundException.cs]
namespace UserApp.Infrastructure.Exceptions
{
public class EntityNotFoundException(string message) : Exception(message) { }
}
```
:::
Then we can create the service interface and implementation:
::: code-group
```csharp [IUserService.cs]
using UserApp.Data;
namespace UserApp.Services
{
public interface IUserService
{
public Task<IEnumerable<User>> GetUsersAsync();
public Task<User> GetUserAsync(int id);
public Task<User> AddUserAsync(string email, string firstName, string lastName);
public Task<User> UpdateUserAsync(int id, string email, string firstName, string lastName);
public Task<User> DeleteUserAsync(int id);
}
}
```
```csharp [UserService.cs]
using Microsoft.EntityFrameworkCore;
using UserApp.Data;
using UserApp.Infrastructure.Exceptions;
namespace UserApp.Services
{
public class UserService(UserAppContext context) : IUserService
{
private readonly UserAppContext _context = context;
public async Task<User> AddUserAsync(string email, string firstName, string lastName)
{
User user = new()
{
Email = email,
FirstName = firstName,
LastName = lastName
};
await _context.Users.AddAsync(user);
await _context.SaveChangesAsync();
return user;
}
public async Task<User> DeleteUserAsync(int id)
{
User? user = await _context.Users.FindAsync(id) ?? throw new EntityNotFoundException("User not found");
_context.Users.Remove(user);
await _context.SaveChangesAsync();
return user;
}
public async Task<User> GetUserAsync(int id)
{
User user = await _context.Users.FindAsync(id) ?? throw new EntityNotFoundException("User not found");
return user;
}
public async Task<IEnumerable<User>> GetUsersAsync()
{
return await _context.Users.ToListAsync();
}
public async Task<User> UpdateUserAsync(int id, string email, string firstName, string lastName)
{
User user = await _context.Users.FindAsync(id) ?? throw new EntityNotFoundException("User not found");
user.Email = email;
user.FirstName = firstName;
user.LastName = lastName;
await _context.SaveChangesAsync();
return user;
}
}
}
```
:::
## Setup Dependency Injection
We now need to add our DI config to the `Program.cs` file. Add the following to the service section, you will also need to install the following NuGet packages:
* Microsoft.EntityFrameworkCore.InMemory
::: code-group
```csharp [Program.cs]
builder.Services.AddDbContext<UserAppContext>(options =>
{
options.UseInMemoryDatabase("UserApp");
});
builder.Services.AddScoped<IUserService, UserService>();
```
:::
## Controller
Finally we can create a controller to interact with the service:
::: code-group
```csharp [UserController.cs]
using Microsoft.AspNetCore.Mvc;
using UserApp.Data;
using UserApp.Infrastructure.Exceptions;
using UserApp.Models;
using UserApp.Services;
namespace UserApp.Controllers
{
[Route("api/[controller]")]
public class UserController(IUserService userService, ILogger<UserController> logger) : ControllerBase
{
private readonly ILogger<UserController> _logger = logger;
private readonly IUserService _userService = userService;
[HttpGet]
public async Task<IActionResult> GetUsersAsync()
{
try
{
return Ok(await _userService.GetUsersAsync());
}
catch (Exception exception)
{
_logger.LogError(exception, "An error occurred while getting the users");
return StatusCode(500);
}
}
[HttpGet("{id}")]
public async Task<IActionResult> GetUserAsync(int id)
{
try
{
return Ok(await _userService.GetUserAsync(id));
}
catch (EntityNotFoundException exception)
{
_logger.LogError(exception, "User not found");
return NotFound();
}
catch (Exception exception)
{
_logger.LogError(exception, "An error occurred while getting the user");
return StatusCode(500);
}
}
[HttpPost]
public async Task<IActionResult> AddUserAsync([FromBody] UserViewModel user)
{
try
{
User createdUser = await _userService.AddUserAsync(user.Email, user.FirstName, user.LastName);
return CreatedAtAction("GetUser", new { id = createdUser.Id }, createdUser);
}
catch (Exception exception)
{
_logger.LogError(exception, "An error occurred while adding the user");
return StatusCode(500);
}
}
[HttpPut("{id}")]
public async Task<IActionResult> UpdateUserAsync(int id, [FromBody] UserViewModel user)
{
try
{
return Ok(await _userService.UpdateUserAsync(id, user.Email, user.FirstName, user.LastName));
}
catch (EntityNotFoundException exception)
{
_logger.LogError(exception, "User not found");
return NotFound();
}
catch (Exception exception)
{
_logger.LogError(exception, "An error occurred while updating the user");
return StatusCode(500);
}
}
[HttpDelete("{id}")]
public async Task<IActionResult> DeleteUserAsync(int id)
{
try
{
return Ok(await _userService.DeleteUserAsync(id));
}
catch (EntityNotFoundException exception)
{
_logger.LogError(exception, "User not found");
return NotFound();
}
catch (Exception exception)
{
_logger.LogError(exception, "An error occurred while deleting the user");
return StatusCode(500);
}
}
}
}
```
:::
# Service Testing
Now that we have our service and controller setup we can start testing the service. We will be using MSTest and Moq to test the service.
Create a new MSTest project titled `UserApp.Service.Test` and then add a new file titled `UserServiceTests.cs`. We will first get started with two tests, to ensure our `GetUsersAsync` method is working as expected.
::: code-group
```csharp [UserServiceTests.cs]
using Microsoft.EntityFrameworkCore;
using UserApp.Data;
using UserApp.Services;
namespace UserApp.Service.Test
{
[TestClass]
public class UserServiceTests
{
private UserAppContext _context = null!;
[TestInitialize]
public void Initialize()
{
DbContextOptions<UserAppContext> options = new DbContextOptionsBuilder<UserAppContext>()
.UseInMemoryDatabase(Guid.NewGuid().ToString())
.Options;
_context = new UserAppContext(options);
}
[TestCleanup]
public void Cleanup()
{
_context.Dispose();
_context = null!;
}
[TestMethod]
public async Task GetUsersAsync_NoUsers_ReturnsNoUsers()
{
// Arrange
UserService service = new(_context);
// Act
IEnumerable<User> users = await service.GetUsersAsync();
// Assert
Assert.AreEqual(0, users.Count());
}
[TestMethod]
public async Task GetUsersAsync_OneUser_ReturnsOneUser()
{
// Arrange
_context.Users.Add(new User() { Email = "john@test.com", FirstName = "John", LastName = "Smith" });
_context.SaveChanges();
UserService service = new(_context);
// Act
IEnumerable<User> users = await service.GetUsersAsync();
// Assert
Assert.AreEqual(1, users.Count());
Assert.AreEqual("john@test.com", users.First().Email);
}
}
}
```
Run these two tests and confirm that they pass. This will confirm that the `GetUsersAsync` method is working as expected. You can now continue to write tests for the other methods in the `UserService` class.
:::
```csharp [UserServiceTests]
// ...
[TestMethod]
public async Task GetUserAsync_ForExistingUser_ReturnsUser()
{
// Arrange
User user1 = new() { Email = "john@test.com", FirstName = "John", LastName = "Smith" };
User user2 = new() { Email = "jane@test.com", FirstName = "Jane", LastName = "Doe" };
await _context.Users.AddRangeAsync(user1, user2);
await _context.SaveChangesAsync();
UserService service = new(_context);
// Act
User user = await service.GetUserAsync(1);
// Assert
Assert.AreEqual("john@test.com", user.Email);
}
[TestMethod]
public async Task GetUserAsync_ForNonExistingUserAndNoUsers_ThrowsEntityNotFoundException()
{
// Arrange
UserService service = new(_context);
// Act and Assert
await Assert.ThrowsExceptionAsync<EntityNotFoundException>(() => service.GetUserAsync(1));
}
[TestMethod]
public async Task GetUserAsync_ForNonExistingUserAndUsers_ThrowsEntityNotFoundException()
{
// Arrange
User user1 = new() { Email = "john@test.com", FirstName = "John", LastName = "Smith" };
User user2 = new() { Email = "jane@test.com", FirstName = "Jane", LastName = "Doe" };
await _context.Users.AddRangeAsync(user1, user2);
await _context.SaveChangesAsync();
UserService service = new(_context);
// Act and Assert
await Assert.ThrowsExceptionAsync<EntityNotFoundException>(() => service.GetUserAsync(3));
}
[TestMethod]
public async Task AddUserAsync_AddsUser()
{
// Arrange
UserService service = new(_context);
// Act
int oldCount = _context.Users.Count();
User user = await service.AddUserAsync("john@test.com", "John", "Smith");
int newCount = _context.Users.Count();
// Assert
Assert.AreEqual(0, oldCount);
Assert.AreEqual(1, newCount);
Assert.AreEqual("john@test.com", _context.Users.First().Email);
}
[TestMethod]
public async Task UpdateUserAsync_CanUpdateExistingUser_UserUpdated()
{
// Arrange
User user1 = new() { Email = "john@test.com", FirstName = "John", LastName = "Smith" };
User user2 = new() { Email = "jane@test.com", FirstName = "Jane", LastName = "Doe" };
await _context.Users.AddRangeAsync(user1, user2);
await _context.SaveChangesAsync();
UserService service = new(_context);
// Act
await service.UpdateUserAsync(1, "johnnew@test.com", "JohnNew", "SmithNew");
// Assert
User updatedUser = (await _context.Users.FindAsync(1))!;
Assert.AreEqual("johnnew@test.com", updatedUser.Email);
Assert.AreEqual("JohnNew", updatedUser.FirstName);
Assert.AreEqual("SmithNew", updatedUser.LastName);
}
[TestMethod]
public async Task UpdateUserAsync_ForNonExistingUser_ThrowsEntityNotFoundException()
{
// Arrange
User user1 = new() { Email = "john@test.com", FirstName = "John", LastName = "Smith" };
User user2 = new() { Email = "jane@test.com", FirstName = "Jane", LastName = "Doe" };
await _context.Users.AddRangeAsync(user1, user2);
await _context.SaveChangesAsync();
UserService service = new(_context);
// Act and Assert
await Assert.ThrowsExceptionAsync<EntityNotFoundException>(() => service.UpdateUserAsync(3, "", "", ""));
}
[TestMethod]
public async Task DeleteUserAsync_ForExistingUser_RemovesUser()
{
// Arrange
User user1 = new() { Email = "john@test.com", FirstName = "John", LastName = "Smith" };
await _context.Users.AddAsync(user1);
await _context.SaveChangesAsync();
UserService service = new(_context);
// Act
int oldCount = _context.Users.Count();
User user = await service.DeleteUserAsync(1);
int newCount = _context.Users.Count();
// Assert
Assert.AreEqual(1, oldCount);
Assert.AreEqual(0, newCount);
Assert.AreEqual("john@test.com", user.Email);
}
[TestMethod]
public async Task DeleteUserAsync_ForNonExistingUser_ThrowsEntityNotFoundException()
{
// Arrange
UserService service = new(_context);
// Act and Assert
await Assert.ThrowsExceptionAsync<EntityNotFoundException>(() => service.DeleteUserAsync(1));
}
// ...
```
# Conclusion
In this example we have created a simple Web API, and tested the service using MSTest. This is a good starting point for testing services in .NET and can be expanded upon to test more complex services.

View File

@ -0,0 +1,21 @@
# Enable System.NET Logging in .NET Framework
The following code snippet shows how to enable System.NET logging in ASP.NET. System.NET logging is mostly useful for debugging issues relating to HTTP requests and responses. Often this is useful when proxies are involved, or when you need to see the raw HTTP request and response.
To enable System.NET logging, add the following to your `web.config` file inside the `<configuration>` element:
```xml
<system.diagnostics>
<trace autoflush="true" />
<sharedListeners>
<add name="file" initializeData="D:\\network.log" type="System.Diagnostics.TextWriterTraceListener" />
</sharedListeners>
<sources>
<source name="System.Net" switchValue="Verbose">
<listeners>
<add name="file" />
</listeners>
</source>
</sources>
</system.diagnostics>
```

View File

@ -0,0 +1,177 @@
# Unit of Work Pattern
The unit of work pattern is a way to manage the state of multiple objects in a single transaction. This pattern is useful when you need to update multiple objects in a single transaction, and you need to ensure that all of the objects are updated successfully or none of them are updated.
A common way of implementing the unit of work pattern is to use a repository class that manages the state of multiple objects. The repository class is responsible for managing the state of the objects and for committing the changes to the database. The repository class is also responsible for ensuring that all of the objects are updated successfully or none of them are updated.
A generic repository class can be used in most cases, and where not possible it can be extended into a more specific repository class. The following example shows a generic repository class that can be used to manage the state of multiple objects.
```csharp title="GenericRepository.cs"
using Microsoft.EntityFrameworkCore;
using System.Linq.Expressions;
public class GenericRepository < TEntity > where TEntity: class
{
internal CMSContext _context;
internal DbSet < TEntity > _dbSet;
public GenericRepository(CMSContext context)
{
_context = context;
_dbSet = context.Set < TEntity > ();
}
public virtual async Task < IEnumerable < TEntity >> GetAsync(
Expression < Func < TEntity, bool >> ? filter = null,
Func < IQueryable < TEntity > , IOrderedQueryable < TEntity >> ? orderBy = null,
string includeProperties = "")
{
IQueryable < TEntity > query = _dbSet;
if (filter != null)
{
query = query.Where(filter);
}
foreach(var includeProperty in includeProperties.Split(new char[]
{
','
}, StringSplitOptions.RemoveEmptyEntries)) {
query = query.Include(includeProperty);
}
if (orderBy != null)
{
return await orderBy(query).ToListAsync();
} else
{
return await query.ToListAsync();
}
}
public virtual async Task < TEntity ? > GetByIdAsync(object id)
{
return await _dbSet.FindAsync(id);
}
public virtual async Task InsertAsync(TEntity entity)
{
await _dbSet.AddAsync(entity);
}
public virtual async Task < bool > DeleteAsync(object id)
{
TEntity ? entityToDelete = await _dbSet.FindAsync(id);
if (entityToDelete != null)
{
Delete(entityToDelete);
return true;
}
return false;
}
public virtual void DeleteRange(ICollection < TEntity > entitiesToDelete)
{
foreach(TEntity entity in entitiesToDelete)
{
Delete(entity);
}
}
public virtual void Delete(TEntity entityToDelete) {
if (_context.Entry(entityToDelete).State == EntityState.Detached)
{
_dbSet.Attach(entityToDelete);
}
_dbSet.Remove(entityToDelete);
}
public virtual void Update(TEntity entityToUpdate)
{
_dbSet.Attach(entityToUpdate);
_context.Entry(entityToUpdate).State = EntityState.Modified;
}
public virtual async Task < bool > AnyAsync(
Expression < Func < TEntity, bool >> ? filter = null
)
{
IQueryable < TEntity > query = _dbSet;
if (filter != null)
{
query = query.Where(filter);
}
return await query.AnyAsync();
}
}
```
The following example shows a Unit of Work class that uses the generic repository class to manage the state of multiple objects.
```csharp title="UnitOfWork.cs"
public class UnitOfWork: IDisposable, IUnitOfWork
{
private bool disposedValue;
private readonly CMSContext _context;
private GenericRepository < User > ? _userRepository;
public UnitOfWork(CMSContext context)
{
_context = context;
}
public GenericRepository < User > UserRepository
{
get
{
_userRepository ??= new GenericRepository < User > (_context);
return _userRepository;
}
}
public async Task SaveChangesAsync()
{
await _context.SaveChangesAsync();
}
protected virtual void Dispose(bool disposing)
{
if (!disposedValue)
{
if (disposing)
{
_context.Dispose();
}
disposedValue = true;
}
}
public void Dispose()
{
// Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
Dispose(disposing: true);
GC.SuppressFinalize(this);
}
}
```
::: tip
Add extra repositories for each model you have in your source code. For example, if you have an `Item` model, add an `ItemRepository` property to the `UnitOfWork` class.
:::
The following example shows the interface for the Unit of Work class, this should be used when setting up dependency injection.
```csharp title="IUnitOfWork.cs"
public interface IUnitOfWork : IDisposable
{
GenericRepository<User> UserRepository { get; }
Task SaveChangesAsync();
}
```

3
docs/ef-core/index.md Normal file
View File

@ -0,0 +1,3 @@
# EF Core Snippets and Musings
#### [Stored Procedure in Migration](./stp-migration.md)

View File

@ -0,0 +1,86 @@
---
sidebar_position: 1
---
# Adding Stored Procedures to a Migration
Sometimes even when working with an ORM like Entity Framework Core, you need to use a stored procedure. This is especially true when you are working with legacy databases or you need to have performant and concise SQL.
## Add Migration
The first step is to add a migration to your DB Context:
::: code-group
```bash [dotnet CLI]
dotnet ef migrations add AddUserStoredProcedure
```
```bash [Package Manager Console]
Add-Migration AddUserStoredProcedure
```
:::
This will add a migration file similar to the following:
```csharp
public partial class AddUserStoredProcedure : Migration
{
protected override void Up(MigrationBuilder migrationBuilder)
{
}
protected override void Down(MigrationBuilder migrationBuilder)
{
}
}
```
## Add Stored Procedure
The stored procedure can then be added using the `Sql` method on the `MigrationBuilder` object. The following example shows how to add a stored procedure that returns a user by their ID:
```csharp
public partial class AddUserStoredProcedure : Migration
{
protected override void Up(MigrationBuilder migrationBuilder)
{
migrationBuilder.Sql(@"
CREATE PROCEDURE [dbo].[GetUserById]
@Id int
AS
BEGIN
SET NOCOUNT ON;
SELECT * FROM [dbo].[Users] WHERE [Id] = @Id
END
");
}
protected override void Down(MigrationBuilder migrationBuilder)
{
migrationBuilder.Sql(@"
DROP PROCEDURE [dbo].[GetUserById]
");
}
}
```
## Update Database
Once the migration has been added, you can update the database using the following command:
::: code-group
```bash [dotnet CLI]
dotnet ef database update
```
```bash [Package Manager Console]
Update-Database
```
:::

3
docs/git/index.md Normal file
View File

@ -0,0 +1,3 @@
# Git Snippets and Musings
#### [SSH Config](./ssh-config.md)

14
docs/git/ssh-config.md Normal file
View File

@ -0,0 +1,14 @@
# Git SSH Config
When using SSH to connect to a remote repository, you can configure your SSH client to use a specific key for a specific host. This is useful when you have multiple keys and want to use a specific key for a specific host.
Sample configuration is below
```bash
# ~/.ssh/config
Host github.com
HostName github.com
User git
IdentityFile ~/.ssh/my-github-key
PreferredAuthentications publickey
```

View File

@ -0,0 +1,61 @@
# Build and Publish a Docker Image
This action builds and publishes a Docker image to a container registry. For most of my projects I use DockerHub, but you could use this action and tweak it to use any container registry.
For publishing docker images I usually restrict this to the `main` branch, however as this step is often just part of the CI pipeline, you will need some conditional logic to ensure the image is only published on the `main` branch (and not when the build is triggered by a `feature/*` or `fix/*` branch).
## Minimal Pipeline Example
```yaml
name: Build, Test & Publish
on:
push:
branches:
- main
- feature/*
- fix/*
pull_request:
branches:
- main
jobs:
publish:
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
name: Build and Publish Container Image
runs-on: ubuntu-latest
needs:
- build
steps:
- uses: actions/checkout@v3
- name: Setup Docker Metadata
id: meta
uses: docker/metadata-action@v4
with:
images: your-username/your-project
tags: |
type=raw,value=latest
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Build and Push Docker Image to DockerHub
uses: docker/build-push-action@v4
with:
file: './path/to/project/Dockerfile'
context: ./path/to/project
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
```
In the above pipeline a few changes will need to be made to suit the project you are working on:
* In the setup docker metadata step, update the `images` to your DockerHub username and the name of your project.
* In the build and push docker image step, update the `file` to the path of your Dockerfile, and the `context` to the path of your project.
* You will also need to add your docker username and docker token to your GitHub repository secrets. The `DOCKER_USERNAME` is your DockerHub username, and the `DOCKER_TOKEN` is a token generated from DockerHub.

View File

@ -0,0 +1,44 @@
# Build and Test .NET Core Applications
This action builds and tests .NET Core applications using the `dotnet` CLI.
Building and testing .NET Core applications is a common task in CI/CD pipelines. I often like to ensure this step runs on all commits to the `main` branch, and any `feature/*` or `fix/*` branches.
## Minimal Pipeline Example
```yaml
name: Build and Test .NET Core
on:
push:
branches:
- main
- feature/*
- fix/*
jobs:
build:
name: Build and Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup .NET
uses: actions/setup-dotnet@v3
with:
dotnet-version: 8.0.x
- name: Restore dependencies
run: dotnet restore ./path/to/project
- name: Build
run: dotnet build ./path/to/project --no-restore
- name: Test
run: dotnet test ./path/to/project --no-restore --no-build --verbosity normal
```
In the above pipeline only two main changes are required:
* Update the `path/to/project` to the path of your .NET Core project.
* Update the `dotnet-version` to the version of .NET Core you are using.

View File

@ -0,0 +1,5 @@
# Github Actions Snippets and Musings
#### [Build and Test .NET](./build-test-dotnet.md)
#### [Build and Publish Docker Image](./build-publish-container.md)
#### [Run Entity Framework Core Migrations](./run-ef-core-migrations.md)

View File

@ -0,0 +1,39 @@
# Run Entity Framework Core Migrations
Running Entity Framework Core migrations in a GitHub Actions pipeline is a common task. This action demonstrates how to run EF Core migrations in a GitHub Actions pipeline.
## Minimal Pipeline Example
In this example the pipeline will run the EF Core migrations on the `main` branch when a push event occurs. The pipeline runs this as part of a `publish` step, in most cases this will include other steps as well, but these have been omitted for brevity.
```yaml
name: Build, Test & Publish
on:
push:
branches:
- main
- feature/*
- fix/*
pull_request:
branches:
- main
jobs:
deploy:
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
name: Deploy to Infrastructure
runs-on: ubuntu-latest
steps:
- name: Run Database Migrations
run: |
dotnet tool install --global dotnet-ef
dotnet tool restore
dotnet ef database update -p ./path/to/project/Project.csproj -s ./path/to/project/Project.csproj --connection "$DATABASE_CONNECTION_STRING"
env:
DATABASE_CONNECTION_STRING: ${{ secrets.DATABASE_CONNECTION_STRING }}
```
For this pipeline you will need to ensure that the database connection string is stored as a secret in the GitHub repository.
The above example also assumes that your project and startup project are the same and in the same directory, however you may need to adjust these slightly depending on your project setup.

65
docs/index.md Normal file
View File

@ -0,0 +1,65 @@
---
# https://vitepress.dev/reference/default-theme-home-page
layout: home
hero:
name: "Liam's code snippets"
text: Assorted snippets and musings
actions:
- theme: alt
text: .NET
link: /dotnet/
- theme: alt
text: Ansible
link: /ansible/
- theme: alt
text: CSS
link: /css/
- theme: alt
text: EF Core
link: /ef-core/
- theme: alt
text: Git
link: /git/
- theme: alt
text: Github Actions
link: /github-actions/
- theme: alt
text: Nginx
link: /nginx/
- theme: alt
text: Nuxt
link: /nuxt/
- theme: alt
text: PowerShell
link: /powershell/
- theme: alt
text: React
link: /react/
- theme: alt
text: React Native
link: /react-native/
- theme: alt
text: Terraform
link: /terraform/
# features:
# - title: Feature A
# details: Lorem ipsum dolor sit amet, consectetur adipiscing elit
# - title: Feature B
# details: Lorem ipsum dolor sit amet, consectetur adipiscing elit
# - title: Feature C
# details: Lorem ipsum dolor sit amet, consectetur adipiscing elit
---

View File

@ -0,0 +1,42 @@
# Adding a new site to Nginx in Ubuntu
This guide assumes you already have Nginx running on your server.
## Add an available site configuration
Create a new configuration file in the `/etc/nginx/sites-available` directory. The file will usually be titled after the domain or subdomain you are adding
```bash
sudo vim /etc/nginx/sites-available/example.com
```
For the file contents enter a valid nginx server block configuration. Here is an example:
```nginx
server {
listen 80;
server_name example.com www.example.com;
}
```
## Add the site configuration
Once the configuration file is setup we can enable it by creating a symbolic link to the `sites-enabled` directory.
```bash
sudo ln -s /etc/nginx/sites-available/example.com /etc/nginx/sites-enabled/
```
## Test and Update Nginx
Test the configuration file for syntax errors:
```bash
sudo nginx -t
```
If the test is successful, reload Nginx to apply the changes:
```bash
sudo systemctl reload nginx
```

View File

@ -0,0 +1,29 @@
# Easy Reverse Proxy Config for Nginx
## Introduction
This guide will show you how to set up a reverse proxy for your web server using Nginx. A reverse proxy is a really handy tool that allows us to redirect traffic incoming to our server to a different location, for example to another web server, or to a different port.
I find this most useful for redirecting requests to different dockerised services, but it can be used for many other things too.
## Config
The config for a reverse proxy is really simple. Here's an example of a basic reverse proxy config:
```nginx
server {
listen 80;
server_name my-site.com;
location / {
proxy_pass http://localhost:5000;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
}
}
```
This config will redirect all traffic coming to `my-site.com` to `http://localhost:5000`. This method will also easily allow you to add SSL to your services, as you can use the `certbot` tool to generate SSL certificates for your domain. Certbot will automatically configure this basic reverse proxy config to use SSL.

4
docs/nginx/index.md Normal file
View File

@ -0,0 +1,4 @@
# Nginx Snippets and Musings
#### [Easy Reverse Proxy Config](./easy-reverse-proxy-config.md)
#### [Adding a Site to Nginx](./adding-nginx-site.md)

95
docs/nuxt/custom-fetch.md Normal file
View File

@ -0,0 +1,95 @@
# Nuxt Custom Fetch
## Introduction
Often when working with Nuxt, you will need to fetch data from an API. Nuxt provides a handy wrapper for this called `useFetch`. Often you will want to have defaults for your fetch calls, such as a base URL, or headers. This is where a custom wrapper around `useFetch` can be useful.
## Setup
Start by creating a `composables` directory if you do not have one already. Inside this directory, create a file called `useMyFetch.ts`. This will be our custom wrapper around `useFetch`.
The code for the file is as below, however you may need to change the default settings to suit your needs.
```ts
import type { UseFetchOptions } from "nuxt/app";
import { defu } from 'defu'
export function useMyFetch<DataT, ErrorT>(url: string | (() => string), options: UseFetchOptions<DataT> = {}): ReturnType<typeof useFetch<DataT, ErrorT>>{
const config = useRuntimeConfig();
const headers = useRequestHeaders(['cookie']);
const nuxtApp = useNuxtApp();
const defaults: UseFetchOptions<DataT> = {
baseURL: config.public.apiBase,
credentials: 'include',
headers: headers,
onResponseError: async (error) => {
// If we get a 401, then we need to log the user out
if (error.response.status === 401) {
// Navigate to the logout page to handle the logout
try {
await nuxtApp.runWithContext(() => navigateTo('/account/logout'));
} catch (e) {
console.error(e);
}
}
},
};
// Merge the options with the defaults
const params = defu(options, defaults)
// @ts-ignore - Reponse should always be a DataT or ErrorT
return useFetch<DataT, ErrorT>(url, params);
}
```
## Explanation
The first thing we set up is our function definition. This is a generic function, in which we pass a `DataT` and `ErrorT` type. These are the types of the data we expect to receive from the API. For example, if we are fetching a list of users, then `DataT` would be `User[]` and `ErrorT` would be a type that represents the potential error object, in my case this is almost always a string. If you don't need to have types for the errors then you can just use a type for the data and not pass in the `ErrorT` type.
The return type of the function is the return type of `useFetch`. This is a tuple of the data and error types. We can use the `ReturnType` helper to get this type. This is required to ensure that your IDE can correctly infer the types of the data and error objects.
Next we outline the defaults for the request, in my cases often I want to have a base URL, and some headers. I also want to handle 401 errors, as this means the user is no longer authenticated and needs to be logged out. You can add any other defaults you want here.
Next we merge the options passed into the function with the defaults. This is done using the `defu` function. This function will merge the two objects, and if there are any conflicts, it will take the value from the second object. This is useful as it means we can override the defaults if we need to.
Finally we call `useFetch` with the merged options, and return the result. We currently have a ts-ignore here, as the TypeScript compiler complains about typing the response as we have done (i.e. null values are not handled correctly). This is not something we have to worry about in this case.
## Usage
To use the custom fetch, we simply import it into our component or composables, and use it as we would `useFetch`. For example:
```ts [./composables/useAccount.ts]
import type { User } from "~/types";
export const useAccount = () => {
return {
login(credential: string) {
return useMyFetch<User, string>('/api/Account/Login', {
method: 'POST',
body: { credential }
});
},
}
}
```
A good practice is to always have all your fetch calls in a separate file (i.e. as a composable), and then import them into your components. This makes it easier to test, and also makes it easier to change the implementation of the fetch calls if you need to.
In your components you would use them as follows:
```ts
<script setup lang="ts">
const { data, error, loading, fetch } = useAccount().login('my-credential');
</script>
<template>
...
</template>
```
## Conclusion
This is a simple way to create a custom wrapper around `useFetch` to provide defaults for your fetch calls. This is useful if you have a base URL, or headers that you want to use for all your fetch calls. It also allows you to handle errors in a consistent way, and to handle errors that are specific to your application.

3
docs/nuxt/index.md Normal file
View File

@ -0,0 +1,3 @@
# Nuxt Snippets and Musings
#### [Custom Fetch](./custom-fetch.md)

View File

@ -0,0 +1,43 @@
# PowerShell Script Template
This is a basic template for a PowerShell script, that can be used as a starting point for your own scripts.
## Prerequisites
* PowerShell 7.1 or higher
## Step 1: Create a new file
Create a new file with the extension `.ps1` and add the following content:
```powershell
<#
.SYNOPSIS
A short description of the script.
.DESCRIPTION
A longer description of the script.
This description can span multiple lines.
.PARAMETER SampleText
A sample parameter. This should describe what the parameter is for and any restrictions on it.
.EXAMPLE
-SampleText 'Hello World!'
.NOTES
Author: Liam Pietralla
Last Update: 2023-04-13
#>
param(
[parameter(Mandatory=$true)]
[string] $SampleText
)
filter timestamp {"[$(Get-Date -Format G)]: $_"}
Write-Output 'Script started.' | timestamp
# Your script goes here
Write-Output $SampleText | timestamp
Write-Output 'Script finished.' | timestamp
```

3
docs/powershell/index.md Normal file
View File

@ -0,0 +1,3 @@
# PowerShell Snippets and Musings
#### [Basic Template](./basic-template.md)

View File

@ -0,0 +1,48 @@
# Generic React Native Box
Often in React Native, you'll want to create a generic box component that can be styled and reused throughout your app. This box in question allows us to surround other components with a box that has a border, padding, and margin that we can apply. In this case they can be applied directly or via a style object.
## Component
```tsx:line-numbers [Box.tsx]
import { ReactNode } from "react";
import { StyleProp, StyleSheet, View, ViewStyle } from "react-native";
type BoxProps = {
children?: ReactNode;
fullWidth?: boolean;
style?: StyleProp<ViewStyle>
} & ViewStyle;
const Box = ({ children, style, ...restProps }: BoxProps) => {
const boxStyle = getBoxStyle(restProps);
return <View style={[boxStyle.box, style]}>{children}</View>;
};
const getBoxStyle = (restProps: Omit<BoxProps, "children">) =>
StyleSheet.create({
box: {
width: restProps.fullWidth ? "100%" : "auto",
...restProps,
},
});
export default Box;
```
## Usage
```tsx
<Box
fullWidth
style={{
backgroundColor: "red",
padding: 10,
margin: 10,
borderRadius: 5,
}}
/>
<Box backgroundColor="red" borderColor="black" borderWidth={1}>
<Text>Some text</Text>
</Box>
```

View File

@ -0,0 +1,70 @@
# Generic React Native Stacks
The generic stack components are useful tools when creating layouts, as we can re-use these components to apply consistent spacing and alignment to our components. In this case, we'll create a `VStack` and an `HStack` component that allows us to stack components vertically or horizontally depending on the component used.
## Components
```tsx:line-numbers
import { StyleSheet, View, ViewStyle } from "react-native";
type VStackProps = {
gap?: number;
children?: React.ReactNode;
} & ViewStyle;
const VStack = ({ gap, children, ...restProps }: VStackProps) => {
const style = vStackStyle(gap ?? 5, restProps);
return <View style={style.vStack}>{children}</View>;
};
const vStackStyle = (gap: number, restProps: ViewStyle) =>
StyleSheet.create({
vStack: {
flexDirection: "column",
gap: gap,
...restProps
},
});
export default VStack;
```
```tsx:line-numbers
import { StyleSheet, View, ViewStyle } from "react-native";
type HStackProps = {
gap?: number;
children: React.ReactNode;
} & ViewStyle;
const HStack = ({ gap, children, ...restProps }: HStackProps) => {
const style = hStackStyle(gap ?? 5, restProps);
return <View style={style.hStack}>{children}</View>;
};
const hStackStyle = (gap: number, restProps: ViewStyle) =>
StyleSheet.create({
hStack: {
flexDirection: "row",
gap: gap,
...restProps
},
});
export default HStack;
```
## Usage
```tsx
<VStack gap={10}>
<Text>First</Text>
<Text>Second</Text>
<Text>Third</Text>
</VStack>
<HStack gap={10}>
<Text>First</Text>
<Text>Second</Text>
<Text>Third</Text>
</HStack>
```

View File

@ -0,0 +1,98 @@
# Generic React Native Text
The generic text component is a useful tool to apply consistent styling to text components throughout your app. In this case, we'll create a `Text` component that allows us to apply consistent styling to text components.
## Component
::: code-group
```tsx:line-numbers [Text.tsx]
import { TextType } from './types';
import { ReactNode, forwardRef } from 'react';
import { Text as NativeText, TextStyle, StyleSheet, StyleProp } from 'react-native';
type TextProps = {
type?: TextType;
children: ReactNode;
style?: StyleProp<TextStyle>;
} & TextStyle;
const Text = forwardRef<NativeText, TextProps>(function Text(
{ type, children, style, ...restProps }: TextProps,
ref
) {
const textStyles = textStyle(type ?? 'standard', restProps);
return (
<NativeText ref={ref} style={[textStyles.text, style]}>
{children}
</NativeText>
);
});
const textStyle = (type: TextType, restProps: TextStyle) => {
let styles = {};
switch (type) {
case 'title':
styles = {
fontSize: 32,
fontWeight: 'bold',
color: 'black',
};
break;
case 'title2':
styles = {
fontSize: 25,
fontWeight: 'bold',
color: 'black',
};
break;
case 'subtitle':
styles = {
fontSize: 12,
fontWeight: 'normal',
color: 'darkgray',
};
break;
case 'link':
styles = {
fontSize: 14,
fontWeight: 'normal',
color: 'blue',
textDecorationLine: 'underline',
};
break;
case 'standard':
default:
styles = {
fontSize: 14,
fontWeight: 'normal',
color: 'black',
};
break;
}
return StyleSheet.create({
text: {
...styles,
...restProps,
},
});
};
export default Text;
```
```tsx:line-numbers [types.ts]
export type TextType = "standard" | "title" | "title2" | "subtitle" | "link";
```
:::
## Usage
```tsx
<Text type="title">Title</Text>
<Text type="subtitle">Subtitle</Text>
<Text type="link">Link</Text>
<Text>Standard</Text>
```

View File

@ -0,0 +1,5 @@
# React Native Snippets and Musings
#### [Generic Box](./generic-box.md)
#### [Generic Stacks](./generic-stacks.md)
#### [Generic Text](./generic-text.md)

View File

@ -0,0 +1,181 @@
# Combining Context and Custom Hooks
Combining context and custom hooks is a powerful way to manage state in your application. This pattern allows you to create a custom hook that can be used to access the context and state in a more readable and reusable way.
## React Setup
First we are going to assume that you have a react app created already. This could be done using a Vite SPA template, or by using a framework like Next.js or Gatsby.
## Create the Hook
We will get started by creating a new custom hook (usually I create a `hooks` directory and place them all there) to contain our logic. This hook will be used to access the context and state. Both JS and TS examples are provided below.
::: code-group
```jsx:line-numbers [useCounter.js]
import { createContext, useContext, useState } from 'react';
const CounterContext = createContext();
export const CounterProvider = ({ children }) => {
const [count, setCount] = useState(0);
const increaseCount = () => setCount(count + 1);
const decreaseCount = () => setCount(count - 1);
const context = {
count,
increaseCount,
decreaseCount
};
return (
<CounterContext.Provider value={context}>
{children}
</CounterContext.Provider>
);
}
export const useCounter = () => {
const context = useContext(CounterContext);
if (context === undefined) {
throw new Error('useCounter must be used within a CounterProvider');
}
return context;
};
```
```tsx:line-numbers [useCounter.tsx]
import { createContext, useContext, useState } from 'react';
type CounterContextType = {
count: number;
increaseCount: () => void;
decreaseCount: () => void;
};
const CounterContext = createContext<CounterContextType>({} as CounterContextType);
type CounterProviderProps = {
children: React.ReactNode;
};
export const CounterProvider = ({ children }: CounterProviderProps) => {
const [count, setCount] = useState(0);
const increaseCount = () => setCount(count + 1);
const decreaseCount = () => setCount(count - 1);
const context = {
count,
increaseCount,
decreaseCount
};
return (
<CounterContext.Provider value={context}>
{children}
</CounterContext.Provider>
);
}
export const useCounter = () => {
const context = useContext(CounterContext);
if (context === undefined) {
throw new Error('useCounter must be used within a CounterProvider');
}
return context;
};
```
:::
::: tip
If using TypeScript ensure that your hook file type is `.tsx` and that you have the correct types defined for your context and state.
:::
Lets go through this and explain it. First we create our Context using `const CounterContext = createContext();`. This will allow us to store our state and methods in a single place, to access anywhere in our app (assuming we have a `CounterProvider` wrapping our app).
Next we create our `CounterProvider` component. This will be used to wrap our app and provide the context to our custom hook. We use `useState` to create a `count` state and `setCount` method. We also create `increaseCount` and `decreaseCount` methods to update the `count` state. We will then create our context value, which is simply the output data we need (`count`), and the methods we can use to interact with and update it (`increaseCount` and `decreaseCount`).
Finally we create our `useCounter` custom hook. This hook will be used to access the context and state in a more readable and reusable way. We use `useContext` to access the context, and then check if the context is `undefined`, which will only be the case if the hook is being used outside of the provider.
## Using the Hook
Now that we have our custom hook, we can use it in our app to access the same context and state in different components. See below for some sample usage, in this example the buttons and counter have been separated into their own components.
::: code-group
```jsx:line-numbers [components/Buttons.js]
import { useCounter } from "../hooks/useCounter";
const Buttons = () => {
const { increaseCount, decreaseCount } = useCounter();
return (
<div>
<button onClick={increaseCount}>Increase Count +</button>
<button onClick={decreaseCount} >Decrease Count -</button>
</div>
);
};
export default Buttons;
```
```jsx:line-numbers [components/Count.js]
import { useCounter } from "../hooks/useCounter";
const Count = () => {
const { count } = useCounter();
return (
<div>
<h1>Count: {count}</h1>
</div>
);
}
export default Count;
```
```jsx:line-numbers [App.js]
import './App.css';
import { CounterProvider } from './hooks/useCounter';
import Buttons from './components/Buttons';
import Count from './components/Count';
function App() {
return (
<>
<CounterProvider>
<div className="App">
<header className="App-header">
<Buttons />
<Count />
</header>
</div>
</CounterProvider>
</>
);
}
export default App;
```
:::
When the above app is run and the buttons are clicked, the count will increase and decrease, and the `Count` component will update to reflect the new count.
## Conclusion
Combining context and custom hooks is a really powerful way to manage state in your application. Often using TypeScript can help to make this pattern even more powerful, as you can define the shape of the context and state, and ensure that the correct data is being passed around your app.
You can also do all your data fetching and API calls in the custom hook, and then pass the data to the context, which can then be accessed by any component in your app. This is a great way to manage your state and data in a more readable and reusable way.

4
docs/react/index.md Normal file
View File

@ -0,0 +1,4 @@
# React Snippets and Musings
#### [Context with Custom Hook](./context-with-custom-hook.md)
#### [Reading Config Values from a Docker Container in React](./reading-env-vars-docker.md)

View File

@ -0,0 +1,308 @@
# Reading Config Vars in Docker
Often you will need to read in config values in a react app, for example when connecting to a backend API, or when using config values to change various UI elements or control enabled features.
When using a full stack framework such as Next.JS or Gatsby, you can use the `process.env` object to read in environment variables as these applications are both server and client side rendered.
If we are using a client side only framework we will not have the luxury of using `process.env`. In this case we need to be able to load in our own configuration values into the app running in a Docker container. To this end we can create a `config.json` file, and serve this file in the Docker container mounted as a volume.
For this demo we'll be using a simple Vite react frontend, with an ExpressJS backend. We'll be using Docker to containerize our application.
## Creating a Backend
### Setup
First get started by creating a `backend` directory, and inside it a `package.json` with the following content:
::: code-group
```json [backend/package.json]
{
"name": "backend"
}
```
:::
### Installing Dependencies
```bash
cd backend
npm install typescript express cors
npm install -D @types/express @types/cors @types/node ts-node
```
### Creating the backend
In our case the backend will be a simple ExpressJS server that returns a list of users, so we can make this all one file:
::: code-group
```typescript [backend/src/index.ts]
import express from "express";
import cors from "cors";
const USERS = [
{ id: 1, name: 'John Doe', email: 'john.doe@gmail.com' },
{ id: 2, name: 'Jane Doe', email: 'jane.doe@gmail.com' },
{ id: 3, name: 'John Smith', email: 'john.smith@gmail.com' }
]
const app = express();
app.use(cors());
app.get('/', (_, res) => {
res.send('Hello World!');
});
app.get('/users', (_, res) => {
res.json(USERS);
});
app.listen(3000, () => {
console.log('Server is running on port 3000');
});
```
:::
### Setting Up Typescript
Create a `tsconfig.json` file in the `backend` directory with the following content:
::: code-group
```json [backend/tsconfig.json]
{
"compilerOptions": {
"target": "es2016",
"module": "commonjs",
"esModuleInterop": true,
"strict": true,
"skipLibCheck": true,
"rootDir": "./src",
"outDir": "./dist",
}
}
```
:::
### Add Scripts
Add the following scripts to the `package.json` file:
::: code-group
```json [backend/package.json]
{
"scripts": {
"dev": "ts-node src/index.ts",
"build": "tsc",
"start": "node dist/index.js"
},
}
```
:::
## Creating a Frontend
### Setup
Create a new Vite React TS app using the command `npm create vite@latest frontend -- --template react-ts`, ensuring you are in the parent directory of the `backend` directory.
### Ignore the Local Config File
Update the `.gitignore` file to ignore the `config.json` file we will be creating later:
::: code-group
``` [frontend/.gitignore]
# ...
public/config.json
```
:::
### Create Config File
Create a `config.js` file in the `public` directory with the following content:
::: code-group
```js [frontend/public/config.js]
const config = {
apiUrl: 'http://localhost:3000'
}
window.config = config;
```
:::
Next update the `index.html` file to include the `config.json` file:
::: code-group
```html [frontend/public/index.html]
<!-- ... -->
<script src="/config.js"></script>
</head>
<!-- ... -->
```
:::
### Create a Config Util
Create a `config.ts` file in the `src` directory with the following content:
::: code-group
```typescript [frontend/src/config.ts]
export interface Config {
apiUrl: string;
}
export const config = (window as any).config as Config;
```
:::
### Update App
Update the app to call the backend API:
::: code-group
```tsx [frontend/src/App.tsx]
import { useEffect, useState } from "react"
import { config } from "./config"
type User = {
id: number,
name: string
email: string
}
function App() {
const [users, setUsers] = useState([] as User[])
useEffect(() => {
const fetchUsers = async () => {
const response = await fetch(`${config.apiUrl}/users`)
const data = await response.json()
setUsers(data)
}
fetchUsers();
}, [])
return (
<>
<h1>Users</h1>
<br />
<ul>
{users.map((user) => (
<li key={user.id}>
{user.name} - ({user.email})
</li>
))}
</ul>
</>
)
}
export default App
```
:::
# Running the Application
At this stage you will be able to run the application locally by running the backend and frontend separately. The backend will expose the user list, and the frontend will display the list of users.
# Dockerise
Finally the last step is dockerising the application. Create the following Dockerfiles, docker-compose and config files in the root of the project:
::: code-group
``` [Dockerfile.backend]
FROM node:alpine
WORKDIR /app
COPY package.json .
COPY package-lock.json .
RUN npm install
COPY . .
RUN npm run build
CMD ["npm", "start"]
```
``` [Dockerfile.frontend]
FROM node:alpine as build
WORKDIR /app
COPY package.json .
COPY package-lock.json .
RUN npm install
COPY . .
RUN npm run build
FROM nginx:alpine
COPY --from=build /app/dist /usr/share/nginx/html
ENTRYPOINT ["nginx","-g","daemon off;"]
```
```yml [compose.yml]
services:
api:
build:
context: './backend'
dockerfile: '../Dockerfile.backend'
ports:
- "8081:3000"
web:
build:
context: './frontend'
dockerfile: '../Dockerfile.frontend'
ports:
- "8080:80"
volumes:
- './config-production.js:/usr/share/nginx/html/config.js'
```
```js [config-production.js]
const config = {
apiUrl: 'http://localhost:8081'
}
window.config = config;
```
:::
If you then run `docker-compose up` you should see the application running in a Docker container and when navigating to `http://localhost:8080`.
# Conclusion
In this guide we have seen how to read config values in a React app running in a Docker container. We have created a `config.js` file that we use to store our config values, and then use a volume to mount this file into the Docker container. This allows us to read the config values in our React app.
As a follow up this `config.js` file would be created per environment, or even created as part of the CI/CD pipeline, so that the correct values are used for each environment. Please note however as this is a client side app all values will be visible to the end user, so do not store any sensitive information in the `config.js` file.

View File

@ -0,0 +1,55 @@
# Generating an Ansible Inventory File from Terraform
## Overview
Often when working with automated deployments you will want to generate a dynamic inventory file for Ansible (usually after your infrastructure has been provisioned). This is a simple snippet to generate an inventory file from Terraform.
## Terraform Snippet
First we need to create an output variable to hold the inventory file.
::: code-group
```hcl [outputs.tf]
output "ansible_inventory_yml" {
description = "An Ansible inventory (in YAML format) containing IP addresses as required by the Ansible playbook to configure the servers."
value = local.ansible_inventory_yml
}
```
:::
Next we will create the local variable that will hold the inventory file.
::: info
In this example we are using a DigitalOcean droplet, but you can use any Terraform resource that has an IP address.
:::
::: code-group
```hcl [locals.tf]
locals {
ansible_inventory_yml = <<-EOF
all:
hosts:
app:
ansible_host: ${digitalocean_droplet.server.ipv4_address}
EOF
}
```
:::
::: tip
It's better to use a local and output variable rather than a `local_file` as this avoids spurious plan noise when running on CI runners.
:::
Once all configured you can run `terraform apply` and the inventory file will be output to the console. To create the file you can pipe the output to a file (either in the console or in your CI pipeline).
```bash
terraform output -raw ansible_inventory_yml > ansible-inventory.yml
```

3
docs/terraform/index.md Normal file
View File

@ -0,0 +1,3 @@
# Terraform Snippets and Musings
#### [Generating an Ansible Inventory File from Terraform](./ansible-inventory-generation.md)

View File

@ -0,0 +1,7 @@
---
# Playbook entry point: applies the `app` role to every host in the
# `app` inventory group (see the generated Ansible inventory), connecting
# as root and escalating privileges for all tasks.
- name: Configure App Server
  hosts: app
  remote_user: root
  # Canonical boolean per yamllint `truthy` — `yes` parses the same in
  # Ansible, but `true` is unambiguous across YAML 1.1/1.2 parsers.
  become: true # sudo
  roles:
    - app

View File

@ -0,0 +1,13 @@
# Reverse proxy for the app container: nginx listens on port 80 for
# code.liampietralla.com and forwards all traffic to the container
# published on localhost:5000 (see the Ansible docker_container task).
server {
    listen 80;
    server_name code.liampietralla.com;

    location / {
        # Hand every request to the app container.
        proxy_pass http://localhost:5000;
        # Pass upstream Location headers through unmodified.
        proxy_redirect off;
        # Preserve the original host and client details so the upstream
        # app sees the real request origin rather than "localhost".
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Host $server_name;
    }
}

View File

@ -0,0 +1,25 @@
# Role tasks: run the app container and put nginx in front of it as a
# reverse proxy (config file + sites-enabled symlink), then restart nginx.
- name: Run Code App Container
  docker_container:
    name: code
    image: liamp1/code:latest
    # `true` (not `yes`) — canonical YAML boolean; always pull so the
    # :latest tag picks up newly published images on redeploy.
    pull: true
    ports:
      - "5000:80"
    restart_policy: unless-stopped

- name: Copy nginx config file
  copy:
    src: nginx.conf
    dest: /etc/nginx/sites-available/code
    # Don't overwrite an existing config file on the host.
    force: false

- name: Create symlink to code
  file:
    src: /etc/nginx/sites-available/code
    dest: /etc/nginx/sites-enabled/code
    state: link

# NOTE(review): this restarts nginx on every run, even when nothing
# changed — consider moving it to a handler notified by the two tasks
# above. Left as-is to preserve current behavior.
- name: Restart Nginx
  service:
    name: nginx
    state: restarted

1822
package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

15
package.json Normal file
View File

@ -0,0 +1,15 @@
{
"name": "lmp-snippets-store",
"version": "1.0.0",
"author": "Liam Pietralla <liam.pietralla@gmail.com>",
"license": "MIT",
"devDependencies": {
"vitepress": "^1.3.1",
"vue": "^3.4.34"
},
"scripts": {
"dev": "vitepress dev",
"build": "vitepress build",
"preview": "vitepress preview"
}
}