Compare commits

...

No commits in common. 'main' and 'restructure' have entirely different histories.

  1. 3
      .Dockerignore
  2. 19
      .github/workflows/deploy.yml
  3. 36
      .gitignore
  4. 9
      Dockerfile
  5. 2
      LICENSE
  6. 14
      README.md
  7. 141
      components/BlogList.js
  8. 66
      components/Header.js
  9. 62
      components/Layout.js
  10. 278
      components/Meta.js
  11. 42
      components/Profile.js
  12. 11
      content/posts/2020-11-06-current-doings.md
  13. 75
      content/posts/2020-11-17-booleans-are-wasted-memory.md
  14. 8
      content/posts/2020-12-18-update-december.md
  15. 10
      content/posts/2021-01-07-delete-facebook.md
  16. 8
      content/posts/2021-01-11-100daystooffload.md
  17. 14
      content/posts/2021-01-11-are-humans-still-evolving.md
  18. 16
      content/posts/2021-01-13-512kb-club.md
  19. 53
      content/posts/2021-01-15-compiling-your-own-kernel.md
  20. 62
      content/posts/2021-01-18-reasons-the-fediverse-is-better.md
  21. 14
      content/posts/2021-01-23-signal-to-noise.md
  22. 72
      content/posts/2021-01-26-vim-macros.md
  23. 52
      content/posts/2021-01-29-sudo-to-doas.md
  24. 72
      content/posts/2021-02-02-bem-methodology.md
  25. 51
      content/posts/2021-02-07-storage-setup.md
  26. 14
      content/posts/2021-02-11-10-percent-100daystooffload.md
  27. 44
      content/posts/2021-02-17-notes-on-flutter-web.md
  28. 98
      content/posts/2021-02-20-changelogs.md
  29. 57
      content/posts/2021-02-24-vim-terminal-strategies.md
  30. 28
      content/posts/2021-03-13-git-builtin-lifesaver.md
  31. 247
      content/posts/2021-04-07-pgp-guide.md
  32. 8
      content/posts/_2021-01-14-diy-software.md
  33. 12
      content/posts/_2021-01-22-library-of-babel.md
  34. 37
      content/posts/_2021-01-25-kotlin-refactor-strategies.md
  35. 81
      content/posts/fighting-array-functions-with-es6.md
  36. 59
      content/posts/introducing-slashdev-space.md
  37. 83
      content/posts/lightweight-vpn-with-wireguard.md
  38. 63
      content/posts/patch-based-git-workflow.md
  39. 28
      content/posts/quick-tip-terminal-pastebin.md
  40. 120
      content/posts/testing-isnt-hard.md
  41. 40
      content/posts/whom-do-you-trust.md
  42. 1
      favicon.svg
  43. 93
      fonts/work-sans/OFL.txt
  44. BIN
      fonts/work-sans/WorkSans-Italic-VariableFont_wght.ttf
  45. BIN
      fonts/work-sans/WorkSans-VariableFont_wght.ttf
  46. 20
      gen-post.sh
  47. 131
      index.html
  48. 93
      lib/rss.js
  49. 12
      next.config.js
  50. 13393
      package-lock.json
  51. 21
      package.json
  52. 5
      pages/_app.js
  53. 208
      pages/posts/[post].js
  54. 49
      pages/posts/index.js
  55. 4
      public/.well-known/brave-rewards-verification.txt
  56. BIN
      public/assets/flutter_web_renderer_canvaskit.png
  57. BIN
      public/assets/flutter_web_renderer_html.png
  58. BIN
      public/assets/signed_commit.png
  59. 1
      public/favicon.svg
  60. 4
      public/vercel.svg
  61. 36
      styles/atoms.css
  62. 17
      styles/contact.css
  63. 18
      styles/darkmode.css
  64. 39
      styles/index.css
  65. 61
      styles/services.css
  66. 16
      styles/support.css
  67. 25
      styles/typography.css
  68. 35
      styles/welcome.css

3
.Dockerignore

@ -0,0 +1,3 @@
node_modules/
out/
.next/

19
.github/workflows/deploy.yml

@ -0,0 +1,19 @@
on:
push:
branches:
- master
name: Deploy to Github Pages
jobs:
deploy:
name: deploy
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: Deploy
uses: JamesIves/github-pages-deploy-action@master
env:
ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: out
BUILD_SCRIPT: npm ci && npm run build && touch out/.nojekyll
CNAME: blog.garrit.xyz

36
.gitignore vendored

@ -0,0 +1,36 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/.pnp
.pnp.js
# testing
/coverage
# next.js
/.next/
/out/
# production
/build
# misc
.DS_Store
*.pem
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# local env files
.env.local
.env.development.local
.env.test.local
.env.production.local
# vercel
.vercel
public/rss.xml

9
Dockerfile

@ -0,0 +1,9 @@
FROM node:13-alpine
COPY . /app
WORKDIR /app
RUN npm install && npm run build
FROM nginx:alpine
COPY --from=0 /app/out /usr/share/nginx/html
EXPOSE 80

2
LICENSE

@ -1,6 +1,6 @@
MIT License
Copyright (c) 2021 Garrit Franke <garrit@slashdev.space>
Copyright (c) 2020 Garrit Franke
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

14
README.md

@ -1,3 +1,13 @@
# /dev.space Landing Page
# blog.garrit.xyz
Sourcecode for [slashdev.space](https://slashdev.space).
This is the repository for my personal blog.
## Generating posts
Running the following command will generate a new blog post with the necessary boilerplate.
```
./contrib/gen-post.sh My first post
# -> 2021-04-12-my-first-post.md
```
https://blog.garrit.xyz

141
components/BlogList.js

@ -0,0 +1,141 @@
import Link from "next/link";
import ReactMarkdown from "react-markdown";
/**
 * Format a post's frontmatter date for display, e.g. "Jan 13 2021".
 *
 * @param {string} fullDate - Any date string accepted by the Date constructor.
 * @returns {string} The date's toDateString() with the weekday prefix removed.
 */
function reformatDate(fullDate) {
  // toDateString() yields e.g. "Wed Jan 13 2021"; drop the 4-char weekday.
  const parsed = new Date(fullDate);
  return parsed.toDateString().substring(4);
}
/**
 * Produce a short preview of a post body for the blog list.
 *
 * @param {string} content - Raw markdown body of the post.
 * @returns {string} At most the first 200 characters, trimmed; an ellipsis
 *   is appended only when the content was actually truncated.
 */
function truncateSummary(content) {
  // Bug fix: the previous version unconditionally appended "...", even when
  // the content was already shorter than the 200-character limit.
  if (content.length <= 200) {
    return content.trimEnd();
  }
  return content.slice(0, 200).trimEnd() + "...";
}
const BlogList = ({ posts }) => {
return (
<>
<ul className="list">
{posts.length > 1 &&
posts
// Filter drafts
.filter((post) => !post.slug.startsWith("_"))
// Ternary operator is used to fix chromium sorting
// See: https://stackoverflow.com/a/36507611
.sort((a, b) => (a.frontmatter.date < b.frontmatter.date ? 1 : -1))
.map((post) => (
<Link key={post.slug} href={{ pathname: `/posts/${post.slug}` }}>
<a>
<li>
<div className="blog__info">
<h2>{post.frontmatter.title}</h2>
<h3> {reformatDate(post.frontmatter.date)}</h3>
<p>
<ReactMarkdown
source={truncateSummary(post.markdownBody)}
/>
</p>
</div>
</li>
</a>
</Link>
))}
</ul>
<style jsx>
{`
margin-bottom: 0;
a:hover {
opacity: 1;
}
a:hover li div.hero_image img {
opacity: 0.8;
transition: opacity 0.3s ease;
}
a:hover li .blog__info h2,
a:hover li .blog__info h3,
a:hover li .blog__info p {
transform: translateX(10px);
transition: transform 0.5s ease-out;
}
@media (prefers-reduced-motion) {
a:hover li .blog__info h2,
a:hover li .blog__info h3,
a:hover li .blog__info p {
transform: translateX(0px);
}
}
.hero_image {
width: 100%;
height: 33vh;
overflow: hidden;
background-color: #000;
}
.hero_image img {
object-fit: cover;
object-position: 50% 50%;
opacity: 1;
transition: opacity 0.3s ease;
min-height: 100%;
}
.blog__info {
display: flex;
flex-direction: column;
justify-content: center;
padding: 1.5rem 1.25rem;
transform: translateX(0px);
transition: transform 0.3s ease-in;
}
.blog__info h2,
.blog__info h3,
.blog__info p {
transform: translateX(0px);
transition: transform 0.5s ease-out;
}
li {
opacity: inherit;
display: flex;
flex-direction: column;
min-height: 38vh;
margin-bottom: 0;
}
h2 {
margin-bottom: 0.5rem;
}
h3 {
margin-bottom: 1rem;
}
p {
max-width: 900px;
}
@media (min-width: 768px) {
li {
min-height: 250px;
height: 33.333vh;
flex-direction: row;
}
.hero_image {
height: 100%;
}
.hero_image img {
min-width: 100%;
height: 100%;
width: auto;
min-height: 0;
}
.blog__info {
min-width: 70%;
}
}
@media (min-width: 1280px) {
.blog__info {
padding: 3rem;
}
h3 {
margin-bottom: 1.2rem;
}
}
`}
</style>
</>
);
};
export default BlogList;

66
components/Header.js

@ -0,0 +1,66 @@
import { useLayoutEffect, useState } from "react";
import Link from "next/link";
import Profile from "./Profile";
/**
 * React hook tracking the browser window dimensions as a
 * [width, height] state array. The value is [0, 0] until the first
 * layout effect runs, and updates on every window resize event.
 */
function useWindowSize() {
  const [dimensions, setDimensions] = useState([0, 0]);
  useLayoutEffect(() => {
    const handleResize = () =>
      setDimensions([window.innerWidth, window.innerHeight]);
    window.addEventListener("resize", handleResize);
    handleResize(); // capture the size available at mount
    return () => window.removeEventListener("resize", handleResize);
  }, []);
  return dimensions;
}
export default function Header(props) {
const [windowWidth, windowHeight] = useWindowSize();
return (
<header className="header">
<nav className="nav" role="navigation" aria-label="main navigation">
<Link href="/">
<a>
<h1>{props.siteTitle}</h1>
</a>
</Link>
{windowWidth >= 768 && <Profile></Profile>}
</nav>
<style jsx>
{`
h1 {
margin-bottom: 0;
}
h1:hover {
cursor: pointer;
}
nav {
padding: 1.5rem 1.25rem;
display: flex;
justify-content: space-between;
flex-direction: row;
align-items: center;
}
@media (min-width: 768px) {
.header {
height: 100vh;
position: fixed;
left: 0;
top: 0;
}
.nav {
padding: 2rem;
width: 30vw;
height: 100%;
border-bottom: none;
flex-direction: column;
align-items: flex-start;
}
}
`}
</style>
</header>
);
}

62
components/Layout.js

@ -0,0 +1,62 @@
import { useState, useLayoutEffect } from "react";
import Header from "./Header";
import Meta from "./Meta";
import Profile from "./Profile";
/**
 * React hook returning the current window size as a two-element
 * [width, height] array. Starts as [0, 0] until the first layout effect
 * has run; re-renders the caller whenever the window is resized.
 */
function useWindowSize() {
  const [windowSize, setWindowSize] = useState([0, 0]);
  useLayoutEffect(() => {
    function measure() {
      setWindowSize([window.innerWidth, window.innerHeight]);
    }
    window.addEventListener("resize", measure);
    measure(); // record the initial size
    return () => {
      window.removeEventListener("resize", measure);
    };
  }, []);
  return windowSize;
}
export default function Layout({
siteTitle,
siteDescription,
children,
pathname,
}) {
const [windowWidth, windowHeight] = useWindowSize();
return (
<section className={`layout`}>
<Meta siteTitle={siteTitle} siteDescription={siteDescription} />
<Header siteTitle="~/garrit" />
<div className="content">{children}</div>
{windowWidth <= 768 && <Profile className="content"></Profile>}
<style jsx>
{`
.layout {
overflow-x: hidden;
display: flex;
flex-direction: column;
min-height: 100vh;
}
.layout .info_page {
color: #ebebeb;
}
.content {
flex-grow: 1;
}
@media (min-width: 768px) {
.layout {
display: block;
}
.content {
flex-grow: none;
width: 70vw;
margin-left: 30vw;
}
}
`}
</style>
</section>
);
}

278
components/Meta.js

@ -0,0 +1,278 @@
import Head from "next/head";
export default function Meta(props) {
return (
<>
<Head>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta charSet="utf-8" />
<title>{props.siteTitle}</title>
<meta
name="Description"
content="Random thoughts, tips and rants about software"
></meta>
<link rel="icon" type="image/svg+xml" href="/favicon.svg" />
<script async defer data-domain="blog.garrit.xyz" src="https://analytics.slashdev.space/js/plausible.js"></script>
</Head>
<style jsx global>
{`
@import url("https://fonts.googleapis.com/css?family=Work+Sans&display=swap");
* {
box-sizing: inherit;
}
html {
box-sizing: border-box;
overflow-y: scroll;
}
body {
margin: 0;
font-family: "Work Sans", "Helvetica Neue", Helvetica, sans-serif;
overflow-x: hidden;
color: #000;
font-size: 16px;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
a {
text-decoration: none;
color: inherit;
transition: opacity 0.2s ease;
}
a:hover {
transition: opacity 0.2s ease;
opacity: 0.5;
text-decoration-color: inherit;
}
ul {
list-style: none;
margin: 0;
padding-bottom: 0;
padding-left: 0;
padding-right: 0;
padding-top: 0;
list-style-position: outside;
list-style-image: none;
}
ol {
margin: 0;
padding-bottom: 0;
padding-left: 0;
padding-right: 0;
padding-top: 0;
list-style-position: outside;
list-style-image: none;
}
ul,
ol,
p {
margin-bottom: 1.45rem;
}
img {
max-width: 100%;
}
img,
figure,
table,
fieldset {
margin-left: 0;
margin-right: 0;
margin-top: 0;
padding-bottom: 0;
padding-left: 0;
padding-right: 0;
padding-top: 0;
margin-bottom: 1.45rem;
}
pre {
margin-left: 0;
margin-right: 0;
margin-top: 0;
margin-bottom: 1.45rem;
font-size: 0.85rem;
line-height: 1.42;
background: hsla(0, 0%, 0%, 0.04);
border-radius: 3px;
overflow: auto;
word-wrap: normal;
padding: 1.45rem;
}
table {
font-size: 1rem;
line-height: 1.45rem;
border-collapse: collapse;
width: 100%;
}
blockquote {
margin-left: 1.45rem;
margin-right: 1.45rem;
margin-top: 0;
padding-bottom: 0;
padding-left: 0;
padding-right: 0;
padding-top: 0;
margin-bottom: 1.45rem;
}
strong {
font-weight: bold;
}
li {
margin-bottom: calc(1.45rem / 2);
}
ol li {
padding-left: 0;
}
ul li {
padding-left: 0;
}
li > ol {
margin-left: 1.45rem;
margin-bottom: calc(1.45rem / 2);
margin-top: calc(1.45rem / 2);
}
li > ul {
margin-left: 1.45rem;
margin-bottom: calc(1.45rem / 2);
margin-top: calc(1.45rem / 2);
}
blockquote *:last-child {
margin-bottom: 0;
}
li *:last-child {
margin-bottom: 0;
}
p *:last-child {
margin-bottom: 0;
}
li > p {
margin-bottom: calc(1.45rem / 2);
}
code {
line-height: 1.45rem;
}
p code {
background: hsla(0, 0%, 0%, 0.1);
padding: 0 0.4rem;
}
@media (prefers-reduced-motion) {
* {
transition: none !important;
}
}
{
/* //TYPOGRAPHY------------------------------------- */
}
h1,
h2,
h3,
h4,
h5,
h6,
p {
font-family: "Work Sans", "Helvetica Neue", Helvetica, sans-serif;
margin-left: 0;
margin-right: 0;
margin-top: 0;
padding-bottom: 0;
padding-left: 0;
padding-right: 0;
padding-top: 0;
margin-bottom: 1.45rem;
color: inherit;
text-rendering: optimizeLegibility;
}
h1,
h2 {
font-weight: 500;
}
h1 {
font-size: 2rem;
letter-spacing: -1px;
line-height: 1.1875;
}
h2 {
font-size: 1.7rem;
letter-spacing: -0.75px;
line-height: 1.2;
}
h3 {
font-size: 1.2rem;
letter-spacing: -0.5px;
line-height: 1.1875;
color: #a0a0a0;
font-weight: normal;
}
p {
font-size: 1.2rem;
letter-spacing: -0.5px;
line-height: 1.5;
color: #464646;
}
@media (min-width: 1280px) {
h1 {
font-size: 2rem;
letter-spacing: -1px;
line-height: 1.1875;
}
h2 {
font-size: 1.5rem;
letter-spacing: -0.75px;
line-height: 1.1667;
}
h3 {
font-size: 1rem;
letter-spacing: -0.5px;
line-height: 1.1875;
color: #a0a0a0;
font-weight: normal;
}
p {
line-height: 1.4375;
}
}
// FIXME: I could not get this to work inside the post component,
// but here it apparently works. Maybe an overriding selector?
.blog__body a,
.blog__footer a {
text-decoration: underline;
}
@media (prefers-color-scheme: dark) {
:root {
background-color: #161618;
color: #dbd7db;
}
html {
scrollbar-color: #dbd7db #161618 !important;
}
h1,
h2,
h3,
h4,
p,
pre,
a,
ul,
li,
blog__body > * {
color: #dbd7db;
}
.button__link {
background-color: #67676c;
}
a {
color: #dbd7db;
}
}
`}
</style>
</>
);
}

42
components/Profile.js

@ -0,0 +1,42 @@
import Link from "next/link";
/**
 * Author profile card: name, tagline and a list of contact/social links.
 * Rendered inside the fixed Header on wide viewports and below the main
 * content on narrow ones (see Header.js and Layout.js).
 *
 * @param {object} props - currently unused.
 */
export default function Profile(props) {
  // NOTE(review): parts of the styled-jsx block below look dead:
  // - the img rule (width: 12rem) — no <img> is rendered in this markup;
  // - the "Link" selector — CSS matches rendered HTML tags, not React
  //   component names, so this rule is unlikely to match anything.
  // Also worth confirming against the Next.js version in package.json:
  // these <Link> elements wrap bare strings (BlogList.js wraps its children
  // in an <a>), and several hrefs are external URLs where a plain <a> is
  // the usual choice over next/link.
  return (
    <div className="profile">
      <h2>Garrit Franke</h2>
      <p>Random thoughts, tips and rants about software</p>
      <Link href="https://lists.sr.ht/~garritfra/public-inbox">
        Public Inbox
      </Link>
      <br />
      <Link href="https://matrix.to/#/@garrit:matrix.slashdev.space">
        Matrix
      </Link>
      <br />
      <Link href="https://garrit.xyz">Website</Link>
      <br />
      <Link href="https://github.com/garritfra">Github</Link>
      <br />
      <Link href="https://www.linkedin.com/in/garritfranke/">LinkedIn</Link>
      <br />
      <Link href="/rss.xml">RSS</Link>
      <style jsx>
        {`
          img {
            width: 12rem;
          }
          Link {
            margin: 1rem;
          }
          @media (max-width: 767px) {
            .profile {
              padding: 1.5rem 1.25rem;
            }
          }
        `}
      </style>
    </div>
  );
}

11
content/posts/2020-11-06-current-doings.md

@ -0,0 +1,11 @@
---
title: Updates, November 2020
date: "2020-11-06"
---
Hi, I wanted to share some things I'm currently working on. Maybe I'll turn this into a monthly thing, who knows. :)
One major goal I set for myself in the upcoming months is to build a SaaS for freelancers. Some features of this will include handling clients, projects and expenses. A thing I'm struggling with right now is to find a lightweight way to host it. It is currently deployable through docker containers, but I am not 100% satisfied with my current setup. I will give some updates on this in the future. I aim to release a very early alpha version for free soon, so that some people can stress test it extensively. For now, you can of course self-host it. I really don't want to impose subscription fees for its users, but I will see how it goes. You can find the source code for it [here](https://github.com/garritfra/omega-crm).
Recently, I increasingly gained interest in the [Gemini project](https://gemini.circumlunar.space/). In a nutshell, this is a very minimal alternative to HTTP, with a strong emphasis on simplicity. The maintainers clearly embrace a DIY mindset, which I want to follow. I set myself the rule to only interact with gemini using tools I wrote myself. To achieve this, I am currently writing [my own Gemini server called "Taurus"](https://git.sr.ht/~garritfra/taurus) to eventually set up my own geminispace. I have not yet looked deeply into building a client, but I might do this once I'm happy with my server. I admit that I'm currently cheating a bit, by testing my server using a browser recommended by the gemini team ;)
If you are interested in this project, I highly recommend to check out the [gemini specification](https://gemini.circumlunar.space/docs/specification.html), and play around with some geminispaces. Maybe you could set up a server for yourself?

75
content/posts/2020-11-17-booleans-are-wasted-memory.md

@ -0,0 +1,75 @@
---
title: Booleans are wasted memory
date: "2020-11-17"
---
A boolean is either `true` or `false`. That translates to `1` or `0`. If you think that one bit is enough to store this information, you'd be wrong.
In order to keep the binary layout of a program simple and convenient, most languages store information in 8 bit (1 byte) blocks.
If you allocate a `bool` in Rust or (most) other languages that are based on LLVM, [it will take up 1 `i1`, or 1 byte of memory](https://llvm.org/docs/LangRef.html#simple-constants). If you allocate a boolean value in C, you will get [an integer constant with a value of either 1 or 0](https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/stdbool.h.html).
If you find yourself having to store multiple boolean states somewhere, you might simply declare those booleans and call it a day:
```c
#include <stdbool.h>
#include <stdio.h>
int main()
{
bool can_read = true;
bool can_write = true;
bool can_execute = false;
if (can_read)
printf("read bit set\n");
if (can_write)
printf("write bit set\n");
if (can_execute)
printf("execute bit set\n");
// Output:
// read bit set
// write bit set
}
```
## We can do better than this
An alternative approach to store boolean values is to share a "chunk" of bits with other values. This is usually done using bitwise operations:
```c
#include <stdbool.h>
#include <stdio.h>
// Define permissions
#define PERM_NONE 0b000
#define PERM_READ 0b001
#define PERM_WRITE 0b010
#define PERM_EXECUTE 0b100
#define PERM_ALL PERM_READ | PERM_WRITE | PERM_EXECUTE
int main()
{
// Allocate 1 byte for permissions
char permissions = PERM_READ | PERM_WRITE;
if (permissions & PERM_READ)
printf("write bit set\n");
if (permissions & PERM_WRITE)
printf("read bit set\n");
if (permissions & PERM_EXECUTE)
printf("execute bit set\n");
// Output:
// read bit set
// write bit set
}
```
This example still wastes 5 bits since we only use 3 out of 8 possible bits of the char type, but I'm sure you get the point. Allocating 3 boolean values independently would waste 7 * 3 = 21 bits, so it's a massive improvement. Whenever you find yourself needing multiple boolean values, think twice if you can use this pattern.
Microcontrollers have a very constrained environment, therefore bitwise operations are essential in those scenarios. 7 wasted bits are a lot if there are only 4 kb of total memory available. For larger systems we often forget about these constraints, until they add up.
## My Plea
* Be mindful about the software you create.
* Appreciate the resources at your disposal.

8
content/posts/2020-12-18-update-december.md

@ -0,0 +1,8 @@
---
title: Updates, December 2020
date: "2020-12-18"
---
It's christmas season! I hope you and your family are safe and sound. My main focus this month is to expand my knowledge about compilers, by [building one from scratch](https://sr.ht/~garritfra/sabre/). It's a compiler for a very simple language, but it gets the job done. So far, you can write [simple algorithms](https://git.sr.ht/~garritfra/sabre/tree/master/examples), and compile them to JavaScript. A more sophisticated C backend is in development, but I still need a plan for expanding the target-specific builtin functions to provide more features in the standard library. An important topic at the moment is the [documentation of the project](https://garritfra.github.io/sabre/). Since the compiler itself has gotten relatively stable, all the language-features now need to be captured and written down. There is also a [contributing guide](https://garritfra.github.io/sabre/developers/contributing.html), if you want to help out, or want to get into compiler design.
Stay home and stay safe!

10
content/posts/2021-01-07-delete-facebook.md

@ -0,0 +1,10 @@
---
title: I closed my Facebook account, and you should too
date: "2021-01-07"
---
I know I should have done this a while ago, but with ever-increasing scandals about data privacy surrounding Facebook, I finally decided to get rid of it.
I haven't used the service in a long time anyway, but I always told myself "what if I needed the data later?", or "what if a friend contacted me, and I didn't respond?", "what if I missed the birthday of someone I'm close with?!". Well, according to my facebook inbox, the only messages I received lately were some random links from people I'm not really in touch with anymore. Birthdays? Do you think someone you haven't talked to in over three years will get mad at you, for forgetting their birthday? And regarding your data: you won't lose it! [This guide](https://www.facebook.com/help/212802592074644) describes how you can download a copy of your data as html and/or json.
Go ahead and ask yourself: Is there anything holding you back from deleting your Facebook account? What would you lose? How often do you even use the service? Do the social benefits of Facebook **really** outweigh the negative aspects (privacy concerns, data collection, etc.)?

8
content/posts/2021-01-11-100daystooffload.md

@ -0,0 +1,8 @@
---
title: 100DaysToOffload
date: "2021-01-11"
---
For some time now, I've seen this #100DaysToOffload hashtag on my social medias. I knew that it was some kind of writing challenge, but I never thought about taking part in it. Since I recently started to blog more frequently though, I think this challenge could be very beneficial to my writing skills, and just jotting my thoughts down in general. So, starting with this entry, I will try to publish 100 (hopefully) useful posts on this blog within one year. My "deadline" for this will be January 11, 2022. I will post every entry to [my mastodon account](https://fosstodon.org/@garritfra).
This is post 001 of [#100DaysToOffload](https://100daystooffload.com/).

14
content/posts/2021-01-11-are-humans-still-evolving.md

@ -0,0 +1,14 @@
---
title: Are humans still evolving?
date: "2021-01-11"
---
This is by no means a scientifically accurate article. These are my thoughts, and I'd be happy to discuss this topic. Take it with a grain of salt.
Evolution builds upon natural selection. With every generation of a species, there is a slight chance of mutation, possibly giving an individual advantages or disadvantages in the ability to survive and give offspring. Somewhere way up in our evolutionary tree, a microbe might have mutated a gene that allowed it expand and retract a part of its body. As it turns out, this proves useful in fleeing from predators, while other individuals of this species might fall prey on their first day. The microbe has a slightly larger chance to survive and give offspring. On the other hand, mutations might also result in a fatal illness (cancer, in other words). Oftentimes, this individual does not survive long enough to give offspring.
150 years ago, giving birth was literally an act of life and death. Many children died at a young age. They were not tough enough from an evolutionary standpoint, and were therefore "filtered" out by natural selection. Only the strongest survived and gave offspring.
Today's medicine is (fortunately!) very powerful. Very few children die at birth and a lot fewer people are dying from an illness like the flu. This is an evolutionary anomaly. Natural selection has been defeated to a certain degree. We don't need to run from predators anymore and are less prone to disease. Every one of us is more or less equally able to give offspring. We can't really gain an advantage over others anymore. We are not evolving.
This is post 002 of [#100DaysToOffload](https://100daystooffload.com/).

16
content/posts/2021-01-13-512kb-club.md

@ -0,0 +1,16 @@
---
title: I joined the 512KB club
date: "2021-01-13"
---
JavaScript rules the web, literally. In fact, this website is built with JavaScript (Next.js). I recently started to think about if I really needed this much overhead for a simple site like this. After all, I don't have any fancy user interaction features or complex animation that would justify the JavaScript on this page.
There is a new (no, not that new) philosophy called [the lean web](https://leanweb.dev/). It essentially tries to keep websites tiny and semantically correct. This has many benefits, ranging from less pollution generated by your site to improved SEO, since many search engines favor a semantically correct website over a site that abuses JavaScript to mimic the features, that are baked into html anyway.
In order to get lean, I decided to join [the 512KB club](https://512kb.club/). This website lists sites that are below 512KB in total (uncompressed, with all dependencies). To get below that mark, I had to remove my face from the frontpage (I'm sure you'll miss it😅), since the image itself was roughly 750KB. I'm now just below 500KB, which qualifies me to join the blue team.
[![Blue Team](https://512kb.club/images/blue-team.svg)](https://512kb.club)
I'm not planning to stop here though. I think keeping a website small and simple is an excellent practice. My next step will be to get rid of all the JS junk on this site and only rely on HTML and CSS. I still want to be able to write my posts in Markdown, so I will have to come up with a way to generate pages from them. A safe bet for this would be to use a SSG like [Hugo](https://gohugo.io/). Frankly, [writing my own simple SSG probably wouldn't hurt either](https://erikwinter.nl/articles/2020/why-i-built-my-own-shitty-static-site-generator/). Let's see how high I can climb the ranks of the 512KB club. Care to join me?
This is post 003 of [#100DaysToOffload](https://100daystooffload.com/).

53
content/posts/2021-01-15-compiling-your-own-kernel.md

@ -0,0 +1,53 @@
---
title: Compiling your own kernel
date: "2021-01-15"
---
I'm currently in the midst of fiddling around with the kernel a bit, and I figured I just documented my process a bit. Unfortunately, since I'm using a Mac for day to day work, I have to rely on a virtual machine to run anything Linux-related. VirtualBox doesn't support the most recent kernels (5.9 is the most recent supported one), so there won't be any cutting-edge development happening here. I decided to use ubuntu as my guest system, since it's very easy to set up.
So, the first step is to get the sources. You could simply go ahead and download a specific release from [kernel.org](https://kernel.org/), but since I want to hack on it, I decided to go the git-route. Simply download the sources from [their repo](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/) and check out the tag you want to build.
> **Note**: this might take a while. Their repository is huge! If you want to only need the `HEAD` and want to build on bare-metal (no VirtualBox), you could only clone the latest commit using `git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git --depth=1`.
Next up, you need to generate a `.config`. This file describes which features you want to compile into your kernel. To make a generic config that only compiles drivers for the hardware of your system, you can run the following commands:
```bash
# Copy the config of your current kernel into the repo
make oldconfig
# Only enable modules that are currently used by the system
make localmodconfig
```
Now, let's get to actually compiling the kernel. In my case, I assigned 4 cores to my VM. The `-j` option tells make to run 4 jobs in parallel.
> **Caution**: Just providing -j will freeze your system, since make will try to launch an infinite amount of processes!
```
make -j4
```
Again, this might take some time. Go for a walk, get a coffee or watch your favorite TV-show. After compilation has finished, we need to install the kernel. To do so, run the following commands:
```
sudo make modules_install
sudo make install
```
In order to boot, we need to tell our bootloader about our new kernel. Run this command to update your grub config:
```
sudo update-grub2
```
And voila! Your new kernel should be ready.
Reboot the system, and grub should pick up the new kernel and boot to it. If that's not the case, you should be able to pick the kernel from the grub menu under `advanced options`.
## Retrospective
I found that building my own kernel is a highly educational and fun experience. Using VirtualBox is a pain in the `/dev/null` to work with, since it has to add a lot of overhead to the system in order to work. You sometimes have to wait over 6 months until the support for a new kernel arrives. This problem should not apply if you compile on bare metal systems.
Thanks for your time!
This is post 004 of [#100DaysToOffload](https://100daystooffload.com/).

62
content/posts/2021-01-18-reasons-the-fediverse-is-better.md

@ -0,0 +1,62 @@
---
title: 6 reasons the Fediverse is better than regular social media
date: "2021-01-18"
---
Social media sucks. Platforms like Twitter, Facebook and Instagram are designed to turn your precious free time into money. What we see as a nice way to stay in touch with our friends, in reality are just many hits of dopamine stimulating precise spots in your brain, leading to you spending more time on the platform consuming ads.
But what if I told you that there is a huge ad-free social network out there, not governed by a central authority, full of great people and completely free to use? This place is called the fediverse. Well, it's not really **a** place, it's many places.
## What is the fediverse?
At its core, the fediverse is a mesh of interconnected nodes on the internet, all communicating in the same language. Every instance on the fediverse implements the [ActivityPub](https://activitypub.rocks/) protocol, which allows it to talk to other instances on the network. The phrase "Fedi" comes from "federated", meaning that content on the network is shared and accessible by anyone.
There are many different software projects for the fediverse out there. If you like the concept of Twitter, you could take a look at [Mastodon](https://joinmastodon.org/), a microblogging-platform for the fediverse. There's also [PeerTube](https://joinpeertube.org/), a federated clone of YouTube (PeerTube recently [released support for peer2peer live streaming support](https://framablog.org/2021/01/07/peertube-v3-its-a-live-a-liiiiive/), like what?! 🤯). You like instagram? [Pixelfed](https://pixelfed.org/) got you covered. Of course, there are many other services worth mentioning, so feel free to dig around a bit! As mentioned, the great thing about the fediverse is that all of these services are connected with each other. If I signed up for an account on a mastodon instance, I can subscribe to your posts on Pixelfed, and vice versa. If I want to get notified about your videos on PeerTube, I can just go ahead and follow your account and comment on your videos.
> "This all sounds great, but why should I bother?"
Let me give you 6 reasons why the fediverse is far superior to all other social media platforms out there, and why you should consider signing up for an account on one of the many instances of the fediverse.
## Reason 1: It's decentralized
Regular social media platforms like Facebook have a **single point of failure**. If their servers go down, your content goes down with it. Content on the fediverse on the other hand is scattered around many instances, which means it is very resilient. If your instance dies, you can move to a new instance. This goes hand in hand with the next reason.
## Reason 2: It can't be censored
You probably heard that the Twitter account of Donald Trump was recently suspended by the owners of the platform. I don't want to engage in any political discussions, but the main flaw with this is the violation of **freedom of speech**. Even if everything someone says is controversial nonsense, it is still well within their rights to express their thoughts.
On the fediverse, a scenario like this would certainly not happen, given its decentralized nature. Some instances still moderate their content, meaning that if someone posts inappropriate content, it might get blocked. The twist here is, if that person disagrees with the rules of the instance, he is free to join another instance.
## Reason 3: Free as in freedom
There's this saying, criticizing modern software projects:
> "If it's free, you're the product"
This is not true in all cases. "Free" can be understood in two ways.
**"Free as in beer"** means that something might seem free at first glance (E.g. free beer at Oktoberfest), but in the end you often leave with less than you came with. In the case of beer, you often buy another beer after the first free one. Therefore, even though you think you've saved the money for one beer, in reality you bought an extra beer. In the case of proprietary software, it's a similar story. While you think that a service is free, you give up your privacy and get monetized with ads.
**"Free as in freedom"** on the other hand means that you won't get "screwed over" like this. Most, if not all of the software for the fediverse is **free and open source software** (FOSS). If you don't like how a certain feature works, **you are completely free to change it**. You can look at the source code and propose changes to the main project, or launch your own spin of that product.
Since everyone can openly look at the source code, it is **audited** by many people, including security experts. This vastly improves the security and stability of the product. If the developers were to do shady things, they would most certainly get called out by people as soon as that code enters the main repositories. Proprietary (closed-source) platforms like Facebook and Twitter cannot be audited. The owners can do whatever they want, including spying on their users, or collecting and selling their users' data.
## Reason 4: It respects your privacy
Since the software on the fediverse is audited by a lot of people, you can be almost 100% certain that joining an instance will not collect any of your personal data. If you are still concerned about your privacy though, you can still be part of the network by launching your own instance, with your own rules. There are tutorials out there, explaining how you can set up a small instance for a very cheap price on your local network.
## Reason 5: It's all about the community
I used to spend a lot of time on "regular" social media platforms. From personal experience, these platforms are all about promoting yourself and building up your follower count. Connecting with your friends is of little importance. In the eyes of some people, you are not worth talking to if your follower count or likes-per-post doesn't exceed a certain threshold.
It has now been about 5 months since I created [my mastodon account](https://fosstodon.org/@garritfra). Talking to people on the fediverse is a completely different experience compared to Facebook or Twitter. Almost everyone I talked to is a polite, grounded person, willing to engage in constructive and fun discussions. I met many people who disagree with my views, but instead of leaving a comment saying that this post sucks, all of them took the time to express their alternative opinions. Every member of the fediverse wants to drive the network forward, which is reflected in their posts.
## Reason 6: There's an instance for everyone
Whether you're into gaming, painting, or Spanish dancing music, there is an instance for you. If there isn't, you are free to create one and promote it to people of that niche. You won't lose the social aspect by launching your own instance, since you are still reachable by other people on the network. If you just want to get started with the fediverse, I recommend that you check out one of the many [lists of mastodon instances](https://instances.social/). If you like Instagram and want to stay in a familiar environment, take a look at [Pixelfed](https://pixelfed.org/), and join an instance from the list they provide.
I myself am a person who cares a lot about free and open source software, therefore my choice of instance was [fosstodon.org](https://fosstodon.org/), a mastodon instance geared towards awesome like-minded people.
**Note**: This post has generated some interesting discussions on [Hacker News](https://news.ycombinator.com/item?id=25820646).
This is post 005 of [#100DaysToOffload](https://100daystooffload.com/).

14
content/posts/2021-01-23-signal-to-noise.md

@ -0,0 +1,14 @@
---
title: Signal-to-Noise, or why nobody cares about your GitHub project
date: "2021-01-23"
---
For a very long time, the thought of leaving GitHub and moving to another platform daunted me. Having more users on one platform means that more people will contribute to my project, right? Wrong.
The problem with GitHub is that there's a lot going on around you. How many times have you discovered a cool project on GitHub, starred it and never heard from it again? In essence, this is the same phenomenon as with modern social media. A bombardment of positive stimulants makes the user crave more, letting them forget about previously consumed content. Sure, if you just want to get your code out there, GitHub might be a great place, but if you are just starting out as a developer and you're looking for contributors and feedback, you will probably be very bummed to find out that nobody cares about your work. Many developers are using the platform because other developers are using it. Your project on GitHub is a drop in an ocean of other projects.
A few months ago, I decided to make the leap and switch most of my development over to [Sourcehut](https://sourcehut.org/), a free and open source code-hosting platform. Besides its great tooling (mailing lists, automated builds, etc.), it has the benefit of a high **signal-to-noise ratio**. Fewer developers are using the platform, but most of them are very passionate about their work. They care about collaborating with others and they believe in what they are doing, which probably led them to sign up for this platform in the first place.
Of course, switching away from a platform like GitHub alone does not ensure more contributions. You might be trying to advertise your projects by spamming links on popular newsboards and forums, but this only generates **noise**. Instead, you should intentionally talk about your personal journey with the project in a smaller circle. If other developers in your niche see that you continuously give updates about the project and its improvements, they will eventually start to relate to it. Some of them will look at your project and give feedback, or even contribute patches.
This is post 006 of [#100DaysToOffload](https://100daystooffload.com/).

72
content/posts/2021-01-26-vim-macros.md

@ -0,0 +1,72 @@
---
title: Using Macros in Vim
date: "2021-01-26"
---
For a long time, macros in Vim were a huge mystery to me. I knew they existed, but I didn't know how or why you'd use them. A recent task of mine involved replacing the unsafe operator (`!!`) in a large kotlin codebase with a null-safe operator (`?`). This gave me a good opportunity to learn about macros. This is a snippet I encountered numerous times:
```kt
mLeftButton!!.text = "Left"
mLeftButton!!.setOnClickListener(leftListener)
mLeftButton!!.visibility = View.VISIBLE
mRightButton!!.text = "Right"
mRightButton!!.setOnClickListener(rightListener)
mRightButton!!.visibility = View.VISIBLE
```
You could go ahead and change each line individually, or use the IDE's built-in "multi-cursor" tool to save you some work. But, let me show you how I automated this using a Vim plugin for Android Studio. Not that the plugin matters; it will work in every Vim-like editor.
A macro in Vim works like this:
1. Record any sequence of keystrokes and assign them to a key
1. Execute that sequence as often as you wish
So let's see how we'd do that.
## Recording a macro
To record a macro in Vim, you press `q` (In normal mode) followed by a key you want to assign the macro to. So, if you wanted to record a macro and save it to the `q` key, you'd press `qq`. Vim will notify you that a macro is being recorded. Now, you can press the keystrokes that define your actions. When you're done, press `q` in normal mode again to quit your macro.
Coming back to my task, I would want to do the following:
1. `qq` Record a macro and save it to the `q` key
1. `_` - Jump to the beginning of the line
1. `f!` - Find next occurrence of `!`
1. `cw` - Change word (Delete word and enter insert mode)
1. `?.` - Insert the new characters
1. `<esc>` - Enter normal mode
1. `j` - go down a line
1. `q` - Finish macro
If everything went right, this line:
```
mLeftButton!!.text = "Left"
```
Should now look like this:
```
mLeftButton?.text = "Left"
```
and your macro should be saved under the `q` key.
## Using the macro
In order to use a macro in vim, you press the `@` key, followed by the key the macro is saved under. Since our macro is defined as `q`, we'd press `@q`, and the macro is executed immediately.
Let's take this further. You might have noticed that I went down a line before closing the macro. This becomes handy when you want to execute it many times. In our case we have 6 lines we want to refactor. 1 line has already been altered, so we have to execute it 5 more times. As per usual with vim, you can execute an action n times by specifying a number before doing the action. Let's press `5@q` to execute the macro 5 times. And voila! Our unsafe code is now null-safe.
```kt
mLeftButton?.text = "Left"
mLeftButton?.setOnClickListener(leftListener)
mLeftButton?.visibility = View.VISIBLE
mRightButton?.text = "Right"
mRightButton?.setOnClickListener(rightListener)
mRightButton?.visibility = View.VISIBLE
```
Macros are really satisfying to watch, if you ask me!
This is post 007 of [#100DaysToOffload](https://100daystooffload.com/).

52
content/posts/2021-01-29-sudo-to-doas.md

@ -0,0 +1,52 @@
---
title: From sudo to doas
date: "2021-01-29"
---
You might have heard that there is currently [a pretty significant vulnerability](https://www.qualys.com/2021/01/26/cve-2021-3156/baron-samedit-heap-based-overflow-sudo.txt) affecting `sudo`, the program we all know and love. It is the de facto standard for when you want to run a command as a privileged user, but that's really it. Under the hood, sudo is a very powerful tool with a lot of features. It can be used to build out complex permission-systems that span across entire clusters of servers. But all of these features come at a price: **complexity**. Last time I checked, the [source code](https://www.sudo.ws/repos/sudo) of sudo had about 330k lines of code (using cloc as a benchmark). This massive complexity plays a large role in its security issues.
Luckily, there is a **far** more lightweight alternative to sudo called [doas](https://github.com/Duncaen/OpenDoas.git). It essentially does all the things your average end user would expect from sudo. Doas is written in just over 3k lines of code, which, if you think about it, should be more than enough to provide a tool that executes a command as a privileged user.
## Setup
While there are packages for [some distributions](https://github.com/slicer69/doas#installation-via-packagesrepositories), I personally had trouble setting it up on arch using yay (for permission reasons, ironically). I recommend going the extra mile and building it from source, which consists of a few commands and some seconds of your time:
```sh
git clone https://github.com/slicer69/doas
cd doas
make
sudo make install
```
Next, you will need to create a config file at `/usr/local/etc/doas.conf`. Paste the following line into it to give your user root access:
```sh
permit alice as root
```
You obviously want to substitute alice with your username. If you have multiple users on your system, simply duplicate that line and substitute the username accordingly. Just restart your terminal window, and you should be able to run programs as root using doas instead of sudo:
```sh
➜ ~ doas id
uid=0(root) gid=0(root) groups=0(root)
```
## Bonus: Save your muscle memory
If you still want to "use" sudo on your machine, you can set up a simple alias in your `.{bash|zsh|fish}rc`. This will also help with compatibility issues of some scripts, if you decide to ditch the actual sudo from your Box entirely. Just paste this line into your corresponding rc file:
```
alias sudo="doas"
```
## Bonus Bonus: Passwordless authentication
You can set up doas to skip the password prompt every time you run a command with it. Simply add the `nopass` option in your doas configuration file:
```sh
permit nopass alice as root
```
I hope you found this useful!
This is post 008 of [#100DaysToOffload](https://100daystooffload.com/).

72
content/posts/2021-02-02-bem-methodology.md

@ -0,0 +1,72 @@
---
title: Notes about BEM (Block Element Modifier)
date: "2021-02-02"
---
In the coming weeks, months and years, I will be working on frontend-development as part of my dayjob. These are some personal notes I took during my research about the BEM methodology. If you want to read the official introduction, you should visit [their website](http://getbem.com/).
# Overview - What is BEM?
BEM (Block Element Modifier) is a methodology that helps you create reusable components and share code in front-end development. It aims to group css-classes in a meaningful way, making it easier to understand
1. where this class is used
2. what it describes and
3. what state the element is in.
The BEM-notation is divided into three main parts: Blocks, Elements and Modifiers.
## Blocks
A standalone entity that is meaningful on its own. Some examples might be **headers, containers, menus, inputs, checkboxes**, etc.
## Elements
A part of a block that has no standalone meaning and is semantically tied to its block. This could be a **menu item or an input placeholder**.
## Modifiers
A flag on a block or an element. Used to change appearance or behavior. This might be **disabled, checked, fixed, big**, etc.
# Putting it together
A block itself is referenced through its name.
```css
.button {
}
```
To reference elements inside of the block, you add it to the block element with two underscores (`__`):
```css
.button {
}
.button__text {
}
```
If you want to add a modifier to a block or an element, you separate it with two dashes (`--`):
```css
.button {
}
.button--disabled {
}
.button__text--inverted {
}
```
# Benefits of BEM
**Modularity**: Block styles never depend on one another. They can easily be moved to other parts of the app.
**Reusability**: Composing styles in a meaningful way reduces the amount of code duplication.
**Structure**: BEM gives your code a solid structure that is both easy to understand and to expand.
# References
- http://getbem.com/
- https://csswizardry.com/2013/01/mindbemding-getting-your-head-round-bem-syntax/
This is post 009 of [#100DaysToOffload](https://100daystooffload.com/).

51
content/posts/2021-02-07-storage-setup.md

@ -0,0 +1,51 @@
---
title: My storage setup (February 2021)
date: "2021-02-07"
---
I used to rely on Google Drive and Photos to store my entire data. Now, that [Google has decided to ditch unlimited photo storage in the near future](https://blog.google/products/photos/storage-changes/) and Google basically being the devil himself, I decided to step up my game and get my hands dirty on a DIY storage solution.
## The goal
Before I got started, I thought about the expectations I have towards a system like this. It boils down to these four points (in this order): I want my solution to be **resilient**, **scalable**, **easy to maintain** and **easy to access**. Looking back, I think I met all of these requirements fairly well. Let me walk you through how I managed to do that.
## Data resilience
Keeping data on a single device is obviously a really bad idea. Drives eventually fail, which means that your data will be lost. Heck, even my house could burn down, which means that any number of local copies could burn to ashes. To prevent data loss, I strictly adhere to the [3-2-1 backup strategy](https://www.backblaze.com/blog/the-3-2-1-backup-strategy/). A 3-2-1 strategy means having **at least three total copies of your data, two of which are local but on different mediums (read: devices), and at least one copy off-site**. If a drive fails, I can replace it. If my house burns down, I get two new drives and clone my offsite backup to them.
To get an offsite backup, I set up a spare Raspberry Pi with a single large HDD and instructed it to do daily backups of my entire data. I asked a family member if they would be willing to have a tiny computer plugged in to their router 24/7, and they kindly agreed. A Pi and a HDD are very efficient in terms of power, so there is not a lot to worry about.
## Scalability
I currently don't have a huge amount of data. If that were to change (i.e. if I continue to shoot a lot of high-res photos and shove them into my setup), I need a way to simply attach more drives, or ones with more capacity. I looked at different file-systems that allowed for easy extendability while also being resilient.
An obvious candidate was **ZFS**, but there are a couple of reasons I ditched this idea. First of all, it is really hard to get up and running on Raspberry Pi running Linux, since it's not natively supported by all distributions. This increases the complexity of the setup. Another reason is that I don't like the way it scales. Please correct me if I'm wrong here, since I only did limited research on this. From what I know though, ZFS can only be extended by shoving a large amount of drives in the setup to achieve perfect redundancy.
In the end, I settled on **BTRFS**. For me, it scratches all the itches that ZFS has. It is baked into the linux kernel, which makes it really easy to install on most distributions, and I can scale it to any number of drives I want. If I find a spare drive somewhere with any storage capacity, I can plug it into the system and it will just work, without having to think about balancing or redundancy shenanigans.
## Maintainability
I need my setup to be easy to maintain. If a drive fails, I want to be able to replace it within a matter of minutes, not hours. If my host (a Raspberry Pi) bursts into flames, I want to be able to swap in a new one and still access my data. If I'm out and about and something goes south, I want to be able to fix it remotely. BTRFS helps a lot here. It's really the foundation for all the key points mentioned here. It gives me a simple interface to maintain the data on the drives, and tries to fix issues itself whenever possible.
Exposing random ports to the general public is a huge security risk. To still be able to access the Pi remotely, I set up **an encrypted WireGuard tunnel**. This way, I only have to expose a single port for WireGuard to talk to the device as if I'm sitting next to it.
## Accessibility
Since the data needs to be accessed frequently, I need a simple interface for it that can be used on any device. I decided to host a **Nextcloud** instance and mount the drive as external storage. Why external storage? Because Nextcloud does some weird thing with the data it stores. If I decide to ditch Nextcloud at some point, I have the data on the disks "as is", without some sort of abstraction on top of it. This also has the benefit of allowing access from multiple sources. I don't have to use Nextcloud, but instead can mount the volume as a FTP, SMB or NFS share and do whatever I want with it. From the nextcloud perspective, this has some drawbacks like inefficient caching or file detection, but I'm willing to make that tradeoff.
## In a nutshell
This entire setup cost me about 150€ in total. Some components were scraped from old PC parts. So, what does the solution look like? Here is the gist:
- A Raspberry Pi 4 as a main host and an older Raspberry Pi 3 for offsite backup, both running Raspberry Pi OS
- Two external harddrives in a RAID 1 (mirrored) configuration, running on an external USB3 hub
- A single internal HDD that served no purpose in my old PC, now serving as backup storage
- All drives are using BTRFS
- WireGuard tunnels between main and remote host, as well as most access devices
- Nextcloud on main host, accessible over TLS (if I need to access data from outside the secure tunnel-network)
- SMB share accessible from within the tunnel-network
- Circa 4.5 terabyte total disk size; 1.5 terabyte of usable storage
- Snapper for local incremental backups on main host; BTRBK for remote incremental backups
- Cron jobs for regular backups and repairs (scrub/rebalance)
This is post 010 of [#100DaysToOffload](https://100daystooffload.com/).

14
content/posts/2021-02-11-10-percent-100daystooffload.md

@ -0,0 +1,14 @@
---
title: Thoughts after 10 Days of 100DaysToOffload
date: "2021-02-11"
---
Coming into this, I didn't know what to expect. I'm not a huge writer, but so far I am pleasantly surprised about how relaxing this challenge is.
At first glance, writing a blog post every 3-5 days seems daunting. But the more I write, the more it becomes an enjoyable habit. I'm oftentimes looking forward to writing these posts. Whenever I have something on my mind, I jot it down without a plan or structure. And that's exactly the point of [#100DaysToOffload](https://100daystooffload.com/): **Just. Write.**
So far, these blog posts have helped me get a lot of my thoughts out of my head and onto paper (or on a screen). While writing, I reflect on what I think. I sometimes realize that what I thought is utter nonsense, but this in itself is an important reflection. With each post, I feel like I am getting more confident about the process.
If you're reading this and you don't have a blog yet, I would encourage you to give this technique a try. It doesn't matter that you produce quality content, nor does anyone have to see this. It's not the content that matters, but the process of producing it. Set up your own blog on [write.as](https://write.as/) or simply open your text editor of your choice and **Just. Write.**
This is post 011 of [#100DaysToOffload](https://100daystooffload.com/).

44
content/posts/2021-02-17-notes-on-flutter-web.md

@ -0,0 +1,44 @@
---
title: Flutter Web - the Good, the Bad and the Ugly
date: "2021-02-17"
---
These are some notes I took for the evaluation of Flutter web for a potential project at work. I decided to build a frontend for [Miniflux](https://miniflux.app/), since I figured it may enclose many pitfalls an application could potentially have. You can find the current prototype [here](https://github.com/garritfra/FlutterFlux).
## The Good
- **Trivial to set up**: Running a Flutter application in a browser, no matter if it is an existing app or a fresh project, can be done by simply specifying the -d chrome flag.
- **Same behavior compared to mobile app**: Since the app is quite literally a mobile application running in the browser, the page looks and feels like a mobile application. It gives the app a consistent look and feel across all devices. I can imagine this coming in handy for web applications that are primarily used on phones and tablets.
- **Browser API integration**: It seems like many of the libraries make use of Web APIs. For example: I was able to get location updates using the location package, and store data using [localstorage](https://pub.dev/packages/localstorage). Whether the Web target is supported, is noted as a flag in the package documentation.
- **Alternative Backends**: There are two [rendering backends](https://flutter.dev/docs/development/tools/web-renderers), each with its own benefits and drawbacks. The HTML renderer optimizes the page for the browser, which improves performance at the cost of consistency. The CanvasKit renderer renders WebGL using WebAssembly. This gives a consistent look and feel across all devices, at the cost of performance and download size. If auto is specified, the renderer will be determined based on the device type. Here’s a comparison of the same app rendered with both backends:
| HTML | CanvasKit |
| :----------------------: | :-----------------------: |
| ![](/assets/flutter_web_renderer_html.png) | ![](/assets/flutter_web_renderer_canvaskit.png) |
## The Bad
- **Still in Beta**: Flutter web requires the developer to use the beta channel of Flutter. I didn’t encounter any issues, but it could be that some features are unstable.
- **No native HTML (With an exception)**: Flutter Web renders the application into its own container, which is not using semantic HTML. The resulting application is also not debuggable using standard web-dev tools, but Flutter's debugging features can be used. There is a workaround though. Using the [easy_web_view](https://pub.dev/packages/easy_web_view) package, I was able to embed html components as flutter widgets. The embedded code is actual HTML code that the browser itself is rendering, not Flutter. This solution is cross-platform, meaning that it also works flawlessly for mobile builds of the application. This might come in handy if the project demands embedding a javascript component like a video player. This approach could technically also improve SEO, but I’m unsure how a full-blown application only using this approach would behave.
## The Ugly
- **Scrolling feels sluggish**: The scrolling behavior is implemented by flutter itself and does not feel as smooth as the native scrolling behavior of modern browsers.
- **SEO nearly impossible**: Since the application is a SPA and it is not using semantic HTML, it’s very difficult to do any kind of SEO. Lighthouse rated the demo application with a perfect 100 for SEO, but this is probably because it is only aware of the html that surrounds the flutter container. I didn’t find a way to Inject Metatags on a per-site basis.
- **Heavy and slow on old devices**: Even a basic application like the Todo app is very heavy and slow when compared to a “regular” website.
## Conclusion
Flutter Web seems to be a viable candidate to build truly cross-platform applications. Adding Web as a target for existing Flutter mobile apps should be relatively easy. The layout will probably need to be optimized to get the full experience. Native Web APIs seem to be well supported and documented.
The resulting web application is a PWA running inside a container. It is relatively heavy and requires much more resources to run, when compared to a “regular” web application.
I hope you found this useful!
This is post 012 of [#100DaysToOffload](https://100daystooffload.com/).

98
content/posts/2021-02-20-changelogs.md

@ -0,0 +1,98 @@
---
title: Writing good changelogs
date: "2021-02-20"
---
Today, I finally added a proper changelog to [my current project](https://github.com/garritfra/sabre/blob/master/CHANGELOG.md). My obvious first step was to search the web for `changelog.md`, since that's the naming convention many projects are using for their changelog. I was surprised that I was immediately redirected to "[changelog.md](https://changelog.md)", since it is a valid domain name. This website is a great guide on the essence of a good changelog. This is also where I got most of my findings from. Let me walk you through some of the most important ones:
## Changelogs are a vital part of every serious project
The whole point of a changelog is to keep track of how the project evolves over time. When working with multiple people, it helps get everyone on the same page. Keeping a changelog reduces a possible monopoly of information, since all contributors know what is going on. Of course, users also benefit from your changelog. They will know what changes they can expect when they do an update.
## Entries should have a standardized format
Changelogs are mainly meant to be readable by humans. Here are some important points to watch out for when writing a changelog:
- Every version of your software (major, minor and patch) should have one section and one section only
- Recent releases should be added at the top of the changelog (reverse chronological order)
- Each version _should_ display its release date in ISO format (YYYY-MM-DD) next to the version name
## What types of changes need to be included?
You could just go ahead and throw some changes in a big list and call it a day. To make the changelog more readable though, you should categorize every change by its type. Here's an example of a set of categories that could be included:
- **Features**: New features or additions
- **Fixes**: Bugfixes
- **Security**: Important changes regarding holes in your security
- **Documentation**: Changes or additions in your documentation should go here
This is just an example that illustrates how **I** decided to note down my changes. [changelog.md](https://changelog.md) suggests a slightly different convention, but how you're handling it doesn't really matter.
## An example
Here's an example of what a changelog could look like. It's taken from [Sabre](https://github.com/garritfra/sabre), a project I'm currently working on. The full changelog can be found [here](https://github.com/garritfra/sabre/blob/master/CHANGELOG.md).
```md
# Changelog
## v0.4.0 (2021-02-20)
This release introduces the concept of structs, alongside many improvements to the documentation.
**Features**
- Assignment operators (#10)
- Structs (#12)
**Fixes**
None
**Documentation**
- Fixed some typos and broken links
- Document boolean values
- Added this changelog!
## v0.3.0 (2021-02-12)
This release adds type inference to Sabre. There are also a lot of improvements in terms of documentation. The docs are now at a state that can be considered "usable".
**Features**
- Type inference
- The `any` type
- First attempt of LLVM backend
**Fixes**
- Fixed an error when printing numbers
**Documentation**
- Added documentation for for loops
- Added documentation for while loops
- Documented LLVM backend
- Documented comments
- Updated contributing guidelines
```
## Personal recommendations
When releasing a new version, don't just add an entry to your changelog. You should use **git tags** whenever working with versions, to mark the exact commit of the released version.
Read up on **semantic versioning**! This is the most common convention when it comes to versioning your software. ([here](https://www.geeksforgeeks.org/introduction-semantic-versioning/) is a simple guide, [here](https://semver.org/) is the official specification).
I'd also advise you to keep a log of your commits in the description of the tag. Here's a command that does all of this for you:
```
git tag -a <new release> -m "$(git shortlog <last release>..HEAD)"
```
So, if you're releasing version `v0.2.0` after `v0.1.5`, you would run this command to tag your current commit with a good commit history:
```
git tag -a v0.2.0 -m "$(git shortlog v0.1.5..HEAD)"
```
This is post 013 of [#100DaysToOffload](https://100daystooffload.com/).

57
content/posts/2021-02-24-vim-terminal-strategies.md

@ -0,0 +1,57 @@
---
title: Strategies to use a terminal alongside (Neo)Vim
date: "2021-02-23"
---
One thing that bothered me about vim for a long time, was the lack of a terminal
directly in your editor. If I'm not using Vim, I'm most definitely using VSCode
and its built-in Terminal. After searching the webs for possible solutions, I
came across a couple of strategies to achieve this.
## Executing single commands
If you just want to issue a single command without spawning an entire shell,
you can just use the `:!` command:
```
:! printf "Hello Sailor"
```
## Vim's builtin terminal
I couldn't believe my eyes when I read this, but Vim ships with a builtin
terminal! Executing `:term` will spawn it in your current buffer. How you
integrate it in your workflow is up to you. You could use tabs or open a
horizontal buffer and spawn it there. I must say that it is rather clunky to
use, since it's literally a Vim buffer that forwards stdin and stdout to the
buffer, but it's there for you to use.
## Vim x Tmux
Another great alternative is to set up Tmux with two windows, one for Vim and
one for your terminal, and switch between them. This works great on a minimal
system, but on MacOS for example, it is easier to simply use cmd+1 and cmd+2 to
switch between two tabs of the Terminal application.
## Pausing and resuming Vim
This one is my personal favorite. The idea comes from
[this](https://stackoverflow.com/a/1258318/9046809) stackoverflow answer.
The plan is to pause the Vim process and resume it later. To pause Vim, you
press `<ctrl>-z`. This sends the process in the background. Then, to resume the
process, simply issue the `fg` command and Vim's process resumes in the
foreground.
## Conclusion
I'm sure there are many more strategies that could be added to this list. I'd be
interested to hear how your setup works! If you liked these techniques, you
might be interested in
[@lopeztel](https://fosstodon.org/web/accounts/211905)s
[cheat sheet](https://lopeztel.xyz/2021/02/21/my-neovim-cheatsheet/) for Vim.
This is post 014 of [#100DaysToOffload](https://100daystooffload.com/).

28
content/posts/2021-03-13-git-builtin-lifesaver.md

@ -0,0 +1,28 @@
---
title: Git's built-in lifesaver
date: "2021-03-13"
---
Everyone was in this situation at some point. You wasted a day's worth of work by accidentally deleting a branch. But, all hope is not lost! Git never forgets.
Every action, be it committing changes, deleting or switching branches, is noted down by Git. To see your latest actions, you can simply run `git reflog` (It's pronounced `ref-log` but `re-flog` sounds just as reasonable):
```
5704fba HEAD@{45}: commit: docs: update changelog
b471457 HEAD@{46}: commit: chore: refactor binop checks in parse_expression
5f5c5d4 HEAD@{47}: commit: fix: struct imports
76db271 HEAD@{48}: commit: chore: fix clippy warning
ac3e11c HEAD@{49}: commit: fix: circular imports
0cbdc88 HEAD@{50}: am: lexer: handle ' or " within the string properly
27699f9 HEAD@{51}: commit: docs: spec: add notation
```
Commits in Git are just data that is not associated with anything. If you accidentally delete a branch, the commits will stay where they are, and you can reference them directly. To recreate your deleted branch, simply run this command:
```
git checkout -b <branch> <sha>
```
And that's it! Your branch is restored. Remember to commit early and often, or prepare to lose your work!
This is post 015 of [#100DaysToOffload](https://100daystooffload.com/).

247
content/posts/2021-04-07-pgp-guide.md

@ -0,0 +1,247 @@
---
title: A pretty good guide to pretty good privacy
date: "2021-04-07"
---
In the past week, I've been experimenting with PGP, or GPG in particular. In a nutshell, PGP is an encryption standard with a wide range of use cases. For quite some time, I didn't see the point of keeping a PGP keypair. It seemed like a burden to securely keep track of the key(s). Once you lose it, you will lose the trust of others. But after doing some research on the topic, I found that it's not actually that much of a hassle, while giving you many benefits.
# The Why
The most obvious benefit is encrypting and decrypting messages and files. If you upload your public key, I can encrypt our private conversations. Nobody will be able to read what we're chatting about. If you fear that cloud providers will read through your documents, you can also go ahead and encrypt all of your data with your keypair.
But PGP is not just about encryption. A keypair also gives you a proof of identity. If I see that a piece of work is signed by you, I can be certain that you and only you have worked on this. By signing the keys of people we trust, we build a "chain of trust". A key with many signatures generally has a higher reputation than one without any signatures.
Take Git commits for example. All it takes is a `git config user.email "elon@spacex.com"` and I can publish code under a different identity. But if everyone on the team signed their work, they will quickly see that a commit is missing its signature, because I'm simply not able to sign my work with Elon Musk's keypair. Only if they see a badge like this, they will know that they can trust it.
Your keypair can also come in handy as an SSH key. Before I knew about PGP, I always had to install one key per machine I was working on. With PGP, you only have a single identity, and therefore you only have to install one key on your servers.
# The How
Let's first go over the process of setting up a keypair. For this, we will need the `gpg` command installed on our system. Usually, this is just a `<package manager> install gpg` away. Then, we will have to generate a keypair. The quickest way to get one is to use `gpg --gen-key`, but that will make some quirky assumptions about how you want to use your key.
In PGP, there is this concept of a **keyring**. A keyring has one master key and many subkeys. It is generally a good idea to have one fat master key that never expires and many small subkeys that last about a year or two. The benefit of structuring your keys like this is that you will always have your trusted keychain, and in case something goes south, e.g. your key gets compromised, you can replace that subkey and keep your identity.
With that in mind, let's create our master key. Run `gpg --full-gen-key` and follow the instructions. You probably want to use the `RSA and RSA (default)` option, and a key that is 4096 bits long (remember, this is the fat master key that never expires, so it must be secure). The comment can be left blank, unless you know what you are doing with that field. Enter a strong passphrase! If your private key were to get compromised, this passphrase is your last line of defense. Make it long, hard to crack but still memorable. If everything went well, your key should be generated. Here's the full example output:
```
root@c6acc9eb4fd1:/# gpg --full-gen-key
gpg (GnuPG) 2.2.19; Copyright (C) 2019 Free Software Foundation, Inc.
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Please select what kind of key you want:
(1) RSA and RSA (default)
(2) DSA and Elgamal
(3) DSA (sign only)
(4) RSA (sign only)
(14) Existing key from card
Your selection? 1
RSA keys may be between 1024 and 4096 bits long.
What keysize do you want? (3072) 4096
Requested keysize is 4096 bits
Please specify how long the key should be valid.
0 = key does not expire
<n> = key expires in n days
<n>w = key expires in n weeks
<n>m = key expires in n months
<n>y = key expires in n years
Key is valid for? (0)
Key does not expire at all
Is this correct? (y/N) y
GnuPG needs to construct a user ID to identify your key.
Real name: Foo
Name must be at least 5 characters long
Real name: Foo Bar
Email address: foo@bar.com
Comment:
You selected this USER-ID:
"Foo Bar <foo@bar.com>"
Change (N)ame, (C)omment, (E)mail or (O)kay/(Q)uit? O
We need to generate a lot of random bytes. It is a good idea to perform
some other action (type on the keyboard, move the mouse, utilize the
disks) during the prime generation; this gives the random number
generator a better chance to gain enough entropy.
We need to generate a lot of random bytes. It is a good idea to perform
some other action (type on the keyboard, move the mouse, utilize the
disks) during the prime generation; this gives the random number
generator a better chance to gain enough entropy.
gpg: key C8E4854970B7A1A3 marked as ultimately trusted
gpg: revocation certificate stored as '/root/.gnupg/openpgp-revocs.d/4E83F95221E92EDB933F155AC8E4854970B7A1A3.rev'
public and secret key created and signed.
pub rsa4096 2021-04-07 [SC]
4E83F95221E92EDB933F155AC8E4854970B7A1A3
uid Foo Bar <foo@bar.com>
sub rsa4096 2021-04-07 [E]
```
You could stop here and use this key, but let's instead create some subkeys under that key, to make our lives a bit easier. Take the fingerprint of the key (that large number in the output) and run `gpg --expert --edit-key <your fingerprint>`. Run `addkey` three times to add these three keys:
## Signing key
This key will be used to sign your work (git commits, tags, etc.).
```
gpg> addkey
```
1. Choose option "RSA (set your own capabilities)", which is currently number 8.
1. Toggle E (Encryption) so the "Current allowed actions" only lists Sign and confirm with Q.
1. Choose the keysize 2048 (or whatever you prefer).
1. Choose the key expire date 1y (or whatever you prefer).
1. Confirm twice, then enter your passphrase.
## Encryption key
This key will be used to encrypt and decrypt messages.
```
gpg> addkey
```
1. Choose option "RSA (set your own capabilities)", which is currently number 8.
1. Toggle S (Sign) so the "Current allowed actions" only lists Encryption and confirm with Q.
1. Choose the keysize 2048 (or whatever you prefer).
1. Choose the key expire date 1y (or whatever you prefer).
1. Confirm twice, then enter your passphrase.
## Authentication key
This key will be used for SSH authentication.
```
gpg> addkey
```
1. Choose option "RSA (set your own capabilities)", which is currently number 8.
1. Toggle S (Signing), E (Encryption) and A (Authentication) so the "Current allowed actions" only lists Authenticate and confirm with Q.
1. Choose the keysize 2048 (or whatever you prefer).
1. Choose the key expire date 1y (or whatever you prefer).
1. Confirm twice, then enter your passphrase.
Now you should have one key per use case: signing, encrypting and authentication, each with an expiration date:
```
sec rsa4096/C8E4854970B7A1A3
created: 2021-04-07 expires: never usage: SC
trust: ultimate validity: ultimate
ssb rsa4096/C5F71423813B40A0
created: 2021-04-07 expires: never usage: E
ssb rsa2048/52D4D1D19533D8A5
created: 2021-04-07 expires: 2022-04-07 usage: S
ssb rsa2048/072D841844E3F949
created: 2021-04-07 expires: 2022-04-07 usage: E
ssb rsa2048/42E4F6E376DD92F6
created: 2021-04-07 expires: 2022-04-07 usage: A
[ultimate] (1). Foo Bar <foo@bar.com>
```
Save your key, and optionally upload it to one of the many keyservers:
```
gpg> save
$ gpg --keyserver keys.openpgp.org --send-keys foo@bar.com
```
**Pro tip**: To set a default keyserver (I use `keys.openpgp.org`, but there are many others out there!), simply add it in your `~/.gnupg/gpg.conf` file:
```
keyserver keys.openpgp.org
```
People can now import your public key via `gpg --keyserver keys.openpgp.org --search-keys foo@bar.com`.
We're done with the setup, let's put our keys to use!
## Code Signing
To sign your code, you will have to tell git which key to use. Edit your global git options (`~/.gitconfig`) and add these fields:
```
[commit]
gpgsign = true
[tag]
gpgsign = true
[user]
name = Foo Bar
signingkey = 52D4D1D19533D8A5 # Use the ID of your signing key
email = foo@bar.com
```
Now, whenever you add a commit, git will sign it with your key. You will have to let your git hosting provider know that this key is yours. Go to your account settings and look for a tab that says "Manage (GPG) keys". Where this tab is depends on your choice of service. Next, run `gpg --export --armor <your master key id>` and copy the resulting key into the input field of your git hosting service.
Whenever you push a commit, its signature will be checked against that of your account. And that's all the magic!
![A signed commit](/assets/signed_commit.png)
## Encrypting messages
In order to send an encrypted message to someone, you will need their public key. There are numerous ways to obtain someone's public key. The simplest way is to ask the person for the raw key. If it's in a text file, you can import it like so:
```
cat some_key.txt | gpg --import
```
Oftentimes, people will store their keys on a keyserver, just like you have probably done. To import someone's key, simply search for it on a keyserver. I'll use my key here as an example.
```
gpg --keyserver keys.openpgp.org --search-keys garrit@slashdev.space
```
Now, your computer should know about my key. To verify that it's actually me you have imported, you can check if the output of `gpg --fingerprint garrit@slashdev.space` matches my actual fingerprint: `2218 337E 54AA 1DBE 207B 404D BB54 AF7E B093 9F3D`.
Optionally, if you trust that the key is actually associated with me, you can sign it. This lets other people know that you trust me, which helps build a so-called "chain of trust". A key which has been signed by many people is generally more trustworthy than one that has no signatures.
```
gpg --sign-key garrit@slashdev.space
```
Now, let's encrypt a message that only I will be able to read:
```
printf "If you can read this, you've successfully decrypted this message" | gpg --encrypt --sign --armor -r garrit@slashdev.space
```
Feel free to send this message to my email-address, I'm happy to chat with you!
Decrypting something is as easy as encrypting something. Say the encrypted message lives in `message.txt.asc`. If you are the recipient, all you have to do is to run `gpg --decrypt message.txt.asc`.
## SSH
Your PGP key can also be used as an SSH key to authenticate on your servers.
First we need to add the following to `~/.gnupg/gpg-agent.conf` to enable SSH support in gpg-agent.
```
enable-ssh-support
```
Next, we'll need to tell gpg which key to use. We need to get the so called `keygrip` of your authentication key and add it to the `~/.gnupg/sshcontrol`. The keygrip can be obtained by running `gpg -K --with-keygrip`. Just copy the keygrip of the authentication key and paste it into the `~/.gnupg/sshcontrol` file.
Then, we want the ssh agent to know where to look for the key. Put this in your `.bashrc` file (or corresponding config):
```
export GPG_TTY=$(tty)
export SSH_AUTH_SOCK=$(gpgconf --list-dirs agent-ssh-socket)
gpgconf --launch gpg-agent
```
Then, run `ssh-add -l` to load the key directly.
To get the public ssh key of your keypair, run this command:
```
gpg --export-ssh-key foo@bar.com
```
and add the output to the `~/.ssh/authorized_keys` file on your server. When signing in, you should be prompted to enter the passphrase of your key and then authenticated.
## Closing thoughts
I hope by now you see the benefits you gain from having a PGP keypair. Whether you find it useful enough to set one up is of course up to you. It is however a good practice to at least sign your git commits as a proof of identity. There are services like [Keyoxide](https://keyoxide.org) that let you keep a "public record" of your key, so that other people can verify your identity more easily. If you set up your key, let me know by sending an encrypted message!
This is post 016 of [#100DaysToOffload](https://100daystooffload.com/).

8
content/posts/_2021-01-14-diy-software.md

@ -0,0 +1,8 @@
---
title: DIY Software
date: "2021-01-14"
---
We often take free software for granted. We think of many programs as a "black box" that is supposed to _just work_. I think this goes against the very nature of free and open source software.
This is post 004 of [#100DaysToOffload](https://100daystooffload.com/).

12
content/posts/_2021-01-22-library-of-babel.md

@ -0,0 +1,12 @@
---
title: The library of babel
date: "2021-01-22"
---
What if I told you that everything you ever have said, thought and wrote, and everything you will say, think or write, has already been written down? Let me share my discovery of a construct called the _library of babel_.
The library of babel is a
## Real world use cases
This is post 006 of [#100DaysToOffload](https://100daystooffload.com/).

37
content/posts/_2021-01-25-kotlin-refactor-strategies.md

@ -0,0 +1,37 @@
---
title: Fixing nullability issues when migrating to Kotlin
date: "2021-01-23"
---
The last couple of days, I had the opportunity to refactor some of the Kotlin code in our massive Android project. The app is quite old, and a lot of legacy code has accumulated over the years. At one point, we decided to migrate a lot of our view-logic from Java to Kotlin. Instead of cleaning the code up, this led to an even bigger buildup of technical debt. The problem was that we mainly used the migration-tools that Android Studio provides, which, at that time, were not very sophisticated. Sometimes we had to manually fix some things, and since it is such a massive codebase, we didn't really take the time to clean every converted file up.
Once the number of warnings approached infinity, we finally decided to dedicate a chunk of each sprint to house-cleaning in our codebase. My recent refactoring involved cleaning up unsafe usages, and I want to share some of my approaches to this kind of refactoring.
## Obvious fixes
I encountered this kind of pattern a number of times during my refactoring:
```kt
textView!!.setOnClickListener { showDialog() }
```
If you see something like this, your alarm bells should immediately ring. If textView is `null`, the app would crash. The same behavior is happening with Java, so a switch from Java to Kotlin would be pointless.
The simple fix is to replace the `!!` operator with a `?`, which only executes the line of code if the element is not null.
```kt
textView?.setOnClickListener { showDialog() }
```
Of course, if the view _is_ null even though you're expecting a value, you should probably do something about this. A simple way to handle this is to add some "sanity checks" to your code. Create an `assert` function that throws an `AssertionError` if a given condition is false. Unfortunately, Android does not use the builtin `assert` function that Kotlin provides, so you will have to write your own. The neat thing about `assert` is that you can toggle this behavior, meaning you can enable assertion errors in the debug build, but disable it in the final production build to prevent the app from randomly crashing.
```kt
assert(textView != null)
textView?.setOnClickListener { showDialog() }
```
## More complex cases
Let's take a look at this code: ...
This is post 007 of [#100DaysToOffload](https://100daystooffload.com/).

81
content/posts/fighting-array-functions-with-es6.md

@ -0,0 +1,81 @@
---
title: Fighting Array Functions with ES6
date: "2019-04-07"
---
Yesterday, I came across an interesting bug regarding JavaScript Arrays, and I wanted to share my approach to fixing it.
At a basic level, I wanted to pass part of an array to a function, but wanted to use the original array later on.
```js
let arr = [1, 2, 3, 4, 5]
let something = arr.splice(0, 3)
doSomething(something) // [1, 2, 3]
doSomethingWithOriginal(arr) // arr is now [4, 5]!
```
Thinking that Array.prototype functions don’t mutate the array directly, I moved on with my day. This led to a bunch of problems down the line.
Some array methods in the EcmaScript specification are designed to mutate arrays, while others do not.
### Non-mutating functions
- Array.prototype.map()
- Array.prototype.slice()
- Array.prototype.join()
- …
These functions do not mutate the array they are called on. For example:
```js
let arr = [1, 2, 3, 4, 5];
let partOfArr = arr.slice(1, 3);
console.log(partOfArr); // [2, 3]
console.log(arr); // [1, 2, 3, 4, 5]
```
### Mutating functions
- Array.prototype.sort()
- Array.prototype.splice()
- Array.prototype.reverse()
- …
These methods mutate the array directly. This can lead to unreadable code, as the value can be manipulated from anywhere. For example:
```js
let arr = [5, 2, 4];
arr.sort();
console.log(arr); // [2, 4, 5]
```
To me, it is very unclear which functions do, and which don’t, mutate arrays directly. But there’s a simple trick you can use to stop letting the functions mutate arrays directly, ultimately leading to more readable and reliable code.
## Enter: The ES6 Spread Operator!
![Spread Operator](https://images.unsplash.com/photo-1518297056586-889f796873e0?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1225&q=80)
Take a look at this snippet:
```js
let arr = [3, 5, 1, 2, 4];
let sorted = [...arr].sort();
console.log(arr); // [3, 5, 1, 2, 4]
console.log(sorted); // [1, 2, 3, 4, 5]
```
Voilà! We have a sorted array, and the original one is also around. The spread operator (`[...arr]`) is used to create a new array with every value of `arr`.
You can use this for arrays, as well as objects:
```js
let obj = {
field: "example",
};
let extendedObj = {
...obj,
anotherField: 42,
};
console.log(extendedObj.field); // "example"
```
## Conclusion
ES6 brought us awesome features like let and const assignments, as well as arrow functions. A more unknown feature however is the spread operator. I hope you now know how to use the spread operator, and that you can adopt it for cleaner and simpler code.

59
content/posts/introducing-slashdev-space.md

@ -0,0 +1,59 @@
---
title: Introducing my new blog - slashdev.space
date: "2020-10-08"
---
Hi! Welcome to the new home of my blog. Let me give you a small tour of why I built it, its underlying architecture and my ambitions with this project.
My old blog was based on [Gatsby.js](https://www.gatsbyjs.com/), a static site generator built on React. Back then, I used a quick and dirty blogging template I stole from the Gatsby themes page. Gatsby themes are essentially npm packages, that you throw in your project as a dependency. While it was super easy to set up, I had a hard time configuring it to my likings, since I relied on a framework someone else has provided.
The real turning point came when I tried to build the blog after a few months of not maintaining it. I wasn't able to compile it, since some dependency of the blog template broke. Of course I could have forked the template and fixed it to my likings, but I didn't want to maintain yet another library until the end of my blog's life. You could draw some parallels to proprietary software, where you don't have the chance to look under the hood and see what's wrong, except in this case, it was just me being lazy.
## A new approach
What I want is a project that I have full control over. I want to be able to customize styling and add features whenever I want to. Gatsby would have been able to give me all of this, but I have the feeling that it sometimes overcomplicates things too much (Having a GraphQL backend is nice, but do you really need that?). I looked at frameworks like [Hugo](https://gohugo.io/) which offers lightning fast compilation, but with it, I would have been tied to "the Hugo way" of templating and configuring the project.
In the end it was [Next.js](https://nextjs.org/) that caught my attention the most, given how simple it is. There's not much configuration involved in the setup process (although getting it to work with github pages was somewhat tedious). Each component in the `pages/` directory corresponds to a full page on the website. The `public/` directory is served statically. That's really all I needed to build a modular webpage.
## Wiring things up
Because Next.js is so minimalistic, there are some parts that you have to set up by yourself. Rendering markdown files for example does not come included, it has to be done manually. Thankfully, there are some packages that can do this for you. All you have to do is write the markdown to the specific pages. It basically boils down to this:
```js
const posts = ((context) => {
const keys = context.keys();
const values = keys.map(context);
const data = keys.map((key, index) => {
// Create slug from filename
const slug = key
.replace(/^.*[\\\/]/, "")
.split(".")
.slice(0, -1)
.join(".");
const value = values[index];
// Parse yaml metadata & markdownbody in document
const document = matter(value.default);
return {
frontmatter: document.data,
markdownBody: document.content,
slug,
};
});
return data;
})(require.context("../content/posts", true, /\.md$/));
```
Another special case was setting up a RSS feed. I basically had to write a simple script that generates the feed from all posts in the `content/posts` directory, let it run during every build and throws the output in the `public/` directory, so that it can be served as `/rss.xml`. You might argue that this is quite a tedious process for such a feature, but it gives me all the flexibility I want over the features of this project.
## Deployment
I've considered self hosting this blog on my server. While that would have been a fun learning-experience, I wanted to stick to the simple deploy-and-forget workflow I was used to from GitHub Actions. Every push to the master branch triggers a full deployment. No manual work required. Doing it this way, I also save the time and energy to set up SSL encryption, plus it is highly scalable (not that I expect a traffic-explosion, but you never know). Setting Next.js up to deploy to GitHub Pages takes some time, because there are some pitfalls that you have to be aware of. [This article](https://dev.to/jameswallis/deploying-a-next-js-app-to-github-pages-24pn) helped me a lot.
## Ambitions
In the future, I want /dev.space to become more than just a blog. I want it to become a platform for my thoughts and ideas. I'm also playing with the idea of migrating my main website (https://garrit.xyz) over to /dev.space, so I made sure that my current setup is very future-proof and can be easily extended.
Feel free to dig through the source code for this project. You can find it on my GitHub: https://github.com/garritfra/slashdev.space
Let me know if there is anything that you miss on this blog. Searchable posts? Dark-mode? This is only the beginning of /dev.space!

83
content/posts/lightweight-vpn-with-wireguard.md

@ -0,0 +1,83 @@
---
title: Quick Tip! Setting up a lightweight Server-Client VPN with wireguard
date: "2020-08-19"
---
This blog post has been taken over from my [collection of "Today I Learned" articles](https://garrit.xyz/til).
You can easily set up a private network of your devices. This way you can "talk" to your phone, raspberry pi etc. over an **encrypted** network, with simple IP-addresses.
![](https://images.unsplash.com/photo-1505659903052-f379347d056f?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=2550&q=80)
Firstly, install wireguard on all of your systems. Simply install the `wireguard` package from your package manager respectively. Check out [the official installation guide](https://www.wireguard.com/install/) if you can't find the package. If you're on debian, try [this](https://wiki.debian.org/WireGuard?action=show&redirect=Wireguard) guide. There's also an app for Android, iOS and MacOS.
Every participant (client and server) needs a key pair. To generate one, run this command first on the server, and then on all clients:
```bash
wg genkey | tee wg-private.key | wg pubkey > wg-public.key
```
It might make sense to do this as root. This way you don't have to type `sudo` with every command.
## Server Configuration
You will need to create a configuration for the server. Save this template at `/etc/wireguard/wg0.conf`, and replace the fields where needed:
```conf
[Interface]
PrivateKey = <Server private key from wg-private.key>
Address = 10.0.0.1/24 # IP Address of the server. Using this IP Address, you can assign IPs ranging from 10.0.0.2 - 10.0.0.254 to your clients
ListenPort = 51820 # This is the standard port for wireguard
# The following fields will take care of routing
PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
# Laptop
[Peer]
PublicKey = <Public Key of Laptop Client>
AllowedIPs = 10.0.0.2/32 # The client will be reachable at this address
# Android Phone
[Peer]
PublicKey = <Public Key of Phone Client>
AllowedIPs = 10.0.0.3/32
# ...
```
Then run `wg-quick up wg0` to start the wireguard interface with the configuration from `/etc/wireguard/wg0`.
## Setting up clients
Setting up clients is very similar to the server setup process. Generate a keypair on each client, save the following config to `/etc/wireguard/wg0.conf` and replace the necessary fields:
```conf
[Interface]
PrivateKey = <Client Private Key from wg-private.key>
Address = 10.0.0.2/32 # The fixed address of the client. Needs to be specified in the server config as well
[Peer]
PublicKey = <Server Public key>
AllowedIPs = 10.0.0.0/24 # Routes all traffic in this subnet to the server. If you want to tunnel all traffic through the wireguard connection, use 0.0.0.0/0 here instead
Endpoint = <Public Server IP>:51820
PersistentKeepalive = 25 # Optional. Will ping the server every 25 seconds to remain connected.
```
On every client, run `wg-quick up wg0` to start the interface using the config at `/etc/wireguard/wg0.conf`.
This whole process might be easier on platforms with a GUI, such as Android or macOS.
Now, try to ping your phone from your laptop:
```
ping 10.0.0.3
PING 10.0.0.3 (10.0.0.3) 56(84) bytes of data.
64 bytes from 10.0.0.3: icmp_seq=1 ttl=64 time=5382 ms
64 bytes from 10.0.0.3: icmp_seq=2 ttl=64 time=4364 ms
```
### References
- [Official Documentation](https://www.wireguard.com/)
- [https://www.stavros.io/posts/how-to-configure-wireguard/](https://www.stavros.io/posts/how-to-configure-wireguard/)

63
content/posts/patch-based-git-workflow.md

@ -0,0 +1,63 @@
---
title: The Patch-Based Git Workflow
date: "2020-09-28"
---
If you have ever contributed to an open source project, chances are you have opened a pull request on GitHub or a similar platform to present your code to the maintainers. While this is a very approachable way of getting your code reviewed, some projects have decided against using pull requests and instead accept patches via email.
## An introduction to patches
A patch is essentially a git commit expressed in plain text. It describes what commit the change is based on, and what has changed. A basic patch looks like this:
```
From 92132241233033a123c4fa833449d6a0d550219c Mon Sep 17 00:00:00 2001
From: Bob <bob@example.com>
Date: Tue, 25 May 2009 15:42:16 +0200
Subject: [PATCH 1/2] first change
---
test.txt | 1 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/test.txt b/test.txt
index 7634da4..270eb95 100644
--- a/test.txt
+++ b/test.txt
@@ -1 +1 @@
-Hallo Bob
+Hallo Alice!
```
As you can see, it is very readable for both the reviewer and the machine.
## Sending and receiving patches
The easiest way you can generate a patch from a commit is to use `git-format-patch`:
```
git format-patch HEAD^
```
This will generate a `.patch` file, that can be embedded into an email and sent to the maintainers. Oftentimes they will then reply to your mail with some inline comments about your code.
To simplify this process further, git has the `send-email` command, which lets you send the patch directly to someone without needing to embed it manually. I won't go into details about this, but there is a [well-written guide](https://git-send-email.io/) on how to set it up.
If you have received a patch from someone, you can apply it to your tree with the `am` (apply mail) command:
```
git am < 0001-first-change.patch
```
check your `git log` to see the patch in form of the latest commit.
## Why even bother
You might think that this is just a silly and outdated approach to collaborative development. "Why not simply open a pull request?" you might ask. Some projects, especially low-level oriented ones like the Linux kernel, do not want to rely on third-party platforms like GitHub to host their code, with good reasons:
1. Everyone can participate! You don't need to register an account on some proprietary website to collaborate in a project that uses a patch-based workflow. You don't even have to expose your identity, if you don't want to. All you need is an email-address, and frankly most of us have one.
2. It's plain simple! Once you get used to generating and applying patches on the command line, it is in fact easier and faster than opening a pull request in some clunky GUI. It doesn't get simpler than plain text.
3. It is rewarding! Once you have submitted a patch to a project, there is no better feeling than getting a simple "Applied, thanks!" response from a maintainer. And if it's a response that contains feedback rather than an approval, it feels even better to submit that reworked code again and get it eventually applied.
## Conclusion
The patch-based workflow is an alternative way to collaborate with developers. If it helps you in your day to day business depends on the projects you are contributing to, but in the end it is always good to have many tools under your belt.

28
content/posts/quick-tip-terminal-pastebin.md

@ -0,0 +1,28 @@
---
title: Quick Tip! Sharing terminal output with Termbin
date: "2019-12-31"
---
Ever find yourself in a situation where you simply want to save or share the output of a terminal command? Selecting, copying and pasting text from stdout always feels quite tedious, if you just want to share the contents of a file.
A project called [Termbin](https://termbin.com/) tries to simplify this process. Just pipe the command you want to save to the following url on port `9999`, using Netcat:
```sh
echo "Hello, Blog!" | nc termbin.com 9999
```
Instead of showing the output, it will be forwarded to Termbin and show the URL, under which your output will be available:
```sh
➜ blog git:(master) ✗ cat ./some_file.txt | nc termbin.com 9999
https://termbin.com/faos
➜ blog git:(master) ✗
```
Sure enough, after navigating to [`https://termbin.com/faos`](https://termbin.com/faos), we will see the contents of `some_file.txt`. Neat!
### ⚠Word of Caution⚠
Do not pipe any personal information, credentials or any other private data into Termbin. It will be instantly available to the general public, and there's no quick way to remove it.
Happy Pasting!✨

120
content/posts/testing-isnt-hard.md

@ -0,0 +1,120 @@
---
title: Testing isn't hard
date: "2019-11-08"
---
"I write two tests before implementing a method", "My project has 90% coverage".
I don't know about you, but that's something I don't hear very often. But why is that?
Testing is not even that difficult to do, yet it is always coming short in my projects. About a year ago, I tried to implement tests in my React applications with little success, mostly because integrating `enzyme` and configuring it correctly is not that intuitive for a relatively new developer. I want to share my (partly opinionated) approach to JavaScript testing with `jest`, to get you started. In a later post I will demonstrate a way to implement `enzyme` into your React projects.
# The basics of testing JavaScript functions
To get started, you need a npm-project. I don't think I have to explain that, but just in case:
```bash
mkdir awesome-testing-project
cd awesome-testing-project
npm init -y
```
Of course, we need a unit we want to test. What about a method that returns the first element of an array?
```js
module.exports = function firstElement(arr) {
return arr[1];
};
```
You already spotted a bug, huh? Let's keep it simple for now.
Install and initialize Jest, an open-source testing framework maintained by Facebook. When initializing, you should check every question with `y`.
```bash
npm i --save-dev jest
npx jest --init
```
Next up, we need to define our first test. Conventionally, we create a folder named `__tests__` in the directory of the module we want to test. inside it, there should be a file named `<module>.test.js`. Something like this:
```bash
▶ tree
.
├── package.json
└── src
├── __tests__
   └── firstElement.test.js
└── firstElement.js
```
Jest provides global functions that do not need to be imported in a file. A simple test can look like this:
```js
const firstElement = require("../firstElement.js");
test("firstElement gets first element of array", () => {
expect(firstElement([1, 2])).toBe(1);
});
```
`expect` is another word for `assert`. If you ask me, "Expect firstElement of [1, 2] to be 1" sounds reasonably english, doesn't it? After defining the test, all there is to do left is to run the `npm test` command, which has been created for us by running `npx jest --init` earlier.
```bash
▶ npm test
> jest
FAIL src/__tests__/firstElement.test.js
✕ firstElement (6ms)
● firstElement
expect(received).toBe(expected) // Object.is equality
Expected: 1
Received: 2
2 |
3 | test('firstElement', () => {
> 4 | expect(firstElement([1, 2])).toBe(1);
| ^
5 | });
6 |
at Object.<anonymous>.test (src/__tests__/firstElement.test.js:4:32)
Test Suites: 1 failed, 1 total
Tests: 1 failed, 1 total
Snapshots: 0 total
Time: 1.1s
Ran all test suites.
npm ERR! Test failed. See above for more details.
```
Whoops! Looks like we have found a bug! Let's fix it by adjusting the index of the return value in the firstElement function:
```js
module.exports = function firstElement(arr) {
return arr[0];
};
```
And after rerunning `npm test`:
```bash
▶ npm test
> jest
PASS src/__tests__/firstElement.test.js
✓ firstElement (4ms)
Test Suites: 1 passed, 1 total
Tests: 1 passed, 1 total
Snapshots: 0 total
Time: 0.666s, estimated 2s
Ran all test suites.
```
Yay, your first unit test! Of course, there is much more to find out about the Jest framework. To see a full guide, read the [official docs](https://jestjs.io/).
I have prepared a [template repository](https://github.com/garritfra/react-parcel-boilerplate) for building react apps. It also uses Jest to run tests, you don't have to worry about a thing! If you found this interesting, consider checking out my other blog posts, and/or check out my [GitHub](https://github.com/garritfra)!

40
content/posts/whom-do-you-trust.md

@ -0,0 +1,40 @@
---
title: Whom do you trust?
date: "2020-03-17"
---
Nowadays, password managers are a necessity if you care even the slightest about your personal belongings on the interwebs. But think about it, do you really want to trust another company to store your most sensitive information?
![](https://images.unsplash.com/photo-1522251670181-320150ad6dab?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=2566&q=80)
##### TL;DR
Use a **stateless** password manager like [LessPass](https://lesspass.com/) to access your password without relying on a third party to store your data.
## Why use a password manager in the first place?
Having a single password for multiple accounts is convenient. What's also convenient, is **using** this password on multiple accounts once you have access to a single one. What might be convenient to you, might also be convenient to others. Many people, [especially celebrities](https://web.archive.org/web/20170225163642/http://uk.businessinsider.com/twitter-says-it-wasnt-hacked-passwords-reused-older-hacks-malware-to-blame-2016-6), fall victim to this trap of comfort.
To counteract this, people are (and should be) using different passwords for different accounts. These passwords differ in a single letter or digit (Twitter: `porridge4president`, GitHub: `poridge5president`, etc.), or they don't match at all (Twitter: `porridge4president`, GitHub: `YouWontGuessThisOne`).
The problem that most likely arises from this technique is called _password chaos_ ([Source](https://encyclopedia2.thefreedictionary.com/password+chaos)):
> _"The confusion that arises when users have many unique passwords."_
The aim of a password manager is [to solve this problem by storing all of your passwords in a single place and securing it with an _ultra secure superpassword!_ (©)](https://www.businessinsider.com/how-to-use-password-manager-store-protect-yourself-hackers-lastpass-1password-dashlane-2017-2?r=DE&IR=T). This way, you can use arbitrary passwords - preferably gibberish that doesn't make sense to humans (nor machines) - without losing them, as long as you have your _ultra secure superpassword!_ (©) - aka your masterpassword. The benefits are obvious: You get rid of the password chaos problem while staying relatively secure. Even though password managers are quite beneficial, some people (including myself) see a catch in them.
## Relying on a third party
Relying on third party companies doesn't seem like a big deal. After all, you are probably using some form of cloud service to host your photos. Yet there's a lot of trust involved in letting others handle your private data, especially your passwords. In 2017, [a major password manager got hacked, exposing sensitive data including users and their passwords](https://www.zdnet.com/article/onelogin-hit-by-data-breached-exposing-sensitive-customer-data/). This shows the potential for security breaches in an application that inherently seemed safe. But what if I told you that there is an alternative to this? A password manager that does not store your data at all?
## A Stateless password manager
Recently, I stumbled across [LessPass](https://lesspass.com/#/). LessPass is a password manager that is very different from what I have seen so far. Instead of storing passwords that either you or a random password generator came up with, it computes passwords on the fly, given a website, username, your masterpassword and some options.
![LessPass](../assets/lesspass.gif)
The key here is that LessPass uses a **pure function**, i.e. a function that given the same parameters will always give the same result. Using this technique, there's no need to store your password in a database owned by a large company, nor do you have to sync your passwords between your devices ([but there's an app](https://play.google.com/store/apps/details?id=com.lesspass.android&hl=de)). The computation happens right on your machine, and **only** on your machine. If you want to find out more about how it works under the hood, you can check out [the author's blog post](https://blog.lesspass.com/lesspass-how-it-works-dde742dd18a4#.vbgschksh). He goes into great detail on what algorithm is used to compute your passwords and how to utilize every feature of LessPass.
## Conclusion
Being a little privacy nerd myself, I often think twice about what services I want to use, often even looking into self-hosted alternatives to major products. There are multiple products that offer self hosted solutions to store your passwords, however I also don't even trust _myself_ with such sensitive data either. LessPass eliminates the need to have a third party watch over your data, let alone to store it on their servers.

1
favicon.svg

@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="256" height="256" viewBox="0 0 100 100"><rect width="100" height="100" rx="0" fill="#000000"></rect><path d="M36.76 18.00L42.76 18.00L18.28 82L12.28 82L36.76 18.00ZM68.04 77.20L68.04 77.20Q63.08 77.20 58.92 74.72Q54.76 72.24 52.28 67.56Q49.80 62.88 49.80 56.40L49.80 56.40Q49.80 49.76 52.44 45.12Q55.08 40.48 59.40 38.04Q63.72 35.60 68.68 35.60L68.68 35.60Q73.64 35.60 77.28 38.16Q80.92 40.72 82.12 44.96L82.12 44.96L81.32 45.52L81.32 18.00L87.72 18.00L87.72 76.40L82.12 76.40L81.56 66.72L82.44 66.24Q81.88 69.76 79.76 72.20Q77.64 74.64 74.56 75.92Q71.48 77.20 68.04 77.20ZM68.68 71.60L68.68 71.60Q72.52 71.60 75.36 69.68Q78.20 67.76 79.76 64.20Q81.32 60.64 81.32 55.84L81.32 55.84Q81.32 51.28 79.76 48Q78.20 44.72 75.44 42.96Q72.68 41.20 69 41.20L69 41.20Q62.92 41.20 59.56 45.36Q56.20 49.52 56.20 56.40L56.20 56.40Q56.20 63.28 59.48 67.44Q62.76 71.60 68.68 71.60Z" fill="#fff"></path></svg>

Before

Width:  |  Height:  |  Size: 941 B

93
fonts/work-sans/OFL.txt

@ -1,93 +0,0 @@
Copyright 2019 The Work Sans Project Authors (https://github.com/weiweihuanghuang/Work-Sans)
This Font Software is licensed under the SIL Open Font License, Version 1.1.
This license is copied below, and is also available with a FAQ at:
http://scripts.sil.org/OFL
-----------------------------------------------------------
SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
-----------------------------------------------------------
PREAMBLE
The goals of the Open Font License (OFL) are to stimulate worldwide
development of collaborative font projects, to support the font creation
efforts of academic and linguistic communities, and to provide a free and
open framework in which fonts may be shared and improved in partnership
with others.
The OFL allows the licensed fonts to be used, studied, modified and
redistributed freely as long as they are not sold by themselves. The
fonts, including any derivative works, can be bundled, embedded,
redistributed and/or sold with any software provided that any reserved
names are not used by derivative works. The fonts and derivatives,
however, cannot be released under any other type of license. The
requirement for fonts to remain under this license does not apply
to any document created using the fonts or their derivatives.
DEFINITIONS
"Font Software" refers to the set of files released by the Copyright
Holder(s) under this license and clearly marked as such. This may
include source files, build scripts and documentation.
"Reserved Font Name" refers to any names specified as such after the
copyright statement(s).
"Original Version" refers to the collection of Font Software components as
distributed by the Copyright Holder(s).
"Modified Version" refers to any derivative made by adding to, deleting,
or substituting -- in part or in whole -- any of the components of the
Original Version, by changing formats or by porting the Font Software to a
new environment.
"Author" refers to any designer, engineer, programmer, technical
writer or other person who contributed to the Font Software.
PERMISSION & CONDITIONS
Permission is hereby granted, free of charge, to any person obtaining
a copy of the Font Software, to use, study, copy, merge, embed, modify,
redistribute, and sell modified and unmodified copies of the Font
Software, subject to the following conditions:
1) Neither the Font Software nor any of its individual components,
in Original or Modified Versions, may be sold by itself.
2) Original or Modified Versions of the Font Software may be bundled,
redistributed and/or sold with any software, provided that each copy
contains the above copyright notice and this license. These can be
included either as stand-alone text files, human-readable headers or
in the appropriate machine-readable metadata fields within text or
binary files as long as those fields can be easily viewed by the user.
3) No Modified Version of the Font Software may use the Reserved Font
Name(s) unless explicit written permission is granted by the corresponding
Copyright Holder. This restriction only applies to the primary font name as
presented to the users.
4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
Software shall not be used to promote, endorse or advertise any
Modified Version, except to acknowledge the contribution(s) of the
Copyright Holder(s) and the Author(s) or with their explicit written
permission.
5) The Font Software, modified or unmodified, in part or in whole,
must be distributed entirely under this license, and must not be
distributed under any other license. The requirement for fonts to
remain under this license does not apply to any document created
using the Font Software.
TERMINATION
This license becomes null and void if any of the above conditions are
not met.
DISCLAIMER
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
OTHER DEALINGS IN THE FONT SOFTWARE.

BIN
fonts/work-sans/WorkSans-Italic-VariableFont_wght.ttf

Binary file not shown.

BIN
fonts/work-sans/WorkSans-VariableFont_wght.ttf

Binary file not shown.

20
gen-post.sh

@ -0,0 +1,20 @@
#!/bin/sh
# This script generates a new blog post skeleton (YAML frontmatter with
# today's date) under content/posts/.
# Example usage:
# ./contrib/gen-post.sh My first post

DATE=$(date +"%Y-%m-%d")
# Join all arguments into a single title string. ("$*" is the POSIX-specified
# way to do this; assigning "$@" to a scalar is unspecified behavior.)
TITLE="$*"
# Build the filename: lowercase the title and replace spaces with dashes.
# printf '%s' avoids interpreting '%' sequences in the title, and the
# [:upper:]/[:lower:] classes avoid translating literal bracket characters
# (tr "[A-Z]" "[a-z]" also maps '[' and ']').
FILE_TITLE=$(printf '%s' "$TITLE" | tr ' ' '-' | tr '[:upper:]' '[:lower:]')
FILE_NAME="$DATE-$FILE_TITLE.md"
FULL_PATH="content/posts/$FILE_NAME"

# Quote the target so the redirection is safe even if the path ever
# contains special characters.
cat > "$FULL_PATH" <<EOF
---
title: $TITLE
date: "$DATE"
---
EOF

131
index.html

@ -1,131 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta name="description" content="Where developers can collectively learn and grow.">
<meta name="keywords" content="Developers, Dev, Community, Open, Free">
<title>/dev.space</title>
<link rel="stylesheet" href="styles/index.css" />
<link rel="shortcut icon" href="favicon.svg" type="image/svg+xml" />
<!-- Plausible analytics -->
<script async defer data-domain="slashdev.space" src="https://analytics.slashdev.space/js/plausible.js"></script>
</head>
<body>
<section class="welcome">
<div class="welcome__container">
<h1 class="welcome__text-area__heading">/dev.space</h1>
<h3 class="welcome__text-area__subheading">
Where developers can collectively learn and grow.
</h3>
<p class="description">
Whether you're freshly starting out, or you are just looking
to connect with likeminded people. /dev.space is the
community you've been looking for.
</p>
</div>
</section>
<section class="services">
<div class="services__container">
<h2 class="services__heading">Our Services</h2>
<p class="description">
Our suite of online services helps you turn your project
ideas into reality.
</p>
<div class="services__offers">
<div class="services__offers__card">
<h1 class="services__offers__card__heading">Chat</h1>
<p class="services__offers__card__description">
We're hosting our own Matrix server to keep in touch
and share ideas.
</p>
<a
href="https://matrix.slashdev.space"
class="button__link"
>
<p>Join a room</p>
</a>
</div>
<div class="services__offers__card">
<h1 class="services__offers__card__heading">
Code Hosting
</h1>
<p class="services__offers__card__description">
We use Gitea to collaboratively work on our code.
</p>
<a
href="https://git.slashdev.space"
class="button__link"
>
<p>Set up your account</p>
</a>
</div>
<div class="services__offers__card">
<h1 class="services__offers__card__heading">Boards</h1>
<p class="services__offers__card__description">
We provide free Kanban boards to plan your projects.
</p>
<a
href="https://boards.slashdev.space"
class="button__link"
>
<p>Get agile</p>
</a>
</div>
</div>
</div>
</section>
<section class="contact">
<div class="contact__container">
<h2>How to join</h2>
<p>
/dev.space is a free and open community. Anyone and everyone
is welcome. Simply join our Matrix server and chat with us.
Already got an account on another instance? Great!
</p>
<a
class="button__link button__contact"
href="https://matrix.to/#/#lobby:matrix.slashdev.space"
><p>#lobby:matrix.slashdev.space</p></a
>
</div>
</section>
<section class="support-us">
<div class="support-us__container">
<h2>Support Us</h2>
<p>
The services on this platform are 100% free of charge.
Hosting servers does have its price though. Consider
supporting us by leaving a small donation.
</p>
<div class="support-us__donations__container">
<a
class="button__link support-us__donations__button"
href="https://donate.slashdev.space"
><p>Donate with Fosspay</p></a
>
<a
class="button__link support-us__donations__button"
href="https://liberapay.com/slashdev.space/donate"
><p>Donate with Liberapay</p></a
>
</div>
</div>
</section>
</body>
<footer>
Copyright © 2022 <a href="https://garrit.xyz">Garrit Franke</a> and /dev.space contributors.<br />
This website is licensed under the
<a
href="https://git.slashdev.space/garrit/slashdev.space-landing/src/branch/main/LICENSE"
>MIT License</a
>. The source code for this website can be found
<a href="https://git.slashdev.space/garrit/slashdev.space-landing"
>here</a
>.
</footer>
</html>

93
lib/rss.js

@ -0,0 +1,93 @@
const fs = require("fs");
const path = require("path");
const matter = require("gray-matter");
const rfc822Date = require("rfc822-date");
const markdown = require("markdown").markdown;
// Collect every markdown post (top-level files only; subdirectories such as
// asset folders are skipped) together with its raw file contents.
const files = fs
  .readdirSync(path.join(__dirname, "../content/posts"))
  // Filter subdirectories
  .filter(
    (p) =>
      !fs.lstatSync(path.join(__dirname, "../content/posts", p)).isDirectory()
  )
  .map((filename) => ({
    filename,
    content: fs
      .readFileSync(path.join(__dirname, "../content/posts", filename))
      .toString(),
  }));
// NOTE: the previous `const keys = Array.from(files.keys());` was dead code
// (never referenced) and has been removed.

// Parse each file into YAML frontmatter, markdown body, and a slug derived
// from the filename (directory path and extension stripped).
const posts = files.map((file) => {
  // Create slug from filename
  const slug = file.filename
    .replace(/^.*[\\\/]/, "")
    .split(".")
    .slice(0, -1)
    .join(".");

  // Parse yaml metadata & markdown body in document
  const document = matter(file.content);
  return {
    frontmatter: document.data,
    markdownBody: document.content,
    slug,
  };
});
// Assemble the complete RSS 2.0 document for the given posts: fixed channel
// metadata plus the pre-rendered <item> fragments from blogPostsRssXml.
// The newest post date becomes <lastBuildDate>, converted to the RFC 822
// format that RSS requires.
const getRssXml = (blogPosts) => {
  const { rssItemsXml, latestPostDate } = blogPostsRssXml(blogPosts);
  return `<?xml version="1.0" ?>
<rss version="2.0">
<channel>
<title>/dev.space</title>
<link>https://blog.garrit.xyz</link>
<description>/dev.space</description>
<language>en</language>
<lastBuildDate>${rfc822Date(
    new Date(latestPostDate)
  )}</lastBuildDate>
${rssItemsXml}
</channel>
</rss>`;
};
// Render one <item> XML fragment per published post (slugs starting with "_"
// are drafts and excluded), sorted newest-first by frontmatter date, and
// track the most recent post date for the channel's <lastBuildDate>.
// NOTE(review): frontmatter titles and the markdown HTML are interpolated
// into XML without escaping the <title> text — a title containing "&" or "<"
// would produce an invalid feed. Confirm titles are safe or add escaping.
const blogPostsRssXml = (blogPosts) => {
  let latestPostDate = "";
  let rssItemsXml = "";
  blogPosts
    .filter((post) => !post.slug.startsWith("_"))
    // Ternary operator is used to fix chromium sorting
    // See: https://stackoverflow.com/a/36507611
    .sort((a, b) => (a.frontmatter.date < b.frontmatter.date ? 1 : -1))
    .forEach((post) => {
      const postDate = Date.parse(post.frontmatter.date);
      // Remember the newest date seen so far (string kept verbatim from
      // frontmatter; comparison uses the parsed epoch values).
      if (!latestPostDate || postDate > Date.parse(latestPostDate)) {
        latestPostDate = post.frontmatter.date;
      }
      rssItemsXml += `
<item>
<title>${post.frontmatter.title}</title>
<link>
https://blog.garrit.xyz/posts/${post.slug}
</link>
<pubDate>${rfc822Date(new Date(postDate))}</pubDate>
<description>
<![CDATA[${markdown.toHTML(post.markdownBody)}]]>
</description>
</item>`;
    });
  return {
    rssItemsXml,
    latestPostDate,
  };
};
// Write the generated feed into public/ so the static export serves it
// as /rss.xml. The explicit "w" flag overwrites any feed from a prior build.
const feedPath = path.join(__dirname, "../public/rss.xml");
fs.writeFileSync(feedPath, getRssXml(posts), { flag: "w" });

12
next.config.js

@ -0,0 +1,12 @@
const isProd = process.env.NODE_ENV === 'production'
module.exports = {
assetPrefix: isProd ? 'https://blog.garrit.xyz' : '',
webpack: function (config) {
config.module.rules.push({
test: /\.md$/,
use: "raw-loader",
});
return config;
},
};

13393
package-lock.json generated

File diff suppressed because it is too large Load Diff

21
package.json

@ -0,0 +1,21 @@
{
"name": "slashdev.space",
"version": "0.1.0",
"private": true,
"scripts": {
"start": "npm run build:rss && next dev",
"build": "npm run build:rss && next build && next export",
"build:rss": "node lib/rss.js"
},
"dependencies": {
"glob": "^7.1.6",
"gray-matter": "^4.0.2",
"markdown": "^0.5.0",
"next": "9.5.3",
"raw-loader": "^4.0.2",
"react": "16.13.1",
"react-dom": "16.13.1",
"react-markdown": "^4.3.1",
"rfc822-date": "0.0.3"
}
}

5
pages/_app.js

@ -0,0 +1,5 @@
function MyApp({ Component, pageProps }) {
return <Component {...pageProps} />;
}
export default MyApp;

208
pages/posts/[post].js

@ -0,0 +1,208 @@
import matter from "gray-matter";
import ReactMarkdown from "react-markdown";
import Layout from "../../components/Layout";
import glob from "glob";
export default function BlogTemplate(props) {
function reformatDate(fullDate) {
const date = new Date(fullDate);
return date.toDateString().slice(4);
}
/*
** Odd fix to get build to run
** It seems like on first go the props
** are undefined could be a Next bug?
*/
if (!props.frontmatter) return <></>;
return (
<Layout siteTitle={props.frontmatter.title}>
<article className="blog">
<div className="blog__info">
<h1>{props.frontmatter.title}</h1>
<h3>{reformatDate(props.frontmatter.date)}</h3>
</div>
<div className="blog__body">
<ReactMarkdown source={props.markdownBody} />
<p>
If you enjoyed this post, consider{" "}
<a href="https://donate.slashdev.space">buying me a coffee</a>! Got
comments? Drop a mail in my{" "}
<a href="https://lists.sr.ht/~garritfra/public-inbox">
public inbox
</a>
, or send me a message on{" "}
<a href="https://matrix.to/#/@garrit:matrix.slashdev.space">
Matrix
</a>
.
</p>
</div>
<div className="blog__footer">
<h2>Written By: Garrit Franke</h2>
</div>
</article>
<style jsx>
{`
.blog h1 {
margin-bottom: 0.7rem;
}
.blog__hero {
min-height: 300px;
height: 60vh;
width: 100%;
margin: 0;
overflow: hidden;
}
.blog__hero img {
margin-bottom: 0;
object-fit: cover;
min-height: 100%;
min-width: 100%;
object-position: center;
}
.blog__info {
padding: 1.5rem 1.25rem;
width: 100%;
max-width: 768px;
margin: 0 auto;
}
.blog__info h1 {
margin-bottom: 0.66rem;
}
.blog__info h3 {
margin-bottom: 0;
}
.blog__body {
width: 100%;
padding: 0 1.25rem;
margin: 0 auto;
display: flex;
flex-direction: column;
justify-content: center;
}
.blog__body a {
padding-bottom: 1.5rem;
}
.blog__body:last-child {
margin-bottom: 0;
}
.blog__body h1 h2 h3 h4 h5 h6 p {
font-weight: normal;
}
.blog__body ul {
list-style: initial;
}
.blog__body ul ol {
margin-left: 1.25rem;
margin-bottom: 1.25rem;
padding-left: 1.45rem;
}
.blog__footer {
display: flex;
justify-content: space-between;
align-items: center;
padding: 1.5rem 1.25rem;
width: 100%;
max-width: 800px;
margin: 0 auto;
}
.blog__footer h2 {
margin-bottom: 0;
}
.blog__footer a {
display: flex;
justify-content: space-between;
align-items: center;
}
.blog__footer a svg {
width: 20px;
}
@media (max-width: 768px) {
.blog__footer {
display: none;
}
}
@media (min-width: 768px) {
.blog {
display: flex;
flex-direction: column;
}
.blog__body {
max-width: 800px;
padding: 0 2rem;
}
.blog__body span {
width: 100%;
margin: 1.5rem auto;
}
.blog__body ul ol {
margin-left: 1.5rem;
margin-bottom: 1.5rem;
}
.blog__hero {
min-height: 600px;
height: 75vh;
}
.blog__info {
text-align: center;
padding: 2rem 0;
}
.blog__info h1 {
max-width: 500px;
margin: 0 auto 0.66rem auto;
}
.blog__footer {
padding: 2.25rem;
}
}
@media (min-width: 1440px) {
.blog__hero {
height: 70vh;
}
.blog__info {
padding: 3rem 0;
}
.blog__footer {
padding: 2rem 2rem 3rem 2rem;
}
}
`}
</style>
</Layout>
);
}
/**
 * Build-time data loader for a single blog post page.
 *
 * Resolves the `post` slug from the route params, dynamically imports the
 * matching markdown file (webpack's raw/frontmatter loader provides it on
 * `.default`), and splits it into YAML frontmatter and markdown body.
 *
 * @param {object} ctx - Next.js static props context; `ctx.params.post` is the slug.
 * @returns {Promise<{props: {siteTitle: string, frontmatter: object, markdownBody: string}}>}
 */
export async function getStaticProps({ ...ctx }) {
  const { post } = ctx.params;
  const raw = await import(`../../content/posts/${post}.md`);
  // gray-matter separates the YAML header (`data`) from the body (`content`).
  const { data: frontmatter, content: markdownBody } = matter(raw.default);
  return {
    props: {
      siteTitle: "~/garrit",
      frontmatter,
      markdownBody,
    },
  };
}
/**
 * Build-time path enumeration for blog post pages.
 *
 * Scans `content/posts` for markdown files and turns each filename into a
 * `/posts/<slug>` path for static generation.
 *
 * @returns {Promise<{paths: string[], fallback: false}>}
 */
export async function getStaticPaths() {
  // Get all .md files in the posts dir (the `**` pattern also matches
  // files inside nested subdirectories).
  const blogs = glob.sync("content/posts/**/*.md");

  // Derive a slug per file. Using `.pop()` takes the final path segment,
  // which is correct for any nesting depth — the previous `split("/")[2]`
  // only worked for files directly inside `content/posts/`. The anchored
  // regex strips exactly the ".md" extension instead of blindly slicing
  // the last three characters.
  const blogSlugs = blogs.map((file) =>
    file
      .split("/")
      .pop()
      .replace(/\.md$/, "")
      .replace(/ /g, "-")
      .trim()
  );

  // Create concrete URL paths with the slug embedded.
  const paths = blogSlugs.map((slug) => `/posts/${slug}`);

  return {
    paths,
    // Unknown slugs should 404 rather than attempt on-demand rendering.
    fallback: false,
  };
}

49
pages/posts/index.js

@ -0,0 +1,49 @@
import Layout from "../../components/Layout";
import BlogList from "../../components/BlogList";
import matter from "gray-matter";
const Index = (props) => {
return (
<Layout pathname="/" siteTitle="~/garrit" siteDescription="">
<section>
<BlogList posts={props.posts} />
</section>
</Layout>
);
};
/**
 * Build-time data loader for the blog index.
 *
 * Enumerates every markdown file under `content/posts` via webpack's
 * `require.context`, parses each into frontmatter + body, and derives a
 * slug from the filename.
 *
 * @returns {Promise<{props: {posts: object[], title: string, description: string}}>}
 */
export async function getStaticProps() {
  // Webpack bundles all matching markdown files; the context function both
  // lists them (`keys()`) and requires them on demand.
  const context = require.context("../../content/posts", true, /\.md$/);

  const posts = context.keys().map((key) => {
    // Slug = filename with any leading directory prefix and the final
    // extension removed (inner dots in the name are preserved).
    const slug = key
      .replace(/^.*[\\\/]/, "")
      .split(".")
      .slice(0, -1)
      .join(".");

    // Parse YAML metadata & markdown body in the document.
    const document = matter(context(key).default);

    return {
      frontmatter: document.data,
      markdownBody: document.content,
      slug,
    };
  });

  return {
    props: {
      posts,
      title: "~/garrit",
      description: "",
    },
  };
}
export default Index;

4
public/.well-known/brave-rewards-verification.txt

@ -0,0 +1,4 @@
This is a Brave Rewards publisher verification file.
Domain: blog.garrit.xyz
Token: 426e30fb7cb125131ef27dc902cb10fe14d3114f60c57a190adb672def1b59b5

BIN
public/assets/flutter_web_renderer_canvaskit.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB

BIN
public/assets/flutter_web_renderer_html.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 37 KiB

BIN
public/assets/signed_commit.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 236 KiB

1
public/favicon.svg

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="256" height="256" viewBox="0 0 100 100"><rect width="100" height="100" rx="0" fill="#000000"></rect><path d="M36.76 18.00L42.76 18.00L18.28 82L12.28 82L36.76 18.00ZM68.04 77.20L68.04 77.20Q63.08 77.20 58.92 74.72Q54.76 72.24 52.28 67.56Q49.80 62.88 49.80 56.40L49.80 56.40Q49.80 49.76 52.44 45.12Q55.08 40.48 59.40 38.04Q63.72 35.60 68.68 35.60L68.68 35.60Q73.64 35.60 77.28 38.16Q80.92 40.72 82.12 44.96L82.12 44.96L81.32 45.52L81.32 18.00L87.72 18.00L87.72 76.40L82.12 76.40L81.56 66.72L82.44 66.24Q81.88 69.76 79.76 72.20Q77.64 74.64 74.56 75.92Q71.48 77.20 68.04 77.20ZM68.68 71.60L68.68 71.60Q72.52 71.60 75.36 69.68Q78.20 67.76 79.76 64.20Q81.32 60.64 81.32 55.84L81.32 55.84Q81.32 51.28 79.76 48Q78.20 44.72 75.44 42.96Q72.68 41.20 69 41.20L69 41.20Q62.92 41.20 59.56 45.36Q56.20 49.52 56.20 56.40L56.20 56.40Q56.20 63.28 59.48 67.44Q62.76 71.60 68.68 71.60Z" fill="#fff"></path></svg>

After

Width:  |  Height:  |  Size: 939 B

4
public/vercel.svg

@ -0,0 +1,4 @@
<svg width="283" height="64" viewBox="0 0 283 64" fill="none"
xmlns="http://www.w3.org/2000/svg">
<path d="M141.04 16c-11.04 0-19 7.2-19 18s8.96 18 20 18c6.67 0 12.55-2.64 16.19-7.09l-7.65-4.42c-2.02 2.21-5.09 3.5-8.54 3.5-4.79 0-8.86-2.5-10.37-6.5h28.02c.22-1.12.35-2.28.35-3.5 0-10.79-7.96-17.99-19-17.99zm-9.46 14.5c1.25-3.99 4.67-6.5 9.45-6.5 4.79 0 8.21 2.51 9.45 6.5h-18.9zM248.72 16c-11.04 0-19 7.2-19 18s8.96 18 20 18c6.67 0 12.55-2.64 16.19-7.09l-7.65-4.42c-2.02 2.21-5.09 3.5-8.54 3.5-4.79 0-8.86-2.5-10.37-6.5h28.02c.22-1.12.35-2.28.35-3.5 0-10.79-7.96-17.99-19-17.99zm-9.45 14.5c1.25-3.99 4.67-6.5 9.45-6.5 4.79 0 8.21 2.51 9.45 6.5h-18.9zM200.24 34c0 6 3.92 10 10 10 4.12 0 7.21-1.87 8.8-4.92l7.68 4.43c-3.18 5.3-9.14 8.49-16.48 8.49-11.05 0-19-7.2-19-18s7.96-18 19-18c7.34 0 13.29 3.19 16.48 8.49l-7.68 4.43c-1.59-3.05-4.68-4.92-8.8-4.92-6.07 0-10 4-10 10zm82.48-29v46h-9V5h9zM36.95 0L73.9 64H0L36.95 0zm92.38 5l-27.71 48L73.91 5H84.3l17.32 30 17.32-30h10.39zm58.91 12v9.69c-1-.29-2.06-.49-3.2-.49-5.81 0-10 4-10 10V51h-9V17h9v9.2c0-5.08 5.91-9.2 13.2-9.2z" fill="#000"/>
</svg>

After

Width:  |  Height:  |  Size: 1.1 KiB

36
styles/atoms.css

@ -1,36 +0,0 @@
/* Atomic, reusable UI pieces shared across pages. */

/* Anchor styled as a solid black button; flex centers its label. */
.button__link {
font-weight: 350;
background: black;
border: 1px solid black;
color: white;
text-decoration: none;
padding: 0.6rem 2rem;
margin: 2rem 2.5rem;
transition: 0.2s;
display: flex;
align-items: center;
}
.button__link > p {
font-size: 1rem;
margin: auto;
}
/* Invert colors on hover (black-on-white). */
.button__link:hover {
color: black;
background: white;
}
/* Larger label text on narrow screens. */
@media only screen and (max-width: 520px) {
.button__link p {
font-size: 1.3rem;
}
}
/* Description */
.description {
margin-right: 12vw;
margin-left: 12vw;
}

17
styles/contact.css

@ -1,17 +0,0 @@
/* Contact section: centered container with a fixed-width contact button. */
.contact__container {
width: 80vw;
margin: auto;
justify-content: center;
}
.button__contact {
margin: auto;
margin-bottom: 2rem;
width: 20rem;
}
/* On narrow screens, let the button shrink to its content width. */
@media only screen and (max-width: 520px) {
.button__contact {
width: auto;
}
}

18
styles/darkmode.css

@ -1,18 +0,0 @@
/* Dark-theme overrides, active only when the OS prefers a dark color scheme. */
@media (prefers-color-scheme: dark) {
:root {
background-color: #161618;
color: #dbd7db;
}
html {
/* !important overrides the light scrollbar-color set in index.css. */
scrollbar-color: #dbd7db #161618 !important;
}
/* Lighter button background so it stays visible on the dark page. */
.button__link {
background-color: #67676c;
}
a {
color: #dbd7db;
}
}

39
styles/index.css

@ -1,39 +0,0 @@
/* Entry stylesheet: pulls in all partials, declares fonts, sets base layout.
   NOTE: @import order matters — later sheets (e.g. darkmode.css) override
   earlier ones. */
@import url("./typography.css");
@import url("./atoms.css");
@import url("./darkmode.css");
/* Components */
@import url("./welcome.css");
@import url("./services.css");
@import url("./contact.css");
@import url("./support.css");
/* Fonts */
/* Work Sans variable font; upright and italic faces share one family name. */
@font-face {
font-family: "Work Sans";
src: url("../fonts/work-sans/WorkSans-VariableFont_wght.ttf");
font-style: normal;
}
@font-face {
font-family: "Work Sans";
src: url("../fonts/work-sans/WorkSans-Italic-VariableFont_wght.ttf");
font-style: italic;
}
html {
scrollbar-color: black white;
}
body {
margin: 0;
font-family: "Work Sans", sans-serif;
font-weight: 250;
overflow-x: hidden;
text-align: center;
}
footer {
margin-bottom: 2rem;
}

61
styles/services.css

@ -1,61 +0,0 @@
/* Services section: centered heading plus a wrapping row of offer cards. */
.services {
align-items: center;
display: flex;
justify-content: center;
}
.services__heading {
margin-bottom: 1.5rem;
}
/* Card row wraps and spaces evenly inside an 80vw band. */
.services__offers {
display: flex;
flex-wrap: wrap;
justify-content: space-evenly;
width: 80vw;
padding: 0 10vw;
}
.services__offers__card {
height: 15rem;
width: 20rem;
}
.services__offers__card__heading {
font-weight: 350;
font-size: 2rem;
}
.services__offers__card__description {
margin-bottom: 1rem;
margin: 0 3rem;
height: 3rem;
font-size: 1rem;
}
/* Narrow screens: stack cards with spacing and let text flow naturally. */
@media only screen and (max-width: 520px) {
.services__offers__card {
margin-bottom: 3rem;
}
.services__offers__card__description {
font-size: 1.3rem;
height: auto;
}
}
/* Very narrow screens: full-width row, wider cards. */
@media only screen and (max-width: 380px) {
.services__offers {
width: 100vw;
padding: 0;
}
.services__offers__card {
width: 25rem;
margin-bottom: 3rem;
}
.services__offers__card__description {
font-size: 1.3rem;
height: auto;
}
}

16
styles/support.css

@ -1,16 +0,0 @@
/* Support/donations section: centered container with a wrapping button row. */
.support-us__container {
  width: 80vw;
  margin: auto;
  justify-content: center; /* added the missing trailing semicolon for consistency */
}

/* Donation buttons wrap onto multiple lines on narrow viewports. */
.support-us__donations__container {
  display: flex;
  flex-wrap: wrap;
  justify-content: center;
  margin-bottom: 2rem;
}

.support-us__donations__button {
  margin: 1rem;
}

25
styles/typography.css

@ -1,25 +0,0 @@
/* Base type scale for headings, body text, and links.
   NOTE(review): the non-multiple-of-100 weights (350, 300, 200) presumably
   rely on the Work Sans variable font loaded in index.css — confirm they
   render as intended with any fallback font. */
h1 {
font-size: 5rem;
font-weight: 400;
margin: 1rem;
}
h2 {
font-size: 3.3rem;
font-weight: 350;
}
h3 {
font-size: 2rem;
font-weight: 300;
}
p {
font-size: 1.5rem;
font-weight: 200;
line-height: 1.3;
}
a {
color: black;
}

35
styles/welcome.css

@ -1,35 +0,0 @@
/* Welcome/hero section: centered intro text with responsive root font size. */
.welcome {
align-items: center;
display: flex;
justify-content: center;
}
.welcome__container {
margin-top: 8vh;
}
.welcome__text-area__subheading {
margin-top: 0;
margin-bottom: 2em;
max-width: 80vw;
margin-left: auto;
margin-right: auto;
}
.welcome__text-area__description {
margin-left: 12vw;
margin-right: 12vw;
}
/* At smaller than 520px the fonts get too large to fit in one line */
@media only screen and (max-width: 520px) {
:root {
font-size: .8em;
}
}
/* At smaller than 380px the fonts get too large again to fit in one line */
@media only screen and (max-width: 380px) {
:root {
font-size: .6em;
}
}
Loading…
Cancel
Save