Compare commits


5 commits

| SHA | Message | Date |
| --- | --- | --- |
| dbd75eaef0 | actually install attic-client | 2024-10-29 17:08:20 -05:00 |
| 56de313d42 | fix login command | 2024-10-29 17:02:55 -05:00 |
| a112d3b2f2 | switch to attic | 2024-10-29 16:59:14 -05:00 |
| 6a42bc97aa | add support for attic | 2024-10-29 16:59:14 -05:00 |
| 718b2d6f75 | setup attic on algiz | 2024-10-29 16:58:36 -05:00 |
9 changed files with 134 additions and 42 deletions


@@ -32,10 +32,16 @@ jobs:
            experimental-features = pipe-operator
            accept-flake-config = true
      - uses: cachix/cachix-action@v15
        with:
          name: daylin
          authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
      - name: Install and login to attic cache
        run: |
          nix profile install "nixpkgs#attic-client"
          attic login oizys https://attic.dayl.in "${{ secrets.ATTIC_TOKEN }}"
      # - uses: cachix/cachix-action@v15
      #   with:
      #     name: daylin
      #     authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
      - name: Update nix flake
        run: |
@@ -51,7 +57,7 @@ jobs:
        run: >
          nix run .
          --
          build --minimal
          cache
          --host "othalan"
          --flake .
          --debug


@@ -49,12 +49,14 @@
  nixConfig = {
    extra-substituters = [
      "https://attic.dayl.in/oizys"
      "https://hyprland.cachix.org"
      "https://nixpkgs-wayland.cachix.org"
      "https://daylin.cachix.org"
      # "https://cache.lix.systems"
    ];
    extra-trusted-public-keys = [
      "oizys:DSw3mwVMM/Y+PXSVpkDlU5dLwlORuiJRGPkwr5INSMc="
      "hyprland.cachix.org-1:a7pgxzMz7+chwVL3/pzj6jIBMioiJM7ypFP8PwtkuGc="
      "nixpkgs-wayland.cachix.org-1:3lwxaILxMRkVhehr5StQprHdEo4IrE8sRho9R9HOLYA="
      "daylin.cachix.org-1:fLdSnbhKjtOVea6H9KqXeir+PyhO+sDSPhEW66ClE/k="


@@ -27,4 +27,20 @@ To point gitea/forgejo to the shim gitea binary for SSH I symlink the current sy
ln -s /run/current-system/sw/bin/gitea /usr/local/bin/gitea
```
## Setting up Attic

I generated a key using the command provided in the attic docs:

```sh
nix run nixpkgs#openssl -- genrsa -traditional 4096 | base64 -w0
```

Then I wrote `ATTIC_SERVER_TOKEN_RS256_SECRET_BASE64="<output from above>"` to `/etc/atticd.env` (the file `services.atticd.environmentFile` points at).
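That makes the env file a single assignment (a sketch; the real base64 value is elided):

```sh
# /etc/atticd.env -- read by atticd via services.atticd.environmentFile
ATTIC_SERVER_TOKEN_RS256_SECRET_BASE64="<base64 output from the command above>"
```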
I generated a token to configure the caches using the following command:

```sh
atticd-atticadm make-token --sub daylin --push "*" --pull "*" --validity '1y' --create-cache "*" --configure-cache "*" --configure-cache-retention "*" --destroy-cache "*" --delete "*"
```
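The cache itself still has to exist before anything can be pushed to it. A minimal sketch using the attic client, assuming the token from above is in `$ATTIC_TOKEN` (the `oizys` server alias and `https://attic.dayl.in` endpoint match the workflow change above):

```sh
# authenticate this machine's attic client against the server
attic login oizys https://attic.dayl.in "$ATTIC_TOKEN"

# create the cache that the flake's extra-substituters point at
attic cache create oizys
```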
If I handled secrets via `sops` or `agenix`, I think this token could be stored directly in the repo.

I also had to open up the firewall so that requests proxied by caddy (running in docker) to `host.docker.internal` actually reach attic on the host; see the `networking.firewall.extraCommands` rule in `hosts/algiz/services.nix` below.

hosts/algiz/services.nix (new file)

@@ -0,0 +1,58 @@
{ pkgs, enabled, ... }:
let
  atticPort = "5656";
in
{
  services.resolved = enabled;
  services.fail2ban = enabled // {
    maxretry = 5;
    bantime = "24h";
  };
  services.openssh = enabled // {
    settings.PasswordAuthentication = false;
  };

  security.polkit = enabled; # attic was looking for this...

  environment.systemPackages = [ pkgs.attic-client ];

  # allow docker to forward the request to the host running attic
  # https://discourse.nixos.org/t/docker-container-not-resolving-to-host/30259/6
  networking.firewall.extraCommands = "iptables -A INPUT -p tcp --destination-port ${atticPort} -s 172.16.0.0/12 -j ACCEPT";

  services.atticd = enabled // {
    # Replace with absolute path to your credentials file
    environmentFile = "/etc/atticd.env";

    settings = {
      listen = "[::]:${atticPort}";
      jwt = { };

      # Data chunking
      #
      # Warning: If you change any of the values here, it will be
      # difficult to reuse existing chunks for newly-uploaded NARs
      # since the cutpoints will be different. As a result, the
      # deduplication ratio will suffer for a while after the change.
      chunking = {
        # The minimum NAR size to trigger chunking
        #
        # If 0, chunking is disabled entirely for newly-uploaded NARs.
        # If 1, all NARs are chunked.
        nar-size-threshold = 64 * 1024; # 64 KiB

        # The preferred minimum size of a chunk, in bytes
        min-size = 16 * 1024; # 16 KiB

        # The preferred average size of a chunk, in bytes
        avg-size = 64 * 1024; # 64 KiB

        # The preferred maximum size of a chunk, in bytes
        max-size = 256 * 1024; # 256 KiB
      };
    };
  };
}


@@ -2,12 +2,6 @@
{
  security.sudo.wheelNeedsPassword = false;
  services.resolved = enabled;
  services.fail2ban = enabled // {
    maxretry = 5;
    bantime = "24h";
  };
  # # added to make using `pip install` work in docker build
  # networking.nameservers = [ "8.8.8.8"];
@@ -20,12 +14,6 @@
    ];
  };
  services.openssh = enabled // {
    settings.PasswordAuthentication = false;
  };
  # users.mutableUsers = false;

  # Use the GRUB 2 boot loader.
  boot.loader.grub = enabled // {
    device = "/dev/sda"; # or "nodev" for efi only


@@ -10,5 +10,6 @@
    graphviz
    typst
    charm-freeze
    attic-client
  ]);
}


@@ -1,5 +1,5 @@
## nix begat oizys
import std/[os, tables, sequtils, strformat, strutils]
import std/[os, osproc, tables, sequtils, strformat, strutils]
import hwylterm, hwylterm/[cligen, logging]
import oizys/[context, github, nix, overlay, logging]

@@ -53,9 +53,9 @@ overlay:
    ## nix build
    nixBuild(minimal, rest)

  proc cache(minimal: bool = false, name: string = "daylin") =
    ## build and push to cachix
    nixBuildWithCache(minimal, name, rest)

  proc cache(name: string = "oizys", service: string = "attic", jobs: int = countProcessors()) =
    ## build and push store paths
    nixBuildWithCache(name, rest, service, jobs)

  proc osCmd() =
    ## nixos-rebuild

@@ -97,7 +97,8 @@ when isMainModule:
      "ref" : "git ref/branch/tag to trigger workflow on"
    }
    cacheHelp = //{
      "name" : "name of cachix binary cache"
      "name" : "name of cachix binary cache",
      "jobs" : "jobs when pushing paths"
    } // sharedHelp
  let
    osUsage = $bb("$command [[subcmd] $args\n$doc[bold]Options[/]:\n$options")


@@ -9,7 +9,7 @@ import hwylterm

func addArgs*(cmd: var string, args: openArray[string]) =
  cmd &= " " & args.join(" ")

func addArg*(cmd: var string, arg: string) =
func addArg*(cmd: var string, arg: string ) =
  cmd &= " " & arg

proc runCmd*(cmd: string): int =


@@ -193,6 +193,7 @@ proc writeDervationsToStepSummary(drvs: seq[string]) =
  let output = open(summaryFilePath, fmAppend)
  output.writeLine("| derivation | hash |\n|---|---|")
  output.writeLine(rows.join("\n"))
  close output

proc nixBuild*(minimal: bool, rest: seq[string]) =
  var cmd = nixCommand("build")

@@ -233,23 +234,42 @@ proc nixBuildHostDry*(minimal: bool, rest: seq[string]) =
  let output = parseDryRunOutput err
  display output

proc nixBuildWithCache*(minimal: bool, name: string, rest: seq[string]) =
  if findExe("cachix") == "": fatalQuit "is cachix installed?"
  info bbfmt"building and pushing to cache: [b]{name}"
  var cmd = "cachix"
  cmd.addArgs ["watch-exec", "--"]
  cmd.addArg "nix build"
  if minimal:
    debug "populating args with derivations not built/cached"
    let drvs = systemPathDrvsToBuild()
    if drvs.len == 0:
      info "nothing to build"
      quit "exiting...", QuitSuccess
    cmd.addArgs drvs
    cmd.addArg "--no-link"
  else:
    cmd.addArgs nixosConfigAttrs()
  cmd.addArgs rest
  let err = runCmd(cmd)
  quit err

proc nixBuildWithCache*(name: string, rest: seq[string], service: string, jobs: int) =
  ## build individual derivations not cached and push to cache
  if findExe(service) == "": fatalQuit fmt"is {service} installed?"
  info bbfmt"building and pushing to cache: [b]{name}"
  debug "determining missing cache hits"
  let drvs = systemPathDrvsToBuild()
  if drvs.len == 0:
    info "nothing to build"
    quit "exiting...", QuitSuccess
  for drv in drvs:
    var cmd = "nix build"
    cmd.addArg drv
    cmd.addArgs rest
    let buildErr = runCmd(cmd)
    if buildErr != 0:
      error "failed to build: " & drv
      continue
    let results = collect(
      for k, p in walkDir(".", relative = true):
        if k in {pcLinkToDir, pcLinkToFile} and p.startsWith("result"):
          p
    )
    cmd = service
    cmd.addArg "push"
    cmd.addArg name
    cmd.addArg "--jobs"
    cmd.addArg $jobs
    cmd.addArgs results
    let pushErr = runCmd(cmd)
    if pushErr != 0:
      errorQuit "failed to push build to cache"
    for p in results:
      removeFile p
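In shell terms, the new `nixBuildWithCache` amounts to roughly the following for each system-path derivation that misses the cache (a sketch, not part of the commit; `<drv>` stands in for one missing derivation):

```sh
# build one uncached derivation, leaving ./result* symlinks behind
nix build "<drv>"

# push the freshly built store paths, then drop the symlinks
attic push oizys --jobs "$(nproc)" result*
rm result*
```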