This commit is contained in:
parent 5f644d0ccd
commit a394b9cefd
167 changed files with 2795 additions and 2122 deletions
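Taken together, the hunks below are a mechanical reformatting of the Nix sources: list and attribute-set literals gain inner spacing, and long one-liners are split across lines (consistent with a switch to an RFC-style nixfmt formatter, which is an assumption, since the formatter is not named in this excerpt). A minimal before/after sketch of the recurring pattern, taken from the first file in the diff:

  ccr.extraGroups = ["adbusers"];     # old formatting
  ccr.extraGroups = [ "adbusers" ];   # new formatting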
@@ -1,4 +1,4 @@
{
programs.adb.enable = true;
ccr.extraGroups = ["adbusers"];
ccr.extraGroups = [ "adbusers" ];
}

@@ -6,6 +6,9 @@
openFirewall = true;
};
};
networking.firewall.allowedTCPPorts = [3000 53];
networking.firewall.allowedUDPPorts = [53];
networking.firewall.allowedTCPPorts = [
3000
53
];
networking.firewall.allowedUDPPorts = [ 53 ];
}

@@ -1,4 +1,5 @@
{config, ...}: {
{ config, ... }:
{
services.atuin = {
enable = true;
openFirewall = false; # use only in the VPN

@@ -1,4 +1,5 @@
{pkgs, ...}: {
{ pkgs, ... }:
{
sound.enable = true;

hardware.pulseaudio = {
@@ -6,5 +7,5 @@
package = pkgs.pulseaudioFull;
};

users.extraUsers.ccr.extraGroups = ["audio"];
users.extraUsers.ccr.extraGroups = [ "audio" ];
}

@@ -1,9 +1,8 @@
{
config,
options,
lib,
...
}: {
}:
{
system.autoUpgrade = {
enable = false;
flake = "github:aciceri/nixfleet#${config.networking.hostName}";

@@ -1,10 +1,11 @@
{config, ...}: {
{ config, ... }:
{
services.tlp.enable = true;

services.upower.enable = true;

nixpkgs.overlays = [
(self: super: {
(_self: super: {
tlp = super.tlp.override {
enableRDW = config.networkmanager.enable;
};

@@ -1,5 +1,9 @@
{
boot.binfmt.emulatedSystems = ["i686-linux" "aarch64-linux" "riscv64-linux"];
boot.binfmt.emulatedSystems = [
"i686-linux"
"aarch64-linux"
"riscv64-linux"
];
nix.extraOptions = ''
extra-platforms = aarch64-linux arm-linux i686-linux riscv64-linux
'';

@@ -2,7 +2,8 @@
pkgs,
config,
...
}: {
}:
{
services.blueman.enable = true;
hardware.pulseaudio.enable = true;
hardware.bluetooth = {
@@ -19,6 +20,6 @@
};
};
};
services.dbus.packages = with pkgs; [blueman];
ccr.extraGroups = ["bluetooth"];
services.dbus.packages = with pkgs; [ blueman ];
ccr.extraGroups = [ "bluetooth" ];
}

@@ -2,10 +2,13 @@
virtualisation.oci-containers.containers = {
bubbleupnpserver = {
image = "bubblesoftapps/bubbleupnpserver";
ports = ["58050:58050"];
extraOptions = ["--network=host" "-device /dev/dri:/dev/dri"];
ports = [ "58050:58050" ];
extraOptions = [
"--network=host"
"-device /dev/dri:/dev/dri"
];
};
};

networking.firewall.allowedTCPPorts = [58050];
networking.firewall.allowedTCPPorts = [ 58050 ];
}

@@ -7,10 +7,12 @@
vpn,
options,
...
}: let
}:
let
cfg = config.ccr;
inherit (lib) types;
in {
in
{
options.ccr = {
enable = lib.mkEnableOption "ccr";

@@ -31,12 +33,12 @@ in {

modules = lib.mkOption {
type = types.listOf types.str;
default = [];
default = [ ];
};

packages = lib.mkOption {
type = types.listOf types.package;
default = [];
default = [ ];
};

autologin = lib.mkOption {
@@ -56,63 +58,74 @@ in {

extraGroups = lib.mkOption {
type = types.listOf types.str;
default = {};
default = { };
};

extraModules = lib.mkOption {
type = types.listOf types.deferredModule;
default = [];
default = [ ];
};

backupPaths = lib.mkOption {
type = types.listOf types.str;
default = [];
default = [ ];
};
};

config = lib.mkIf cfg.enable (lib.mkMerge [
(lib.optionalAttrs (builtins.hasAttr "backup" options) {
backup.paths = cfg.backupPaths;
})
{
# FIXME shouldn't set these groups by default
ccr.extraGroups = ["wheel" "fuse" "video" "dialout" "systemd-journal" "camera"];
ccr.modules = ["shell" "git" "nix-index" "btop"];
config = lib.mkIf cfg.enable (
lib.mkMerge [
(lib.optionalAttrs (builtins.hasAttr "backup" options) {
backup.paths = cfg.backupPaths;
})
{
# FIXME shouldn't set these groups by default
ccr.extraGroups = [
"wheel"
"fuse"
"video"
"dialout"
"systemd-journal"
"camera"
];
ccr.modules = [
"shell"
"git"
"nix-index"
"btop"
];

users.users.${cfg.username} = {
inherit (config.ccr) hashedPassword extraGroups description;
uid = 1000;
isNormalUser = true;
shell = cfg.shell;
openssh.authorizedKeys.keys = config.ccr.authorizedKeys;
};
users.users.${cfg.username} = {
inherit (config.ccr) hashedPassword extraGroups description;
uid = 1000;
isNormalUser = true;
shell = cfg.shell;
openssh.authorizedKeys.keys = config.ccr.authorizedKeys;
};

programs.fish.enable = true;
programs.fish.enable = true;

services.getty.autologinUser =
if config.ccr.autologin
then cfg.username
else null;
services.getty.autologinUser = if config.ccr.autologin then cfg.username else null;

home-manager.useGlobalPkgs = true;
home-manager.useUserPackages = true;
home-manager.users.${cfg.username} = {
imports =
fleetHmModules cfg.modules
++ [
{
_module.args = {
inherit (config.age) secrets;
inherit (cfg) username;
inherit vpn;
hostname = config.networking.hostName;
};
}
]
++ cfg.extraModules;
home.packages = cfg.packages;
home.stateVersion = config.system.stateVersion;
};
}
]);
home-manager.useGlobalPkgs = true;
home-manager.useUserPackages = true;
home-manager.users.${cfg.username} = {
imports =
fleetHmModules cfg.modules
++ [
{
_module.args = {
inherit (config.age) secrets;
inherit (cfg) username;
inherit vpn;
hostname = config.networking.hostName;
};
}
]
++ cfg.extraModules;
home.packages = cfg.packages;
home.stateVersion = config.system.stateVersion;
};
}
]
);
}

@@ -1,28 +1,31 @@
{
lib,
pkgs,
...
}: let
}:
let
repos-path = "/var/lib/cgit-repos";
cgit-setup-repos =
pkgs.writers.writePython3 "cgit-setup-repos" {
libraries = with pkgs.python3Packages; [PyGithub];
} ''
from github import Github
from pathlib import Path
pkgs.writers.writePython3 "cgit-setup-repos"
{
libraries = with pkgs.python3Packages; [ PyGithub ];
}
''
from github import Github
from pathlib import Path

c = Path("${repos-path}")
c.unlink(missing_ok=True)
c = Path("${repos-path}")
c.unlink(missing_ok=True)

with open(c, "w") as f:
for repo in Github().get_user("aciceri").get_repos():
f.writelines([
f"repo.url={repo.name}\n"
f"repo.path=/home/ccr/projects/aciceri/{repo.name}/.git\n"
f"repo.desc={repo.description}\n"
])
'';
in {
with open(c, "w") as f:
for repo in Github().get_user("aciceri").get_repos():
f.writelines([
f"repo.url={repo.name}\n"
f"repo.path=/home/ccr/projects/aciceri/{repo.name}/.git\n"
f"repo.desc={repo.description}\n"
])
'';
in
{
services.nginx.virtualHosts."git.aciceri.dev" = {
cgit = {
enable = true;
@@ -31,10 +34,12 @@ in {
virtual-root = "/";
cache-size = 1000;
include = [
(builtins.toString (pkgs.writeText "cgit-extra" ''
source-filter=${pkgs.cgit-pink}/lib/cgit/filters/syntax-highlighting.py
about-filter=${pkgs.cgit-pink}/lib/cgit/filters/about-formatting.sh
''))
(builtins.toString (
pkgs.writeText "cgit-extra" ''
source-filter=${pkgs.cgit-pink}/lib/cgit/filters/syntax-highlighting.py
about-filter=${pkgs.cgit-pink}/lib/cgit/filters/about-formatting.sh
''
))
repos-path
];
};
@@ -48,13 +53,13 @@ in {
Type = "oneshot";
RemainAfterExit = true;
};
wantedBy = ["multi-user.target"];
wantedBy = [ "multi-user.target" ];
script = builtins.toString cgit-setup-repos;
};

systemd.timers.cgit-setup-repos = {
wantedBy = ["timers.target"];
partOf = ["cgit-setup-repos.service"];
wantedBy = [ "timers.target" ];
partOf = [ "cgit-setup-repos.service" ];
timerConfig = {
OnCalendar = "*-*-* 4:00:00"; # daily at 4 AM
Unit = "cgit-setup-repos.service";

@ -4,134 +4,149 @@
|
|||
pkgs,
|
||||
...
|
||||
}:
|
||||
with lib; let
|
||||
with lib;
|
||||
let
|
||||
globalConfig = config;
|
||||
settingsFormat = {
|
||||
type = with lib.types; let
|
||||
value =
|
||||
oneOf [int str]
|
||||
// {
|
||||
description = "INI-like atom (int or string)";
|
||||
};
|
||||
values =
|
||||
coercedTo value lib.singleton (listOf value)
|
||||
// {
|
||||
type =
|
||||
with lib.types;
|
||||
let
|
||||
value =
|
||||
oneOf [
|
||||
int
|
||||
str
|
||||
]
|
||||
// {
|
||||
description = "INI-like atom (int or string)";
|
||||
};
|
||||
values = coercedTo value lib.singleton (listOf value) // {
|
||||
description = value.description + " or a list of them for duplicate keys";
|
||||
};
|
||||
in
|
||||
in
|
||||
attrsOf values;
|
||||
generate = name: values:
|
||||
pkgs.writeText name (lib.generators.toKeyValue {listsAsDuplicateKeys = true;} values);
|
||||
generate =
|
||||
name: values:
|
||||
pkgs.writeText name (lib.generators.toKeyValue { listsAsDuplicateKeys = true; } values);
|
||||
};
|
||||
in {
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
../nginx-base
|
||||
./config.nix
|
||||
];
|
||||
|
||||
options.services.nginx.virtualHosts = mkOption {
|
||||
type = types.attrsOf (types.submodule ({config, ...}: let
|
||||
cfg = config.cgit;
|
||||
type = types.attrsOf (
|
||||
types.submodule (
|
||||
{ config, ... }:
|
||||
let
|
||||
cfg = config.cgit;
|
||||
|
||||
# These are the global options for this submodule, but for nicer UX they
|
||||
# are inlined into the freeform settings. Hence they MUST NOT INTERSECT
|
||||
# with any settings from cgitrc!
|
||||
options = {
|
||||
enable = mkEnableOption "cgit";
|
||||
# These are the global options for this submodule, but for nicer UX they
|
||||
# are inlined into the freeform settings. Hence they MUST NOT INTERSECT
|
||||
# with any settings from cgitrc!
|
||||
options = {
|
||||
enable = mkEnableOption "cgit";
|
||||
|
||||
location = mkOption {
|
||||
default = "/";
|
||||
type = types.str;
|
||||
description = ''
|
||||
Location to serve cgit on.
|
||||
'';
|
||||
};
|
||||
};
|
||||
location = mkOption {
|
||||
default = "/";
|
||||
type = types.str;
|
||||
description = ''
|
||||
Location to serve cgit on.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
# Remove the global options for serialization into cgitrc
|
||||
settings = removeAttrs cfg (attrNames options);
|
||||
in {
|
||||
options.cgit = mkOption {
|
||||
type = types.submodule {
|
||||
freeformType = settingsFormat.type;
|
||||
inherit options;
|
||||
config = {
|
||||
css = mkDefault "/cgit.css";
|
||||
logo = mkDefault "/cgit.png";
|
||||
favicon = mkDefault "/favicon.ico";
|
||||
# Remove the global options for serialization into cgitrc
|
||||
settings = removeAttrs cfg (attrNames options);
|
||||
in
|
||||
{
|
||||
options.cgit = mkOption {
|
||||
type = types.submodule {
|
||||
freeformType = settingsFormat.type;
|
||||
inherit options;
|
||||
config = {
|
||||
css = mkDefault "/cgit.css";
|
||||
logo = mkDefault "/cgit.png";
|
||||
favicon = mkDefault "/favicon.ico";
|
||||
};
|
||||
};
|
||||
default = { };
|
||||
example = literalExample ''
|
||||
{
|
||||
enable = true;
|
||||
virtual-root = "/";
|
||||
source-filter = "''${pkgs.cgit-pink}/lib/cgit/filters/syntax-highlighting.py";
|
||||
about-filter = "''${pkgs.cgit-pink}/lib/cgit/filters/about-formatting.sh";
|
||||
cache-size = 1000;
|
||||
scan-path = "/srv/git";
|
||||
include = [
|
||||
(builtins.toFile "cgitrc-extra-1" '''
|
||||
# Anything that has to be in a particular order
|
||||
''')
|
||||
(builtins.toFile "cgitrc-extra-2" '''
|
||||
# Anything that has to be in a particular order
|
||||
''')
|
||||
];
|
||||
}
|
||||
'';
|
||||
description = ''
|
||||
Verbatim contents of the cgit runtime configuration file. Documentation
|
||||
(with cgitrc example file) is available in "man cgitrc". Or online:
|
||||
http://git.zx2c4.com/cgit/tree/cgitrc.5.txt
|
||||
'';
|
||||
};
|
||||
};
|
||||
default = {};
|
||||
example = literalExample ''
|
||||
{
|
||||
enable = true;
|
||||
virtual-root = "/";
|
||||
source-filter = "''${pkgs.cgit-pink}/lib/cgit/filters/syntax-highlighting.py";
|
||||
about-filter = "''${pkgs.cgit-pink}/lib/cgit/filters/about-formatting.sh";
|
||||
cache-size = 1000;
|
||||
scan-path = "/srv/git";
|
||||
include = [
|
||||
(builtins.toFile "cgitrc-extra-1" '''
|
||||
# Anything that has to be in a particular order
|
||||
''')
|
||||
(builtins.toFile "cgitrc-extra-2" '''
|
||||
# Anything that has to be in a particular order
|
||||
''')
|
||||
];
|
||||
}
|
||||
'';
|
||||
description = ''
|
||||
Verbatim contents of the cgit runtime configuration file. Documentation
|
||||
(with cgitrc example file) is available in "man cgitrc". Or online:
|
||||
http://git.zx2c4.com/cgit/tree/cgitrc.5.txt
|
||||
'';
|
||||
};
|
||||
|
||||
config = let
|
||||
location = removeSuffix "/" cfg.location;
|
||||
in
|
||||
mkIf cfg.enable {
|
||||
locations."${location}/" = {
|
||||
root = "${pkgs.cgit-pink}/cgit/";
|
||||
tryFiles = "$uri @cgit";
|
||||
};
|
||||
locations."~ ^${location}/(cgit.(css|png)|favicon.ico|robots.txt)$" = {
|
||||
alias = "${pkgs.cgit-pink}/cgit/$1";
|
||||
};
|
||||
locations."~ ^${location}/custom.css$" = {
|
||||
alias = ./custom.css;
|
||||
};
|
||||
locations."@cgit" = {
|
||||
extraConfig =
|
||||
''
|
||||
include ${pkgs.nginx}/conf/fastcgi_params;
|
||||
fastcgi_param CGIT_CONFIG ${settingsFormat.generate "cgitrc" settings};
|
||||
fastcgi_param SCRIPT_FILENAME ${pkgs.cgit-pink}/cgit/cgit.cgi;
|
||||
fastcgi_param QUERY_STRING $args;
|
||||
fastcgi_param HTTP_HOST $server_name;
|
||||
fastcgi_pass unix:${globalConfig.services.fcgiwrap.socketAddress};
|
||||
''
|
||||
+ (
|
||||
if cfg.location == "/"
|
||||
then ''
|
||||
fastcgi_param PATH_INFO $uri;
|
||||
''
|
||||
else ''
|
||||
fastcgi_split_path_info ^(${location}/)(/?.+)$;
|
||||
fastcgi_param PATH_INFO $fastcgi_path_info;
|
||||
''
|
||||
);
|
||||
};
|
||||
};
|
||||
}));
|
||||
config =
|
||||
let
|
||||
location = removeSuffix "/" cfg.location;
|
||||
in
|
||||
mkIf cfg.enable {
|
||||
locations."${location}/" = {
|
||||
root = "${pkgs.cgit-pink}/cgit/";
|
||||
tryFiles = "$uri @cgit";
|
||||
};
|
||||
locations."~ ^${location}/(cgit.(css|png)|favicon.ico|robots.txt)$" = {
|
||||
alias = "${pkgs.cgit-pink}/cgit/$1";
|
||||
};
|
||||
locations."~ ^${location}/custom.css$" = {
|
||||
alias = ./custom.css;
|
||||
};
|
||||
locations."@cgit" = {
|
||||
extraConfig =
|
||||
''
|
||||
include ${pkgs.nginx}/conf/fastcgi_params;
|
||||
fastcgi_param CGIT_CONFIG ${settingsFormat.generate "cgitrc" settings};
|
||||
fastcgi_param SCRIPT_FILENAME ${pkgs.cgit-pink}/cgit/cgit.cgi;
|
||||
fastcgi_param QUERY_STRING $args;
|
||||
fastcgi_param HTTP_HOST $server_name;
|
||||
fastcgi_pass unix:${globalConfig.services.fcgiwrap.socketAddress};
|
||||
''
|
||||
+ (
|
||||
if cfg.location == "/" then
|
||||
''
|
||||
fastcgi_param PATH_INFO $uri;
|
||||
''
|
||||
else
|
||||
''
|
||||
fastcgi_split_path_info ^(${location}/)(/?.+)$;
|
||||
fastcgi_param PATH_INFO $fastcgi_path_info;
|
||||
''
|
||||
);
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
};
|
||||
|
||||
config = let
|
||||
vhosts = config.services.nginx.virtualHosts;
|
||||
in
|
||||
config =
|
||||
let
|
||||
vhosts = config.services.nginx.virtualHosts;
|
||||
in
|
||||
mkIf (any (name: vhosts.${name}.cgit.enable) (attrNames vhosts)) {
|
||||
# make the cgitrc manpage available
|
||||
environment.systemPackages = [pkgs.cgit-pink];
|
||||
environment.systemPackages = [ pkgs.cgit-pink ];
|
||||
|
||||
services.fcgiwrap.enable = true;
|
||||
};
|
||||
|
|
|
@@ -1,4 +1,5 @@
{config, ...}: {
{ config, ... }:
{
services.cloudflare-dyndns = {
enable = true;
ipv4 = true;

@@ -2,7 +2,8 @@
lib,
fleetModules,
...
}: {
}:
{
imports = fleetModules [
"nix"
"auto-upgrade"

@@ -1,4 +1,5 @@
{pkgs, ...}: {
services.dbus.packages = [pkgs.dconf];
{ pkgs, ... }:
{
services.dbus.packages = [ pkgs.dconf ];
programs.dconf.enable = true;
}

@@ -1,13 +1,16 @@
{
pkgs,
config,
...
}: {
}:
{
virtualisation.podman.enable = true;
# virtualisation.docker.enable = true;
environment.systemPackages = with pkgs; [
docker-compose
podman-compose
];
ccr.extraGroups = ["docker" "podman"];
ccr.extraGroups = [
"docker"
"podman"
];
}

@@ -1,10 +1,23 @@
{pkgs, ...}: {
{ pkgs, ... }:
{
fonts = {
packages = with pkgs; [powerline-fonts dejavu_fonts fira-code fira-code-symbols iosevka iosevka-comfy.comfy emacs-all-the-icons-fonts nerdfonts joypixels etBook vegur];
packages = with pkgs; [
powerline-fonts
dejavu_fonts
fira-code
fira-code-symbols
iosevka
iosevka-comfy.comfy
emacs-all-the-icons-fonts
nerdfonts
joypixels
etBook
vegur
];
fontconfig.defaultFonts = {
monospace = ["DejaVu Sans Mono for Powerline"];
sansSerif = ["DejaVu Sans"];
serif = ["DejaVu Serif"];
monospace = [ "DejaVu Sans Mono for Powerline" ];
sansSerif = [ "DejaVu Sans" ];
serif = [ "DejaVu Serif" ];
};
};
nixpkgs.config.joypixels.acceptLicense = true;

@ -4,10 +4,27 @@
|
|||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
storeDeps = pkgs.runCommand "store-deps" {} ''
|
||||
}:
|
||||
let
|
||||
storeDeps = pkgs.runCommand "store-deps" { } ''
|
||||
mkdir -p $out/bin
|
||||
for dir in ${with pkgs; builtins.toString [coreutils findutils gnugrep gawk git nix bash jq nodejs nix-fast-build curl tea]}; do
|
||||
for dir in ${
|
||||
with pkgs;
|
||||
builtins.toString [
|
||||
coreutils
|
||||
findutils
|
||||
gnugrep
|
||||
gawk
|
||||
git
|
||||
nix
|
||||
bash
|
||||
jq
|
||||
nodejs
|
||||
nix-fast-build
|
||||
curl
|
||||
tea
|
||||
]
|
||||
}; do
|
||||
for bin in "$dir"/bin/*; do
|
||||
ln -s "$bin" "$out/bin/$(basename "$bin")"
|
||||
done
|
||||
|
@ -28,182 +45,194 @@
|
|||
exec nix copy --to "s3://cache?profile=default®ion=eu-south-1&scheme=https&endpoint=cache.aciceri.dev" $OUT_PATHS
|
||||
'';
|
||||
in
|
||||
lib.mkMerge [
|
||||
{
|
||||
# everything here has no dependencies on the store
|
||||
systemd.services.gitea-runner-nix-image = {
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = ["podman.service"];
|
||||
requires = ["podman.service"];
|
||||
path = [config.virtualisation.podman.package pkgs.gnutar pkgs.shadow pkgs.getent];
|
||||
# we also include etc here because the cleanup job also wants the nixuser to be present
|
||||
script = ''
|
||||
set -eux -o pipefail
|
||||
mkdir -p etc/nix
|
||||
lib.mkMerge [
|
||||
{
|
||||
# everything here has no dependencies on the store
|
||||
systemd.services.gitea-runner-nix-image = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "podman.service" ];
|
||||
requires = [ "podman.service" ];
|
||||
path = [
|
||||
config.virtualisation.podman.package
|
||||
pkgs.gnutar
|
||||
pkgs.shadow
|
||||
pkgs.getent
|
||||
];
|
||||
# we also include etc here because the cleanup job also wants the nixuser to be present
|
||||
script = ''
|
||||
set -eux -o pipefail
|
||||
mkdir -p etc/nix
|
||||
|
||||
# Create an unpriveleged user that we can use also without the run-as-user.sh script
|
||||
touch etc/passwd etc/group
|
||||
groupid=$(cut -d: -f3 < <(getent group nixuser))
|
||||
userid=$(cut -d: -f3 < <(getent passwd nixuser))
|
||||
groupadd --prefix $(pwd) --gid "$groupid" nixuser
|
||||
emptypassword='$6$1ero.LwbisiU.h3D$GGmnmECbPotJoPQ5eoSTD6tTjKnSWZcjHoVTkxFLZP17W9hRi/XkmCiAMOfWruUwy8gMjINrBMNODc7cYEo4K.'
|
||||
useradd --prefix $(pwd) -p "$emptypassword" -m -d /tmp -u "$userid" -g "$groupid" -G nixuser nixuser
|
||||
# Create an unpriveleged user that we can use also without the run-as-user.sh script
|
||||
touch etc/passwd etc/group
|
||||
groupid=$(cut -d: -f3 < <(getent group nixuser))
|
||||
userid=$(cut -d: -f3 < <(getent passwd nixuser))
|
||||
groupadd --prefix $(pwd) --gid "$groupid" nixuser
|
||||
emptypassword='$6$1ero.LwbisiU.h3D$GGmnmECbPotJoPQ5eoSTD6tTjKnSWZcjHoVTkxFLZP17W9hRi/XkmCiAMOfWruUwy8gMjINrBMNODc7cYEo4K.'
|
||||
useradd --prefix $(pwd) -p "$emptypassword" -m -d /tmp -u "$userid" -g "$groupid" -G nixuser nixuser
|
||||
|
||||
echo -n "access-tokens = " > etc/nix/access-tokens
|
||||
cat ${config.age.secrets.forgejo-nix-access-tokens.path} >> etc/nix/access-tokens
|
||||
echo -n "access-tokens = " > etc/nix/access-tokens
|
||||
cat ${config.age.secrets.forgejo-nix-access-tokens.path} >> etc/nix/access-tokens
|
||||
|
||||
cat <<NIX_CONFIG > etc/nix/nix.conf
|
||||
accept-flake-config = true
|
||||
experimental-features = nix-command flakes
|
||||
post-build-hook = ${pushToCache}
|
||||
include access-tokens
|
||||
NIX_CONFIG
|
||||
cat <<NIX_CONFIG > etc/nix/nix.conf
|
||||
accept-flake-config = true
|
||||
experimental-features = nix-command flakes
|
||||
post-build-hook = ${pushToCache}
|
||||
include access-tokens
|
||||
NIX_CONFIG
|
||||
|
||||
cat <<NSSWITCH > etc/nsswitch.conf
|
||||
passwd: files mymachines systemd
|
||||
group: files mymachines systemd
|
||||
shadow: files
|
||||
cat <<NSSWITCH > etc/nsswitch.conf
|
||||
passwd: files mymachines systemd
|
||||
group: files mymachines systemd
|
||||
shadow: files
|
||||
|
||||
hosts: files mymachines dns myhostname
|
||||
networks: files
|
||||
hosts: files mymachines dns myhostname
|
||||
networks: files
|
||||
|
||||
ethers: files
|
||||
services: files
|
||||
protocols: files
|
||||
rpc: files
|
||||
NSSWITCH
|
||||
ethers: files
|
||||
services: files
|
||||
protocols: files
|
||||
rpc: files
|
||||
NSSWITCH
|
||||
|
||||
# list the content as it will be imported into the container
|
||||
tar -cv . | tar -tvf -
|
||||
tar -cv . | podman import - gitea-runner-nix
|
||||
'';
|
||||
serviceConfig = {
|
||||
RuntimeDirectory = "gitea-runner-nix-image";
|
||||
WorkingDirectory = "/run/gitea-runner-nix-image";
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
};
|
||||
# list the content as it will be imported into the container
|
||||
tar -cv . | tar -tvf -
|
||||
tar -cv . | podman import - gitea-runner-nix
|
||||
'';
|
||||
serviceConfig = {
|
||||
RuntimeDirectory = "gitea-runner-nix-image";
|
||||
WorkingDirectory = "/run/gitea-runner-nix-image";
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
};
|
||||
};
|
||||
|
||||
users.users.nixuser = {
|
||||
group = "nixuser";
|
||||
description = "Used for running nix ci jobs";
|
||||
home = "/var/empty";
|
||||
isSystemUser = true;
|
||||
# extraGroups = [ "podman" ];
|
||||
};
|
||||
users.groups.nixuser = {};
|
||||
}
|
||||
{
|
||||
# Format of the token file:
|
||||
virtualisation = {
|
||||
podman.enable = true;
|
||||
};
|
||||
users.users.nixuser = {
|
||||
group = "nixuser";
|
||||
description = "Used for running nix ci jobs";
|
||||
home = "/var/empty";
|
||||
isSystemUser = true;
|
||||
# extraGroups = [ "podman" ];
|
||||
};
|
||||
users.groups.nixuser = { };
|
||||
}
|
||||
{
|
||||
# Format of the token file:
|
||||
virtualisation = {
|
||||
podman.enable = true;
|
||||
};
|
||||
|
||||
# virtualisation.containers.storage.settings = {
|
||||
# storage.driver = "zfs";
|
||||
# storage.graphroot = "/var/lib/containers/storage";
|
||||
# storage.runroot = "/run/containers/storage";
|
||||
# storage.options.zfs.fsname = "zroot/root/podman";
|
||||
# };
|
||||
# virtualisation.containers.storage.settings = {
|
||||
# storage.driver = "zfs";
|
||||
# storage.graphroot = "/var/lib/containers/storage";
|
||||
# storage.runroot = "/run/containers/storage";
|
||||
# storage.options.zfs.fsname = "zroot/root/podman";
|
||||
# };
|
||||
|
||||
# virtualisation.containers.containersConf.settings = {
|
||||
# # podman seems to not work with systemd-resolved
|
||||
# containers.dns_servers = [ "8.8.8.8" "8.8.4.4" ];
|
||||
# };
|
||||
}
|
||||
{
|
||||
systemd.services = lib.genAttrs (builtins.genList (n: "gitea-runner-nix${builtins.toString n}") numInstances) (name: {
|
||||
# TODO: systemd confinment
|
||||
serviceConfig = {
|
||||
# Hardening (may overlap with DynamicUser=)
|
||||
# The following options are only for optimizing output of systemd-analyze
|
||||
AmbientCapabilities = "";
|
||||
CapabilityBoundingSet = "";
|
||||
# ProtectClock= adds DeviceAllow=char-rtc r
|
||||
DeviceAllow = "";
|
||||
NoNewPrivileges = true;
|
||||
PrivateDevices = true;
|
||||
PrivateMounts = true;
|
||||
PrivateTmp = true;
|
||||
PrivateUsers = true;
|
||||
ProtectClock = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectHome = true;
|
||||
ProtectHostname = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectSystem = "strict";
|
||||
RemoveIPC = true;
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
UMask = "0066";
|
||||
ProtectProc = "invisible";
|
||||
SystemCallFilter = [
|
||||
"~@clock"
|
||||
"~@cpu-emulation"
|
||||
"~@module"
|
||||
"~@mount"
|
||||
"~@obsolete"
|
||||
"~@raw-io"
|
||||
"~@reboot"
|
||||
"~@swap"
|
||||
# needed by go?
|
||||
#"~@resources"
|
||||
"~@privileged"
|
||||
"~capset"
|
||||
"~setdomainname"
|
||||
"~sethostname"
|
||||
];
|
||||
RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX" "AF_NETLINK"];
|
||||
|
||||
# Needs network access
|
||||
PrivateNetwork = false;
|
||||
# Cannot be true due to Node
|
||||
MemoryDenyWriteExecute = false;
|
||||
|
||||
# The more restrictive "pid" option makes `nix` commands in CI emit
|
||||
# "GC Warning: Couldn't read /proc/stat"
|
||||
# You may want to set this to "pid" if not using `nix` commands
|
||||
ProcSubset = "all";
|
||||
# Coverage programs for compiled code such as `cargo-tarpaulin` disable
|
||||
# ASLR (address space layout randomization) which requires the
|
||||
# `personality` syscall
|
||||
# You may want to set this to `true` if not using coverage tooling on
|
||||
# compiled code
|
||||
LockPersonality = false;
|
||||
|
||||
# Note that this has some interactions with the User setting; so you may
|
||||
# want to consult the systemd docs if using both.
|
||||
DynamicUser = true;
|
||||
};
|
||||
});
|
||||
|
||||
services.gitea-actions-runner = {
|
||||
package = pkgs.forgejo-actions-runner;
|
||||
instances = lib.genAttrs (builtins.genList (n: "nix${builtins.toString n}") numInstances) (name: {
|
||||
enable = true;
|
||||
name = "nix-runner";
|
||||
# take the git root url from the gitea config
|
||||
# only possible if you've also configured your gitea though the same nix config
|
||||
# otherwise you need to set it manually
|
||||
url = "https://git.aciceri.dev";
|
||||
# use your favourite nix secret manager to get a path for this
|
||||
tokenFile = config.age.secrets.forgejo-runners-token.path;
|
||||
labels = ["nix:docker://gitea-runner-nix"];
|
||||
settings = {
|
||||
container.options = "-e NIX_BUILD_SHELL=/bin/bash -e PAGER=cat -e PATH=/bin -e SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt --device /dev/kvm -v /nix:/nix -v ${storeDeps}/bin:/bin -v ${storeDeps}/etc/ssl:/etc/ssl --user nixuser --device=/dev/kvm";
|
||||
# the default network that also respects our dns server settings
|
||||
container.network = "host";
|
||||
container.valid_volumes = [
|
||||
"/nix"
|
||||
"${storeDeps}/bin"
|
||||
"${storeDeps}/etc/ssl"
|
||||
# virtualisation.containers.containersConf.settings = {
|
||||
# # podman seems to not work with systemd-resolved
|
||||
# containers.dns_servers = [ "8.8.8.8" "8.8.4.4" ];
|
||||
# };
|
||||
}
|
||||
{
|
||||
systemd.services =
|
||||
lib.genAttrs (builtins.genList (n: "gitea-runner-nix${builtins.toString n}") numInstances)
|
||||
(_name: {
|
||||
# TODO: systemd confinment
|
||||
serviceConfig = {
|
||||
# Hardening (may overlap with DynamicUser=)
|
||||
# The following options are only for optimizing output of systemd-analyze
|
||||
AmbientCapabilities = "";
|
||||
CapabilityBoundingSet = "";
|
||||
# ProtectClock= adds DeviceAllow=char-rtc r
|
||||
DeviceAllow = "";
|
||||
NoNewPrivileges = true;
|
||||
PrivateDevices = true;
|
||||
PrivateMounts = true;
|
||||
PrivateTmp = true;
|
||||
PrivateUsers = true;
|
||||
ProtectClock = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectHome = true;
|
||||
ProtectHostname = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectSystem = "strict";
|
||||
RemoveIPC = true;
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
UMask = "0066";
|
||||
ProtectProc = "invisible";
|
||||
SystemCallFilter = [
|
||||
"~@clock"
|
||||
"~@cpu-emulation"
|
||||
"~@module"
|
||||
"~@mount"
|
||||
"~@obsolete"
|
||||
"~@raw-io"
|
||||
"~@reboot"
|
||||
"~@swap"
|
||||
# needed by go?
|
||||
#"~@resources"
|
||||
"~@privileged"
|
||||
"~capset"
|
||||
"~setdomainname"
|
||||
"~sethostname"
|
||||
];
|
||||
RestrictAddressFamilies = [
|
||||
"AF_INET"
|
||||
"AF_INET6"
|
||||
"AF_UNIX"
|
||||
"AF_NETLINK"
|
||||
];
|
||||
|
||||
# Needs network access
|
||||
PrivateNetwork = false;
|
||||
# Cannot be true due to Node
|
||||
MemoryDenyWriteExecute = false;
|
||||
|
||||
# The more restrictive "pid" option makes `nix` commands in CI emit
|
||||
# "GC Warning: Couldn't read /proc/stat"
|
||||
# You may want to set this to "pid" if not using `nix` commands
|
||||
ProcSubset = "all";
|
||||
# Coverage programs for compiled code such as `cargo-tarpaulin` disable
|
||||
# ASLR (address space layout randomization) which requires the
|
||||
# `personality` syscall
|
||||
# You may want to set this to `true` if not using coverage tooling on
|
||||
# compiled code
|
||||
LockPersonality = false;
|
||||
|
||||
# Note that this has some interactions with the User setting; so you may
|
||||
# want to consult the systemd docs if using both.
|
||||
DynamicUser = true;
|
||||
};
|
||||
});
|
||||
};
|
||||
}
|
||||
]
|
||||
|
||||
services.gitea-actions-runner = {
|
||||
package = pkgs.forgejo-actions-runner;
|
||||
instances = lib.genAttrs (builtins.genList (n: "nix${builtins.toString n}") numInstances) (name: {
|
||||
enable = true;
|
||||
name = "nix-runner";
|
||||
# take the git root url from the gitea config
|
||||
# only possible if you've also configured your gitea though the same nix config
|
||||
# otherwise you need to set it manually
|
||||
url = "https://git.aciceri.dev";
|
||||
# use your favourite nix secret manager to get a path for this
|
||||
tokenFile = config.age.secrets.forgejo-runners-token.path;
|
||||
labels = [ "nix:docker://gitea-runner-nix" ];
|
||||
settings = {
|
||||
container.options = "-e NIX_BUILD_SHELL=/bin/bash -e PAGER=cat -e PATH=/bin -e SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt --device /dev/kvm -v /nix:/nix -v ${storeDeps}/bin:/bin -v ${storeDeps}/etc/ssl:/etc/ssl --user nixuser --device=/dev/kvm";
|
||||
# the default network that also respects our dns server settings
|
||||
container.network = "host";
|
||||
container.valid_volumes = [
|
||||
"/nix"
|
||||
"${storeDeps}/bin"
|
||||
"${storeDeps}/etc/ssl"
|
||||
];
|
||||
};
|
||||
});
|
||||
};
|
||||
}
|
||||
]
|
||||
|
|
|
@@ -1,8 +1,8 @@
{
config,
pkgs,
...
}: {
}:
{
services.forgejo = {
enable = true;
settings = {
@@ -37,7 +37,7 @@
config.services.forgejo.stateDir
];

imports = [../nginx-base];
imports = [ ../nginx-base ];

services.nginx.virtualHosts = {
"git.aciceri.dev" = {

@@ -1,5 +1,5 @@
{
imports = [../pam];
imports = [ ../pam ];

services.fprintd = {
enable = false; # temporarily disable

@ -4,19 +4,20 @@
|
|||
fleetFlake,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
users.users.garmin-collector = {
|
||||
isSystemUser = true;
|
||||
group = "garmin-collector";
|
||||
extraGroups = ["garmin-collector"];
|
||||
extraGroups = [ "garmin-collector" ];
|
||||
home = "/var/lib/garmin-collector";
|
||||
};
|
||||
|
||||
users.groups.garmin-collector = {};
|
||||
users.groups.garmin-collector = { };
|
||||
|
||||
systemd.services.garmin-collector = {
|
||||
description = "Garmin collector pushing to Prometheus Pushgateway";
|
||||
wantedBy = ["multi-user.target"];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
environment = {
|
||||
PUSHGATEWAY_ADDRESS = config.services.prometheus.pushgateway.web.listen-address;
|
||||
};
|
||||
|
@ -32,7 +33,7 @@
|
|||
};
|
||||
|
||||
systemd.timers."garmin-collector" = {
|
||||
wantedBy = ["timers.target"];
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnBootSec = "5m";
|
||||
OnUnitActiveSec = "4h";
|
||||
|
|
|
@@ -1,6 +1,8 @@
{config, ...}: let
{ config, ... }:
let
cfg = config.services.grafana;
in {
in
{
services.grafana = {
enable = true;
settings = {

@ -2,7 +2,8 @@
|
|||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
sessions = builtins.concatStringsSep ":" [
|
||||
(pkgs.writeTextFile {
|
||||
name = "xorg-session.desktop";
|
||||
|
@ -23,7 +24,8 @@
|
|||
'';
|
||||
})
|
||||
];
|
||||
in {
|
||||
in
|
||||
{
|
||||
services.greetd = {
|
||||
enable = true;
|
||||
vt = 2;
|
||||
|
|
|
@ -2,10 +2,11 @@
|
|||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
nixpkgs.overlays = [
|
||||
(self: super: {
|
||||
grocy = super.grocy.overrideAttrs (old: {
|
||||
(_self: super: {
|
||||
grocy = super.grocy.overrideAttrs (_old: {
|
||||
meta.broken = false;
|
||||
version = "4.0.1";
|
||||
src = pkgs.fetchFromGitHub {
|
||||
|
|
|
@ -1,14 +1,17 @@
|
|||
{pkgs, ...}: {
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
# Creates an user that home assistant can log in as to power off the system
|
||||
users.users.hass = {
|
||||
openssh.authorizedKeys.keys = ["ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFcoVVrMFili8UBjziIu2wyFgcDGTlT1avBh2nLTa9aM"];
|
||||
openssh.authorizedKeys.keys = [
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFcoVVrMFili8UBjziIu2wyFgcDGTlT1avBh2nLTa9aM"
|
||||
];
|
||||
isNormalUser = true;
|
||||
isSystemUser = false;
|
||||
group = "hass";
|
||||
createHome = false;
|
||||
};
|
||||
|
||||
users.groups.hass = {};
|
||||
users.groups.hass = { };
|
||||
|
||||
security.sudo.extraConfig = ''
|
||||
hass ALL=NOPASSWD:${pkgs.systemd}/bin/systemctl
|
||||
|
|
|
@@ -1,4 +1,5 @@
{config, ...}: {
{ config, ... }:
{
services.hercules-ci-agent = {
enable = true;
settings = {

@ -2,36 +2,26 @@
|
|||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
smartthings-fork = pkgs.fetchFromGitHub {
|
||||
owner = "veista";
|
||||
repo = "smartthings";
|
||||
rev = "ba1a6f33c6ac37d81f4263073571628803e79697";
|
||||
sha256 = "sha256-X3SYkg0B5pzEich7/4iUmlADJneVuT8HTVnIiC7odRE=";
|
||||
};
|
||||
}:
|
||||
let
|
||||
pun_sensor = pkgs.fetchFromGitHub {
|
||||
owner = "virtualdj";
|
||||
repo = "pun_sensor";
|
||||
rev = "51b216fab5c0d454d66060647c36e81bebfaf059";
|
||||
hash = "sha256-bGVJx3bObXdf4AiC6bDvafs53NGS2aufRcTUmXy8nAI=";
|
||||
};
|
||||
cozy_life = pkgs.fetchFromGitHub {
|
||||
owner = "yangqian";
|
||||
repo = "hass-cozylife";
|
||||
rev = "9a40a2fa09b0f74aee0b278e2858f5600b3487a9";
|
||||
hash = "sha256-i+82EUamV1Fhwhb1vhRqn9aA9dJ0FxSSMD734domyhw=";
|
||||
};
|
||||
garmin_connect = pkgs.fetchFromGitHub {
|
||||
owner = "cyberjunky";
|
||||
repo = "home-assistant-garmin_connect";
|
||||
rev = "d42edcabc67ba6a7f960e849c8aaec1aabef87c0";
|
||||
hash = "sha256-KqbP6TpH9B0/AjtsW5TcWSNgUhND+w8rO6X8fHqtsDI=";
|
||||
};
|
||||
in {
|
||||
in
|
||||
{
|
||||
services.home-assistant = {
|
||||
enable = true;
|
||||
openFirewall = true;
|
||||
package = pkgs.home-assistant.overrideAttrs (old: {
|
||||
package = pkgs.home-assistant.overrideAttrs (_old: {
|
||||
doInstallCheck = false;
|
||||
# prePatch =
|
||||
# ''
|
||||
|
@ -61,8 +51,8 @@ in {
|
|||
"wake_on_lan"
|
||||
"prometheus"
|
||||
];
|
||||
extraPackages = python3Packages:
|
||||
with python3Packages; [
|
||||
extraPackages =
|
||||
python3Packages: with python3Packages; [
|
||||
# used by pun_sensor
|
||||
holidays
|
||||
beautifulsoup4
|
||||
|
@ -72,10 +62,13 @@ in {
|
|||
tzlocal
|
||||
];
|
||||
config = {
|
||||
default_config = {};
|
||||
default_config = { };
|
||||
http = {
|
||||
use_x_forwarded_for = true;
|
||||
trusted_proxies = ["127.0.0.1" "::1"];
|
||||
trusted_proxies = [
|
||||
"127.0.0.1"
|
||||
"::1"
|
||||
];
|
||||
};
|
||||
# ffmpeg = {};
|
||||
# camera = [
|
||||
|
@ -105,7 +98,7 @@ in {
|
|||
# data.mac = "20:28:bc:74:14:c2";
|
||||
# };
|
||||
# }];
|
||||
wake_on_lan = {};
|
||||
wake_on_lan = { };
|
||||
switch = [
|
||||
{
|
||||
name = "Picard";
|
||||
|
@ -164,7 +157,7 @@ in {
|
|||
containers = {
|
||||
whisper = {
|
||||
image = "rhasspy/wyoming-whisper:latest";
|
||||
ports = ["10300:10300"];
|
||||
ports = [ "10300:10300" ];
|
||||
cmd = [
|
||||
"--model"
|
||||
"medium-int8"
|
||||
|
@ -174,7 +167,7 @@ in {
|
|||
};
|
||||
piper = {
|
||||
image = "rhasspy/wyoming-piper:latest";
|
||||
ports = ["10200:10200"];
|
||||
ports = [ "10200:10200" ];
|
||||
cmd = [
|
||||
"--voice"
|
||||
"it_IT-riccardo-x_low"
|
||||
|
|
|
@@ -1,8 +1,8 @@
{
services.my-hydra.repos = {
emacs = {};
nixfleet = {};
trotten = {};
blog = {};
emacs = { };
nixfleet = { };
trotten = { };
blog = { };
};
}

@ -3,58 +3,63 @@
|
|||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.services.my-hydra;
|
||||
toSpec = {
|
||||
name,
|
||||
owner,
|
||||
...
|
||||
}: let
|
||||
spec = {
|
||||
enabled = 1;
|
||||
hidden = false;
|
||||
description = "Declarative specification jobset automatically generated";
|
||||
checkinterval = 120;
|
||||
schedulingshares = 10000;
|
||||
enableemail = false;
|
||||
emailoverride = "";
|
||||
keepnr = 1;
|
||||
nixexprinput = "src";
|
||||
nixexprpath = "jobsets.nix";
|
||||
inputs = {
|
||||
src = {
|
||||
type = "path";
|
||||
value = pkgs.writeTextFile {
|
||||
name = "src";
|
||||
text = builtins.readFile ./jobsets.nix;
|
||||
destination = "/jobsets.nix";
|
||||
};
|
||||
emailresponsible = false;
|
||||
};
|
||||
repoInfoPath = {
|
||||
type = "path";
|
||||
value = pkgs.writeTextFile {
|
||||
name = "repo";
|
||||
text = builtins.toJSON {
|
||||
inherit name owner;
|
||||
toSpec =
|
||||
{
|
||||
name,
|
||||
owner,
|
||||
...
|
||||
}:
|
||||
let
|
||||
spec = {
|
||||
enabled = 1;
|
||||
hidden = false;
|
||||
description = "Declarative specification jobset automatically generated";
|
||||
checkinterval = 120;
|
||||
schedulingshares = 10000;
|
||||
enableemail = false;
|
||||
emailoverride = "";
|
||||
keepnr = 1;
|
||||
nixexprinput = "src";
|
||||
nixexprpath = "jobsets.nix";
|
||||
inputs = {
|
||||
src = {
|
||||
type = "path";
|
||||
value = pkgs.writeTextFile {
|
||||
name = "src";
|
||||
text = builtins.readFile ./jobsets.nix;
|
||||
destination = "/jobsets.nix";
|
||||
};
|
||||
emailresponsible = false;
|
||||
};
|
||||
repoInfoPath = {
|
||||
type = "path";
|
||||
value = pkgs.writeTextFile {
|
||||
name = "repo";
|
||||
text = builtins.toJSON {
|
||||
inherit name owner;
|
||||
};
|
||||
};
|
||||
emailresponsible = false;
|
||||
};
|
||||
prs = {
|
||||
type = "githubpulls";
|
||||
value = "${owner} ${name}";
|
||||
emailresponsible = false;
|
||||
};
|
||||
emailresponsible = false;
|
||||
};
|
||||
prs = {
|
||||
type = "githubpulls";
|
||||
value = "${owner} ${name}";
|
||||
emailresponsible = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
drv = pkgs.writeTextFile {
|
||||
name = "hydra-jobset-specification-${name}";
|
||||
text = builtins.toJSON spec;
|
||||
destination = "/spec.json";
|
||||
};
|
||||
in "${drv}";
|
||||
in {
|
||||
drv = pkgs.writeTextFile {
|
||||
name = "hydra-jobset-specification-${name}";
|
||||
text = builtins.toJSON spec;
|
||||
destination = "/spec.json";
|
||||
};
|
||||
in
|
||||
"${drv}";
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./config.nix
|
||||
../nginx-base
|
||||
|
@ -66,35 +71,40 @@ in {
|
|||
default = "hydra.aciceri.dev";
|
||||
};
|
||||
repos = lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.submodule ({
|
||||
name,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
options = {
|
||||
name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = name;
|
||||
};
|
||||
owner = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "aciceri";
|
||||
};
|
||||
description = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = config.homepage;
|
||||
};
|
||||
homepage = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "https://github.com/${config.owner}/${config.name}";
|
||||
};
|
||||
reportStatus = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
};
|
||||
};
|
||||
}));
|
||||
default = {};
|
||||
type = lib.types.attrsOf (
|
||||
lib.types.submodule (
|
||||
{
|
||||
name,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
{
|
||||
options = {
|
||||
name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = name;
|
||||
};
|
||||
owner = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "aciceri";
|
||||
};
|
||||
description = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = config.homepage;
|
||||
};
|
||||
homepage = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "https://github.com/${config.owner}/${config.name}";
|
||||
};
|
||||
reportStatus = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
default = { };
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -115,28 +125,38 @@ in {
|
|||
include ${config.age.secrets.hydra-github-token.path}
|
||||
</github_authorization>
|
||||
''
|
||||
+ (lib.concatMapStrings (repo:
|
||||
lib.optionalString repo.reportStatus
|
||||
''
|
||||
+ (lib.concatMapStrings (
|
||||
repo:
|
||||
lib.optionalString repo.reportStatus ''
|
||||
<githubstatus>
|
||||
jobs = ${repo.name}.*
|
||||
excludeBuildFromContext = 1
|
||||
useShortContext = 1
|
||||
</githubstatus>
|
||||
'') (builtins.attrValues cfg.repos));
|
||||
''
|
||||
) (builtins.attrValues cfg.repos));
|
||||
};
|
||||
|
||||
systemd.services.hydra-setup = {
|
||||
description = "Hydra CI setup";
|
||||
serviceConfig.Type = "oneshot";
|
||||
serviceConfig.RemainAfterExit = true;
|
||||
wantedBy = ["multi-user.target"];
|
||||
requires = ["hydra-init.service"];
|
||||
after = ["hydra-init.service"];
|
||||
environment = builtins.removeAttrs (config.systemd.services.hydra-init.environment) ["PATH"];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
requires = [ "hydra-init.service" ];
|
||||
after = [ "hydra-init.service" ];
|
||||
environment = builtins.removeAttrs (config.systemd.services.hydra-init.environment) [ "PATH" ];
|
||||
script =
|
||||
''
|
||||
PATH=$PATH:${lib.makeBinPath (with pkgs; [yq-go curl config.services.hydra.package])}
|
||||
PATH=$PATH:${
|
||||
lib.makeBinPath (
|
||||
with pkgs;
|
||||
[
|
||||
yq-go
|
||||
curl
|
||||
config.services.hydra.package
|
||||
]
|
||||
)
|
||||
}
|
||||
PASSWORD="$(cat ${config.age.secrets.hydra-admin-password.path})"
|
||||
if [ ! -e ~hydra/.setup-is-complete ]; then
|
||||
hydra-create-user admin \
|
||||
|
|
|
@ -2,42 +2,45 @@
|
|||
repoInfoPath,
|
||||
prs,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
minutes = 60;
|
||||
hours = 60 * minutes;
|
||||
days = 24 * hours;
|
||||
filterAttrs = pred: set:
|
||||
builtins.listToAttrs (builtins.concatMap (name: let
|
||||
v = set.${name};
|
||||
mapAttrs' = f: set: builtins.listToAttrs (map (attr: f attr set.${attr}) (builtins.attrNames set));
|
||||
|
||||
mkJobset =
|
||||
{
|
||||
enabled ? 1,
|
||||
hidden ? false,
|
||||
type ? 1,
|
||||
description ? "",
|
||||
checkinterval ? 5 * minutes,
|
||||
schedulingshares ? 100,
|
||||
enableemail ? false,
|
||||
emailoverride ? "",
|
||||
keepnr ? 1,
|
||||
flake,
|
||||
}:
|
||||
{
|
||||
inherit
|
||||
enabled
|
||||
hidden
|
||||
type
|
||||
description
|
||||
checkinterval
|
||||
schedulingshares
|
||||
enableemail
|
||||
emailoverride
|
||||
keepnr
|
||||
flake
|
||||
;
|
||||
};
|
||||
|
||||
mkSpec =
|
||||
contents:
|
||||
let
|
||||
escape = builtins.replaceStrings [ ''"'' ] [ ''\"'' ];
|
||||
contentsJson = builtins.toJSON contents;
|
||||
in
|
||||
if pred name v
|
||||
then [
|
||||
{
|
||||
inherit name;
|
||||
value = v;
|
||||
}
|
||||
]
|
||||
else []) (builtins.attrNames set));
|
||||
mapAttrs' = f: set:
|
||||
builtins.listToAttrs (map (attr: f attr set.${attr}) (builtins.attrNames set));
|
||||
|
||||
mkJobset = {
|
||||
enabled ? 1,
|
||||
hidden ? false,
|
||||
type ? 1,
|
||||
description ? "",
|
||||
checkinterval ? 5 * minutes,
|
||||
schedulingshares ? 100,
|
||||
enableemail ? false,
|
||||
emailoverride ? "",
|
||||
keepnr ? 1,
|
||||
flake,
|
||||
} @ args: {inherit enabled hidden type description checkinterval schedulingshares enableemail emailoverride keepnr flake;};
|
||||
|
||||
mkSpec = contents: let
|
||||
escape = builtins.replaceStrings [''"''] [''\"''];
|
||||
contentsJson = builtins.toJSON contents;
|
||||
in
|
||||
builtins.derivation {
|
||||
name = "spec.json";
|
||||
system = "x86_64-linux";
|
||||
|
@ -54,20 +57,21 @@
|
|||
repo = builtins.fromJSON (builtins.readFile repoInfoPath);
|
||||
|
||||
pullRequests = builtins.fromJSON (builtins.readFile prs);
|
||||
pullRequestsToBuild = filterAttrs (n: pr: pr.head.repo != null && pr.head.repo.owner.login == repo.owner && pr.head.repo.name == repo.name) pullRequests;
|
||||
in {
|
||||
jobsets = mkSpec ({
|
||||
in
|
||||
{
|
||||
jobsets = mkSpec (
|
||||
{
|
||||
master = mkJobset {
|
||||
description = "${repo.name}'s master branch";
|
||||
flake = "git+ssh://git@github.com/${repo.owner}/${repo.name}?ref=master";
|
||||
};
|
||||
}
|
||||
// (mapAttrs' (n: pr: {
|
||||
name = "pullRequest_${n}";
|
||||
value = mkJobset {
|
||||
description = pr.title;
|
||||
flake = "git+ssh://git@github.com/${repo.owner}/${repo.name}?ref=${pr.head.ref}";
|
||||
};
|
||||
})
|
||||
pullRequests));
|
||||
name = "pullRequest_${n}";
|
||||
value = mkJobset {
|
||||
description = pr.title;
|
||||
flake = "git+ssh://git@github.com/${repo.owner}/${repo.name}?ref=${pr.head.ref}";
|
||||
};
|
||||
}) pullRequests)
|
||||
);
|
||||
}
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
{config, ...}: let
|
||||
{ config, ... }:
|
||||
let
|
||||
nixpkgsImmich = builtins.getFlake "github:NixOS/nixpkgs/c0ee4c1770aa1ef998c977c4cc653a07ec95d9bf";
|
||||
in {
|
||||
in
|
||||
{
|
||||
containers.nextcloud = {
|
||||
nixpkgs = nixpkgsImmich;
|
||||
autoStart = true;
|
||||
|
@ -9,15 +11,14 @@ in {
|
|||
# localAddress = "192.168.100.11";
|
||||
# hostAddress6 = "fc00::1";
|
||||
# localAddress6 = "fc00::2";
|
||||
config = {
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
services.immich = {
|
||||
enable = true;
|
||||
config =
|
||||
{
|
||||
...
|
||||
}:
|
||||
{
|
||||
services.immich = {
|
||||
enable = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
@ -3,9 +3,9 @@
|
|||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
inherit
|
||||
(lib)
|
||||
}:
|
||||
let
|
||||
inherit (lib)
|
||||
hasAttr
|
||||
hasPrefix
|
||||
maintainers
|
||||
|
@ -31,17 +31,18 @@
|
|||
|
||||
isServerPostgresUnix = hasPrefix "/" serverCfg.postgres.host;
|
||||
postgresEnv =
|
||||
if isServerPostgresUnix
|
||||
then {
|
||||
# If passwordFile is given, this will be overwritten in ExecStart
|
||||
DB_URL = "socket://${serverCfg.postgres.host}?dbname=${serverCfg.postgres.database}";
|
||||
}
|
||||
else {
|
||||
DB_HOSTNAME = serverCfg.postgres.host;
|
||||
DB_PORT = toString serverCfg.postgres.port;
|
||||
DB_DATABASE_NAME = serverCfg.postgres.database;
|
||||
DB_USERNAME = serverCfg.postgres.username;
|
||||
};
|
||||
if isServerPostgresUnix then
|
||||
{
|
||||
# If passwordFile is given, this will be overwritten in ExecStart
|
||||
DB_URL = "socket://${serverCfg.postgres.host}?dbname=${serverCfg.postgres.database}";
|
||||
}
|
||||
else
|
||||
{
|
||||
DB_HOSTNAME = serverCfg.postgres.host;
|
||||
DB_PORT = toString serverCfg.postgres.port;
|
||||
DB_DATABASE_NAME = serverCfg.postgres.database;
|
||||
DB_USERNAME = serverCfg.postgres.username;
|
||||
};
|
||||
|
||||
typesenseEnv =
|
||||
{
|
||||
|
@ -54,7 +55,8 @@
|
|||
};
|
||||
|
||||
# Don't start a redis instance if the user sets a custom redis connection
|
||||
enableRedis = !hasAttr "REDIS_URL" serverCfg.extraConfig && !hasAttr "REDIS_SOCKET" serverCfg.extraConfig;
|
||||
enableRedis =
|
||||
!hasAttr "REDIS_URL" serverCfg.extraConfig && !hasAttr "REDIS_SOCKET" serverCfg.extraConfig;
|
||||
redisServerCfg = config.services.redis.servers.immich;
|
||||
redisEnv = optionalAttrs enableRedis {
|
||||
REDIS_SOCKET = redisServerCfg.unixSocket;
|
||||
|
@ -69,9 +71,7 @@
|
|||
|
||||
IMMICH_MEDIA_LOCATION = serverCfg.mediaDir;
|
||||
IMMICH_MACHINE_LEARNING_URL =
|
||||
if serverCfg.machineLearningUrl != null
|
||||
then serverCfg.machineLearningUrl
|
||||
else "false";
|
||||
if serverCfg.machineLearningUrl != null then serverCfg.machineLearningUrl else "false";
|
||||
};
|
||||
|
||||
serverStartWrapper = program: ''
|
||||
|
@ -79,9 +79,10 @@
|
|||
mkdir -p ${serverCfg.mediaDir}
|
||||
|
||||
${optionalString (serverCfg.postgres.passwordFile != null) (
|
||||
if isServerPostgresUnix
|
||||
then ''export DB_URL="socket://${serverCfg.postgres.username}:$(cat ${serverCfg.postgres.passwordFile})@${serverCfg.postgres.host}?dbname=${serverCfg.postgres.database}"''
|
||||
else "export DB_PASSWORD=$(cat ${serverCfg.postgres.passwordFile})"
|
||||
if isServerPostgresUnix then
|
||||
''export DB_URL="socket://${serverCfg.postgres.username}:$(cat ${serverCfg.postgres.passwordFile})@${serverCfg.postgres.host}?dbname=${serverCfg.postgres.database}"''
|
||||
else
|
||||
"export DB_PASSWORD=$(cat ${serverCfg.postgres.passwordFile})"
|
||||
)}
|
||||
|
||||
${optionalString serverCfg.typesense.enable ''
|
||||
|
@ -146,30 +147,27 @@
|
|||
EnvironmentFile = mkIf (serverCfg.environmentFile != null) serverCfg.environmentFile;
|
||||
|
||||
TemporaryFileSystem = "/:ro";
|
||||
BindReadOnlyPaths =
|
||||
[
|
||||
"/nix/store"
|
||||
"-/etc/resolv.conf"
|
||||
"-/etc/nsswitch.conf"
|
||||
"-/etc/hosts"
|
||||
"-/etc/localtime"
|
||||
"-/run/postgresql"
|
||||
]
|
||||
++ optional enableRedis redisServerCfg.unixSocket;
|
||||
BindReadOnlyPaths = [
|
||||
"/nix/store"
|
||||
"-/etc/resolv.conf"
|
||||
"-/etc/nsswitch.conf"
|
||||
"-/etc/hosts"
|
||||
"-/etc/localtime"
|
||||
"-/run/postgresql"
|
||||
] ++ optional enableRedis redisServerCfg.unixSocket;
|
||||
};
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.services.immich = {
|
||||
enable =
|
||||
mkEnableOption "immich"
|
||||
// {
|
||||
description = ''
|
||||
Enables immich which consists of a backend server, microservices,
|
||||
machine-learning and web ui. You can disable or reconfigure components
|
||||
individually using the subsections.
|
||||
'';
|
||||
};
|
||||
enable = mkEnableOption "immich" // {
|
||||
description = ''
|
||||
Enables immich which consists of a backend server, microservices,
|
||||
machine-learning and web ui. You can disable or reconfigure components
|
||||
individually using the subsections.
|
||||
'';
|
||||
};
|
||||
|
||||
package = mkPackageOption pkgs "immich" {};
|
||||
package = mkPackageOption pkgs "immich" { };
|
||||
|
||||
server = {
|
||||
mediaDir = mkOption {
|
||||
|
@ -179,11 +177,9 @@ in {
|
|||
};
|
||||
|
||||
backend = {
|
||||
enable =
|
||||
mkEnableOption "immich backend server"
|
||||
// {
|
||||
default = true;
|
||||
};
|
||||
enable = mkEnableOption "immich backend server" // {
|
||||
default = true;
|
||||
};
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 3001;
|
||||
|
@ -198,7 +194,7 @@ in {
|
|||
|
||||
extraConfig = mkOption {
|
||||
type = types.attrs;
|
||||
default = {};
|
||||
default = { };
|
||||
example = {
|
||||
LOG_LEVEL = "debug";
|
||||
};
|
||||
|
@ -220,11 +216,9 @@ in {
|
|||
};
|
||||
|
||||
microservices = {
|
||||
enable =
|
||||
mkEnableOption "immich microservices"
|
||||
// {
|
||||
default = true;
|
||||
};
|
||||
enable = mkEnableOption "immich microservices" // {
|
||||
default = true;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
|
@ -240,7 +234,7 @@ in {
|
|||
|
||||
extraConfig = mkOption {
|
||||
type = types.attrs;
|
||||
default = {};
|
||||
default = { };
|
||||
example = {
|
||||
REVERSE_GEOCODING_PRECISION = 1;
|
||||
};
|
||||
|
@ -262,11 +256,9 @@ in {
|
|||
};
|
||||
|
||||
typesense = {
|
||||
enable =
|
||||
mkEnableOption "typesense"
|
||||
// {
|
||||
default = true;
|
||||
};
|
||||
enable = mkEnableOption "typesense" // {
|
||||
default = true;
|
||||
};
|
||||
|
||||
host = mkOption {
|
||||
type = types.str;
|
||||
|
@ -343,7 +335,7 @@ in {
|
|||
|
||||
extraConfig = mkOption {
|
||||
type = types.attrs;
|
||||
default = {};
|
||||
default = { };
|
||||
example = {
|
||||
REDIS_SOCKET = "/run/custom-redis";
|
||||
};
|
||||
|
@ -365,11 +357,9 @@ in {
|
|||
};
|
||||
|
||||
web = {
|
||||
enable =
|
||||
mkEnableOption "immich web frontend"
|
||||
// {
|
||||
default = true;
|
||||
};
|
||||
enable = mkEnableOption "immich web frontend" // {
|
||||
default = true;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
|
@ -398,7 +388,7 @@ in {
|
|||
|
||||
extraConfig = mkOption {
|
||||
type = types.attrs;
|
||||
default = {};
|
||||
default = { };
|
||||
example = {
|
||||
PUBLIC_LOGIN_PAGE_MESSAGE = "My awesome Immich instance!";
|
||||
};
|
||||
|
@ -410,11 +400,9 @@ in {
|
|||
};
|
||||
|
||||
machineLearning = {
|
||||
enable =
|
||||
mkEnableOption "immich machine-learning server"
|
||||
// {
|
||||
default = true;
|
||||
};
|
||||
enable = mkEnableOption "immich machine-learning server" // {
|
||||
default = true;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
|
@ -430,7 +418,7 @@ in {
|
|||
|
||||
extraConfig = mkOption {
|
||||
type = types.attrs;
|
||||
default = {};
|
||||
default = { };
|
||||
example = {
|
||||
MACHINE_LEARNING_MODEL_TTL = 600;
|
||||
};
|
||||
|
@ -451,10 +439,10 @@ in {
|
|||
];
|
||||
|
||||
networking.firewall.allowedTCPPorts = mkMerge [
|
||||
(mkIf (backendCfg.enable && backendCfg.openFirewall) [backendCfg.port])
|
||||
(mkIf (microservicesCfg.enable && microservicesCfg.openFirewall) [microservicesCfg.port])
|
||||
(mkIf (webCfg.enable && webCfg.openFirewall) [webCfg.port])
|
||||
(mkIf (mlCfg.enable && mlCfg.openFirewall) [mlCfg.port])
|
||||
(mkIf (backendCfg.enable && backendCfg.openFirewall) [ backendCfg.port ])
|
||||
(mkIf (microservicesCfg.enable && microservicesCfg.openFirewall) [ microservicesCfg.port ])
|
||||
(mkIf (webCfg.enable && webCfg.openFirewall) [ webCfg.port ])
|
||||
(mkIf (mlCfg.enable && mlCfg.openFirewall) [ mlCfg.port ])
|
||||
];
|
||||
|
||||
services.redis.servers.immich.enable = mkIf enableRedis true;
|
||||
|
@ -462,15 +450,13 @@ in {
|
|||
|
||||
systemd.services.immich-server = mkIf backendCfg.enable {
|
||||
description = "Immich backend server (Self-hosted photo and video backup solution)";
|
||||
after =
|
||||
[
|
||||
"network.target"
|
||||
"typesense.service"
|
||||
"postgresql.service"
|
||||
"immich-machine-learning.service"
|
||||
]
|
||||
++ optional enableRedis "redis-immich.service";
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = [
|
||||
"network.target"
|
||||
"typesense.service"
|
||||
"postgresql.service"
|
||||
"immich-machine-learning.service"
|
||||
] ++ optional enableRedis "redis-immich.service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
environment =
|
||||
serverEnv
|
||||
|
@ -491,15 +477,13 @@ in {
|
|||
|
||||
systemd.services.immich-microservices = mkIf microservicesCfg.enable {
|
||||
description = "Immich microservices (Self-hosted photo and video backup solution)";
|
||||
after =
|
||||
[
|
||||
"network.target"
|
||||
"typesense.service"
|
||||
"postgresql.service"
|
||||
"immich-machine-learning.service"
|
||||
]
|
||||
++ optional enableRedis "redis-immich.service";
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = [
|
||||
"network.target"
|
||||
"typesense.service"
|
||||
"postgresql.service"
|
||||
"immich-machine-learning.service"
|
||||
] ++ optional enableRedis "redis-immich.service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
environment =
|
||||
serverEnv
|
||||
|
@ -524,16 +508,14 @@ in {
|
|||
"network.target"
|
||||
"immich-server.service"
|
||||
];
|
||||
wantedBy = ["multi-user.target"];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
environment =
|
||||
{
|
||||
NODE_ENV = "production";
|
||||
PORT = toString webCfg.port;
|
||||
IMMICH_SERVER_URL = webCfg.serverUrl;
|
||||
IMMICH_API_URL_EXTERNAL = webCfg.apiUrlExternal;
|
||||
}
|
||||
// mapAttrs (_: toString) webCfg.extraConfig;
|
||||
environment = {
|
||||
NODE_ENV = "production";
|
||||
PORT = toString webCfg.port;
|
||||
IMMICH_SERVER_URL = webCfg.serverUrl;
|
||||
IMMICH_API_URL_EXTERNAL = webCfg.apiUrlExternal;
|
||||
} // mapAttrs (_: toString) webCfg.extraConfig;
|
||||
|
||||
script = ''
|
||||
set -euo pipefail
|
||||
|
@ -541,68 +523,62 @@ in {
|
|||
export PUBLIC_IMMICH_API_URL_EXTERNAL=$IMMICH_API_URL_EXTERNAL
|
||||
exec ${cfg.package.web}/bin/web
|
||||
'';
|
||||
serviceConfig =
|
||||
commonServiceConfig
|
||||
// {
|
||||
DynamicUser = true;
|
||||
User = "immich-web";
|
||||
Group = "immich-web";
|
||||
serviceConfig = commonServiceConfig // {
|
||||
DynamicUser = true;
|
||||
User = "immich-web";
|
||||
Group = "immich-web";
|
||||
|
||||
MemoryDenyWriteExecute = false; # nodejs requires this.
|
||||
MemoryDenyWriteExecute = false; # nodejs requires this.
|
||||
|
||||
TemporaryFileSystem = "/:ro";
|
||||
BindReadOnlyPaths = [
|
||||
"/nix/store"
|
||||
"-/etc/resolv.conf"
|
||||
"-/etc/nsswitch.conf"
|
||||
"-/etc/hosts"
|
||||
"-/etc/localtime"
|
||||
];
|
||||
};
|
||||
TemporaryFileSystem = "/:ro";
|
||||
BindReadOnlyPaths = [
|
||||
"/nix/store"
|
||||
"-/etc/resolv.conf"
|
||||
"-/etc/nsswitch.conf"
|
||||
"-/etc/hosts"
|
||||
"-/etc/localtime"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.immich-machine-learning = mkIf mlCfg.enable {
|
||||
description = "Immich machine learning (Self-hosted photo and video backup solution)";
|
||||
after = ["network.target"];
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
environment =
|
||||
{
|
||||
NODE_ENV = "production";
|
||||
MACHINE_LEARNING_PORT = toString mlCfg.port;
|
||||
environment = {
|
||||
NODE_ENV = "production";
|
||||
MACHINE_LEARNING_PORT = toString mlCfg.port;
|
||||
|
||||
MACHINE_LEARNING_CACHE_FOLDER = "/var/cache/immich-ml";
|
||||
TRANSFORMERS_CACHE = "/var/cache/immich-ml";
|
||||
}
|
||||
// mapAttrs (_: toString) mlCfg.extraConfig;
|
||||
MACHINE_LEARNING_CACHE_FOLDER = "/var/cache/immich-ml";
|
||||
TRANSFORMERS_CACHE = "/var/cache/immich-ml";
|
||||
} // mapAttrs (_: toString) mlCfg.extraConfig;
|
||||
|
||||
serviceConfig =
|
||||
commonServiceConfig
|
||||
// {
|
||||
ExecStart = "${cfg.package.machine-learning}/bin/machine-learning";
|
||||
DynamicUser = true;
|
||||
User = "immich-ml";
|
||||
Group = "immich-ml";
|
||||
serviceConfig = commonServiceConfig // {
|
||||
ExecStart = "${cfg.package.machine-learning}/bin/machine-learning";
|
||||
DynamicUser = true;
|
||||
User = "immich-ml";
|
||||
Group = "immich-ml";
|
||||
|
||||
MemoryDenyWriteExecute = false; # onnxruntime_pybind11 requires this.
|
||||
ProcSubset = "all"; # Needs /proc/cpuinfo
|
||||
MemoryDenyWriteExecute = false; # onnxruntime_pybind11 requires this.
|
||||
ProcSubset = "all"; # Needs /proc/cpuinfo
|
||||
|
||||
CacheDirectory = "immich-ml";
|
||||
CacheDirectoryMode = "0700";
|
||||
CacheDirectory = "immich-ml";
|
||||
CacheDirectoryMode = "0700";
|
||||
|
||||
# TODO gpu access
|
||||
# TODO gpu access
|
||||
|
||||
TemporaryFileSystem = "/:ro";
|
||||
BindReadOnlyPaths = [
|
||||
"/nix/store"
|
||||
"-/etc/resolv.conf"
|
||||
"-/etc/nsswitch.conf"
|
||||
"-/etc/hosts"
|
||||
"-/etc/localtime"
|
||||
];
|
||||
};
|
||||
TemporaryFileSystem = "/:ro";
|
||||
BindReadOnlyPaths = [
|
||||
"/nix/store"
|
||||
"-/etc/resolv.conf"
|
||||
"-/etc/nsswitch.conf"
|
||||
"-/etc/hosts"
|
||||
"-/etc/localtime"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
meta.maintainers = with maintainers; [oddlama];
|
||||
meta.maintainers = with maintainers; [ oddlama ];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -3,5 +3,5 @@
|
|||
enable = true;
|
||||
};
|
||||
|
||||
users.users.jellyfin.extraGroups = ["transmission"];
|
||||
users.users.jellyfin.extraGroups = [ "transmission" ];
|
||||
}
|
||||
|
|
|
@ -1,20 +1,20 @@
|
|||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
services.xserver = {
|
||||
enable = true;
|
||||
desktopManager.kodi = {
|
||||
enable = true;
|
||||
package = pkgs.kodi.withPackages (ps:
|
||||
with ps; [
|
||||
package = pkgs.kodi.withPackages (
|
||||
ps: with ps; [
|
||||
joystick
|
||||
youtube
|
||||
libretro
|
||||
libretro-mgba
|
||||
]);
|
||||
]
|
||||
);
|
||||
};
|
||||
displayManager.autoLogin = {
|
||||
enable = true;
|
||||
|
@ -28,8 +28,8 @@
|
|||
};
|
||||
|
||||
networking.firewall = {
|
||||
allowedTCPPorts = [8080];
|
||||
allowedUDPPorts = [8080];
|
||||
allowedTCPPorts = [ 8080 ];
|
||||
allowedUDPPorts = [ 8080 ];
|
||||
};
|
||||
|
||||
# environment.systemPackages = with pkgs; [xboxdrv cifs-utils];
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
{config, ...}: let
|
||||
{ config, ... }:
|
||||
let
|
||||
cfg = config.services.loki;
|
||||
in {
|
||||
in
|
||||
{
|
||||
services.loki = {
|
||||
enable = true;
|
||||
configuration = {
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
{fleetFlake, ...}: {
|
||||
{ fleetFlake, ... }:
|
||||
{
|
||||
services.macos-ventura = {
|
||||
enable = true;
|
||||
cores = 8;
|
||||
threads = 8;
|
||||
mem = "8G";
|
||||
vncListenAddr = "0.0.0.0";
|
||||
extraQemuFlags = ["-nographic"];
|
||||
extraQemuFlags = [ "-nographic" ];
|
||||
sshPort = 2021;
|
||||
installNix = true;
|
||||
stateless = true;
|
||||
|
|
|
@ -5,7 +5,8 @@
|
|||
fleetHmModules,
|
||||
fleetFlake,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
options.mara = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
|
@ -14,12 +15,15 @@
|
|||
|
||||
modules = lib.mkOption {
|
||||
type = with lib.types; listOf str;
|
||||
default = ["shell" "git"];
|
||||
default = [
|
||||
"shell"
|
||||
"git"
|
||||
];
|
||||
};
|
||||
|
||||
packages = lib.mkOption {
|
||||
type = with lib.types; listOf package;
|
||||
default = [];
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
autologin = lib.mkOption {
|
||||
|
@ -39,7 +43,12 @@
|
|||
|
||||
extraGroups = lib.mkOption {
|
||||
type = with lib.types; listOf str;
|
||||
default = ["wheel" "fuse" "networkmanager" "dialout"];
|
||||
default = [
|
||||
"wheel"
|
||||
"fuse"
|
||||
"networkmanager"
|
||||
"dialout"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -54,10 +63,7 @@
|
|||
openssh.authorizedKeys.keys = config.mara.authorizedKeys;
|
||||
};
|
||||
|
||||
services.getty.autologinUser =
|
||||
if config.mara.autologin
|
||||
then "mara"
|
||||
else null;
|
||||
services.getty.autologinUser = if config.mara.autologin then "mara" else null;
|
||||
|
||||
home-manager.useGlobalPkgs = true;
|
||||
home-manager.useUserPackages = true;
|
||||
|
|
|
@ -3,7 +3,8 @@
|
|||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
clientConfig = {
|
||||
"m.homeserver".base_url = "https://matrix.aciceri.dev";
|
||||
"org.matrix.msc3575.proxy".url = "https://syncv3.matrix.aciceri.dev";
|
||||
|
@ -14,8 +15,9 @@
|
|||
add_header Access-Control-Allow-Origin *;
|
||||
return 200 '${builtins.toJSON data}';
|
||||
'';
|
||||
in {
|
||||
imports = [../nginx-base];
|
||||
in
|
||||
{
|
||||
imports = [ ../nginx-base ];
|
||||
|
||||
services.nginx.virtualHosts = {
|
||||
"aciceri.dev" = {
|
||||
|
@ -58,20 +60,23 @@ in {
|
|||
listeners = [
|
||||
{
|
||||
port = 8008;
|
||||
bind_addresses = ["127.0.0.1"];
|
||||
bind_addresses = [ "127.0.0.1" ];
|
||||
type = "http";
|
||||
tls = false;
|
||||
x_forwarded = true;
|
||||
resources = [
|
||||
{
|
||||
names = ["client" "federation"];
|
||||
names = [
|
||||
"client"
|
||||
"federation"
|
||||
];
|
||||
compress = true;
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
extraConfigFiles = [config.age.secrets.matrix-registration-shared-secret.path];
|
||||
extraConfigFiles = [ config.age.secrets.matrix-registration-shared-secret.path ];
|
||||
};
|
||||
|
||||
backup.paths = [
|
||||
|
@ -81,7 +86,7 @@ in {
|
|||
|
||||
services.postgresqlBackup = {
|
||||
enable = true;
|
||||
databases = ["matrix-synapse"];
|
||||
databases = [ "matrix-synapse" ];
|
||||
};
|
||||
|
||||
services.matrix-sliding-sync = {
|
||||
|
|
|
@ -11,5 +11,5 @@
|
|||
];
|
||||
};
|
||||
|
||||
users.users.mediatomb.extraGroups = ["transmission"];
|
||||
users.users.mediatomb.extraGroups = [ "transmission" ];
|
||||
}
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
services.minidlna = {
|
||||
enable = true;
|
||||
openFirewall = true;
|
||||
|
@ -11,6 +12,6 @@
|
|||
};
|
||||
};
|
||||
|
||||
ccr.extraGroups = ["minidlna"];
|
||||
users.users.minidlna.extraGroups = ["transmission"];
|
||||
ccr.extraGroups = [ "minidlna" ];
|
||||
users.users.minidlna.extraGroups = [ "transmission" ];
|
||||
}
|
||||
|
|
|
@ -2,14 +2,15 @@
|
|||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
imports = [../nginx-base];
|
||||
}:
|
||||
{
|
||||
imports = [ ../nginx-base ];
|
||||
|
||||
services.minio = {
|
||||
enable = true;
|
||||
rootCredentialsFile = config.age.secrets.minio-credentials.path;
|
||||
region = "eu-south-1";
|
||||
dataDir = lib.mkForce ["/mnt/hd/minio"];
|
||||
dataDir = lib.mkForce [ "/mnt/hd/minio" ];
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."cache.aciceri.dev" = {
|
||||
|
@ -25,8 +26,7 @@
|
|||
'';
|
||||
locations."/" = {
|
||||
proxyPass = "http://127.0.0.1:9000";
|
||||
extraConfig = ''
|
||||
'';
|
||||
extraConfig = '''';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
{config, ...}: {
|
||||
imports = [../nginx-base];
|
||||
{ ... }:
|
||||
{
|
||||
imports = [ ../nginx-base ];
|
||||
services.nginx.virtualHosts = {
|
||||
"home.aciceri.dev" = {
|
||||
forceSSL = true;
|
||||
|
|
|
@ -2,15 +2,20 @@
|
|||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
fileSystems."/home/${config.ccr.username}/torrent" = {
|
||||
device = "//sisko.fleet/torrent";
|
||||
fsType = "cifs";
|
||||
options = let
|
||||
credentials = pkgs.writeText "credentials" ''
|
||||
username=guest
|
||||
password=
|
||||
'';
|
||||
in ["credentials=${credentials},x-systemd.automount,noauto,x-systemd.idle-timeout=60,x-systemd.device-timeout=5s,x-systemd.mount-timeout=5s"];
|
||||
options =
|
||||
let
|
||||
credentials = pkgs.writeText "credentials" ''
|
||||
username=guest
|
||||
password=
|
||||
'';
|
||||
in
|
||||
[
|
||||
"credentials=${credentials},x-systemd.automount,noauto,x-systemd.idle-timeout=60,x-systemd.device-timeout=5s,x-systemd.mount-timeout=5s"
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
{lib, ...}: {
|
||||
{ lib, ... }:
|
||||
{
|
||||
networking.networkmanager.enable = true;
|
||||
ccr.extraGroups = ["networkmanager"];
|
||||
ccr.extraGroups = [ "networkmanager" ];
|
||||
networking.useDHCP = lib.mkDefault true;
|
||||
}
|
||||
|
|
|
@ -2,14 +2,13 @@
|
|||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
cfg = config.services.nextcloud;
|
||||
in {
|
||||
}:
|
||||
{
|
||||
systemd.tmpfiles.rules = [
|
||||
"d /mnt/raid/nextcloud 770 nextcloud nextcloud"
|
||||
];
|
||||
|
||||
ccr.extraGroups = ["nextcloud"];
|
||||
ccr.extraGroups = [ "nextcloud" ];
|
||||
|
||||
services.nextcloud = {
|
||||
enable = true;
|
||||
|
@ -23,5 +22,5 @@ in {
|
|||
};
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [80];
|
||||
networking.firewall.allowedTCPPorts = [ 80 ];
|
||||
}
|
||||
|
|
|
@ -2,10 +2,12 @@
|
|||
config,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.services.my-nix-serve;
|
||||
in {
|
||||
imports = [../nginx-base];
|
||||
in
|
||||
{
|
||||
imports = [ ../nginx-base ];
|
||||
options.services.my-nix-serve = {
|
||||
domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
|
|
|
@ -2,9 +2,9 @@
|
|||
config,
|
||||
lib,
|
||||
fleetFlake,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
nix = {
|
||||
optimise.automatic = true;
|
||||
|
||||
|
@ -35,7 +35,7 @@
|
|||
# deprecated-features = [ "url-literals" ];
|
||||
};
|
||||
|
||||
nixPath = ["nixpkgs=${fleetFlake.inputs.nixpkgs}"];
|
||||
nixPath = [ "nixpkgs=${fleetFlake.inputs.nixpkgs}" ];
|
||||
|
||||
extraOptions = ''
|
||||
experimental-features = nix-command flakes impure-derivations
|
||||
|
@ -48,7 +48,8 @@
|
|||
options = "--delete-older-than 180d";
|
||||
};
|
||||
|
||||
registry = lib.mkForce ({
|
||||
registry = lib.mkForce (
|
||||
{
|
||||
nixpkgs.to = {
|
||||
type = "path";
|
||||
path = fleetFlake.inputs.nixpkgs;
|
||||
|
@ -71,7 +72,8 @@
|
|||
type = "path";
|
||||
path = "/home/${config.ccr.username}/.config/emacs";
|
||||
};
|
||||
}));
|
||||
})
|
||||
);
|
||||
|
||||
distributedBuilds = true;
|
||||
buildMachines =
|
||||
|
@ -79,7 +81,12 @@
|
|||
hostName = "sisko.fleet";
|
||||
system = "aarch64-linux";
|
||||
maxJobs = 7;
|
||||
supportedFeatures = ["kvm" "nixos-test" "big-parallel" "benchmark"];
|
||||
supportedFeatures = [
|
||||
"kvm"
|
||||
"nixos-test"
|
||||
"big-parallel"
|
||||
"benchmark"
|
||||
];
|
||||
protocol = "ssh-ng";
|
||||
sshUser = "root";
|
||||
sshKey = "/home/${config.ccr.username}/.ssh/id_rsa";
|
||||
|
@ -88,7 +95,12 @@
|
|||
hostName = "mac.staging.mlabs.city?remote-program=/run/current-system/sw/bin/nix-store";
|
||||
system = "x86_64-darwin";
|
||||
maxJobs = 4;
|
||||
supportedFeatures = ["kvm" "nixos-test" "big-parallel" "benchmark"];
|
||||
supportedFeatures = [
|
||||
"kvm"
|
||||
"nixos-test"
|
||||
"big-parallel"
|
||||
"benchmark"
|
||||
];
|
||||
protocol = "ssh";
|
||||
sshUser = "root";
|
||||
sshKey = "/home/${config.ccr.username}/.ssh/id_rsa";
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
{...}: {
|
||||
{ ... }:
|
||||
{
|
||||
networking.firewall.interfaces."wg0" = {
|
||||
allowedTCPPorts = [
|
||||
35901
|
||||
];
|
||||
};
|
||||
imports = [../nginx-base];
|
||||
imports = [ ../nginx-base ];
|
||||
services.nginx.virtualHosts = {
|
||||
"roam.aciceri.dev" = {
|
||||
forceSSL = true;
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
services.paperless = {
|
||||
enable = true;
|
||||
address = "0.0.0.0";
|
||||
|
@ -7,7 +8,10 @@
|
|||
consumptionDir = "/mnt/hd/paperless/consume";
|
||||
settings = {
|
||||
PAPERLESS_OCR_LANGUAGE = "ita+eng";
|
||||
PAPERLESS_CONSUMER_IGNORE_PATTERN = builtins.toJSON [".DS_STORE/*" "desktop.ini"];
|
||||
PAPERLESS_CONSUMER_IGNORE_PATTERN = builtins.toJSON [
|
||||
".DS_STORE/*"
|
||||
"desktop.ini"
|
||||
];
|
||||
PAPERLESS_OCR_USER_ARGS = builtins.toJSON {
|
||||
optimize = 1;
|
||||
pdfa_image_compression = "lossless";
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{lib, ...}: {
|
||||
{ lib, ... }:
|
||||
{
|
||||
services.pipewire.enable = true;
|
||||
|
||||
hardware.pulseaudio = {
|
||||
|
|
|
@ -9,5 +9,5 @@
|
|||
"d /mnt/raid/plex 770 plex plex"
|
||||
];
|
||||
|
||||
users.users.plex.extraGroups = ["transmission"];
|
||||
users.users.plex.extraGroups = [ "transmission" ];
|
||||
}
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{pkgs, ...}: {
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
services.avahi = {
|
||||
enable = true;
|
||||
# Important to resolve .local domains of printers, otherwise you get an error
|
||||
|
@ -11,7 +12,7 @@
|
|||
services.printing = {
|
||||
enable = true;
|
||||
drivers = [
|
||||
(pkgs.callPackage ./driver.nix {})
|
||||
(pkgs.callPackage ./driver.nix { })
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -12,8 +12,13 @@
|
|||
coreutils,
|
||||
gnugrep,
|
||||
which,
|
||||
}: let
|
||||
arches = ["x86_64" "i686" "armv7l"];
|
||||
}:
|
||||
let
|
||||
arches = [
|
||||
"x86_64"
|
||||
"i686"
|
||||
"armv7l"
|
||||
];
|
||||
|
||||
runtimeDeps = [
|
||||
ghostscript
|
||||
|
@ -24,63 +29,67 @@
|
|||
which
|
||||
];
|
||||
in
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "cups-brother-mfcl2710dw";
|
||||
version = "4.0.0-1";
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "cups-brother-mfcl2710dw";
|
||||
version = "4.0.0-1";
|
||||
|
||||
nativeBuildInputs = [dpkg makeWrapper autoPatchelfHook];
|
||||
buildInputs = [perl];
|
||||
nativeBuildInputs = [
|
||||
dpkg
|
||||
makeWrapper
|
||||
autoPatchelfHook
|
||||
];
|
||||
buildInputs = [ perl ];
|
||||
|
||||
dontUnpack = true;
|
||||
dontUnpack = true;
|
||||
|
||||
src = fetchurl {
|
||||
url = "https://download.brother.com/welcome/dlf103526/mfcl2710dwpdrv-${version}.i386.deb";
|
||||
hash = "sha256-OOTvbCuyxw4k01CTMuBqG2boMN13q5xC7LacaweGmyw=";
|
||||
};
|
||||
src = fetchurl {
|
||||
url = "https://download.brother.com/welcome/dlf103526/mfcl2710dwpdrv-${version}.i386.deb";
|
||||
hash = "sha256-OOTvbCuyxw4k01CTMuBqG2boMN13q5xC7LacaweGmyw=";
|
||||
};
|
||||
|
||||
installPhase =
|
||||
''
|
||||
runHook preInstall
|
||||
installPhase =
|
||||
''
|
||||
runHook preInstall
|
||||
|
||||
mkdir -p $out
|
||||
dpkg-deb -x $src $out
|
||||
mkdir -p $out
|
||||
dpkg-deb -x $src $out
|
||||
|
||||
# delete unnecessary files for the current architecture
|
||||
''
|
||||
+ lib.concatMapStrings (arch: ''
|
||||
echo Deleting files for ${arch}
|
||||
rm -r "$out/opt/brother/Printers/MFCL2710DW/lpd/${arch}"
|
||||
'') (builtins.filter (arch: arch != stdenv.hostPlatform.linuxArch) arches)
|
||||
+ ''
|
||||
# delete unnecessary files for the current architecture
|
||||
''
|
||||
+ lib.concatMapStrings (arch: ''
|
||||
echo Deleting files for ${arch}
|
||||
rm -r "$out/opt/brother/Printers/MFCL2710DW/lpd/${arch}"
|
||||
'') (builtins.filter (arch: arch != stdenv.hostPlatform.linuxArch) arches)
|
||||
+ ''
|
||||
|
||||
# bundled scripts don't understand the arch subdirectories for some reason
|
||||
ln -s \
|
||||
"$out/opt/brother/Printers/MFCL2710DW/lpd/${stdenv.hostPlatform.linuxArch}/"* \
|
||||
"$out/opt/brother/Printers/MFCL2710DW/lpd/"
|
||||
# bundled scripts don't understand the arch subdirectories for some reason
|
||||
ln -s \
|
||||
"$out/opt/brother/Printers/MFCL2710DW/lpd/${stdenv.hostPlatform.linuxArch}/"* \
|
||||
"$out/opt/brother/Printers/MFCL2710DW/lpd/"
|
||||
|
||||
# Fix global references and replace auto discovery mechanism with hardcoded values
|
||||
substituteInPlace $out/opt/brother/Printers/MFCL2710DW/lpd/lpdfilter \
|
||||
--replace /opt "$out/opt" \
|
||||
--replace "my \$BR_PRT_PATH =" "my \$BR_PRT_PATH = \"$out/opt/brother/Printers/MFCL2710DW\"; #" \
|
||||
--replace "PRINTER =~" "PRINTER = \"MFCL2710DW\"; #"
|
||||
# Fix global references and replace auto discovery mechanism with hardcoded values
|
||||
substituteInPlace $out/opt/brother/Printers/MFCL2710DW/lpd/lpdfilter \
|
||||
--replace /opt "$out/opt" \
|
||||
--replace "my \$BR_PRT_PATH =" "my \$BR_PRT_PATH = \"$out/opt/brother/Printers/MFCL2710DW\"; #" \
|
||||
--replace "PRINTER =~" "PRINTER = \"MFCL2710DW\"; #"
|
||||
|
||||
# Make sure all executables have the necessary runtime dependencies available
|
||||
find "$out" -executable -and -type f | while read file; do
|
||||
wrapProgram "$file" --prefix PATH : "${lib.makeBinPath runtimeDeps}"
|
||||
done
|
||||
# Make sure all executables have the necessary runtime dependencies available
|
||||
find "$out" -executable -and -type f | while read file; do
|
||||
wrapProgram "$file" --prefix PATH : "${lib.makeBinPath runtimeDeps}"
|
||||
done
|
||||
|
||||
# Symlink filter and ppd into a location where CUPS will discover it
|
||||
mkdir -p $out/lib/cups/filter
|
||||
mkdir -p $out/share/cups/model
|
||||
# Symlink filter and ppd into a location where CUPS will discover it
|
||||
mkdir -p $out/lib/cups/filter
|
||||
mkdir -p $out/share/cups/model
|
||||
|
||||
ln -s \
|
||||
$out/opt/brother/Printers/MFCL2710DW/lpd/lpdfilter \
|
||||
$out/lib/cups/filter/brother_lpdwrapper_MFCL2710DW
|
||||
ln -s \
|
||||
$out/opt/brother/Printers/MFCL2710DW/lpd/lpdfilter \
|
||||
$out/lib/cups/filter/brother_lpdwrapper_MFCL2710DW
|
||||
|
||||
ln -s \
|
||||
$out/opt/brother/Printers/MFCL2710DW/cupswrapper/brother-MFCL2710DW-cups-en.ppd \
|
||||
$out/share/cups/model/
|
||||
ln -s \
|
||||
$out/opt/brother/Printers/MFCL2710DW/cupswrapper/brother-MFCL2710DW-cups-en.ppd \
|
||||
$out/share/cups/model/
|
||||
|
||||
runHook postInstall
|
||||
'';
|
||||
}
|
||||
runHook postInstall
|
||||
'';
|
||||
}
|
||||
|
|
|
@ -1,56 +1,89 @@
|
|||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
hostname = config.networking.hostName;
|
||||
mkFor = hosts: lib.mkIf (builtins.elem hostname hosts);
|
||||
in {
|
||||
in
|
||||
{
|
||||
services.prometheus.exporters = {
|
||||
node = mkFor ["sisko" "picard" "kirk"] {
|
||||
enable = true;
|
||||
enabledCollectors = [
|
||||
"cpu"
|
||||
"conntrack"
|
||||
"diskstats"
|
||||
"entropy"
|
||||
"filefd"
|
||||
"filesystem"
|
||||
"loadavg"
|
||||
"mdadm"
|
||||
"meminfo"
|
||||
"netdev"
|
||||
"netstat"
|
||||
"stat"
|
||||
"time"
|
||||
"vmstat"
|
||||
"systemd"
|
||||
"logind"
|
||||
"interrupts"
|
||||
"ksmd"
|
||||
"textfile"
|
||||
"pressure"
|
||||
];
|
||||
extraFlags = ["--collector.ethtool" "--collector.softirqs" "--collector.tcpstat" "--collector.wifi"];
|
||||
};
|
||||
wireguard = mkFor ["sisko" "picard" "kirk"] {
|
||||
enable = true;
|
||||
};
|
||||
zfs = mkFor ["picard" "kirk"] {
|
||||
enable = true;
|
||||
};
|
||||
node =
|
||||
mkFor
|
||||
[
|
||||
"sisko"
|
||||
"picard"
|
||||
"kirk"
|
||||
]
|
||||
{
|
||||
enable = true;
|
||||
enabledCollectors = [
|
||||
"cpu"
|
||||
"conntrack"
|
||||
"diskstats"
|
||||
"entropy"
|
||||
"filefd"
|
||||
"filesystem"
|
||||
"loadavg"
|
||||
"mdadm"
|
||||
"meminfo"
|
||||
"netdev"
|
||||
"netstat"
|
||||
"stat"
|
||||
"time"
|
||||
"vmstat"
|
||||
"systemd"
|
||||
"logind"
|
||||
"interrupts"
|
||||
"ksmd"
|
||||
"textfile"
|
||||
"pressure"
|
||||
];
|
||||
extraFlags = [
|
||||
"--collector.ethtool"
|
||||
"--collector.softirqs"
|
||||
"--collector.tcpstat"
|
||||
"--collector.wifi"
|
||||
];
|
||||
};
|
||||
wireguard =
|
||||
mkFor
|
||||
[
|
||||
"sisko"
|
||||
"picard"
|
||||
"kirk"
|
||||
]
|
||||
{
|
||||
enable = true;
|
||||
};
|
||||
zfs =
|
||||
mkFor
|
||||
[
|
||||
"picard"
|
||||
"kirk"
|
||||
]
|
||||
{
|
||||
enable = true;
|
||||
};
|
||||
# restic = mkFor ["sisko"] {
|
||||
# enable = true;
|
||||
# };
|
||||
postgres = mkFor ["sisko"] {
|
||||
postgres = mkFor [ "sisko" ] {
|
||||
enable = true;
|
||||
};
|
||||
nginx = mkFor ["sisko"] {
|
||||
enable = true;
|
||||
};
|
||||
smartctl = mkFor ["sisko" "picard" "kirk"] {
|
||||
nginx = mkFor [ "sisko" ] {
|
||||
enable = true;
|
||||
};
|
||||
smartctl =
|
||||
mkFor
|
||||
[
|
||||
"sisko"
|
||||
"picard"
|
||||
"kirk"
|
||||
]
|
||||
{
|
||||
enable = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
{config, ...}: let
|
||||
{ config, ... }:
|
||||
let
|
||||
cfg = config.services.prometheus;
|
||||
in {
|
||||
in
|
||||
{
|
||||
services.prometheus = {
|
||||
enable = true;
|
||||
pushgateway = {
|
||||
|
@ -19,7 +21,9 @@ in {
|
|||
bearer_token_file = config.age.secrets.home-assistant-token.path;
|
||||
static_configs = [
|
||||
{
|
||||
targets = ["sisko.fleet:${builtins.toString config.services.home-assistant.config.http.server_port}"];
|
||||
targets = [
|
||||
"sisko.fleet:${builtins.toString config.services.home-assistant.config.http.server_port}"
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
|
@ -27,7 +31,7 @@ in {
|
|||
job_name = "pushgateway";
|
||||
static_configs = [
|
||||
{
|
||||
targets = [cfg.pushgateway.web.listen-address];
|
||||
targets = [ cfg.pushgateway.web.listen-address ];
|
||||
}
|
||||
];
|
||||
}
|
||||
|
@ -35,7 +39,11 @@ in {
|
|||
job_name = "node";
|
||||
static_configs = [
|
||||
{
|
||||
targets = builtins.map (host: "${host}.fleet:9100") ["sisko" "picard" "kirk"];
|
||||
targets = builtins.map (host: "${host}.fleet:9100") [
|
||||
"sisko"
|
||||
"picard"
|
||||
"kirk"
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
|
@ -43,7 +51,10 @@ in {
|
|||
job_name = "wireguard";
|
||||
static_configs = [
|
||||
{
|
||||
targets = builtins.map (host: "${host}.fleet:9586") ["picard" "kirk"];
|
||||
targets = builtins.map (host: "${host}.fleet:9586") [
|
||||
"picard"
|
||||
"kirk"
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
|
@ -51,7 +62,10 @@ in {
|
|||
job_name = "zfs";
|
||||
static_configs = [
|
||||
{
|
||||
targets = builtins.map (host: "${host}.fleet:9134") ["picard" "kirk"];
|
||||
targets = builtins.map (host: "${host}.fleet:9134") [
|
||||
"picard"
|
||||
"kirk"
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
|
@ -59,7 +73,7 @@ in {
|
|||
job_name = "restic";
|
||||
static_configs = [
|
||||
{
|
||||
targets = builtins.map (host: "${host}.fleet:9753") ["sisko"];
|
||||
targets = builtins.map (host: "${host}.fleet:9753") [ "sisko" ];
|
||||
}
|
||||
];
|
||||
}
|
||||
|
@ -67,7 +81,7 @@ in {
|
|||
job_name = "postgres";
|
||||
static_configs = [
|
||||
{
|
||||
targets = builtins.map (host: "${host}.fleet:9187") ["sisko"];
|
||||
targets = builtins.map (host: "${host}.fleet:9187") [ "sisko" ];
|
||||
}
|
||||
];
|
||||
}
|
||||
|
@ -75,7 +89,7 @@ in {
|
|||
job_name = "nginx";
|
||||
static_configs = [
|
||||
{
|
||||
targets = builtins.map (host: "${host}.fleet:9117") ["sisko"];
|
||||
targets = builtins.map (host: "${host}.fleet:9117") [ "sisko" ];
|
||||
}
|
||||
];
|
||||
}
|
||||
|
@ -83,7 +97,11 @@ in {
|
|||
job_name = "smartctl";
|
||||
static_configs = [
|
||||
{
|
||||
targets = builtins.map (host: "${host}.fleet:9633") ["sisko" "kirk" "picard"];
|
||||
targets = builtins.map (host: "${host}.fleet:9633") [
|
||||
"sisko"
|
||||
"kirk"
|
||||
"picard"
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
|
|
|
@ -3,7 +3,8 @@
|
|||
lib,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
conf = {
|
||||
server = {
|
||||
http_listen_port = 28183;
|
||||
|
@ -11,7 +12,9 @@
|
|||
};
|
||||
clients = [
|
||||
{
|
||||
url = "http://sisko.fleet:${builtins.toString config.services.loki.configuration.server.http_listen_port or 3100}/loki/api/v1/push";
|
||||
url = "http://sisko.fleet:${
|
||||
builtins.toString config.services.loki.configuration.server.http_listen_port or 3100
|
||||
}/loki/api/v1/push";
|
||||
}
|
||||
];
|
||||
positions = {
|
||||
|
@ -29,7 +32,7 @@
|
|||
};
|
||||
relabel_configs = [
|
||||
{
|
||||
source_labels = ["__journal__systemd_unit"];
|
||||
source_labels = [ "__journal__systemd_unit" ];
|
||||
target_label = "unit";
|
||||
}
|
||||
];
|
||||
|
@ -38,12 +41,13 @@
|
|||
};
|
||||
configFile = pkgs.writeTextFile {
|
||||
name = "promtail.yaml";
|
||||
text = lib.generators.toYAML {} conf;
|
||||
text = lib.generators.toYAML { } conf;
|
||||
};
|
||||
in {
|
||||
in
|
||||
{
|
||||
systemd.services.promtail = {
|
||||
description = "Promtail service for Loki";
|
||||
wantedBy = ["multi-user.target"];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
{pkgs, ...}: {
|
||||
services.udev.packages = [pkgs.qmk-udev-rules];
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
services.udev.packages = [ pkgs.qmk-udev-rules ];
|
||||
}
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
{
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
# nixpkgs.config.pulseaudio = true;
|
||||
# services.xrdp = {
|
||||
# enable = true;
|
||||
|
@ -16,11 +16,14 @@
|
|||
# };
|
||||
# displayManager.defaultSession = "xfce";
|
||||
# };
|
||||
environment.systemPackages = with pkgs; [sunshine superTuxKart];
|
||||
environment.systemPackages = with pkgs; [
|
||||
sunshine
|
||||
superTuxKart
|
||||
];
|
||||
|
||||
boot.kernelModules = ["uinput"];
|
||||
boot.kernelModules = [ "uinput" ];
|
||||
|
||||
users.groups.input.members = ["ccr"];
|
||||
users.groups.input.members = [ "ccr" ];
|
||||
|
||||
services.udev.extraRules = ''
|
||||
KERNEL=="uinput", SUBSYSTEM=="misc", OPTIONS+="static_node=uinput", TAG+="uaccess"' |
|
||||
|
|
|
@ -3,11 +3,13 @@
|
|||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
user = "u382036-sub1";
|
||||
host = "u382036.your-storagebox.de";
|
||||
port = "23";
|
||||
in {
|
||||
in
|
||||
{
|
||||
age.secrets = {
|
||||
HETZNER_STORAGE_BOX_SISKO_SSH_PASSWORD = {
|
||||
file = ../../secrets/hetzner-storage-box-sisko-ssh-password.age;
|
||||
|
@ -19,10 +21,12 @@ in {
|
|||
};
|
||||
};
|
||||
|
||||
services.openssh.knownHosts."${host}".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIICf9svRenC/PLKIL9nk6K/pxQgoiFC41wTNvoIncOxs";
|
||||
services.openssh.knownHosts."${
|
||||
host
|
||||
}".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIICf9svRenC/PLKIL9nk6K/pxQgoiFC41wTNvoIncOxs";
|
||||
|
||||
services.restic.backups.sisko = {
|
||||
paths = ["/persist"];
|
||||
paths = [ "/persist" ];
|
||||
passwordFile = config.age.secrets.SISKO_RESTIC_PASSWORD.path;
|
||||
extraOptions = [
|
||||
"sftp.command='${lib.getExe pkgs.sshpass} -f ${config.age.secrets.HETZNER_STORAGE_BOX_SISKO_SSH_PASSWORD.path} ssh -p${port} ${user}@${host} -s sftp'"
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
{config, ...}: {
|
||||
imports = [../nginx-base];
|
||||
{ config, ... }:
|
||||
{
|
||||
imports = [ ../nginx-base ];
|
||||
services.nginx.virtualHosts = {
|
||||
localhost.listen = [{addr = "127.0.0.1";}];
|
||||
localhost.listen = [ { addr = "127.0.0.1"; } ];
|
||||
"home.aciceri.dev" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
|
|
|
@ -36,7 +36,10 @@
|
|||
};
|
||||
|
||||
networking.firewall = {
|
||||
allowedTCPPorts = [139 445];
|
||||
allowedUDPPorts = [138];
|
||||
allowedTCPPorts = [
|
||||
139
|
||||
445
|
||||
];
|
||||
allowedUDPPorts = [ 138 ];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,10 +1,14 @@
|
|||
{pkgs, ...}: {
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
services.searx = {
|
||||
enable = true;
|
||||
package = pkgs.searxng;
|
||||
settings = {
|
||||
server.secret_key = "secret";
|
||||
search.formats = ["html" "json"];
|
||||
search.formats = [
|
||||
"html"
|
||||
"json"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
@ -2,7 +2,8 @@
|
|||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
# For unlocking the disk connect using ssh and type
|
||||
# systemctl start initrd-nixos-activation
|
||||
boot.initrd = {
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{fleetFlake, ...}: {
|
||||
{ fleetFlake, ... }:
|
||||
{
|
||||
services = {
|
||||
openssh = {
|
||||
enable = true;
|
||||
|
@ -15,5 +16,7 @@
|
|||
};
|
||||
|
||||
# This makes sense only because I'm the only user for these machines
|
||||
users.users.root.openssh.authorizedKeys.keys = builtins.attrValues (with (import "${fleetFlake}/lib"); keys.users // keys.hosts);
|
||||
users.users.root.openssh.authorizedKeys.keys = builtins.attrValues (
|
||||
with (import "${fleetFlake}/lib"); keys.users // keys.hosts
|
||||
);
|
||||
}
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
services = {
|
||||
syncthing = {
|
||||
enable = true;
|
||||
|
@ -45,7 +46,12 @@
|
|||
kirk = "/home/${config.ccr.username}/org";
|
||||
}
|
||||
.${config.networking.hostName};
|
||||
devices = ["picard" "sisko" "kirk" "oneplus8t"];
|
||||
devices = [
|
||||
"picard"
|
||||
"sisko"
|
||||
"kirk"
|
||||
"oneplus8t"
|
||||
];
|
||||
};
|
||||
sync = {
|
||||
path =
|
||||
|
@ -55,7 +61,11 @@
|
|||
kirk = "/home/${config.ccr.username}/sync";
|
||||
}
|
||||
.${config.networking.hostName};
|
||||
devices = ["picard" "sisko" "kirk"];
|
||||
devices = [
|
||||
"picard"
|
||||
"sisko"
|
||||
"kirk"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{pkgs, ...}: {
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
services.teamviewer.enable = true;
|
||||
ccr.packages = [pkgs.teamviewer];
|
||||
ccr.packages = [ pkgs.teamviewer ];
|
||||
}
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
services.transmission = {
|
||||
enable = true;
|
||||
openRPCPort = true;
|
||||
|
@ -44,7 +45,7 @@
|
|||
"d /mnt/hd/torrent/.incomplete 770 transmission transmission"
|
||||
];
|
||||
|
||||
ccr.extraGroups = ["transmission"];
|
||||
ccr.extraGroups = [ "transmission" ];
|
||||
|
||||
environment.persistence."/persist".directories = [
|
||||
config.services.transmission.home
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
{config, ...}: {
|
||||
{ config, ... }:
|
||||
{
|
||||
programs.virt-manager.enable = true;
|
||||
virtualisation.libvirtd.enable = true;
|
||||
users.users."${config.ccr.username}".extraGroups = ["libvirtd"];
|
||||
users.users."${config.ccr.username}".extraGroups = [ "libvirtd" ];
|
||||
virtualisation.libvirtd.qemu.swtpm.enable = true;
|
||||
}
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
{
|
||||
config,
|
||||
pkgs,
|
||||
# lib,
|
||||
# fleetFlake,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
security.polkit.enable = true;
|
||||
virtualisation.libvirtd.enable = true;
|
||||
|
||||
|
@ -117,35 +117,37 @@
|
|||
# -audiodev alsa,id=snd0,out.try-poll=off -device ich9-intel-hda -device hda-output,audiodev=snd0 \
|
||||
# -device vfio-pci,host=00:02.0 \
|
||||
|
||||
systemd.services.vm-mara = let
|
||||
start-vm = pkgs.writeShellApplication {
|
||||
name = "start-vm";
|
||||
runtimeInputs = with pkgs; [qemu];
|
||||
text = ''
|
||||
[ ! -f /var/lib/vm-mara/w10.qcow2 ] && \
|
||||
qemu-img create -f qcow2 /var/lib/vm-mara/w10.qcow2 50G
|
||||
systemd.services.vm-mara =
|
||||
let
|
||||
start-vm = pkgs.writeShellApplication {
|
||||
name = "start-vm";
|
||||
runtimeInputs = with pkgs; [ qemu ];
|
||||
text = ''
|
||||
[ ! -f /var/lib/vm-mara/w10.qcow2 ] && \
|
||||
qemu-img create -f qcow2 /var/lib/vm-mara/w10.qcow2 50G
|
||||
|
||||
qemu-system-x86_64 \
|
||||
-enable-kvm \
|
||||
-cpu host,kvm=off,hv-spinlocks=819,hv-vapic=on,hv-relaxed=on,hv-vendor-id="IrisXE" \
|
||||
-smp 4 \
|
||||
-m 8192 \
|
||||
-nic user,model=virtio-net-pci,hostfwd=tcp::3389-:3389,hostfwd=tcp::47989-:47989,hostfwd=tcp::47990-:47990,hostfwd=tcp::47984-:47984,hostfwd=tcp::48010-:48010,hostfwd=udp::47998-:47988,hostfwd=udp::47999-:47999,hostfwd=udp::48000-:48000,hostfwd=udp::48002-:48002,hostfwd=udp::48003-:48003,hostfwd=udp::48004-:48004,hostfwd=udp::48005-:48005,hostfwd=udp::48006-:48006,hostfwd=udp::48007-:48007,hostfwd=udp::48008-:48008,hostfwd=udp::48009-:48009,hostfwd=udp::48010-:48010 \
|
||||
-cdrom /var/lib/vm-mara/virtio-win.iso \
|
||||
-device nec-usb-xhci,id=usb,bus=pci.0,addr=0x4 \
|
||||
-device usb-tablet \
|
||||
-vnc :0 \
|
||||
-nographic \
|
||||
-vga none \
|
||||
-drive file=/var/lib/vm-mara/w10.qcow2 \
|
||||
-device vfio-pci,host=00:02.0,addr=03.0,x-vga=on,multifunction=on,romfile=${./adls_dmc_ver2_01.bin}
|
||||
'';
|
||||
qemu-system-x86_64 \
|
||||
-enable-kvm \
|
||||
-cpu host,kvm=off,hv-spinlocks=819,hv-vapic=on,hv-relaxed=on,hv-vendor-id="IrisXE" \
|
||||
-smp 4 \
|
||||
-m 8192 \
|
||||
-nic user,model=virtio-net-pci,hostfwd=tcp::3389-:3389,hostfwd=tcp::47989-:47989,hostfwd=tcp::47990-:47990,hostfwd=tcp::47984-:47984,hostfwd=tcp::48010-:48010,hostfwd=udp::47998-:47988,hostfwd=udp::47999-:47999,hostfwd=udp::48000-:48000,hostfwd=udp::48002-:48002,hostfwd=udp::48003-:48003,hostfwd=udp::48004-:48004,hostfwd=udp::48005-:48005,hostfwd=udp::48006-:48006,hostfwd=udp::48007-:48007,hostfwd=udp::48008-:48008,hostfwd=udp::48009-:48009,hostfwd=udp::48010-:48010 \
|
||||
-cdrom /var/lib/vm-mara/virtio-win.iso \
|
||||
-device nec-usb-xhci,id=usb,bus=pci.0,addr=0x4 \
|
||||
-device usb-tablet \
|
||||
-vnc :0 \
|
||||
-nographic \
|
||||
-vga none \
|
||||
-drive file=/var/lib/vm-mara/w10.qcow2 \
|
||||
-device vfio-pci,host=00:02.0,addr=03.0,x-vga=on,multifunction=on,romfile=${./adls_dmc_ver2_01.bin}
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${start-vm}/bin/start-vm";
|
||||
};
|
||||
};
|
||||
in {
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = ["network.target"];
|
||||
serviceConfig = {
|
||||
ExecStart = "${start-vm}/bin/start-vm";
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
@ -2,9 +2,9 @@
|
|||
stdenv,
|
||||
kernel,
|
||||
fetchFromGitHub,
|
||||
runCommand,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
m = stdenv.mkDerivation rec {
|
||||
name = "i915-sriov-dkms";
|
||||
version = "4d89a1d5ba8c66308e3276c5297eda838c70cc31";
|
||||
|
@ -22,32 +22,29 @@
|
|||
export sourceRoot=$(pwd)/source
|
||||
'';
|
||||
|
||||
makeFlags =
|
||||
kernel.makeFlags
|
||||
++ [
|
||||
"-C"
|
||||
"${kernel.dev}/lib/modules/${kernel.modDirVersion}/build"
|
||||
"M=$(sourceRoot)"
|
||||
"KVER=${kernel.version}"
|
||||
];
|
||||
makeFlags = kernel.makeFlags ++ [
|
||||
"-C"
|
||||
"${kernel.dev}/lib/modules/${kernel.modDirVersion}/build"
|
||||
"M=$(sourceRoot)"
|
||||
"KVER=${kernel.version}"
|
||||
];
|
||||
|
||||
# installPhase = ''
|
||||
# install -D i915.ko $out/lib/modules/${kernel.modDirVersion}/kernel/drivers/gpu/drm/i915/i915.ko
|
||||
# '';
|
||||
|
||||
installFlags = ["INSTALL_MOD_PATH=${placeholder "out"}"];
|
||||
installFlags = [ "INSTALL_MOD_PATH=${placeholder "out"}" ];
|
||||
|
||||
installTargets = ["modules_install"];
|
||||
installTargets = [ "modules_install" ];
|
||||
|
||||
enableParallelBuilding = true;
|
||||
|
||||
# meta.priority = -10;
|
||||
};
|
||||
in
|
||||
m
|
||||
m
|
||||
# in runCommand "test" {} ''
|
||||
# # mkdir -p $out/lib/modules/6.1.30/kernel/drivers/gpu/drm/i915
|
||||
# mkdir -p $out/lib/modules/6.1.30/extra
|
||||
# cp ${m}/lib/modules/6.1.30/extra/i915.ko.xz $out/lib/modules/6.1.30/extra/foo.ko.xz
|
||||
# ''
|
||||
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
{
|
||||
pkgs,
|
||||
lib,
|
||||
fleetFlake,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
security.polkit.enable = true;
|
||||
virtualisation.libvirtd.enable = true;
|
||||
|
||||
|
@ -11,7 +11,7 @@
|
|||
2222
|
||||
];
|
||||
|
||||
imports = [../nginx-base];
|
||||
imports = [ ../nginx-base ];
|
||||
|
||||
services.nginx.virtualHosts."git.slavni.aciceri.dev" = {
|
||||
forceSSL = true;
|
||||
|
@ -21,67 +21,74 @@
|
|||
};
|
||||
};
|
||||
|
||||
systemd.services.vm-sala = let
|
||||
initial-config = fleetFlake.inputs.nixos-generators.nixosGenerate {
|
||||
system = "x86_64-linux";
|
||||
modules = [
|
||||
# fleetFlake.inputs.nixos-vscode-server.nixosModule
|
||||
({
|
||||
modulesPath,
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
# services.vscode-server = {
|
||||
# enable = true;
|
||||
# enableFHS = true;
|
||||
# };
|
||||
system.build.qcow = lib.mkForce (import "${toString modulesPath}/../lib/make-disk-image.nix" {
|
||||
inherit lib config pkgs;
|
||||
diskSize = 50 * 1024;
|
||||
format = "qcow2";
|
||||
partitionTableType = "hybrid";
|
||||
});
|
||||
services.openssh.enable = true;
|
||||
environment.systemPackages = with pkgs; [
|
||||
vim
|
||||
git
|
||||
htop
|
||||
];
|
||||
users.users.root = {
|
||||
password = "password";
|
||||
openssh.authorizedKeys.keys = [
|
||||
(import "${fleetFlake.outPath}/lib").keys.users.ccr-ssh
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7qikwR0a4LDoMQIVvtX+gyJ41OsAOWe8RcXc4ksIBP9x1nCcSrItlgC2soADB77QGIgyeyLGmnTCtMj5/s8NdREAycPeXLii1WRakbT7oZ/hTEmvAgObpadeYJn3LhaUDNtmsnAqqh2pRpCpSsAdhfIt+YyV4VgRYSfaa12Ozp/H6NI9bJMNttmG8TmY9V4zyskV9bE+up9y8Yuck2bZV/GjQe6UWgxsiC3XPSrFFGuxyaFMRGsc8h86xVwTAmwaHESEFhRvHD4EtdPNss0jqmSI6m4AoSZQ2wq7eiH8ZiYzERF0FnEFf4UsyOTM7j78bfogNLfKrdcEIPLrNNFFc3Iarfe9CJn3DdSnwwPnhFU1MBBXSbGOp1IyN3+gpjHwLMPzozlDAVqOwx6XpnpF78VpeknFBHCbkcKC/R0MXzqf900wH3i2HvfB7v9e9EUFzCQ0vUC+1Og+BFw3F5VSo0QtZyLc4BJ/akBs5mEE6TnuWQa/GhlY8Lz7wbcV1AaBOAQdx+NTbL/+Q31SJ1XsXtGtXCrwMY9noUTyVfpGVXo7Mn4HSslmeQ9SKfYKjyetkBR/1f8a47O3rCggjBy1AlfLjgbERnXy+0Ma4T8lnPZAKt3s9Ya1JupZ7SO7D5j7WfPKP+60c372/RrX1wXsxEeLvBJ0jd8GnSCXDOuvHTQ=="
|
||||
];
|
||||
};
|
||||
})
|
||||
];
|
||||
format = "qcow";
|
||||
};
|
||||
image = "${initial-config}/nixos.qcow2";
|
||||
start-vm = pkgs.writeShellApplication {
|
||||
name = "start-vm";
|
||||
runtimeInputs = with pkgs; [qemu];
|
||||
text = ''
|
||||
[ ! -f /var/lib/vm-sala/nixos.qcow2 ] && \
|
||||
install ${image} /var/lib/vm-sala
|
||||
systemd.services.vm-sala =
|
||||
let
|
||||
initial-config = fleetFlake.inputs.nixos-generators.nixosGenerate {
|
||||
system = "x86_64-linux";
|
||||
modules = [
|
||||
# fleetFlake.inputs.nixos-vscode-server.nixosModule
|
||||
(
|
||||
{
|
||||
modulesPath,
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
{
|
||||
# services.vscode-server = {
|
||||
# enable = true;
|
||||
# enableFHS = true;
|
||||
# };
|
||||
system.build.qcow = lib.mkForce (
|
||||
import "${toString modulesPath}/../lib/make-disk-image.nix" {
|
||||
inherit lib config pkgs;
|
||||
diskSize = 50 * 1024;
|
||||
format = "qcow2";
|
||||
partitionTableType = "hybrid";
|
||||
}
|
||||
);
|
||||
services.openssh.enable = true;
|
||||
environment.systemPackages = with pkgs; [
|
||||
vim
|
||||
git
|
||||
htop
|
||||
];
|
||||
users.users.root = {
|
||||
password = "password";
|
||||
openssh.authorizedKeys.keys = [
|
||||
(import "${fleetFlake.outPath}/lib").keys.users.ccr-ssh
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7qikwR0a4LDoMQIVvtX+gyJ41OsAOWe8RcXc4ksIBP9x1nCcSrItlgC2soADB77QGIgyeyLGmnTCtMj5/s8NdREAycPeXLii1WRakbT7oZ/hTEmvAgObpadeYJn3LhaUDNtmsnAqqh2pRpCpSsAdhfIt+YyV4VgRYSfaa12Ozp/H6NI9bJMNttmG8TmY9V4zyskV9bE+up9y8Yuck2bZV/GjQe6UWgxsiC3XPSrFFGuxyaFMRGsc8h86xVwTAmwaHESEFhRvHD4EtdPNss0jqmSI6m4AoSZQ2wq7eiH8ZiYzERF0FnEFf4UsyOTM7j78bfogNLfKrdcEIPLrNNFFc3Iarfe9CJn3DdSnwwPnhFU1MBBXSbGOp1IyN3+gpjHwLMPzozlDAVqOwx6XpnpF78VpeknFBHCbkcKC/R0MXzqf900wH3i2HvfB7v9e9EUFzCQ0vUC+1Og+BFw3F5VSo0QtZyLc4BJ/akBs5mEE6TnuWQa/GhlY8Lz7wbcV1AaBOAQdx+NTbL/+Q31SJ1XsXtGtXCrwMY9noUTyVfpGVXo7Mn4HSslmeQ9SKfYKjyetkBR/1f8a47O3rCggjBy1AlfLjgbERnXy+0Ma4T8lnPZAKt3s9Ya1JupZ7SO7D5j7WfPKP+60c372/RrX1wXsxEeLvBJ0jd8GnSCXDOuvHTQ=="
|
||||
];
|
||||
};
|
||||
}
|
||||
)
|
||||
];
|
||||
format = "qcow";
|
||||
};
|
||||
image = "${initial-config}/nixos.qcow2";
|
||||
start-vm = pkgs.writeShellApplication {
|
||||
name = "start-vm";
|
||||
runtimeInputs = with pkgs; [ qemu ];
|
||||
text = ''
|
||||
[ ! -f /var/lib/vm-sala/nixos.qcow2 ] && \
|
||||
install ${image} /var/lib/vm-sala
|
||||
|
||||
qemu-system-x86_64 \
|
||||
-enable-kvm \
|
||||
-cpu host \
|
||||
-smp 2 \
|
||||
-m 4096 \
|
||||
-nic user,model=virtio-net-pci,hostfwd=tcp::2222-:22,hostfwd=tcp::13000-:3000 \
|
||||
-nographic \
|
||||
-drive file=/var/lib/vm-sala/nixos.qcow2
|
||||
'';
|
||||
qemu-system-x86_64 \
|
||||
-enable-kvm \
|
||||
-cpu host \
|
||||
-smp 2 \
|
||||
-m 4096 \
|
||||
-nic user,model=virtio-net-pci,hostfwd=tcp::2222-:22,hostfwd=tcp::13000-:3000 \
|
||||
-nographic \
|
||||
-drive file=/var/lib/vm-sala/nixos.qcow2
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${start-vm}/bin/start-vm";
|
||||
};
|
||||
};
|
||||
in {
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = ["network.target"];
|
||||
serviceConfig = {
|
||||
ExecStart = "${start-vm}/bin/start-vm";
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{pkgs, ...}: {
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
virtualisation.libvirtd.enable = true;
|
||||
|
||||
networking.firewall.interfaces."wg0" = {
|
||||
|
@ -10,25 +11,27 @@
|
|||
];
|
||||
};
|
||||
|
||||
systemd.services.vm-ubuntu = let
|
||||
start-vm = pkgs.writeShellApplication {
|
||||
name = "start-vm";
|
||||
runtimeInputs = with pkgs; [qemu];
|
||||
text = ''
|
||||
qemu-system-x86_64 \
|
||||
-enable-kvm \
|
||||
-cpu host,kvm=on,hv-vendor_id="GenuineIntel" \
|
||||
-smp 4 \
|
||||
-m 8192 \
|
||||
-nic user,model=virtio-net-pci,hostfwd=tcp::60022-:22,hostfwd=tcp::8545-:8545 \
|
||||
-drive file=/var/lib/vm-ubuntu/ubuntu.qcow2
|
||||
'';
|
||||
systemd.services.vm-ubuntu =
|
||||
let
|
||||
start-vm = pkgs.writeShellApplication {
|
||||
name = "start-vm";
|
||||
runtimeInputs = with pkgs; [ qemu ];
|
||||
text = ''
|
||||
qemu-system-x86_64 \
|
||||
-enable-kvm \
|
||||
-cpu host,kvm=on,hv-vendor_id="GenuineIntel" \
|
||||
-smp 4 \
|
||||
-m 8192 \
|
||||
-nic user,model=virtio-net-pci,hostfwd=tcp::60022-:22,hostfwd=tcp::8545-:8545 \
|
||||
-drive file=/var/lib/vm-ubuntu/ubuntu.qcow2
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${start-vm}/bin/start-vm";
|
||||
};
|
||||
};
|
||||
in {
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = ["network.target"];
|
||||
serviceConfig = {
|
||||
ExecStart = "${start-vm}/bin/start-vm";
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
@ -2,16 +2,17 @@
|
|||
config,
|
||||
vpn,
|
||||
...
|
||||
}: {
|
||||
imports = [../wireguard-common];
|
||||
}:
|
||||
{
|
||||
imports = [ ../wireguard-common ];
|
||||
|
||||
networking.wireguard.interfaces.wg0 = {
|
||||
mtu = 1200;
|
||||
ips = ["${vpn.${config.networking.hostName}.ip}/32"];
|
||||
ips = [ "${vpn.${config.networking.hostName}.ip}/32" ];
|
||||
peers = [
|
||||
{
|
||||
publicKey = vpn.sisko.publicKey;
|
||||
allowedIPs = ["10.100.0.0/24"];
|
||||
allowedIPs = [ "10.100.0.0/24" ];
|
||||
endpoint = "vpn.aciceri.dev:51820";
|
||||
persistentKeepalive = 25;
|
||||
}
|
||||
|
|
|
@ -3,7 +3,8 @@
|
|||
config,
|
||||
vpn,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
networking.firewall.interfaces.wg0 = {
|
||||
allowedUDPPortRanges = [
|
||||
{
|
||||
|
@ -24,11 +25,8 @@
|
|||
listenPort = 51820;
|
||||
};
|
||||
|
||||
networking.hosts =
|
||||
lib.mapAttrs'
|
||||
(hostname: vpnConfig: {
|
||||
name = vpnConfig.ip;
|
||||
value = ["${hostname}.fleet"];
|
||||
})
|
||||
vpn;
|
||||
networking.hosts = lib.mapAttrs' (hostname: vpnConfig: {
|
||||
name = vpnConfig.ip;
|
||||
value = [ "${hostname}.fleet" ];
|
||||
}) vpn;
|
||||
}
|
||||
|
|
|
@ -3,20 +3,19 @@
|
|||
lib,
|
||||
vpn,
|
||||
...
|
||||
}: {
|
||||
imports = [../wireguard-common];
|
||||
}:
|
||||
{
|
||||
imports = [ ../wireguard-common ];
|
||||
|
||||
networking.nat.enable = true;
|
||||
|
||||
networking.firewall.allowedUDPPorts = [config.networking.wireguard.interfaces.wg0.listenPort]; # FIXME move this to wireguard-server
|
||||
networking.firewall.allowedUDPPorts = [ config.networking.wireguard.interfaces.wg0.listenPort ]; # FIXME move this to wireguard-server
|
||||
|
||||
networking.wireguard.interfaces.wg0 = {
|
||||
ips = ["${vpn.${config.networking.hostName}.ip}/24"];
|
||||
peers =
|
||||
lib.mapAttrsToList (hostname: vpnConfig: {
|
||||
publicKey = vpnConfig.publicKey;
|
||||
allowedIPs = ["${vpnConfig.ip}/32"];
|
||||
})
|
||||
vpn;
|
||||
ips = [ "${vpn.${config.networking.hostName}.ip}/24" ];
|
||||
peers = lib.mapAttrsToList (_hostname: vpnConfig: {
|
||||
publicKey = vpnConfig.publicKey;
|
||||
allowedIPs = [ "${vpnConfig.ip}/32" ];
|
||||
}) vpn;
|
||||
};
|
||||
}
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
{pkgs, ...}: {
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
xdg = {
|
||||
portal = {
|
||||
enable = true;
|
||||
|
|