* Note that the trailing `/` and quoting are important
* This will load the `user-data`, `meta-data`, and `vendor-data` files from the directory served by the Python web server
7. After the installation is complete, the system will reboot and run cloud-init for the final portion of the initial setup. Once completed, ansible can be run against it using the `ubuntu` user and becoming root, e.g. `ansible-playbook -i hosts -u ubuntu -b ...`
+
+# LXD Cluster
+
+## Start a new cluster
+
+1. For the initial member of the cluster, set the `lxd_cluster` variable in the host variables to something similar to:
+
+```
+lxd_cluster:
+  server_name: cluster-member-name
+  enabled: true
+  member_config:
+    - entity: storage-pool
+      name: default
+      key: source
+      value: tank/lxd
+```
+
+2. Run the `site.yml` playbook on the node
+3. Verify that the storage pool is configured:
+
+```
+$ lxc storage list
+| name | driver | state |
+| default | zfs | created |
+```
+
+ * If not present, create it on the necessary targets:
+
+```
+$ lxc storage create default zfs source=tank/lxd --target=cluster-member-name
+# Repeat for any other members
+# Then, on the member itself
+$ lxc storage create default zfs
+# The storage listed should not be in the 'pending' state
+```
+
+4. Create a metrics certificate pair for the cluster, or use an existing one
+
+```
+openssl req -x509 -newkey ec -pkeyopt ec_paramgen_curve:secp384r1 -sha384 -keyout metrics.key -nodes -out metrics.crt -days 3650 -subj "/CN=metrics.local"
+lxc config trust add metrics.crt --type=metrics
+```
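+
+The certificate pair can then be used to scrape a member's metrics endpoint directly. A quick check (assuming the member listens on port 8443, as set by `core.https_address`):
+
+```
+curl -k --cert metrics.crt --key metrics.key https://cluster-member-name:8443/1.0/metrics
+```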
+
+## Adding a new host
+
+1. Generate a token for the new member: `lxc cluster add member-host-name`
+2. In the member's host_vars file, set the following keys:
+ * `lxd_cluster_ip`: The IP address on which the server will listen
+ * `lxd_cluster`: In a fashion similar to the following entry
+```
+lxd_cluster:
+  enabled: true
+  server_address: 172.18.0.192
+  cluster_token: 'xxx'
+  member_config:
+    - entity: storage-pool
+      name: default
+      key: source
+      value: tank/lxd
+```
+ * The `cluster_token` does not need to be kept in git after the playbook's first run
+3. Assuming the member is in the `hosts` group of the inventory, run the `site.yml` playbook.
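+4. Verify that the new member has joined the cluster:
+
+```
+$ lxc cluster list
+# The new member should be listed as online and not in a 'pending' state
+```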
+
+## Managing instances
+
+Local requirements:
+
+ * python3, python3-dnspython, samba-tool, kinit
+
+To automatically provision instances, perform certain operations, and update DNS entries:
+
+1. Update `vars/ci-instances.yml`
+2. Open a kerberos ticket with `kinit`
+3. Run the playbook, e.g. `ansible-playbook -l ci-host-XX.internal.efficios.com playbooks/ci-instances.yml`
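+
+The playbook defines `skip_lxd`, `skip_dns`, and `skip_jenkins` variables (all `false` by default); these can be overridden to run only part of the provisioning, e.g. to refresh the DNS records and Jenkins nodes without touching the instances themselves:
+
+```
+ansible-playbook -l ci-host-XX.internal.efficios.com playbooks/ci-instances.yml -e skip_lxd=true
+```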
# some basic default values...
-#inventory = /etc/ansible/hosts
+inventory = ./hosts
#library = /usr/share/my_modules/
#module_utils = /usr/share/my_module_utils/
#remote_tmp = ~/.ansible/tmp
--- /dev/null
+---
+lxd_cluster_ip: 172.18.0.190
+lxd_cluster:
+  server_name: ci-host-amd64-1a
+  enabled: true
+  member_config:
+    - entity: storage-pool
+      name: default
+      key: source
+      value: tank/lxd
+netplan_manage: true
+netplan_content: |
+  network:
+    version: 2
+    renderer: networkd
+    ethernets:
+      enp5s0f0:
+        dhcp4: no
+    bridges:
+      br102:
+        interfaces:
+          - enp5s0f0
+        accept-ra: false
+        addresses:
+          - 172.18.0.190/16
+        routes:
+          - to: default
+            via: 172.18.0.1
+        nameservers:
+          search:
+            - internal.efficios.com
+          addresses:
+            - 172.18.0.13
--- /dev/null
+---
+lxd_cluster_ip: 172.18.0.191
+lxd_cluster:
+  enabled: true
+  server_address: 172.18.0.191
+  member_config:
+    - entity: storage-pool
+      name: default
+      key: source
+      value: tank/lxd
+
+netplan_manage: true
+netplan_content: |
+  network:
+    version: 2
+    renderer: networkd
+    ethernets:
+      enp5s0f0:
+        dhcp4: no
+    bridges:
+      br102:
+        interfaces:
+          - enp5s0f0
+        accept-ra: false
+        addresses:
+          - 172.18.0.191/16
+        routes:
+          - to: default
+            via: 172.18.0.1
+        nameservers:
+          search:
+            - internal.efficios.com
+          addresses:
+            - 172.18.0.13
--- /dev/null
+---
+lxd_cluster_ip: 172.18.0.192
+lxd_cluster:
+  enabled: true
+  server_address: 172.18.0.192
+  member_config:
+    - entity: storage-pool
+      name: default
+      key: source
+      value: tank/lxd
+
+netplan_manage: true
+netplan_content: |
+  network:
+    version: 2
+    renderer: networkd
+    ethernets:
+      enp5s0f0:
+        dhcp4: no
+    bridges:
+      br102:
+        interfaces:
+          - enp5s0f0
+        accept-ra: false
+        addresses:
+          - 172.18.0.192/16
+        routes:
+          - to: default
+            via: 172.18.0.1
+        nameservers:
+          search:
+            - internal.efficios.com
+          addresses:
+            - 172.18.0.13
--- /dev/null
+---
+lxd_cluster_ip: 172.18.0.193
+lxd_cluster:
+  enabled: true
+  server_address: 172.18.0.193
+  member_config:
+    - entity: storage-pool
+      name: default
+      key: source
+      value: tank/lxd
+
+netplan_manage: true
+netplan_content: |
+  network:
+    version: 2
+    renderer: networkd
+    ethernets:
+      enp5s0f0:
+        dhcp4: no
+    bridges:
+      br102:
+        interfaces:
+          - enp5s0f0
+        accept-ra: false
+        addresses:
+          - 172.18.0.193/16
+        routes:
+          - to: default
+            via: 172.18.0.1
+        nameservers:
+          search:
+            - internal.efficios.com
+          addresses:
+            - 172.18.0.13
--- /dev/null
+---
+lxd_cluster_ip: 172.18.0.194
+lxd_cluster:
+  enabled: true
+  server_address: 172.18.0.194
+  member_config:
+    - entity: storage-pool
+      name: default
+      key: source
+      value: tank/lxd
+
+netplan_manage: true
+netplan_content: |
+  network:
+    version: 2
+    renderer: networkd
+    ethernets:
+      enp5s0f0:
+        dhcp4: no
+    bridges:
+      br102:
+        interfaces:
+          - enp5s0f0
+        accept-ra: false
+        addresses:
+          - 172.18.0.194/16
+        routes:
+          - to: default
+            via: 172.18.0.1
+        nameservers:
+          search:
+            - internal.efficios.com
+          addresses:
+            - 172.18.0.13
--- /dev/null
+---
+lxd_cluster_ip: 172.18.0.195
+lxd_cluster:
+  enabled: true
+  server_address: 172.18.0.195
+  member_config:
+    - entity: storage-pool
+      name: default
+      key: source
+      value: tank/lxd
+
+netplan_manage: true
+netplan_content: |
+  network:
+    version: 2
+    renderer: networkd
+    ethernets:
+      enp5s0f0:
+        dhcp4: no
+    bridges:
+      br102:
+        interfaces:
+          - enp5s0f0
+        accept-ra: false
+        addresses:
+          - 172.18.0.195/16
+        routes:
+          - to: default
+            via: 172.18.0.1
+        nameservers:
+          search:
+            - internal.efficios.com
+          addresses:
+            - 172.18.0.13
--- /dev/null
+---
+lxd_cluster_ip: 172.18.0.196
+lxd_cluster:
+  enabled: true
+  server_address: 172.18.0.196
+  member_config:
+    - entity: storage-pool
+      name: default
+      key: source
+      value: tank/lxd
+
+netplan_manage: true
+netplan_content: |
+  network:
+    version: 2
+    renderer: networkd
+    ethernets:
+      enp5s0f0:
+        dhcp4: no
+    bridges:
+      br102:
+        interfaces:
+          - enp5s0f0
+        accept-ra: false
+        addresses:
+          - 172.18.0.196/16
+        routes:
+          - to: default
+            via: 172.18.0.1
+        nameservers:
+          search:
+            - internal.efficios.com
+          addresses:
+            - 172.18.0.13
--- /dev/null
+---
+lxd_cluster_ip: 172.18.0.197
+lxd_cluster:
+  enabled: true
+  server_address: 172.18.0.197
+  member_config:
+    - entity: storage-pool
+      name: default
+      key: source
+      value: tank/lxd
+
+netplan_manage: true
+netplan_content: |
+  network:
+    version: 2
+    renderer: networkd
+    ethernets:
+      enp5s0f0:
+        dhcp4: no
+    bridges:
+      br102:
+        interfaces:
+          - enp5s0f0
+        accept-ra: false
+        addresses:
+          - 172.18.0.197/16
+        routes:
+          - to: default
+            via: 172.18.0.1
+        nameservers:
+          search:
+            - internal.efficios.com
+          addresses:
+            - 172.18.0.13
cloud05.internal.efficios.com
cloud06.internal.efficios.com
cloud07.internal.efficios.com
+ci-host-amd64-1a.internal.efficios.com
+ci-host-amd64-1b.internal.efficios.com
+ci-host-amd64-1c.internal.efficios.com
+ci-host-amd64-1d.internal.efficios.com
+ci-host-amd64-2a.internal.efficios.com
+ci-host-amd64-2b.internal.efficios.com
+ci-host-amd64-2c.internal.efficios.com
+ci-host-amd64-2d.internal.efficios.com
# This host is used for tests via lava and is running ephemeral installs only
#cloud08.internal.efficios.com
ci-host-win11-arm64-01.internal.efficios.com
+[lxd_cluster_ci]
+ci-host-amd64-1a.internal.efficios.com
+ci-host-amd64-1b.internal.efficios.com
+ci-host-amd64-1c.internal.efficios.com
+ci-host-amd64-1d.internal.efficios.com
+ci-host-amd64-2a.internal.efficios.com
+ci-host-amd64-2b.internal.efficios.com
+ci-host-amd64-2c.internal.efficios.com
+ci-host-amd64-2d.internal.efficios.com
+
[windows]
ci-host-win11-arm64-01.internal.efficios.com
+---
- hosts: hosts
  roles:
+    # Set up filesystem and network configuration before the other roles
+    - zfs
+    - netplan
    - common
    - libvirt
+    - lxd
--- /dev/null
+---
+- hosts: "{{lxd_host}}"
+  vars_files:
+    - ../vars/ci-instances.yml
+    - ../roles/lxd/defaults/main.yml
+  vars:
+    skip_lxd: false
+    skip_dns: false
+    skip_jenkins: false
+    jenkins_config: "~/.config/jenkins_jobs/jenkins_jobs.ini"
+    jenkins_default_credentials: "c3e4f9f2-3e89-474d-bc75-6251a13e1053"
+  tasks:
+    - name: Manage instances
+      when: not skip_lxd
+      include_tasks:
+        file: '../roles/lxd/tasks/container.yml'
+      vars:
+        object: >-
+          {{item.lxd|
+          combine({'config':{'user.network-config': lookup('ansible.builtin.template',
+          '../templates/cloud_init_netconf.j2',
+          template_vars=item.meta
+          )
+          }
+          },
+          recursive=true
+          )}}
+      with_items: "{{containers}}"
+    - name: Update DNS entries
+      delegate_to: localhost
+      when: not skip_dns
+      with_items: "{{containers}}"
+      ansible.builtin.command:
+        argv: [
+          '../scripts/update_dns_entry.py', '-n', "{{item.lxd.name}}",
+          '-z', "{{search_domain}}",
+          '-v', "{{item.meta.address}}",
+          '-s', "{{name_server}}",
+        ]
+    - name: Update Jenkins nodes
+      delegate_to: localhost
+      when: not skip_jenkins
+      with_items: "{{containers}}"
+      vars:
+        node_name: "{{item.jenkins.node_name|default(item.lxd.name)}}"
+        node_ip: "{{item.meta.address|default(None)}}"
+        node_host: "{{item.meta.jenkins.node_host|default(item.lxd.name + '.' + search_domain)}}"
+        node_label: "{{item.jenkins.label|default('')}}"
+        node_state: "{{item.jenkins.state|default('online')}}"
+        node_credentials: "{{item.jenkins.credentials|default(jenkins_default_credentials)}}"
+        node_message: "{{item.jenkins.message|default('Set offline by ansible')}}"
+      ansible.builtin.command:
+        argv: [
+          '../scripts/update_jenkins_node.py', '-n', "{{node_name}}",
+          '-c', "launcher/host={{node_ip|default(node_host)}}",
+          '-c', "label={{node_label}}",
+          '-c', "launcher/credentialsId={{node_credentials}}",
+          '-c', "launcher/sshHostKeyVerificationStrategy=hudson.plugins.sshslaves.verifiers.ManuallyTrustedKeyVerificationStrategy=class",
+          '-s', "{{node_state}}", '-m', "{{node_message}}",
+          '-f', "{{jenkins_config|expanduser}}",
+        ]
--- /dev/null
+---
+lxd_container_defaults:
+  ephemeral: false
+  profiles:
+    - ci-node
+  source:
+    type: image
+    mode: pull
+    server: https://images.linuxcontainers.org
+    protocol: simplestreams
+    alias: debian/bookworm/cloud/amd64
+  wait_for_container: true
--- /dev/null
+---
+- name: Merge defaults
+  set_fact:
+    lxd_container_config: "{{lxd_container_defaults|combine(object)}}"
+- name: Manage container
+  community.general.lxd_container: "{{lxd_container_config}}"
--- /dev/null
+---
+- name: Install LXD
+  ansible.builtin.apt:
+    name: lxd
+- name: Ensure LXD service is running
+  ansible.builtin.systemd:
+    name: lxd
+    state: started
+    enabled: true
+- name: LXD Init
+  block:
+    - ansible.builtin.file:
+        path: /etc/lxd
+        state: directory
+        owner: root
+        group: root
+        mode: '0750'
+    - ansible.builtin.template:
+        dest: /etc/lxd/lxd_init.yml
+        src: init.yaml.j2
+        owner: root
+        group: root
+        mode: '0600'
+      register: lxd_init_template
+    - ansible.builtin.shell:
+        cmd: 'lxd init --preseed=true < /etc/lxd/lxd_init.yml'
+      when: lxd_init_template.changed
--- /dev/null
+---
+config:
+  core.https_address: "{{lxd_cluster_ip|default('[::]')}}:8443"
+{% if lxd_cluster_ip|default(false) %}
+  cluster.https_address: {{lxd_cluster_ip}}:8443
+{% endif %}
+  images.auto_update_interval: "0"
+networks: []
+{% if lxd_cluster|default(false) %}
+# Storage pools are configured per member
+storage_pools: []
+{% else %}
+storage_pools:
+- config:
+    source: tank/lxd
+  description: ""
+  name: default
+  driver: zfs
+{% endif %}
+profiles:
+- config:
+    boot.autostart: "true"
+    user.vendor-data: |-
+      #cloud-config
+      manage_etc_hosts: True
+      packages:
+        - netplan.io
+        - openssh-server
+      runcmd:
+        - ['mkdir', '-p', '/root/.ssh']
+        - "echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBnCyGcahJXys7md2yb3jP8L6hLN3D72aZCzsqUrJDsC\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHVFn/ymsG8LqPvgVzyMaSVzYCVn/440ME8O6AzbZG39' > /root/.ssh/authorized_keys"
+  description: ""
+  devices:
+    eth0:
+      name: eth0
+      parent: br102
+      nictype: bridged
+      network: ''
+      type: nic
+    root:
+      path: /
+      pool: default
+      type: disk
+      size: 100GB
+  name: default
+- config:
+    boot.autostart: "true"
+    limits.cpu: "4"
+    limits.memory: "8192MB"
+    user.vendor-data: |-
+      #cloud-config
+      manage_etc_hosts: True
+      packages:
+        - netplan.io
+        - openssh-server
+      runcmd:
+        - ['mkdir', '-p', '/root/.ssh']
+        - "echo 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBnCyGcahJXys7md2yb3jP8L6hLN3D72aZCzsqUrJDsC\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHVFn/ymsG8LqPvgVzyMaSVzYCVn/440ME8O6AzbZG39' > /root/.ssh/authorized_keys"
+  description: "CI node"
+  devices:
+    eth0:
+      name: eth0
+      parent: br102
+      nictype: bridged
+      network: ''
+      type: nic
+    root:
+      path: /
+      pool: default
+      type: disk
+      size: 80GB
+  name: ci-node
+- config:
+    boot.autostart: "true"
+    limits.cpu: "4"
+    limits.memory: "8192MB"
+  description: "CI root node (VM)"
+  devices:
+    eth0:
+      name: eth0
+      parent: br102
+      nictype: bridged
+      network: ''
+      type: nic
+    root:
+      path: /
+      pool: default
+      type: disk
+      size: 80GB
+  name: ci-rootnode
+projects: []
+cluster: {{lxd_cluster|default(null)}}
--- /dev/null
+---
+netplan_manage: false
+netplan_content: ''
+netplan_apply: true
--- /dev/null
+---
+- when: netplan_manage
+  block:
+    - name: Install netplan
+      ansible.builtin.apt:
+        name: netplan.io
+    - name: Update netplan default.yaml
+      ansible.builtin.copy:
+        content: "{{netplan_content}}"
+        dest: /etc/netplan/default.yaml
+        owner: root
+        group: root
+        mode: '0640'
+      register: netplan_default_yaml
+    - name: Apply netplan
+      when: netplan_default_yaml.changed and netplan_apply
+      ansible.builtin.command:
+        argv: ['netplan', 'apply']
--- /dev/null
+---
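+# Maximum size of the ZFS ARC, in bytes (34359738368 = 32 GiB)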
+zfs_arc_max: 34359738368
--- /dev/null
+---
+- name: Install ZFS on Debian
+  when: ansible_distribution == 'Debian'
+  apt:
+    name: zfs-dkms
+- name: Set ZFS module options
+  community.general.modprobe:
+    name: zfs
+    state: present
+    params: "zfs_arc_max={{zfs_arc_max}}"
+- name: Set default compression
+  community.general.zfs:
+    name: "tank"
+    state: present
+    extra_zfs_properties:
+      compression: zstd
+- name: Create datasets
+  community.general.zfs:
+    name: "tank/{{item}}"
+    state: present
+    extra_zfs_properties:
+      mountpoint: 'none'
+  with_items:
+    - libvirt
+    - lxd
--- /dev/null
+#!/usr/bin/env python3
+#
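+# Create or update a DNS A entry and its reverse PTR record via samba-tool.
+# Example usage (hypothetical values; mirrors the invocation in playbooks/ci-instances.yml):
+#   ./update_dns_entry.py -s smb-adc02.internal.efficios.com -z internal.efficios.com \
+#       -n ci-node-example -v 192.168.1.2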
+
+import argparse
+import ipaddress
+import subprocess
+
+import dns.message
+import dns.query
+import dns.resolver
+
+def get_argument_parser():
+    parser = argparse.ArgumentParser(
+        prog='update_dns_entry.py',
+        description='Create or update a DNS entry (and its reverse PTR record) using samba-tool'
+    )
+    parser.add_argument(
+        '-s', '--server', default=None, required=True,
+        help='Server for DNS updates'
+    )
+    parser.add_argument(
+        '-u', '--user', default=None,
+        help='The user to use with samba-tool'
+    )
+    parser.add_argument(
+        '-z', '--zone', required=True,
+        help='The zone in which to update the entry'
+    )
+    parser.add_argument(
+        '-n', '--name', required=True,
+        help='DNS entry name'
+    )
+    parser.add_argument(
+        '-v', '--value', required=True,
+        help='DNS entry value'
+    )
+    parser.add_argument(
+        '-t', '--type', default='A',
+        help='Entry type'
+    )
+    return parser
+
+
+def update_dns_entry(server, zone, name, entry_type, value, user=None, with_reverse=True):
+    if entry_type == "A":
+        assert(ipaddress.ip_address(value))
+    try:
+        server_ip = str(ipaddress.ip_address(server))
+    except ValueError:
+        server_ip = dns.resolver.resolve(server)[0].to_text()
+
+    commands = []
+    # Verify existing entry
+    query = dns.message.make_query('.'.join([name, zone]), entry_type)
+    record = dns.query.udp(query, server_ip)
+    if len(record.answer) == 0:
+        # Create
+        argv = ['samba-tool', 'dns', 'add', server, zone, name, entry_type, value]
+        if user is not None:
+            argv += ['-U', user]
+        commands.append(argv)
+    else:
+        assert(len(record.answer) == 1)
+        # Check validity
+        existing = (record.answer)[0][0].to_text()
+        if existing != value:
+            # Update
+            argv = ['samba-tool', 'dns', 'update', server, zone, name, entry_type, existing, value]
+            if user is not None:
+                argv += ['-U', user]
+            commands.append(argv)
+
+    # Check reverse
+    if with_reverse and entry_type == 'A':
+        rname, rzone = ipaddress.ip_address(value).reverse_pointer.split('.', 1)
+        rvalue = '.'.join([name, zone]) + '.'
+        rtype = 'PTR'
+        query = dns.message.make_query(ipaddress.ip_address(value).reverse_pointer, rtype)
+        record = dns.query.udp(query, server_ip)
+        if len(record.answer) == 0:
+            argv = ['samba-tool', 'dns', 'add', server, rzone, rname, rtype, rvalue]
+            if user is not None:
+                argv += ['-U', user]
+            commands.append(argv)
+        else:
+            assert(len(record.answer) == 1)
+            existing = (record.answer)[0][0].to_text()
+            # Compare against the expected PTR value (the FQDN), not the address
+            if existing != rvalue:
+                argv = ['samba-tool', 'dns', 'update', server, rzone, rname, rtype, existing, rvalue]
+                if user is not None:
+                    argv += ['-U', user]
+                commands.append(argv)
+
+    # Run commands
+    for command in commands:
+        subprocess.run(command, check=True)
+
+
+if __name__ == '__main__':
+    parser = get_argument_parser()
+    args = parser.parse_args()
+    update_dns_entry(args.server, args.zone, args.name, args.type, args.value, user=args.user)
--- /dev/null
+#!/usr/bin/python3
+
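+# Create, update, or delete Jenkins agent nodes.
+# Example usage (hypothetical node name; mirrors the invocation in playbooks/ci-instances.yml):
+#   ./update_jenkins_node.py -n ci-node-example -c 'label=deb12-amd64 deb12' \
+#       -s online -f ~/.config/jenkins_jobs/jenkins_jobs.ini
+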
+import argparse
+import configparser
+import sys
+import xml.etree.ElementTree
+
+import jenkins
+
+def get_argument_parser():
+    parser = argparse.ArgumentParser(
+        prog='update_jenkins_node.py',
+        description='Create, update, or delete Jenkins nodes'
+    )
+    parser.add_argument(
+        '-u', '--url', default=None,
+        help='Jenkins server URL including protocol'
+    )
+    parser.add_argument(
+        '--user', default=None,
+        help='Jenkins username'
+    )
+    parser.add_argument(
+        '--password', default=None,
+        help='Jenkins password'
+    )
+    parser.add_argument(
+        '-n', '--node', default=None, required=True,
+        help='The name of the node to manage in Jenkins'
+    )
+    parser.add_argument(
+        '-c', '--node-config', default=[], action='append',
+        help='An equals-separated set path=value[=attrib]. When attrib is not set, text is assumed'
+    )
+    parser.add_argument(
+        '-f', '--config-file', default=None, type=argparse.FileType('r'),
+        help='An INI config file as used by jenkins_jobs'
+    )
+    parser.add_argument(
+        '-s', '--state', default='online',
+        choices=['online', 'offline', 'absent'],
+        help='The state of the Jenkins node'
+    )
+    parser.add_argument(
+        '-m', '--message', default='',
+        help='A message to set for the offline reason of a node'
+    )
+    return parser
+
+
+def manage_node(url, user, password, node, state, offline_message='', config={}):
+    server = jenkins.Jenkins(url, username=user, password=password)
+    exists = server.node_exists(node)
+    node_info = {}
+    changed = False
+    if exists and state == 'absent':
+        server.delete_node(node)
+        changed = True
+    if not exists and state != 'absent':
+        server.create_node(node, numExecutors=1, remoteFS='/home/jenkins',
+                           launcher=jenkins.LAUNCHER_SSH)
+        changed = True
+    if state != 'absent':
+        # Check configuration
+        updated = False
+        node_config = xml.etree.ElementTree.fromstring(server.get_node_config(node))
+        for key, value in config.items():
+            element = node_config.find(key)
+            new_element = None
+            current_key = key
+            while element is None:
+                # Create any missing ancestors of the configuration path,
+                # working from the leaf back towards the root
+                head = current_key.rsplit('/', 1)[0] if '/' in current_key else None
+                tail = current_key.rsplit('/', 1)[1] if '/' in current_key else current_key
+                e = xml.etree.ElementTree.Element(tail)
+                if new_element is not None:
+                    e.append(new_element)
+                    new_element = None
+                if head is None:
+                    node_config.append(e)
+                    element = node_config.find(key)
+                else:
+                    parent = node_config.find(head)
+                    if parent is not None:
+                        parent.append(e)
+                        element = node_config.find(key)
+                    else:
+                        new_element = e
+                        current_key = head
+                        continue
+
+            if value['attrib'] is None:
+                if element.text != value['value']:
+                    updated = True
+                    element.text = value['value']
+            else:
+                try:
+                    if element.attrib[value['attrib']] != value['value']:
+                        updated = True
+                        element.attrib[value['attrib']] = value['value']
+                except KeyError:
+                    element.attrib[value['attrib']] = value['value']
+                    updated = True
+        if updated:
+            server.reconfig_node(
+                node,
+                xml.etree.ElementTree.tostring(
+                    node_config,
+                    xml_declaration=True,
+                    encoding='unicode'
+                )
+            )
+            changed = True
+        # Online/offline
+        node_info = server.get_node_info(node)
+        if node_info['offline'] and state == 'online':
+            server.enable_node(node)
+            changed = True
+        if not node_info['offline'] and state == 'offline':
+            server.disable_node(node, offline_message)
+            changed = True
+    return changed
+
+
+if __name__ == '__main__':
+    parser = get_argument_parser()
+    args = parser.parse_args()
+    if args.config_file is not None:
+        config = configparser.ConfigParser()
+        config.read_file(args.config_file)
+        if 'jenkins' not in config.sections():
+            print("[jenkins] section not found")
+            sys.exit(1)
+        if args.url is None:
+            args.url = config.get('jenkins', 'url')
+        if args.user is None:
+            args.user = config['jenkins']['user']
+        if args.password is None:
+            args.password = config['jenkins']['password']
+    assert(args.user is not None)
+    assert(args.url is not None)
+    assert(args.password is not None)
+    node_config = {}
+    for entry in args.node_config:
+        key, value = entry.split('=', 1)
+        node_config[key] = {
+            'attrib': value.split('=', 1)[1] if '=' in value else None,
+            'value': value.split('=', 1)[0] if '=' in value else value,
+        }
+    print(node_config)
+    manage_node(
+        args.url, args.user, args.password, args.node, args.state,
+        args.message, node_config
+    )
--- /dev/null
+network:
+  version: 2
+  ethernets:
+    eth0:
+      addresses:
+        - {{address}}/16
+      gateway4: 172.18.0.1
+      nameservers:
+        search:
+          - 'internal.efficios.com'
+        addresses:
+          - '172.18.0.13'
--- /dev/null
+---
+# The search domain for instance names
+search_domain: internal.efficios.com
+# Which DNS server to use for checks + updates
+name_server: smb-adc02.internal.efficios.com
+# The host to use for delegating lxd commands
+lxd_host: ci-host-amd64-1a.internal.efficios.com
+
+# @see https://docs.ansible.com/ansible/latest/collections/community/general/lxd_container_module.html#ansible-collections-community-general-lxd-container-module
+# @example a container instance with the default image (deb12 amd64)
+# - meta:
+#     address: 192.168.1.2
+#   lxd:
+#     name: ci-node-example
+#
+# The meta.address is used to fill in the default lxd.config['user.network-config'] value
+# templated from the cloud_init_netconf.j2 template. Note that this does not change the
+# configuration inside already deployed instances. The merged value of lxd is passed
+# as arguments to the community.general.lxd_container task.
+#
+# When a container/vm is created, the target within the cluster is
+# determined by the allocation strategy configured for the LXD cluster. The
+# default is to allocate the instance to the cluster member with the fewest
+# instances (resource usage not considered).
+#
+# Once the instances are provisioned, meta.address and lxd.name are used to update the
+# DNS records in Samba. @see scripts/update_dns_entry.py.
+#
+# @example a container created on a specific cluster member
+# - meta:
+#     address: 192.168.1.2
+#   lxd:
+#     name: ci-node-example
+#     # This only affects creation, the container is not moved if it
+#     # already exists in the cluster.
+#     target: ci-host-amd64-1a
+#
+# @example a virtual machine with the default image
+# - meta:
+#     address: 192.168.1.2
+#   lxd:
+#     name: ci-rootnode-example
+#     type: virtual-machine
+#     profiles:
+#       - ci-rootnode
+#
+# @example a VM with an ISO attached as a cdrom
+# - meta:
+#     address: 192.168.1.2
+#   lxd:
+#     name: ci-rootnode-example
+#     type: virtual-machine
+#     profiles:
+#       - ci-rootnode
+#     # If there are configuration errors, sometimes the state
+#     # will need to be set to stopped to apply any differences.
+#     # Some values, eg. raw.apparmor, cannot be changed while the
+#     # VM or container is online. Note that the image needs to
+#     # be readable as the user 'nobody'.
+#     # state: stopped
+#     config:
+#       raw.qemu: '-drive file=/path/to/image.iso,media=cdrom'
+#       raw.apparmor: '/path/to/image.iso rk,'
+#
+# Console access, assuming the remote 'ci' is connected to the LXD cluster:
+# lxc console ci:ci-rootnode-example --type vga
+# Text console (may not have output depending on boot settings)
+# lxc console ci:ci-rootnode-example
+#
+containers:
+  - meta:
+      address: 172.18.16.1
+    lxd:
+      name: ci-node-deb12-amd64-01
+    jenkins:
+      label: 'deb12-amd64 deb12'
+  - meta:
+      address: 172.18.16.2
+    lxd:
+      name: ci-node-deb12-amd64-02
+    jenkins:
+      label: 'deb12-amd64 deb12'
+  - meta:
+      address: 172.18.16.3
+    lxd:
+      name: ci-node-deb12-amd64-03
+    jenkins:
+      label: 'deb12-amd64 deb12'
+  - meta:
+      address: 172.18.16.4
+    lxd:
+      name: ci-node-deb12-amd64-04
+    jenkins:
+      label: 'deb12-amd64 deb12'
+  - meta:
+      address: 172.18.16.5
+    lxd:
+      name: ci-node-deb12-amd64-05
+    jenkins:
+      label: 'deb12-amd64 deb12'
+  - meta:
+      address: 172.18.16.6
+    lxd:
+      name: ci-node-deb12-amd64-06
+    jenkins:
+      label: 'deb12-amd64 deb12'
+  - meta:
+      address: 172.18.16.7
+    lxd:
+      name: ci-node-deb12-amd64-07
+    jenkins:
+      label: 'deb12-amd64 deb12'
+  - meta:
+      address: 172.18.16.8
+    lxd:
+      name: ci-node-deb12-amd64-08
+    jenkins:
+      label: 'deb12-amd64 deb12'