[pve-devel] [RFC pve-container 1/3] Insert new options in the LXC config for the PVE Replica.
Thomas Lamprecht
t.lamprecht at proxmox.com
Wed Apr 5 12:30:03 CEST 2017
On 04/03/2017 04:53 PM, Wolfgang Link wrote:
> ---
> src/PVE/LXC/Config.pm | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 74 insertions(+)
>
> diff --git a/src/PVE/LXC/Config.pm b/src/PVE/LXC/Config.pm
> index 05cd970..a4a8a4c 100644
> --- a/src/PVE/LXC/Config.pm
> +++ b/src/PVE/LXC/Config.pm
> @@ -8,6 +8,7 @@ use PVE::Cluster qw(cfs_register_file);
> use PVE::INotify;
> use PVE::JSONSchema qw(get_standard_option);
> use PVE::Tools;
> +use PVE::ReplicaTools;
>
> use base qw(PVE::AbstractConfig);
>
> @@ -245,6 +246,12 @@ my $rootfs_desc = {
> description => 'Enable user quotas inside the container (not supported with zfs subvolumes)',
> optional => 1,
> },
> + rep => {
> + type => 'boolean',
> + description => 'Include this volume in an asynchronous replica job when it runs.',
> + optional => 1,
> + default => 1,
> + },
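Just to illustrate how this per-volume flag would be used, a mount
point entry would presumably look something like this (values made up):

    mp0: local-zfs:subvol-101-disk-1,mp=/mnt/data,rep=0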
> shared => {
> type => 'boolean',
> description => 'Mark this non-volume mount point as available on multiple nodes (see \'nodes\')',
> @@ -384,6 +391,31 @@ my $confdesc = {
> type => 'integer',
> minimum => 0,
> },
> + replica => {
> + optional => 1,
> + description => "Enable asyncron replica for local storage.",
> + type => 'boolean',
> + default => 0,
> + },
> + replimit => {
> + optional => 1,
> + description => "Asysncron replica speed limit.",
> + type => 'integer',
> + minimum => 0,
> + },
> + reptarget => {
> + optional => 1,
> + description => "Asysncron replica taget node.",
> + type => 'string',
> + },
> + repinterval => {
> + optional => 1,
> + description => "Asyncron replica interval [1-1440] in minutes default 15",
> + type => 'integer',
> + minimum => 1,
> + maximum => 1440,
> + default => 15,
> + },
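Taken together, a container config using these options would
presumably end up looking something like this (values made up):

    replica: 1
    reptarget: node2
    repinterval: 30
    replimit: 51200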
> cmode => {
> optional => 1,
> description => "Console mode. By default, the console command tries to open a connection to one of the available tty devices. By setting cmode to 'console' it tries to attach to /dev/console instead. If you set cmode to 'shell', it simply invokes a shell inside the container (no login).",
> @@ -824,6 +856,22 @@ sub update_pct_config {
> }
> } elsif ($opt eq 'unprivileged') {
> die "unable to delete read-only option: '$opt'\n";
> + } elsif ($opt eq "replica" || $opt eq "reptarget") {
> + delete $conf->{$opt};
> + delete $conf->{replica} if $opt eq "reptarget";
> +
> + # delete replica from snapshots to prevent uncontrolled behavior.
> + foreach my $snap (keys %{$conf->{snapshots}}) {
> + delete $conf->{snapshots}->{$snap}->{replica};
> + }
> +
> + # job_remove requires an updated lxc conf
> + PVE::LXC::Config->write_config($vmid, $conf);
> + PVE::ReplicaTools::job_remove($vmid);
> + next;
> + } elsif ($opt eq "repinterval" || $opt eq "replimit") {
> + delete $conf->{$opt};
> + PVE::LXC::Config->write_config($vmid, $conf);
> } else {
> die "implement me (delete: $opt)"
> }
> @@ -961,12 +1009,38 @@ sub update_pct_config {
> } elsif ($opt eq 'ostype') {
> next if $hotplug_error->($opt);
> $conf->{$opt} = $value;
> + } elsif ($opt eq "replica") {
> + PVE::ReplicaTools::get_syncable_guestdisks($conf, 'lxc', 1);
As far as I can tell this checks if all volumes of the CT are syncable,
but the function name really does not suggest that.
So at least a comment would be nice here, else I'd think this was left
over by mistake at first glance :)
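Maybe something like this (assuming my reading of the function is
right):

    # dies if any volume of this CT cannot be replicated
    PVE::ReplicaTools::get_syncable_guestdisks($conf, 'lxc', 1);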
> + $conf->{$opt} = $param->{$opt};
> + } elsif ($opt eq "repinterval" || $opt eq "replimit") {
> + $conf->{$opt} = $param->{$opt};
> + } elsif ($opt eq "reptarget" ) {
> + die "Node: $param->{$opt} does not exists in Cluster.\n"
> + if !PVE::Cluster::check_node_exists($param->{$opt});
> + $conf->{$opt} = $param->{$opt};
> } else {
> die "implement me: $opt";
> }
> PVE::LXC::Config->write_config($vmid, $conf) if $running;
> }
>
> + if ($param->{replica}) {
> +
same as in the qemu implementation:
if (defined($param->{replica})) {
...
> + eval {
> + if ($param->{replica} =~ m/^(?i:0|no|off|false)$/) {
if ($param->{replica}) {
...
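i.e., with both suggestions taken together the block could become
something like this sketch (note the branches swap, as the boolean is
now used directly instead of matching the falsy values):

    if (defined($param->{replica})) {
        eval {
            if ($param->{replica}) {
                PVE::ReplicaTools::job_enable($vmid);
            } else {
                PVE::ReplicaTools::job_disable($vmid);
            }
        };
        ...
    }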
> + PVE::ReplicaTools::job_disable($vmid);
> + } else {
> + PVE::ReplicaTools::job_enable($vmid);
> + }
> + };
> + if (my $err = $@) {
> + $conf->{replica} = '0';
> + PVE::LXC::Config->write_config($vmid, $conf);
Hmm, if a job disable fails, you still mark it as disabled? Can a
disable actually fail?
You can only get a lock timeout in job_disable, but is marking the
replica as disabled on this failure the best way to handle this?
The job is still active after all?
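If the rollback is only meant for a failed enable, a sketch like this
might be cleaner, keeping the flag set when the disable did not go
through (so config and actual job state stay consistent):

    if (my $err = $@) {
        # only roll back the config flag if enabling failed; a failed
        # disable leaves the job active, so the flag should stay set
        if ($param->{replica}) {
            $conf->{replica} = '0';
            PVE::LXC::Config->write_config($vmid, $conf);
        }
        die $err;
    }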
> + die $err;
> + }
> +
> + }
> +
> # Apply deletions and creations of new volumes
> if (@deleted_volumes) {
> my $storage_cfg = PVE::Storage::config();