[pve-devel] [PATCH access-control 1/1] add realmsync plugin for jobs and CRUD api for domainsync-jobs
Dominik Csapak
d.csapak at proxmox.com
Tue Nov 8 09:20:10 CET 2022
On 11/7/22 18:05, Thomas Lamprecht wrote:
[snip]
>> + returns => {
>> + type => 'object',
>> + },
>> + code => sub {
>> + my ($param) = @_;
>> +
>> + my $jobs = cfs_read_file('jobs.cfg');
>> + my $job = $jobs->{ids}->{$param->{id}};
>
> code style nit: please avoid using a hash access inside a hash-key subscript.
>
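ack, i.e. pulling the id out into a variable first (untested sketch):

    my $id = $param->{id};
    my $job = $jobs->{ids}->{$id};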
>> + return $job if $job && $job->{type} eq 'realmsync';
>
> is this outdated due to being from April? or do we really share the
> ID namespace between all plugin types?
that's normal for section configs, but we usually don't notice it in PVE
since we normally only have a single endpoint for listing all objects,
not one per 'type'
(e.g. you can't have 2 storages with the same ID but different types
either)
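e.g. jobs.cfg is a single section config, so all job types land in one
'ids' hash keyed only by the ID, which is why the explicit type check is
needed after the lookup (illustrative sketch, not real config content):

    my $jobs = cfs_read_file('jobs.cfg');
    # $jobs->{ids} = {
    #     'backup-daily' => { type => 'vzdump',    ... },
    #     'sync-ldap'    => { type => 'realmsync', ... },
    # };
    my $job = $jobs->{ids}->{$id};
    return $job if $job && $job->{type} eq 'realmsync';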
>
>> +
>> + raise_param_exc({ id => "No such job '$param->{id}'" });
>
>
>
>> +
>> + }});
>> +
>> +__PACKAGE__->register_method({
>> + name => 'create_job',
>> + path => '{id}',
>> + method => 'POST',
>> + protected => 1,
>> + description => "Create new domain-sync job.",
>> + permissions => {
>> + description => "'Realm.AllocateUser' on '/access/realm/<realm>' and "
>> + ." 'User.Modify' permissions to '/access/groups/'.",
>
> results in double whitespace at "and 'User.Modify'"
>
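will fix; the leading space in the second literal has to go since the first
one already ends with "and ", i.e. roughly:

    description => "'Realm.AllocateUser' on '/access/realm/<realm>' and "
        ."'User.Modify' permissions to '/access/groups/'.",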
>> + check => [ 'and',
>> + ['perm', '/access/realm/{realm}', ['Realm.AllocateUser']],
>> + ['perm', '/access/groups', ['User.Modify']],
>
> hmm, is this the same as for a manual sync? I guess it's OK then, just
> seems a bit lax.
>
that was the idea, but I'll check again
>> + ],
>> + },
>> + parameters => PVE::Jobs::RealmSync->createSchema(),
>> + returns => { type => 'null' },
>> + code => sub {
>> + my ($param) = @_;
>> +
>> + my $id = extract_param($param, 'id');
>> +
>> + cfs_lock_file('jobs.cfg', undef, sub {
>> + my $data = cfs_read_file('jobs.cfg');
>> +
>> + die "Job '$id' already exists\n"
>> + if $data->{ids}->{$id};
>> +
>> + my $plugin = PVE::Jobs::Plugin->lookup('realmsync');
>> + my $opts = $plugin->check_config($id, $param, 1, 1);
>> +
>> + $data->{ids}->{$id} = $opts;
>> +
>> + PVE::Jobs::create_job($id, 'realmsync');
>> +
>> + cfs_write_file('jobs.cfg', $data);
>> + });
>> + die "$@" if ($@);
>> +
>> + return undef;
>> + }});
>> +
>> +__PACKAGE__->register_method({
>> + name => 'update_job',
>> + path => '{id}',
>> + method => 'PUT',
>> + protected => 1,
>> + description => "Update domain-sync job definition.",
>> + permissions => {
>> + description => "'Realm.AllocateUser' on '/access/realm/<realm>' and "
>> + ." 'User.Modify' permissions to '/access/groups/'.",
>> + check => [ 'and',
>> + ['perm', '/access/realm/{realm}', ['Realm.AllocateUser']],
>> + ['perm', '/access/groups', ['User.Modify']],
>> + ],
>> + },
>> + parameters => PVE::Jobs::RealmSync->updateSchema(),
>> + returns => { type => 'null' },
>> + code => sub {
>> + my ($param) = @_;
>> +
>> + my $id = extract_param($param, 'id');
>> + my $delete = extract_param($param, 'delete');
>> + if ($delete) {
>> + $delete = [PVE::Tools::split_list($delete)];
>> + }
>> +
>> + my $update_job = sub {
>> + my $jobs = cfs_read_file('jobs.cfg');
>> +
>> + die "no options specified\n" if !scalar(keys %$param);
>> +
>> + my $plugin = PVE::Jobs::Plugin->lookup('realmsync');
>> + my $opts = $plugin->check_config($id, $param, 0, 1);
>> +
>> + my $job = $jobs->{ids}->{$id};
>> + die "no such realmsync job\n" if !$job || $job->{type} ne 'realmsync';
>> +
>> + my $options = $plugin->options();
>> + foreach my $k (@$delete) {
>> + my $d = $options->{$k} || die "no such option '$k'\n";
>> + die "unable to delete required option '$k'\n" if !$d->{optional};
>> + die "unable to delete fixed option '$k'\n" if $d->{fixed};
>> + die "cannot set and delete property '$k' at the same time!\n"
>> + if defined($opts->{$k});
>
> don't we have that repeated a few times (didn't check actively, but it seems to ring
> some bells for me)? isn't this something some schema thingy would/could enforce?
> (also @wolfgang for the latter)
yes, AFAIR we do that (or some variation of it) for all section config CRUD API endpoints
>
>> + delete $job->{$k};
>> + }
>> +
>> + my $schedule_updated = 0;
>> + if ($param->{schedule} ne $job->{schedule}) {
>> + $schedule_updated = 1;
>> + }
>
> erm, why not?:
>
> my $schedule_updated = $param->{schedule} ne $job->{schedule};
good question ;)
>
>> +
>> + foreach my $k (keys %$param) {
>
> probably just a copied "error", but please: s/foreach/for/, or even:
>
> $job->{$_} = $param->{$_} for keys $param->%*;
mhmm.. AFAIR I haven't seen that pattern anywhere in our codebase yet, maybe we want
an example of it in our style guide? (for single-line loops I like it)
>
>> + $job->{$k} = $param->{$k};
>> + }
>> +
>> + if ($schedule_updated) {
>> + PVE::Jobs::updated_job_schedule($id, 'realmsync');
>> + }
>> +
>> + cfs_write_file('jobs.cfg', $jobs);
>> + return;
>> + };
>> + cfs_lock_file('jobs.cfg', undef, $update_job);
>
> could use a direct sub {} instead of named $code_ref like the first
> cfs_lock_file usage here does already.
>
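ack, i.e. roughly (sketch, body unchanged from $update_job above):

    cfs_lock_file('jobs.cfg', undef, sub {
        my $jobs = cfs_read_file('jobs.cfg');
        # ... same checks and updates as in $update_job ...
        cfs_write_file('jobs.cfg', $jobs);
    });
    die "$@" if $@;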
>> + die "$@" if ($@);
>> + }});
>> +
>> +
>> +__PACKAGE__->register_method({
>> + name => 'delete_job',
>> + path => '{id}',
>> + method => 'DELETE',
>> + description => "Delete domain-sync job definition.",
>> + permissions => {
>> + check => ['perm', '/', ['Sys.Modify']],
>> + },
>> + protected => 1,
>> + parameters => {
>> + additionalProperties => 0,
>> + properties => {
>> + id => {
>> + type => 'string',
>> + format => 'pve-configid',
>> + },
>> + },
>> + },
>> + returns => { type => 'null' },
>> + code => sub {
>> + my ($param) = @_;
>> +
>> + my $id = $param->{id};
>> +
>> + my $delete_job = sub {
>> + my $jobs = cfs_read_file('jobs.cfg');
>> +
>> + if (!defined($jobs->{ids}->{$id})) {
>
> why not check the job type here (like you do on get/update)?
probably a copy&paste mistake
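will add the same check as in get/update here, i.e. something like (untested):

    my $job = $jobs->{ids}->{$id};
    if (!defined($job) || $job->{type} ne 'realmsync') {
        raise_param_exc({ id => "No such job '$id'" });
    }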
>
>> + raise_param_exc({ id => "No such job '$id'" });
>> + }
>> + delete $jobs->{ids}->{$id};
>> +
>> + PVE::Jobs::remove_job($id, 'realmsync');
>> +
>> + cfs_write_file('jobs.cfg', $jobs);
>> + unlink "/etc/pve/priv/jobs/realmsync-$id.json";
>> + };
>> +
>> + cfs_lock_file('jobs.cfg', undef, $delete_job);
>
> could use a direct sub {} instead of named $code_ref like the first
> cfs_lock_file usage here does already.
>
>> + die "$@" if $@;
>> +
>> + return undef;
>> + }});
>> +1;
>> diff --git a/src/PVE/API2/Makefile b/src/PVE/API2/Makefile
>> index 2817f48..9aeb047 100644
>> --- a/src/PVE/API2/Makefile
>> +++ b/src/PVE/API2/Makefile
>> @@ -2,6 +2,7 @@
>> API2_SOURCES= \
>> AccessControl.pm \
>> Domains.pm \
>> + Domainsync.pm \
>> ACL.pm \
>> Role.pm \
>> Group.pm \
>> diff --git a/src/PVE/Jobs/Makefile b/src/PVE/Jobs/Makefile
>> new file mode 100644
>> index 0000000..9eed1b2
>> --- /dev/null
>> +++ b/src/PVE/Jobs/Makefile
>> @@ -0,0 +1,6 @@
>> +SOURCES=RealmSync.pm
>> +
>> +.PHONY: install
>> +install: ${SOURCES}
>> + install -d -m 0755 ${DESTDIR}${PERLDIR}/PVE/Jobs
>> + for i in ${SOURCES}; do install -D -m 0644 $$i ${DESTDIR}${PERLDIR}/PVE/Jobs/$$i; done
>> diff --git a/src/PVE/Jobs/RealmSync.pm b/src/PVE/Jobs/RealmSync.pm
>> new file mode 100644
>> index 0000000..f19f4a8
>> --- /dev/null
>> +++ b/src/PVE/Jobs/RealmSync.pm
>> @@ -0,0 +1,192 @@
>> +package PVE::Jobs::RealmSync;
>
> Hmm, why not DomainSync, or why is the other module called Domainsync? no biggie, but
> slightly odd
I can't really remember, but I guess I decided on a different name sometime during
development and forgot to update all instances...
I'm not bound to either of these names, but I would like to have it consistent
(so either all DomainSync or all RealmSync)
>
>> +
>> +use strict;
>> +use warnings;
>
> missing empty line between strict/warning and other use statements
>
>> +use JSON;
>> +
>> +use PVE::API2::Domains;
>> +use PVE::JSONSchema qw(get_standard_option);
>> +use PVE::Cluster;
>> +use PVE::CalendarEvent;
>> +use PVE::Tools;
>
> pls sort
>
>> +
>> +use base qw(PVE::Jobs::Plugin);
>
> isn't this in pve-manager? would add a cyclic dependency and so add to our (well,
> most of the time my ;P) bootstrap PITA.
we already talked about that off-list, so just for documentation purposes:
AFAIR @hannes wanted to move the jobs infrastructure to common (or was it cluster?),
and I did not want to wait for that for this patch, so I sent it as is.
I'd have to check how we'd have to move the packages around to avoid this
(easiest would probably be to have the base plugin in common/cluster, I guess)
>
>> +
>> +sub type {
>> + return 'realmsync';
>> +}
>> +
>> +my $props = get_standard_option('realm-sync-options', {
>> + realm => get_standard_option('realm'),
>> +});
>> +
>> +sub properties {
>> + return $props;
>> +}
>> +
>> +sub options {
>> + my $options = {
>> + enabled => { optional => 1 },
>> + schedule => {},
>> + comment => { optional => 1 },
>> + node => { optional => 1 },
>> + scope => {},
>> + };
>> + foreach my $opt (keys %$props) {
>
> s/foreach/for/
>
>> + next if defined($options->{$opt});
>> + if ($props->{$opt}->{optional}) {
>> + $options->{$opt} = { optional => 1 };
>> + } else {
>> + $options->{$opt} = {};
>> + }
>> + }
>> + $options->{realm}->{fixed} = 1;
>> +
>> + return $options;
>> +}
>> +
>> +sub decode_value {
>> + my ($class, $type, $key, $value) = @_;
>> + return $value;
>> +}
>> +
>> +sub encode_value {
>> + my ($class, $type, $key, $value) = @_;
>> + return $value;
>> +}
>> +
>> +sub createSchema {
>> + my ($class, $skip_type) = @_;
>> + my $schema = $class->SUPER::createSchema($skip_type);
>> +
>> + my $opts = $class->options();
>> + for my $opt (keys $schema->{properties}->%*) {
>> + next if defined($opts->{$opt}) || $opt eq 'id';
>> + delete $schema->{properties}->{$opt};
>> + }
>> +
>> + # remove legacy props
>> + delete $schema->{properties}->{full};
>> + delete $schema->{properties}->{purge};
>> +
>> + return $schema;
>> +}
>> +
>> +sub updateSchema {
>> + my ($class, $skip_type) = @_;
>> + my $schema = $class->SUPER::updateSchema($skip_type);
>> +
>> + my $opts = $class->options();
>> + for my $opt (keys $schema->{properties}->%*) {
>> + next if defined($opts->{$opt});
>> + next if $opt eq 'id' || $opt eq 'delete';
>> + delete $schema->{properties}->{$opt};
>> + }
>> +
>> + # remove legacy props
>> + delete $schema->{properties}->{full};
>> + delete $schema->{properties}->{purge};
>> +
>> + return $schema;
>> +}
>> +
>> +sub run {
>> + my ($class, $conf, $id, $schedule) = @_;
>> +
>> + my $node = $conf->{node};
>> + foreach my $opt (keys %$conf) {
>> + delete $conf->{$opt} if !defined($props->{$opt});
>> + }
>> +
>> + my $realm = $conf->{realm};
>> + my $statedir = "/etc/pve/priv/jobs";
>> + my $statefile = "$statedir/realmsync-$id.json";
>> +
>> + if (!defined($node)) {
>> + # cluster synced
>> + my $curtime = time();
>
> use $now if you want it short, IMO a bit better than somewhat randomly cut-off
> & directly concatenated words.
>
>> + my $nodename = PVE::INotify::nodename();
>> + PVE::Cluster::cfs_update();
>
> doesn't the job infra do this for us?
no, we also have it in the VZDump plugin, but you have a point:
putting it before 'run()' in PVE::Jobs probably makes more sense
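i.e. roughly (sketch of the call site in PVE::Jobs; variable names assumed):

    # before dispatching to the job plugin, refresh pmxcfs state once:
    PVE::Cluster::cfs_update();
    $plugin->run($conf, $id, $schedule);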
>
>> + # check statefile in pmxcfs if we should start
>> + my $shouldrun = PVE::Cluster::cfs_lock_domain($realm, undef, sub {
>> + mkdir $statedir;
>> + my $raw = eval { PVE::Tools::file_get_contents($statefile) } // '';
>> + my $members = PVE::Cluster::get_members();
>> +
>> + my $state = ($raw =~ m/^(\{.*\})$/) ? decode_json($1) : {};
>> + my $lastnode = $state->{node} // $nodename;
>> + my $lastupid = $state->{upid};
>> + my $lasttime = $state->{time};
>> +
>> + my $node_online = 0;
>> + if ($lastnode ne $nodename) {
>> + if (defined(my $member = $members->{$lastnode})) {
>> + $node_online = $member->{online};
>> + }
>> + } else {
>> + $node_online = 1;
>> + }
>> +
>> + if (defined($lastupid)) {
>> + # first check if the next run is scheduled
>> + if (my $parsed = PVE::Tools::upid_decode($lastupid, 1)) {
>> + my $calspec = PVE::CalendarEvent::parse_calendar_event($schedule);
>> + my $next_sync = PVE::CalendarEvent::compute_next_event($calspec, $parsed->{starttime});
>> + return 0 if !defined($next_sync) || $curtime < $next_sync; # not yet its (next) turn
>> + }
>> + # check if still running and node is online
>> + my $tasks = PVE::Cluster::get_tasklist();
>> + for my $task (@$tasks) {
>> + next if $task->{upid} ne $lastupid;
>> + last if defined($task->{endtime}); # it's already finished
>> + last if !$node_online; # it's not finished but the node is offline
>> + return 0; # not finished and online
>> + }
>> + } elsif (defined($lasttime) && ($lasttime+60) > $curtime && $node_online) {
>> + # another node started in the last 60 seconds and is online
>> + return 0;
>> + }
>> +
>> + # any of the following coniditions should be true here:
>
> s/coniditions/conditions/
>
>> + # * it was started on another node but that node is offline now
>> + # * it was started but either too long ago, or with an error
>> + # * the started task finished
>> +
>> + PVE::Tools::file_set_contents($statefile, encode_json({
>> + node => $nodename,
>> + time => $curtime,
>> + }));
>> + return 1;
>> + });
>> + die $@ if $@;
>> +
>> + if ($shouldrun) {
>> + my $err = undef;
>
> drop that and..
>
>> + my $upid = eval { PVE::API2::Domains->sync($conf) };
>> + $err = $@ if $@;
>
> just save the error with:
>
> my $err = $@;
>
>> + PVE::Cluster::cfs_lock_domain($realm, undef, sub {
>> + if ($err && !$upid) {
>> + PVE::Tools::file_set_contents($statefile, encode_json({
>> + node => $nodename,
>> + time => $curtime,
>> + error => $err,
>> + }));
>> + die "$err\n";
>> + }
>> +
>> + PVE::Tools::file_set_contents($statefile, encode_json({
>> + node => $nodename,
>> + upid => $upid,
>> + }));
>> + });
>> + die $@ if $@;
>> + return $upid;
>> + } else {
>> + return "OK";
>> + }
>> + }
>> +
>> + return PVE::API2::Domains->sync($conf);
>> +}
>> +
>> +1;
>> diff --git a/src/PVE/Makefile b/src/PVE/Makefile
>> index c839d8f..dfc5314 100644
>> --- a/src/PVE/Makefile
>> +++ b/src/PVE/Makefile
>> @@ -8,3 +8,4 @@ install:
>> install -D -m 0644 TokenConfig.pm ${DESTDIR}${PERLDIR}/PVE/TokenConfig.pm
>> make -C API2 install
>> make -C CLI install
>> + make -C Jobs install
>