## $Id: Sync.pm,v 1.7 2002/12/13 21:49:34 joko Exp $
##
## Copyright (c) 2002 Andreas Motl <andreas.motl@ilo.de>
##
## See COPYRIGHT section in pod text below for usage and distribution rights.
##
## ----------------------------------------------------------------------------------------
## $Log: Sync.pm,v $
## Revision 1.7 2002/12/13 21:49:34 joko
## + sub configure
## + sub checkOptions
##
## Revision 1.6 2002/12/06 04:49:10 jonen
## + disabled output-buffer here
##
## Revision 1.5 2002/12/05 08:06:05 joko
## + bugfix with determining empty fields (Null) with DBD::CSV
## + debugging
## + updated comments
##
## Revision 1.4 2002/12/03 15:54:07 joko
## + {import}-flag is now {prepare}-flag
##
## Revision 1.3 2002/12/01 22:26:59 joko
## + minor cosmetics for logging
##
## Revision 1.2 2002/12/01 04:43:25 joko
## + mapping detail entries may now be either an ARRAY or a HASH
## + erase flag is used now (for export-operations)
## + expressions to refer to values inside deep nested structures
## - removed old mappingV2-code
## + cosmetics
## + sub _erase_all
##
## Revision 1.1 2002/11/29 04:45:50 joko
## + initial check in
##
## Revision 1.1 2002/10/10 03:44:21 cvsjoko
## + new
## ----------------------------------------------------------------------------------------


package Data::Transfer::Sync;

use strict;
use warnings;

use Data::Dumper;
#use Hash::Merge qw( merge );

use misc::HashExt;
use libp qw( md5_base64 );
use libdb qw( quotesql hash2Sql );
use Data::Transform::Deep qw( hash2object refexpr2perlref );
use Data::Compare::Struct qw( getDifference isEmpty );

# get logger instance
my $logger = Log::Dispatch::Config->instance;

$| = 1;

sub new {
  my $invocant = shift;
  my $class = ref($invocant) || $invocant;
  my $self = {};
  $logger->debug( __PACKAGE__ . "->new(@_)" );
  bless $self, $class;
  $self->configure(@_);
  return $self;
}


sub configure {
  my $self = shift;
  my @args = @_;
  if (!isEmpty(\@args)) {
    my %properties = @_;
    # merge args to properties
    map { $self->{$_} = $properties{$_}; } keys %properties;
    $self->_init();
  } else {
    #print "no args!", "\n";
  }
  #print Dumper($self);
}

sub _init {
  my $self = shift;

  $self->{configured} = 1;

  # build new container if necessary
  $self->{container} = Data::Storage::Container->new() if !$self->{container};

  # add storages to container (optional)
  foreach (keys %{$self->{storages}}) {
    $self->{container}->addStorage($_, $self->{storages}->{$_});
  }

  # tag storages with id-authority and checksum-provider information
  # TODO: better store tag inside metadata to hold bits together!
  map { $self->{container}->{storage}->{$_}->{isIdentAuthority} = 1 } @{$self->{id_authorities}};
  map { $self->{container}->{storage}->{$_}->{isChecksumAuthority} = 1; } @{$self->{checksum_authorities}};
  map { $self->{container}->{storage}->{$_}->{isWriteProtected} = 1; } @{$self->{write_protected}};

}
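
# Note: configure()/_init() expect the caller to pass a flat list of properties,
# e.g. storages => { ... }, id_authorities => [ ... ], checksum_authorities => [ ... ],
# write_protected => [ ... ]. The three list properties only tag the corresponding
# storages inside the container (see the map calls above); the key names used for
# the storages themselves are up to the caller. (This comment is an editorial hint,
# not taken from calling code.)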


sub prepareOptions {

  my $self = shift;
  my $opts = shift;

  #print Dumper($opts);

  $opts->{mode}  ||= '';
  $opts->{erase} ||= 0;
  #$opts->{import} ||= 0;

  $logger->info( __PACKAGE__ . "->prepareOptions( source_node $opts->{source_node} mode $opts->{mode} erase $opts->{erase} prepare $opts->{prepare} )");

  if (!$opts->{mapping} || !$opts->{mapping_module}) {
    $logger->warning( __PACKAGE__ . "->prepareOptions: No mapping supplied - please check key 'mappings' in BizWorks/Config.pm");
  }

  my $evstring = "use $opts->{mapping_module};";
  eval($evstring);
  if ($@) {
    $logger->warning( __PACKAGE__ . "->prepareOptions: error while trying to access mapping - $@");
    return;
  }

  # resolve mapping metadata (returned from sub)
  my $mapObject = $opts->{mapping_module}->new();
  #print Dumper($map);
  my $source_node_name = $opts->{source_node};
  # check if a mapping for the given node is contained in the mapping object
  if (!$mapObject->can($source_node_name)) {
    $logger->warning( __PACKAGE__ . "->prepareOptions: Can't access mapping for node \"$source_node_name\" - please check $opts->{mapping_module}.");
    return;
  }
  my $map = $mapObject->$source_node_name;

  # remove asymmetries from $map (patch keys)
  $map->{source_node} = $map->{source};  delete $map->{source};
  $map->{target_node} = $map->{target};  delete $map->{target};
  $map->{mapping}     = $map->{details}; delete $map->{details};
  $map->{direction}   = $map->{mode};    delete $map->{mode};

  # defaults (mostly for backward-compatibility)
  $map->{source_node}    ||= $source_node_name;
  $map->{source_ident}   ||= 'storage_method:id';
  $map->{target_ident}   ||= 'property:oid';
  $map->{direction}      ||= $opts->{mode};   # | PUSH | PULL | FULL
  $map->{method}         ||= 'checksum';      # | timestamp
  $map->{source_exclude} ||= [qw( cs )];

  # merge map to opts
  map { $opts->{$_} = $map->{$_}; } keys %$map;

  #print Dumper($opts);

  # TODO: move this to checkOptions...

  # check - do we have a target?
  if (!$opts->{target_node}) {
    $logger->warning( __PACKAGE__ . "->prepareOptions: No target given - please check metadata declaration.");
    return;
  }

  #return $opts;
  return 1;

}


sub checkOptions {
  my $self = shift;
  my $opts = shift;

  my $result = 1;

  # check - do we have a target node?
  if (!$opts->{target_node}) {
    $logger->warning( __PACKAGE__ . "->checkOptions: Error while resolving resource metadata - no 'target node' could be determined.");
    $result = 0;
  }

  # check - do we have a mapping?
  if (!$opts->{mapping} && !$opts->{mapping_module}) {
    $logger->warning( __PACKAGE__ . "->checkOptions: Error while resolving resource metadata - no 'mapping' could be determined.");
    $result = 0;
  }

  return $result;

}


# TODO: some feature to show off the progress of synchronization (cur/max * 100)
sub syncNodes {

  my $self = shift;
  my $args = shift;

  if (!$self->{configured}) {
    $logger->critical( __PACKAGE__ . "->syncNodes: Synchronization object is not configured/initialized correctly." );
    return;
  }

  # remember arguments through the whole processing
  $self->{args} = $args;

  $logger->debug( __PACKAGE__ . "->syncNodes: starting" );

  # hash to hold and/or fill in metadata required for the processing
  $self->{meta} = {};

  # hash to sum up results
  my $direction_arrow = '';

  # detect synchronization direction to determine which optical symbol (directed arrow) to use
  if (lc $self->{args}->{direction} eq 'push') {
    $direction_arrow = '->';
  } elsif (lc $self->{args}->{direction} eq 'pull') {
    $direction_arrow = '<-';
  } elsif (lc $self->{args}->{direction} eq 'full') {
    $direction_arrow = '<->';
  } else {
  }

  # decompose identifiers for each partner
  # TODO: refactor!!! take this list from already established/given metadata
  foreach ('source', 'target') {

    # get/set metadata for further processing

    # Partner and Node (e.g.: "L:Country" or "R:countries.csv")
    if (my $item = $self->{args}->{$_}) {
      my @item = split(':', $item);
      $self->{meta}->{$_}->{dbkey} = $item[0];
      $self->{meta}->{$_}->{node}  = $item[1];
    }

    # Filter
    if (my $item_filter = $self->{args}->{$_ . '_filter'}) {
      $self->{meta}->{$_}->{filter} = $item_filter;
    }

    # IdentProvider
    if (my $item_ident = $self->{args}->{$_ . '_ident'}) {
      my @item_ident = split(':', $item_ident);
      $self->{meta}->{$_}->{IdentProvider} = { method => $item_ident[0], arg => $item_ident[1] };
    }

    # TODO: ChecksumProvider

    # exclude properties/subnodes
    if (my $item_exclude = $self->{args}->{$_ . '_exclude'}) {
      $self->{meta}->{$_}->{subnodes_exclude} = $item_exclude;
    }

    # TypeProvider
    if (my $item_type = $self->{args}->{$_ . '_type'}) {
      my @item_type = split(':', $item_type);
      $self->{meta}->{$_}->{TypeProvider} = { method => $item_type[0], arg => $item_type[1] };
    }

    # Callbacks - writers (will be triggered _before_ writing to target)
    if (my $item_writers = $self->{args}->{$_ . '_callbacks_write'}) {
      my $descent = $_;   # this is important since the following code inside the map wants to use its own context variables
      map { $self->{meta}->{$descent}->{Callback}->{write}->{$_}++; } @$item_writers;
    }

    # Callbacks - readers (will be triggered _after_ reading from source)
    if (my $item_readers = $self->{args}->{$_ . '_callbacks_read'}) {
      my $descent = $_;
      map { $self->{meta}->{$descent}->{Callback}->{read}->{$_}++; } @$item_readers;
    }

    # resolve storage objects
    #$self->{$_} = $self->{container}->{storage}->{$self->{meta}->{$_}->{dbkey}};
    # relink references to metainfo
    $self->{meta}->{$_}->{storage} = $self->{container}->{storage}->{$self->{meta}->{$_}->{dbkey}};
    #print "iiiiisprov: ", Dumper($self->{meta}->{$_}->{storage}), "\n";
  }

  #print Dumper($self->{meta});

  $logger->info( __PACKAGE__ . "->syncNodes: source=$self->{meta}->{source}->{dbkey}/$self->{meta}->{source}->{node} $direction_arrow target=$self->{meta}->{target}->{dbkey}/$self->{meta}->{target}->{node}" );

  # build mapping
  # incoming: an Array of node map entries (Array or Hash) - e.g.
  #   [ 'source:item_name' => 'target:class_val' ]
  #   { source => 'event->startDateTime', target => 'begindate' }
  foreach (@{$self->{args}->{mapping}}) {
    if (ref $_ eq 'ARRAY') {
      my @entry1 = split(':', $_->[0]);
      my @entry2 = split(':', $_->[1]);
      my $descent = [];
      my $node = [];
      $descent->[0] = $entry1[0];
      $descent->[1] = $entry2[0];
      $node->[0] = $entry1[1];
      $node->[1] = $entry2[1];
      push @{$self->{meta}->{$descent->[0]}->{childnodes}}, $node->[0];
      push @{$self->{meta}->{$descent->[1]}->{childnodes}}, $node->[1];
    } elsif (ref $_ eq 'HASH') {
      foreach my $entry_key (keys %$_) {
        my $entry_val = $_->{$entry_key};
        push @{$self->{meta}->{$entry_key}->{childnodes}}, $entry_val;
      }
    }

  }

  #print Dumper($self->{meta});

  # check partners/nodes: does partner exist / is node available?
  foreach my $partner (keys %{$self->{meta}}) {

    # 1. check partners & storages
    if (!$self->{meta}->{$partner}) {
      $logger->critical( __PACKAGE__ . "->syncNodes: Could not find partner '$partner' in configuration metadata." );
      return;
    }

    my $dbkey = $self->{meta}->{$partner}->{dbkey};

    if (!$self->{meta}->{$partner}->{storage}) {
      $logger->critical( __PACKAGE__ . "->syncNodes: Could not access storage of partner '$partner' (named '$dbkey'), looks like a configuration-error." );
      return;
    }

    # TODO:
    # 2. check if partners (and nodes?) are actually available....
    #    eventually pre-check mode of access-attempt (read/write) here to provide an "early-croak" if possible

    # 3. check nodes
    next if $self->{meta}->{$partner}->{storage}->{locator}->{type} eq 'DBI';   # HACK for DBD::CSV - re-enable for others
    # get node-name
    my $node = $self->{meta}->{$partner}->{node};
    if (!$self->{meta}->{$partner}->{storage}->existsChildNode($node)) {
      $logger->critical( __PACKAGE__ . "->syncNodes: Could not reach node \"$node\" at partner \"$partner\"." );
      return;
    }

  }

  # TODO:
  # + if action == PUSH: start processing
  # -+ if action == PULL: swap metadata and start processing
  # - if action == FULL: start processing, then swap metadata and (re-)start processing

  #print Dumper($self->{args});

  # manipulate metainfo according to direction of synchronization
  if (lc $self->{args}->{direction} eq 'push') {
    # just do it ...
  } elsif (lc $self->{args}->{direction} eq 'pull') {
    #print "=======SWAP", "\n";
    # swap
    ($self->{meta}->{source}, $self->{meta}->{target}) =
      ($self->{meta}->{target}, $self->{meta}->{source});
  } elsif (lc $self->{args}->{direction} eq 'full') {
  } else {
  }

  # prepare flag means: prepare the source node to be syncable
  # this is useful if there are e.g. no "ident" or "checksum" columns yet inside a DBI like (row-based) storage
  if ($self->{args}->{prepare}) {
    $self->_prepareNode_MetaProperties('source');
    $self->_prepareNode_DummyIdent('source');
    #return;
    #$self->_erase_all($opts->{source_node});
  }

  # erase flag means: erase the target
  #if ($opts->{erase}) {
  if ($self->{args}->{erase}) {
    # TODO: move this method to the scope of the synchronization core and wrap it around different handlers
    #print "ERASE", "\n";
    $self->_erase_all('target');
  }

  $self->_syncNodes();

}


# TODO: abstract the hardwired use of "source" and "target" in here somehow - hmmmm....... /(="§/%???
sub _syncNodes {

  my $self = shift;

  my $tc = OneLineDumpHash->new( {} );
  my $results;

  # set of objects is already in $self->{args}
  # TODO: make independent of the terminology "object..."
  $results = $self->{args}->{objectSet} if $self->{args}->{objectSet};

  # apply filter
  if (my $filter = $self->{meta}->{source}->{filter}) {
    #print Dumper($filter);
    #exit;
    $results ||= $self->_getNodeList('source', $filter);
  }

  # get reference to node list from convenient method provided by CORE-HANDLE
  #$results ||= $self->{source}->getListUnfiltered($self->{meta}->{source}->{node});
  #$results ||= $self->{meta}->{source}->{storage}->getListUnfiltered($self->{meta}->{source}->{node});
  $results ||= $self->_getNodeList('source');

  # checkpoint: do we actually have a list to iterate through?
  if (!$results || !@{$results}) {
    $logger->notice( __PACKAGE__ . "->syncNodes: No nodes to synchronize." );
    return;
  }

  # dereference
  my @results = @{$results};

  # iterate through set
  foreach my $source_node_real (@results) {

    $tc->{total}++;

    #print "======================== iter", "\n";

    # clone object (in case we have to modify it here)
    # TODO:
    # - is a "deep_copy" needed here if occurring modifications take place?
    # - puuhhhh, i guess a deep_copy would destroy tangram mechanisms?
    # - after all, just take care for now that this object doesn't get updated!
    my $source_node = $source_node_real;

    # modify entry - handle new style callbacks (the readers)
    #print Dumper($source_node);
    #exit;

    my $descent = 'source';

    # handle callbacks right now while scanning them (asymmetric to the writers)
    my $map_callbacks = {};
    if (my $callbacks = $self->{meta}->{$descent}->{Callback}) {

      my $error = 0;

      foreach my $node (keys %{$callbacks->{read}}) {

        my $object = $source_node;
        my $value;   # = $source_node->{$node};

        # ------------ half-redundant: make $self->callCallback($object, $value, $opts)
        my $perl_callback = $self->{meta}->{$descent}->{node} . '::' . $node . '_read';
        my $evalstring = 'return ' . $perl_callback . '( { object => $object, property => $node, value => $value, storage => $self->{meta}->{$descent}->{storage} } );';
        #print $evalstring, "\n"; exit;
        my $cb_result = eval($evalstring);
        if ($@) {
          $error = 1;
          print $@, "\n";
          die $@;
        }
        # ------------ half-redundant: make $self->callCallback($object, $value, $opts)

        $source_node->{$node} = $cb_result;

      }

    }

    #print Dumper($source_node);

    # exclude defined fields (simply delete from object)
    map { delete $source_node->{$_} } @{$self->{meta}->{source}->{subnodes_exclude}};

    # here we accumulate information about the status of the current node (payload/row/object/item/entry)
    $self->{node} = {};
    $self->{node}->{source}->{payload} = $source_node;

    #print "res - ident", "\n";

    # determine ident of entry
    my $identOK = $self->_resolveNodeIdent('source');
    #if (!$identOK && lc $self->{args}->{direction} ne 'import') {
    if (!$identOK) {
      #print Dumper($self->{meta}->{source});
      $logger->critical( __PACKAGE__ . "->syncNodes: No ident found in source node \"$self->{meta}->{source}->{node}\", try to \"prepare\" this node first?" );
      return;
    }

    #print "statload", "\n";
    #print "ident: ", $self->{node}->{source}->{ident}, "\n";
    #print Dumper($self->{node});

    my $statOK = $self->_statloadNode('target', $self->{node}->{source}->{ident});

    #print Dumper($self->{node});

    # mark node as new either if there's no ident or if stat/load failed
    if (!$statOK) {
      $self->{node}->{status}->{new} = 1;
      print "n" if $self->{verbose};
    }

    #print "checksum", "\n";

    # determine status of entry by synchronization method
    if ( (lc $self->{args}->{method} eq 'checksum') ) {
    #if ( $statOK && (lc $self->{args}->{method} eq 'checksum') ) {
    #if ( !$self->{node}->{status}->{new} && (lc $self->{args}->{method} eq 'checksum') ) {

      # TODO:
      # is this really worth a "critical"???
      # no - it should just be a debug appendix i believe

      #print "readcs", "\n";

      # calculate checksum of source node
      #$self->_calcChecksum('source');
      if (!$self->_readChecksum('source')) {
        $logger->critical( __PACKAGE__ . "->_readChecksum: Could not find \"source\" entry with ident=\"$self->{node}->{source}->{ident}\"" );
        $tc->{skip}++;
        print "s" if $self->{verbose};
        next;
      }

      # get checksum from synchronization target
      $self->_readChecksum('target');
      #if (!$self->_readChecksum('target')) {
      #  $logger->critical( __PACKAGE__ . "->_readChecksum: Could not find \"target\" entry with ident=\"$self->{node}->{source}->{ident}\"" );
      #  next;
      #}

      # pre flight check: do we actually have a checksum provided?
      #if (!$self->{node}->{source}->{checksum}) {
      #  print "Source checksum for entry with ident \"$self->{node}->{source}->{ident}\" could not be calculated, maybe it's missing?.", "\n";
      #  return;
      #}

      # determine if entry is "new" or "dirty"
      # after all, this seems to be the point where the hammer falls.....
      print "c" if $self->{verbose};
      $self->{node}->{status}->{new} = !$self->{node}->{target}->{checksum};
      if (!$self->{node}->{status}->{new}) {
        $self->{node}->{status}->{dirty} =
          $self->{node}->{status}->{new} ||
          (!$self->{node}->{source}->{checksum} || !$self->{node}->{target}->{checksum}) ||
          ($self->{node}->{source}->{checksum} ne $self->{node}->{target}->{checksum}) ||
          $self->{args}->{force};
      }

    }

    # first reaction on entry-status: continue with next entry if the current is already "in sync"
    if (!$self->{node}->{status}->{new} && !$self->{node}->{status}->{dirty}) {
      $tc->{in_sync}++;
      next;
    }

    # build map to actually transfer the data from source to target
    $self->_buildMap();

    #print Dumper($self->{node}); exit;

    #print "attempt", "\n";

    # additional (new) checks for feature "write-protection"
    if ($self->{meta}->{target}->{storage}->{isWriteProtected}) {
      $tc->{attempt_transfer}++;
      print "\n" if $self->{verbose};
      $logger->notice( __PACKAGE__ . "->syncNodes: Target is write-protected. Will not insert or modify node. " .
        "(Ident: $self->{node}->{source}->{ident} " . "Dump:\n" . Dumper($self->{node}->{source}->{payload}) . ")" );
      print "\n" if $self->{verbose};
      $tc->{skip}++;
      next;
    }

    # transfer contents of map to target
    if ($self->{node}->{status}->{new}) {
      $tc->{attempt_new}++;
      $self->_doTransferToTarget('insert');
      # asymmetry: refetch node from target to re-calculate new ident and checksum (TODO: is IdentAuthority of relevance here?)
      #print Dumper($self->{node});
      $self->_statloadNode('target', $self->{node}->{target}->{ident}, 1);
      $self->_readChecksum('target');

    } elsif ($self->{node}->{status}->{dirty}) {
      $tc->{attempt_modify}++;
      # asymmetry: get ident before updating (TODO: is IdentAuthority of relevance here?)
      $self->{node}->{target}->{ident} = $self->{node}->{map}->{$self->{meta}->{target}->{IdentProvider}->{arg}};
      $self->_doTransferToTarget('update');
      $self->_readChecksum('target');
    }

    if ($self->{node}->{status}->{ok}) {
      $tc->{ok}++;
      print "t" if $self->{verbose};
    }

    if ($self->{node}->{status}->{error}) {
      $tc->{error}++;
      push( @{$tc->{error_per_row}}, $self->{node}->{status}->{error} );
      print "e" if $self->{verbose};
    }

    # change ident in source (take from target), if transfer was ok and target is an IdentAuthority
    # this is (for now) called a "retransmit" indicated by a "r"-character when verbosing
    if ($self->{node}->{status}->{ok} && $self->{meta}->{target}->{storage}->{isIdentAuthority}) {
      print "r" if $self->{verbose};
      #print Dumper($self->{meta});
      #print Dumper($self->{node});
      #exit;
      $self->_doModifySource_IdentChecksum($self->{node}->{target}->{ident});
    }

    print ":" if $self->{verbose};

  }

  print "\n" if $self->{verbose};

  # build user-message from some stats
  my $msg = "statistics: $tc";

  if ($tc->{error_per_row}) {
    $msg .= "\n";
    $msg .= "errors from \"error_per_row\":" . "\n";
    $msg .= Dumper($tc->{error_per_row});
  }

  # todo!!!
  #sysevent( { usermsg => $msg, level => $level }, $taskEvent );
  $logger->info( __PACKAGE__ . "->syncNodes: $msg" );

  return $tc;

}


sub _dumpCompact {
  my $self = shift;

  #my $vars = \@_;
  my @data = ();

  my $count = 0;
  foreach (@_) {
    my $item = {};
    foreach my $key (keys %$_) {
      my $val = $_->{$key};

      #print Dumper($val);

      if (ref $val eq 'Set::Object') {
        #print "========================= SET", "\n";
        #print Dumper($val);
        #print Dumper($val->members());
        #$val = $val->members();
        #$vars->[$count]->{$key} = $val->members() if $val->can("members");
        #$item->{$key} = $val->members() if $val->can("members");
        $item->{$key} = $val->members();
        #print Dumper($vars->[$count]->{$key});

      } else {
        $item->{$key} = $val;
      }

    }
    push @data, $item;
    $count++;
  }

  #print "Dump:", Dumper(@data), "\n";

  $Data::Dumper::Indent = 0;
  my $result = Dumper(@data);
  $Data::Dumper::Indent = 2;
  return $result;

}


sub _calcChecksum {

  my $self = shift;
  my $descent = shift;
  my $specifier = shift;

  # calculate checksum for current object
  my $ident = $self->{node}->{$descent}->{ident};

  # build dump of this node
  my $payload = $self->{node}->{$descent}->{payload};
  #my $dump = $ident . "\n" . $item->quickdump();
  #my $dump = $ident . "\n" . Dumper($item);
  my $dump = $ident . "\n" . $self->_dumpCompact($payload);

  # TODO: $logger->dump( ... );
  #$logger->debug( __PACKAGE__ . ": " . $dump );
  #$logger->dump( __PACKAGE__ . ": " . $dump );

  # calculate checksum from dump
  # md5-based fingerprint, base64 encoded (from Digest::MD5)
  #my $checksum_cur = md5_base64($objdump) . '==';
  # 32-bit integer "hash" value (maybe faster?) (from DBI)
  $self->{node}->{$descent}->{checksum} = DBI::hash($dump, 1);

  # signal good
  return 1;

}


sub _readChecksum {
  my $self = shift;

  my $descent = shift;

  #print "getcheck:", "\n"; print Dumper($self->{node}->{$descent});

  if (!$self->{node}->{$descent}) {
    # signal checksum bad
    return;
  }

  # get checksum for current entry
  # TODO: don't have the checksum column/property hardcoded as "cs" here, make this configurable somehow

  if ($self->{meta}->{$descent}->{storage}->{isChecksumAuthority}) {
    #$self->{node}->{$descent}->{checksum} = $entry->{cs};
    #$self->{node}->{$descent}->{checksum} = $self->_calcChecksum($descent);   # $entry->{cs};
    #print "descent: $descent", "\n";
    $self->_calcChecksum($descent);
    #print "checksum: ", $self->{node}->{$descent}->{checksum}, "\n";
  } else {

    #$self->{node}->{$descent}->{checksum} = $entry->{cs};
    $self->{node}->{$descent}->{checksum} = $self->{node}->{$descent}->{payload}->{cs};
  }

  # signal checksum good
  return 1;

}


sub _buildMap {

  my $self = shift;

  # field-structure for building sql
  # mapping of sql-fieldnames to object-attributes
  $self->{node}->{map} = {};

  # manually set ...
  # ... object-id
  $self->{node}->{map}->{$self->{meta}->{target}->{IdentProvider}->{arg}} = $self->{node}->{source}->{ident};
  # ... checksum
  $self->{node}->{map}->{cs} = $self->{node}->{source}->{checksum};

  #print "sqlmap: ", Dumper($self->{node}->{map}), "\n";

  # for transferring flat structures via simple (1:1) mapping
  # TODO: diff per property / property value

  if ($self->{args}->{mapping}) {
    # apply mapping from $self->{args}->{mapping} to $self->{node}->{map}
    #foreach my $key (@{$self->{meta}->{source}->{childnodes}}) {
    my @childnodes = @{$self->{meta}->{source}->{childnodes}};
    for (my $mapidx = 0; $mapidx <= $#childnodes; $mapidx++) {
      #my $map_right = $self->{args}->{mapping}->{$key};

      $self->{node}->{source}->{propcache} = {};
      $self->{node}->{target}->{propcache} = {};

      # get property name
      $self->{node}->{source}->{propcache}->{property} = $self->{meta}->{source}->{childnodes}->[$mapidx];
      $self->{node}->{target}->{propcache}->{property} = $self->{meta}->{target}->{childnodes}->[$mapidx];
      #print "map: $map_right", "\n";

      # get property value
      my $value;

      # detect for callback - old style - (maybe the better???)
      if (ref($self->{node}->{target}->{map}) eq 'CODE') {
        #$value = &$map_right($objClone);
      } else {
        # plain (scalar?) value
        #$value = $objClone->{$map_right};
        $self->{node}->{source}->{propcache}->{value} = $self->{node}->{source}->{payload}->{$self->{node}->{source}->{propcache}->{property}};
      }
      #$self->{node}->{map}->{$key} = $value;

      # detect expression
      # for transferring deeply nested structures described by expressions
      #print "val: $self->{node}->{source}->{propcache}->{value}", "\n";
      if ($self->{node}->{source}->{propcache}->{property} =~ s/^expr://) {

        # create an anonymous sub to act as callback target dispatcher
        my $cb_dispatcher = sub {
          #print "=============== CALLBACK DISPATCHER", "\n";
          #print "ident: ", $self->{node}->{source}->{ident}, "\n";
          #return $self->{node}->{source}->{ident};

        };


        #print Dumper($self->{node});

        # build callback map for helper function
        #my $cbmap = { $self->{meta}->{source}->{IdentProvider}->{arg} => $cb_dispatcher };
        my $cbmap = {};
        my $value = refexpr2perlref($self->{node}->{source}->{payload}, $self->{node}->{source}->{propcache}->{property}, $cbmap);
        $self->{node}->{source}->{propcache}->{value} = $value;
      }

      # encode values dependent on type of underlying storage here - expand cases...
      my $storage_type = $self->{meta}->{target}->{storage}->{locator}->{type};
      if ($storage_type eq 'DBI') {
        # ...for sql
        $self->{node}->{source}->{propcache}->{value} = quotesql($self->{node}->{source}->{propcache}->{value});
      }
      elsif ($storage_type eq 'Tangram') {
        # iso? utf8 already possible?

      } elsif ($storage_type eq 'LDAP') {
        # TODO: encode utf8 here?
      }

      # store value to transfer map
      $self->{node}->{map}->{$self->{node}->{target}->{propcache}->{property}} = $self->{node}->{source}->{propcache}->{value};

    }
  }


  # TODO: $logger->dump( ... );
  #$logger->debug( "sqlmap:" . "\n" . Dumper($self->{node}->{map}) );
  #print "sqlmap: ", Dumper($self->{node}->{map}), "\n";
  #print "entrystatus: ", Dumper($self->{node}), "\n";

}
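
# Note on the "expr:" feature used in _buildMap above: a source-side mapping entry
# whose property name starts with "expr:" is not read as a flat column but resolved
# against the payload via refexpr2perlref(). Illustrative sketch only - the property
# names below are made-up examples, not taken from a real mapping:
#
#   mapping => [
#     { source => 'expr:event->startDateTime', target => 'begindate' },
#   ],
#
# Here "event->startDateTime" would be evaluated relative to the source payload to
# reach a value nested inside a sub-object.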

sub _resolveNodeIdent {
  my $self = shift;
  my $descent = shift;

  #print Dumper($self->{node}->{$descent});

  # get to the payload
  #my $item = $specifier->{item};
  my $payload = $self->{node}->{$descent}->{payload};

  # resolve method to get to the id of the given item
  # we use global metadata and the given descent for this task
  #my $ident = $self->{$descent}->id($item);
  #my $ident = $self->{meta}->{$descent}->{storage}->id($item);

  my $ident;
  my $provider_method = $self->{meta}->{$descent}->{IdentProvider}->{method};
  my $provider_arg    = $self->{meta}->{$descent}->{IdentProvider}->{arg};

  # resolve to ident
  if ($provider_method eq 'property') {
    $ident = $payload->{$provider_arg};

  } elsif ($provider_method eq 'storage_method') {
    #$ident = $self->{meta}->{$descent}->{storage}->id($item);
    $ident = $self->{meta}->{$descent}->{storage}->$provider_arg($payload);
  }

  $self->{node}->{$descent}->{ident} = $ident;

  return 1 if $ident;

}


sub _modifyNode {
  my $self = shift;
  my $descent = shift;
  my $action = shift;
  my $map = shift;
  my $crit = shift;

  # map for new style callbacks
  my $map_callbacks = {};

  # checks go first!

  # TODO: this should be reviewed first - before extending ;-)
  # TODO: this should be extended:
  # count these cases inside the caller to this sub and provide a better overall message
  # if this counts still zero in the end:
  # "No nodes have been touched for modify: Do you have column-headers in your csv file?"
  if (not defined $self->{node}) {
    #$logger->critical( __PACKAGE__ . "->_modifyNode failed: \"$descent\" node is empty." );
    #return;
  }

  # transfer callback nodes from value map to callback map - handle them afterwards! - (new style callbacks)
  if (my $callbacks = $self->{meta}->{$descent}->{Callback}) {
    foreach my $callback (keys %{$callbacks->{write}}) {
      $map_callbacks->{write}->{$callback} = $map->{$callback};
      delete $map->{$callback};
    }
  }


  #print Dumper($self->{meta});

  # DBI speaks SQL
  if ($self->{meta}->{$descent}->{storage}->{locator}->{type} eq 'DBI') {

    #print Dumper($self->{node});
    my $sql_main;
    # translate map to sql
    #print $action, "\n"; exit;
    #print $self->{meta}->{$descent}->{node}, "\n"; exit;
    #print "action:";
    #print $action, "\n";
    #$action = "anc";
    #print "yai", "\n";

    #print Dumper($map);
    #delete $map->{cs};

    if (lc($action) eq 'insert') {
      $sql_main = hash2Sql($self->{meta}->{$descent}->{node}, $map, 'SQL_INSERT');
    } elsif (lc $action eq 'update') {
      $crit ||= "$self->{meta}->{$descent}->{IdentProvider}->{arg}='$self->{node}->{$descent}->{ident}'";
      $sql_main = hash2Sql($self->{meta}->{$descent}->{node}, $map, 'SQL_UPDATE', $crit);
    }

    #$sql_main = "UPDATE currencies_csv SET oid='abcdef' WHERE text='Australian Dollar' AND key='AUD';";
    #$sql_main = "UPDATE currencies_csv SET oid='huhu2' WHERE ekey='AUD'";

    #print "sql: ", $sql_main, "\n";
    #exit;

    # transfer data
    my $sqlHandle = $self->{meta}->{$descent}->{storage}->sendCommand($sql_main);

    #exit;

    # handle errors
    if ($sqlHandle->err) {
      #if ($self->{args}->{debug}) { print "sql-error with statement: $sql_main", "\n"; }
      $self->{node}->{status}->{error} = {
        statement => $sql_main,
        state     => $sqlHandle->state,
        err       => $sqlHandle->err,
        errstr    => $sqlHandle->errstr,
      };
    } else {
      $self->{node}->{status}->{ok} = 1;
    }

  # Tangram does it the oo-way (naturally)
  } elsif ($self->{meta}->{$descent}->{storage}->{locator}->{type} eq 'Tangram') {
    my $sql_main;
    my $object;

    # determine classname
    my $classname = $self->{meta}->{$descent}->{node};

    # properties to exclude
    my @exclude = @{$self->{meta}->{$descent}->{subnodes_exclude}};


    if (my $identProvider = $self->{meta}->{$descent}->{IdentProvider}) {
      push @exclude, $identProvider->{arg};
    }

    # new feature:
    # - check TypeProvider metadata property from other side
    # - use argument (arg) inside as a classname for object creation on this side
    #my $otherSide = $self->_otherSide($descent);
    if (my $typeProvider = $self->{meta}->{$descent}->{TypeProvider}) {
      #print Dumper($map);
      $classname = $map->{$typeProvider->{arg}};
      # remove nodes from map also (push nodes to "subnodes_exclude" list)
      push @exclude, $typeProvider->{arg};
    }

    # exclude banned properties (remove from map)
    #map { delete $self->{node}->{map}->{$_} } @{$self->{args}->{exclude}};
    map { delete $map->{$_} } @exclude;

    # list of properties
    my @props = keys %{$map};

    # transfer data
    if (lc $action eq 'insert') {

      # build array to initialize object
      #my @initarray = ();
      #map { push @initarray, $_, undef; } @props;

      # make the object persistent in four steps:
      # - raw create (perl / class tangram scope)
      # - engine insert (tangram scope) ... this establishes inheritance - don't try to fill in inherited properties before!
      # - raw fill-in from hash (perl scope)
      # - engine update (tangram scope) ... this updates all properties just filled in

      # create new object ...
      #my $object = $classname->new( @initarray );
      $object = $classname->new();

      # ... pass to orm ...
      $self->{meta}->{$descent}->{storage}->insert($object);

      # ... and initialize with empty (undef'd) properties.
      #print Dumper(@props);
      map { $object->{$_} = undef; } @props;

      # mix in values ...
      hash2object($object, $map);

      # ... and re-update@orm.
      #print Dumper($object);
      $self->{meta}->{$descent}->{storage}->update($object);

      # asymmetry: get ident after insert
      # TODO:
      # - just do this if it is an IdentAuthority
      # - use IdentProvider metadata here
      #print Dumper($self->{meta}->{$descent});
      my $oid = $self->{meta}->{$descent}->{storage}->id($object);
      #print "oid: $oid", "\n";
      $self->{node}->{$descent}->{ident} = $oid;


    } elsif (lc $action eq 'update') {

      # get fresh object from orm first
      $object = $self->{meta}->{$descent}->{storage}->load($self->{node}->{$descent}->{ident});

      #print Dumper($self->{node});

      # mix in values
      #print Dumper($object);
      hash2object($object, $map);
      #print Dumper($object);
      #exit;
      $self->{meta}->{$descent}->{storage}->update($object);
    }

    my $error = 0;

    # handle new style callbacks - this is a HACK - do this without an eval!
    #print Dumper($map);
    #print "cb: ", Dumper($self->{meta}->{$descent}->{Callback});
    #print Dumper($map_callbacks);
    foreach my $node (keys %{$map_callbacks->{write}}) {
      #print Dumper($node);
      my $perl_callback = $self->{meta}->{$descent}->{node} . '::' . $node . '_write';
      my $evalstring = $perl_callback . '( { object => $object, value => $map_callbacks->{write}->{$node}, storage => $self->{meta}->{$descent}->{storage} } );';
      #print $evalstring, "\n"; exit;
      eval($evalstring);
      if ($@) {
        $error = 1;
        print $@, "\n";
      }

      #print "after eval", "\n";

      if (!$error) {
        # re-update@orm
        $self->{meta}->{$descent}->{storage}->update($object);
      }
    }

    # handle errors
    if ($error) {
      #print "error", "\n";
=pod
      my $sqlHandle;
      #if ($self->{args}->{debug}) { print "sql-error with statement: $sql_main", "\n"; }
      $self->{node}->{status}->{error} = {
        statement => $sql_main,
        state     => $sqlHandle->state,
        err       => $sqlHandle->err,
        errstr    => $sqlHandle->errstr,
      };
=cut
      # rollback....
      #print "rollback", "\n";
      $self->{meta}->{$descent}->{storage}->erase($object);
      #print "after rollback", "\n";
    } else {
      $self->{node}->{status}->{ok} = 1;
    }


  }

}

# TODO:
# this should be split up into...
# - a "_statNode" (should just touch the node to check for existence)
# - a "_loadNode" (should load node completely)
# - maybe additionally a "loadNodeProperty" (may specify properties to load)
# - introduce $self->{nodecache} for this purpose
# TODO:
# should we:
# - not pass ident in here but resolve it via "$descent"?
# - refactor this and stuff it with additional debug/error messages
# - this = the way the implicit load mechanism works
sub _statloadNode {

  my $self = shift;
  my $descent = shift;
  my $ident = shift;
  my $force = shift;

  # fetch entry to retrieve checksum from
  # was:
  if (!$self->{node}->{$descent} || $force) {
  # is:
  #if (!$self->{node}->{$descent}->{item} || $force) {

    if (!$ident) {
      #print "\n", "Attempt to fetch entry implicitly by ident failed: no ident given! This may result in an insert if no write-protection is in the way.", "\n";
      return;
    }

    # patch for DBD::CSV
    if ($ident && $ident eq 'Null') {
      return;
    }

    #print "yai!", "\n";

    my $query = {
      node => $self->{meta}->{$descent}->{node},
      subnodes => [qw( cs )],
      criterias => [
        { key => $self->{meta}->{$descent}->{IdentProvider}->{arg},
          op  => 'eq',
          val => $ident },
      ]
    };

    #print Dumper($query);

    my $result = $self->{meta}->{$descent}->{storage}->sendQuery($query);

    my $entry = $result->getNextEntry();

    #print Dumper($entry);
    #print "pers: " . $self->{meta}->{$descent}->{storage}->is_persistent($entry), "\n";
    #my $state = $self->{meta}->{$descent}->{storage}->_fetch_object_state($entry, { name => 'TransactionHop' } );
    #print Dumper($state);

    my $status = $result->getStatus();

    #print Dumper($status);

    # TODO: enhance error handling (store inside tc)
    #if (!$row) {
    #  print "\n", "row error", "\n";
    #  next;
    #}

    # these checks run before actually loading payload- and meta-data to node-container

    # 1st level - hard error
    if ($status && $status->{err}) {
      $logger->debug( __PACKAGE__ . "->_statloadNode (ident=\"$ident\") failed - hard error (that's ok): $status->{err}" );
      return;
    }

    # 2nd level - logical (empty/notfound) error
    if (($status && $status->{empty}) || !$entry) {
      $logger->debug( __PACKAGE__ . "->_statloadNode (ident=\"$ident\") failed - logical error (that's ok)" );
      #print "no entry (logical)", "\n";
      return;
    }

    #print Dumper($entry);

    # was:
    # $self->{node}->{$descent}->{ident} = $ident;
    # is:
    # TODO: re-resolve ident from entry via metadata "IdentProvider" here - like elsewhere
    $self->{node}->{$descent}->{ident} = $ident;
    $self->{node}->{$descent}->{payload} = $entry;

  }

  return 1;

}

sub _doTransferToTarget {
  my $self = shift;
  my $action = shift;
  $self->_modifyNode('target', $action, $self->{node}->{map});
}

sub _doModifySource_IdentChecksum {
  my $self = shift;
  my $ident_new = shift;
  # this changes an old node to a new one including ident and checksum
  # TODO:
  # - eventually introduce an external resource to store this data to
  # - we won't have to "re"-modify the source node here
  my $map = {
    $self->{meta}->{source}->{IdentProvider}->{arg} => $ident_new,
    cs => $self->{node}->{target}->{checksum},
  };

  #print Dumper($map);
  #print Dumper($self->{node});
  #exit;

  $self->_modifyNode('source', 'update', $map);
}


# this is a shortcut method
# ... let's try to avoid _any_ redundant code in here (ok... - at the cost of method lookups...)
sub _getNodeList {
  my $self = shift;
  my $descent = shift;
  my $filter = shift;
  return $self->{meta}->{$descent}->{storage}->getListFiltered($self->{meta}->{$descent}->{node}, $filter);
}
| 1237 |
|
|
sub _prepareNode_MetaProperties { |
| 1238 |
|
|
my $self = shift; |
| 1239 |
|
|
my $descent = shift; |
| 1240 |
|
|
|
| 1241 |
|
|
$logger->info( __PACKAGE__ . "->_prepareNode_MetaProperties( descent $descent )" ); |
| 1242 |
|
|
|
| 1243 |
|
|
# TODO: this should (better) be: "my $firstnode = $self->_getFirstNode($descent);" |
| 1244 |
|
|
my $list = $self->_getNodeList($descent); |
| 1245 |
|
|
|
| 1246 |
|
|
# get first node |
| 1247 |
|
|
my $firstnode = $list->[0]; |
| 1248 |
|
|
|
| 1249 |
|
|
# check if node contains meta properties/nodes |
| 1250 |
|
|
# TODO: "cs" is hardcoded here! |
| 1251 |
|
|
my @required = ( $self->{meta}->{$descent}->{IdentProvider}->{arg}, 'cs' ); |
| 1252 |
|
|
my @found = keys %$firstnode; |
| 1253 |
|
|
#my @diff = getDifference(\@found, \@required); |
| 1254 |
|
|
my $diff = getDifference(\@required, \@found); |
| 1255 |
|
|
#print Dumper(@found); |
| 1256 |
|
|
#print Dumper(@required); |
| 1257 |
|
|
#print Dumper(@diff); |
| 1258 |
|
|
#if (!$#diff || $#diff == -1) { |
| 1259 |
|
|
if (isEmpty($diff)) { |
| 1260 |
|
|
$logger->warning( __PACKAGE__ . "->_prepareNode_MetaProperties: node is lacking meta properties - will try to alter..." ); |
| 1261 |
|
|
foreach (@required) { |
| 1262 |
|
|
my $sql = "ALTER TABLE $self->{meta}->{$descent}->{node} ADD COLUMN $_"; |
| 1263 |
|
|
#print "sql: $sql", "\n"; |
| 1264 |
|
|
my $res = $self->{meta}->{$descent}->{storage}->sendCommand($sql); |
| 1265 |
|
|
#print Dumper($res->getStatus()); |
| 1266 |
|
|
} |
| 1267 |
|
|
} |
| 1268 |
|
|
|
| 1269 |
|
|
} |
| 1270 |
|
|
|
| 1271 |
|
|
sub _prepareNode_DummyIdent { |
| 1272 |
|
|
my $self = shift; |
| 1273 |
|
|
my $descent = shift; |
| 1274 |
|
|
|
| 1275 |
|
|
$logger->info( __PACKAGE__ . "->_prepareNode_DummyIdent( descent $descent )" ); |
| 1276 |
|
|
|
| 1277 |
|
|
my $list = $self->_getNodeList($descent); |
| 1278 |
|
|
#print Dumper($list); |
| 1279 |
|
|
my $i = 0; |
| 1280 |
|
|
my $ident_base = 5678983; |
| 1281 |
|
|
my $ident_appendix = '0001'; |
| 1282 |
|
|
foreach my $node (@$list) { |
| 1283 |
|
|
my $ident_dummy = $i + $ident_base; |
| 1284 |
|
|
$ident_dummy .= $ident_appendix; |
| 1285 |
|
|
my $map = { |
| 1286 |
|
|
$self->{meta}->{$descent}->{IdentProvider}->{arg} => $ident_dummy, |
| 1287 |
|
|
cs => undef, |
| 1288 |
|
|
}; |
| 1289 |
|
|
|
| 1290 |
|
|
# diff lists and ... |
| 1291 |
|
|
my $diff = getDifference([keys %$node], [keys %$map]); |
| 1292 |
|
|
next if $#{$diff} == -1; |
| 1293 |
|
|
|
| 1294 |
|
|
# ... build criteria including all columns |
| 1295 |
|
|
my @crits; |
| 1296 |
|
|
foreach my $property (@$diff) { |
| 1297 |
|
|
next if !$property; |
| 1298 |
|
|
my $value = $node->{$property}; |
| 1299 |
|
|
next if !$value; |
| 1300 |
|
|
push @crits, "$property='" . quotesql($value) . "'"; |
| 1301 |
|
|
} |
| 1302 |
|
|
my $crit = join ' AND ', @crits; |
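# For illustration: a node { name => 'Euro', code => 'EUR' } (hypothetical
# columns) would yield a criteria string like:  name='Euro' AND code='EUR'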
| 1303 |
|
|
print "p" if $self->{verbose}; |
| 1304 |
joko |
1.5 |
|
| 1305 |
|
|
#print Dumper($map); |
| 1306 |
|
|
#print Dumper($crit); |
| 1307 |
|
|
|
| 1308 |
joko |
1.1 |
$self->_modifyNode($descent, 'update', $map, $crit); |
| 1309 |
|
|
$i++; |
| 1310 |
|
|
} |
| 1311 |
|
|
|
| 1312 |
|
|
print "\n" if $self->{verbose}; |
| 1313 |
|
|
|
| 1314 |
|
|
if (!$i) { |
| 1315 |
|
|
$logger->warning( __PACKAGE__ . "->_prepareNode_DummyIdent: no nodes touched" ); |
| 1316 |
|
|
} |
| 1317 |
|
|
|
| 1318 |
|
|
} |
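# For illustration: with $ident_base = 5678983 and $ident_appendix = '0001',
# the first node processed (i = 0) receives the dummy ident "56789830001",
# the next one "56789840001", and so on - the appendix is string-concatenated,
# not added numerically.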
| 1319 |
|
|
|
| 1320 |
|
|
# TODO: handle this in an abstract way (wipe out use of 'source' and/or 'target' inside core) |
| 1321 |
|
|
sub _otherSide { |
| 1322 |
|
|
my $self = shift; |
| 1323 |
joko |
1.2 |
my $descent = shift; |
| 1324 |
|
|
return 'source' if $descent eq 'target'; |
| 1325 |
|
|
return 'target' if $descent eq 'source'; |
| 1326 |
joko |
1.1 |
return ''; |
| 1327 |
joko |
1.2 |
} |
| 1328 |
|
|
|
| 1329 |
|
|
sub _erase_all { |
| 1330 |
|
|
my $self = shift; |
| 1331 |
|
|
my $descent = shift; |
| 1332 |
|
|
#my $node = shift; |
| 1333 |
|
|
my $node = $self->{meta}->{$descent}->{node}; |
| 1334 |
|
|
$self->{meta}->{$descent}->{storage}->eraseAll($node); |
| 1335 |
joko |
1.1 |
} |
| 1336 |
|
|
|
| 1337 |
|
|
|
| 1338 |
|
|
=pod |
| 1339 |
|
|
|
| 1340 |
|
|
|
| 1341 |
|
|
=head1 DESCRIPTION |
| 1342 |
|
|
|
| 1343 |
|
|
Data::Transfer::Sync is a module providing a generic synchronization process |
| 1344 |
|
|
across arbitrary/multiple storages based on an ident/checksum mechanism.
| 1345 |
|
|
It sits on top of Data::Storage. |
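A rough construction sketch - the property names shown here are illustrative
assumptions only, not the verified configuration keys; consult your calling
code (e.g. BizWorks::Process::Setup) for the actual wiring:

  use Data::Transfer::Sync;

  # hypothetical setup - storage handles come from Data::Storage
  my $sync = Data::Transfer::Sync->new(
    verbose => 1,
  );
  $sync->syncNodes( ... );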
| 1346 |
|
|
|
| 1347 |
|
|
|
| 1348 |
|
|
=head1 REQUIREMENTS |
| 1349 |
|
|
|
| 1350 |
|
|
For full functionality: |
| 1351 |
|
|
Data::Storage |
| 1352 |
|
|
Data::Transform |
| 1353 |
|
|
Data::Compare |
| 1354 |
|
|
... and all their dependencies |
| 1355 |
|
|
|
| 1356 |
|
|
|
| 1357 |
|
|
=head1 AUTHORS / COPYRIGHT |
| 1358 |
|
|
|
| 1359 |
|
|
The Data::Transfer::Sync module is Copyright (c) 2002 Andreas Motl.
| 1360 |
|
|
All rights reserved. |
| 1361 |
|
|
|
| 1362 |
|
|
You may distribute it under the terms of either the GNU General Public |
| 1363 |
|
|
License or the Artistic License, as specified in the Perl README file. |
| 1364 |
|
|
|
| 1365 |
|
|
|
| 1366 |
|
|
=head1 SUPPORT / WARRANTY |
| 1367 |
|
|
|
| 1368 |
|
|
Data::Transfer::Sync is free software. IT COMES WITHOUT WARRANTY OF ANY KIND.
| 1369 |
|
|
|
| 1370 |
|
|
|
| 1371 |
|
|
|
| 1372 |
|
|
=head1 BUGS |
| 1373 |
|
|
|
| 1374 |
|
|
When in "import" mode with a Windows file, DBD::AutoCSV may hang.
| 1375 |
|
|
Hint: Maybe the source node contains an ident column, but no checksum column?
| 1376 |
|
|
|
| 1377 |
|
|
|
| 1378 |
|
|
=head1 USER LEVEL ERRORS |
| 1379 |
|
|
|
| 1380 |
|
|
=head4 Mapping |
| 1381 |
|
|
|
| 1382 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1383 |
|
|
info: BizWorks::Process::Setup->syncResource( source_node Currency mode PULL erase 0 import 0 )
critical: BizWorks::Process::Setup->startSync: Can't access mapping for node "Currency" - please check BizWorks::ResourceMapping.
| 1384 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1385 |
|
|
You have to create a sub for each node used in synchronization inside the named Perl module. The name of this sub _must_ match
| 1386 |
|
|
the name of the node you want to sync. This sub holds mapping metadata to give the engine hints about how |
| 1387 |
|
|
to access the otherwise generic nodes. |
| 1388 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
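A minimal sketch of such a mapping sub - the sub name has to equal the node
name ("Currency" here); the returned structure is purely hypothetical and only
illustrates where the mapping metadata lives, it is not the verified mapping
format:

  package BizWorks::ResourceMapping;

  sub Currency {
    return {
      # ... mapping metadata for the "Currency" node goes here ...
    };
  }

  1;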
| 1389 |
|
|
|
| 1390 |
|
|
|
| 1391 |
|
|
=head4 DBD::AutoCSV's rulebase |
| 1392 |
|
|
|
| 1393 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1394 |
|
|
info: BizWorks::Process::Setup->syncResource( source_node Currency mode PULL erase 0 import 0 ) |
| 1395 |
|
|
info: Data::Transfer::Sync->syncNodes: source=L/Currency <- target=R/currencies.csv |
| 1396 |
|
|
|
| 1397 |
|
|
Execution ERROR: Error while scanning: Missing first row or scanrule not applied. at C:/home/amo/develop/netfrag.org/nfo/perl/libs/DBD/CSV.pm line 165, <GEN9> line 1.
| 1399 |
|
|
called from C:/home/amo/develop/netfrag.org/nfo/perl/libs/Data/Storage/Handler/DBI.pm at 123. |
| 1400 |
|
|
|
| 1401 |
|
|
DBI-Error: DBD::AutoCSV::st fetchrow_hashref failed: Attempt to fetch row from a Non-SELECT statement |
| 1402 |
|
|
notice: Data::Transfer::Sync->syncNodes: No nodes to synchronize. |
| 1403 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1404 |
|
|
DBD::AutoCSV contains a rulebase which is run through while attempting to guess the style of the CSV file regarding
| 1405 |
|
|
parameters like the newline (eol), the column separation character (sep_char) and the quoting character (quote_char).
| 1406 |
|
|
If this rulebase runs out of entries and no style can be resolved, DBD::CSV dies, causing this "Execution ERROR", which
| 1407 |
|
|
results in a "DBI-Error" afterwards. |
| 1408 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
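If the layout of the file is already known, one workaround is to bypass the
guessing and declare the style explicitly. This is a sketch using plain
DBD::CSV per-table attributes; whether DBD::AutoCSV honours the same
attributes is an assumption, and the path and separator values are made up:

  use DBI;

  my $dbh = DBI->connect('dbi:CSV:f_dir=/path/to/csv/files', undef, undef,
                         { RaiseError => 1 });

  # pin eol / sep_char / quote_char instead of letting them be guessed
  $dbh->{csv_tables}->{'currencies.csv'} = {
      eol        => "\r\n",
      sep_char   => ';',
      quote_char => '"',
  };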
| 1409 |
|
|
|
| 1410 |
|
|
|
| 1411 |
|
|
=head4 Check structure of source node |
| 1412 |
|
|
|
| 1413 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1414 |
|
|
info: Data::Transfer::Sync->syncNodes: source=L/Currency <- target=R/currencies.csv |
| 1415 |
|
|
critical: Data::Transfer::Sync->syncNodes: Can not synchronize: No ident found in source node, maybe try to "import" this node first. |
| 1416 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1417 |
|
|
If low-level detection succeeds, but other required information is not found, this message is issued.
| 1418 |
|
|
"Other informations" might be: |
| 1419 |
|
|
- column-header-row completely missing |
| 1420 |
|
|
- ident column is empty |
| 1421 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1422 |
|
|
|
| 1423 |
|
|
|
| 1424 |
|
|
=head4 Modify structure of source node |
| 1425 |
|
|
|
| 1426 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1427 |
|
|
info: Data::Transfer::Sync->syncNodes: source=L/Currency <- target=R/currencies.csv |
| 1428 |
|
|
info: Data::Transfer::Sync->_prepareNode_MetaProperties( descent source ) |
| 1429 |
|
|
warning: Data::Transfer::Sync->_prepareNode_MetaProperties: node is lacking meta properties - will try to alter... |
| 1430 |
|
|
SQL ERROR: Command 'ALTER' not recognized or not supported! |
| 1431 |
|
|
|
| 1432 |
|
|
SQL ERROR: Command 'ALTER' not recognized or not supported! |
| 1433 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1434 |
|
|
The engine found a node whose structure does not match the required one. It tries to alter this automatically - only when doing "import" -
| 1435 |
|
|
but the DBD driver (in this case DBD::CSV) gets in the way, croaking that it is not able to do this.
| 1436 |
|
|
This could also appear if your database connection has insufficient rights to modify the database structure. |
| 1437 |
|
|
DBD::CSV croaks because it doesn't implement the ALTER command, so please edit your columns manually. |
| 1438 |
|
|
Hint: Add columns with the names of your "ident" and "checksum" property specifications. |
| 1439 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
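For a CSV source this usually just means adding the two columns to the header
row by hand. A hypothetical currencies.csv - assuming "oid" as the ident
property and "cs" as the checksum property; your actual column names come from
your own configuration:

  name;code;oid;cs
  Euro;EUR;;
  US Dollar;USD;;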
| 1440 |
|
|
|
| 1441 |
|
|
|
| 1442 |
|
|
=head4 Load source node by ident |
| 1443 |
|
|
|
| 1444 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1445 |
|
|
info: Data::Transfer::Sync->_prepareNode_DummyIdent( descent source ) |
| 1446 |
|
|
pcritical: Data::Transfer::Sync->_modifyNode failed: "source" node is empty. |
| 1447 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1448 |
|
|
The source node could not be loaded. Maybe the ident is missing. Please check manually. |
| 1449 |
|
|
Hint: Like above, the ident and/or checksum columns may be missing.... |
| 1450 |
|
|
- - - - - - - - - - - - - - - - - - - - - - - - - - |
| 1451 |
|
|
|
| 1452 |
|
|
|
| 1453 |
|
|
=head1 TODO |
| 1454 |
|
|
|
| 1455 |
|
|
- sub _resolveIdentProvider |
| 1456 |
|
|
- wrap _doModifySource and _doTransferTarget around a core function which can change virtually any type of node |
| 1457 |
|
|
- split this module up into Sync.pm, Sync/Core.pm, Sync/Compare.pm and Sync/Compare/Checksum.pm |
| 1458 |
|
|
- introduce _compareNodes as a core method and wrap it around methods in Sync/Compare/Checksum.pm |
| 1459 |
|
|
- introduce Sync/Compare/MyComparisonImplementation.pm |
| 1460 |
|
|
- some generic deferring method - e.g. "$self->defer(action)" - to be able to accumulate a bunch of actions for later processing |
| 1461 |
|
|
- this implies everything done is _really_ split up into generic actions - how else would we defer them??? |
| 1462 |
|
|
- example uses: |
| 1463 |
|
|
- fetch whole checksum list from node |
| 1464 |
|
|
- remember source ident retransmits |
| 1465 |
|
|
- remember: this is convenient - and maybe / of course faster - but we'll lose "per-node-atomic" operations
| 1466 |
|
|
- feature: mechanism to implicitly inject a checksum property into nodes (alter table / modify schema)
| 1467 |
|
|
- expand statistics / keep track of: |
| 1468 |
|
|
- touched/untouched nodes |
| 1469 |
|
|
- full sync |
| 1470 |
|
|
- just do a push and a pull for now but use stats for touched nodes in between to speed up things |
| 1471 |
|
|
- introduce some new metadata flags for a synchronization partner (e.g. "source" or "target"):
| 1472 |
|
|
- isNewNodePropagator |
| 1473 |
|
|
- isWriteProtected |
| 1474 |
|
|
|
| 1475 |
|
|
|
| 1476 |
|
|
=cut |
| 1477 |
|
|
|
| 1478 |
|
|
1; |