0001 /**
0002 * @brief Tuxedo UBB configuration import tool
0003 *
0004 * @file ubb2ex.pscript
0005 */
0006 /* -----------------------------------------------------------------------------
0007 * Enduro/X Middleware Platform for Distributed Transaction Processing
0008 * Copyright (C) 2009-2016, ATR Baltic, Ltd. All Rights Reserved.
0009 * Copyright (C) 2017-2023, Mavimax, Ltd. All Rights Reserved.
0010 * This software is released under one of the following licenses:
0011 * AGPL (with Java and Go exceptions) or Mavimax's license for commercial use.
0012 * See LICENSE file for full text.
0013 * -----------------------------------------------------------------------------
0014 * AGPL license:
0015 *
0016 * This program is free software; you can redistribute it and/or modify it under
0017 * the terms of the GNU Affero General Public License, version 3 as published
0018 * by the Free Software Foundation;
0019 *
0020 * This program is distributed in the hope that it will be useful, but WITHOUT ANY
0021 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
0022 * PARTICULAR PURPOSE. See the GNU Affero General Public License, version 3
0023 * for more details.
0024 *
0025 * You should have received a copy of the GNU Affero General Public License along
0026 * with this program; if not, write to the Free Software Foundation, Inc.,
0027 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
0028 *
0029 * -----------------------------------------------------------------------------
0030 * A commercial use license is available from Mavimax, Ltd
0031 * contact@mavimax.com
0032 * -----------------------------------------------------------------------------
0033 */
0034
0035 //
0036 // Ubb config to Enduro/X converter
0037 //
0038 // Some Caveats:
0039 // - In Tuxedo srvid is per group setting. In Enduro/X this setting is per instance.
0040 // So in case if duplicate srvid's are detected, loader will allocate completely
0041 // new numbers.
// - Groups are not supported per service entry. Thus for the affected machines
//   only the first service entry is exported.
0044
0045
0046 ////////////////////////////////////////////////////////////////////////////////
0047 // General constants
0048 ////////////////////////////////////////////////////////////////////////////////
0049
//Base range for port number.
//Each machine shall be accessible in range of 21001..210NN (where NN is the
//max number of cluster nodes).
//Converter from *NETWORK section does not extract ports, just hostnames
const NET_BASE_PORT = 21000;

//Host IP address (assumed) by NET_DEFAULT_IP + nodeid, range 200..2NN (where
//NN is max cluster node id).
//Also assumed that hosts can reach each other by these addresses.
const NET_DEFAULT_IP_START=200; //start IP of the nodes
const NET_DEFAULT_IP = "192.168.88.";

const IDFREE_SPACE = 30; //Free srvids between new binaries, auto-assign

//Include wizard base (compiled and executed in the current VM).
compilestring(getwizardbase())();
0065
0066 ////////////////////////////////////////////////////////////////////////////////
0067 // UBB Config, functions called by ubb2ex as flex & bison parses.
0068 ////////////////////////////////////////////////////////////////////////////////
0069
0070 //
0071 // Globals
0072 //
0073
0074 M_port_mul <- 100;
0075
0076 //This will keep open handles
0077 M_instances <- {};
0078
0079 //Mapping from [LMID] -> MACHINE entry
0080 M_lmidmachines <- {};
0081
0082 M_wizzard <- WizardBase();
0083
0084 //
0085 // Parsed UBB Config:
0086 //
0087 M_resources <- {};
0088 M_sections <- {};
0089
0090 //Current values
0091 M_cur_section <- {};
0092 M_cur_default <- {};
0093
0094 //This is hash of arrays.
0095 //For one parameter there may be actually several parameters with the same
0096 //name
0097 M_cur_param <- {};
0098 M_values <- [];
0099
0100
0101 //Folders for generation
0102 //Each key is full path to the disk
0103 //with following attributes:
0104 //.exists true|false
0105 //.generated true|false
0106 //Used on error termination to cleanup
0107 M_folder_gen <- {};
0108
0109 //List of files generated
0110 //Used on error termination to clean
0111 M_files_gen <- [];
0112
0113 /**
0114 * Add resource/keyword value
0115 */
0116 function ubb_add_val(arg)
0117 {
0118 M_values.append(arg);
0119 }
0120
0121 /**
0122 * Add resource parameter
0123 */
0124 function ubb_add_res_parm(arg)
0125 {
0126 M_resources[arg] <-M_values;
0127 M_values<-[]; //reset
0128 }
0129
0130 /**
0131 * Add section parameter
0132 */
0133 function ubb_add_sect_parm(arg)
0134 {
0135 local is_default=false;
0136
0137 print("Adding param ["+arg+"]");
0138
0139 if (arg == "DEFAULT:")
0140 {
0141 is_default=true;
0142 }
0143
0144 param <- {};
0145
0146 //Add link to previous default
0147 param.defaults <- M_cur_default;
0148 param.keywords <- {};
0149 param.name <- arg;
0150
0151 //Refresh current default
0152 if (is_default)
0153 {
0154 param.is_default<-true;
0155 M_cur_default = param;
0156 }
0157 else
0158 {
0159 param.is_default<-false;
0160 }
0161
0162 //Save current param
0163 M_cur_param = param;
0164
0165 //Now this is array
0166 if (! (arg in M_cur_section.params))
0167 {
0168 M_cur_section.params[arg]<-[]
0169 }
0170
0171 //Get this individual in the order
0172 M_cur_section.order.append(param);
0173 //This is now array.
0174 M_cur_section.params[arg].append(param);
0175
0176 }
0177
0178 /**
0179 * Mark group as participating in routing
0180 */
0181 function ubb_mark_group_routed(arg)
0182 {
0183 if (arg in M_sections["*GROUPS"].params
0184 && !( "routed" in M_sections["*GROUPS"].params[arg]))
0185 {
0186 M_sections["*GROUPS"].params[arg][0].routed<-true;
0187 print("GROUP ["+arg+"] -> routed");
0188 }
0189 }
0190
0191 /**
0192 * Add keyword to parameter, callback
0193 */
0194 function ubb_add_sect_keyw(arg)
0195 {
0196 M_cur_param.keywords[arg] <- M_values;
0197
0198 if (M_cur_section.name == "*ROUTING" && arg=="RANGES")
0199 {
0200 //Parse the DDR range.
0201 ubb_ddr_parse(M_values[0]);
0202 }
0203
0204 M_values<-[];
0205 }
0206
0207 /**
0208 * Add section, callback
0209 */
0210 function ubb_add_sect(arg)
0211 {
0212 M_sections[arg] <- {};
0213 M_sections[arg].params <- {};
0214 M_sections[arg].name <- arg;
0215 M_sections[arg].order <- [];
0216
0217 M_cur_default <- {};
0218 M_cur_section = M_sections[arg];
0219 M_cur_param <- {};
0220 }
0221
0222
0223 ////////////////////////////////////////////////////////////////////////////////
0224 // Support functions
0225 ////////////////////////////////////////////////////////////////////////////////
0226
0227 /**
0228 * Return common part of two directories
0229 * @path1 first path
0230 * @path2 second path
0231 * @return common part of "/" if not matching anthing
0232 */
0233 function dircommon(path1, path2)
0234 {
0235 local ret="";
0236 local dir1 = split(path1, "/");
0237 local dir2 = split(path2, "/");
0238 local len1 = dir1.len();
0239 local len2 = dir2.len();
0240
0241 for (local i=0; i<len1 && i<len2; i++)
0242 {
0243 if (dir1[i]==dir2[i])
0244 {
0245 if (i==0)
0246 {
0247 ret =dir1[i];
0248 }
0249 else
0250 {
0251 ret=ret + "/" + dir1[i];
0252 }
0253 }
0254 else
0255 {
0256 break;
0257 }
0258 }
0259 return ret;
0260 }
0261
0262 /**
0263 * Componets that does not matches
0264 * @param dir1 components from this directory will be returned (trailing)
0265 * which does not matches first path
0266 * @param dir2 path against which to test
0267 * @param relative non matching path in dir1
0268 */
0269 function dirdiff(path1, path2)
0270 {
0271 local ret="";
0272 local dir1 = split(path1, "/");
0273 local dir2 = split(path2, "/");
0274 local len1 = dir1.len();
0275 local len2 = dir2.len();
0276 local started=false;
0277
0278 for (local i=0; i<len1; i++)
0279 {
0280 if (started || i>=len2 || dir2[i]!=dir1[i])
0281 {
0282 started=true;
0283
0284 if (ret=="")
0285 {
0286 ret=dir1[i];
0287 }
0288 else
0289 {
0290 ret=ret+"/"+dir1[i];
0291 }
0292 }
0293 }
0294 return ret;
0295 }
0296
0297 ////////////////////////////////////////////////////////////////////////////////
0298 // Generator section
0299 ////////////////////////////////////////////////////////////////////////////////
0300
0301
0302 /**
0303 * Additional initializations
0304 */
0305 function init()
0306 {
0307 if ("FREEBSD"==getosname())
0308 {
0309 M_wizzard.qpath = "/mnt/mqueue";
0310 }
0311 }
0312
0313 /**
0314 * Schedule directory creation.
0315 * Folder creation is performed in recursive way by consulting each section to
0316 * the schedule and disk existence prior adding to the hash.
0317 * @param folder folder to create
0318 */
0319 function schedule_directory(folder)
0320 {
0321 local comps = split(folder, "/");
0322 local path="/";
0323 local len = comps.len();
0324
0325 for (local i=1; i<len; i++)
0326 {
0327
0328 if (path=="/")
0329 {
0330 path+=comps[i];
0331 }
0332 else
0333 {
0334 path+="/"+comps[i];
0335 }
0336
0337 if (!(path in M_folder_gen) && !fileexists(path))
0338 {
0339 M_folder_gen[path]<-{};
0340 }
0341 }
0342 }
0343
0344 /**
0345 * Get default value for section param.
0346 * Note that platformscript does not allow to use identifier "default" as
0347 * variable name.
0348 * @param param param object of the section
0349 * @param keyword keyword of interest
0350 * @param stock_default stock default value (if param value not present, and any
0351 * chained default value is not found
0352 * @return action value read
0353 */
0354 function get_val(param, keyword, stock_default)
0355 {
0356 if (! (keyword in param.keywords))
0357 {
0358 //Lookup in reverse order from current default
0359 local dflt = param.defaults;
0360
0361 while (("keywords" in dflt) && !(keyword in dflt.keywords))
0362 {
0363 //Step back to previous default
0364 if ("defaults" in dflt)
0365 {
0366 dflt = dflt.defaults;
0367 }
0368 else
0369 {
0370 //No more defaults in the section
0371 break;
0372 }
0373 }
0374
0375 if ("keywords" in dflt && keyword in dflt.keywords)
0376 {
0377 return dflt.keywords[keyword];
0378 }
0379 else
0380 {
0381 if (stock_default==null)
0382 {
0383 throw("No stock "+keyword+" value available for param ["+param.name+"]");
0384 }
0385 return stock_default;
0386 }
0387 }
0388 else
0389 {
0390 return param.keywords[keyword];
0391 }
0392 }
0393
0394 /**
0395 * Return number of services for instance used for routing
0396 * @param instance instance of interest
0397 * @return max number of routing services used
0398 */
0399 function get_routing_services(instance)
0400 {
0401 local cnt=0;
0402
0403 if ("*SERVICES" in M_sections)
0404 {
0405 foreach(idx,service in M_sections["*SERVICES"].order)
0406 {
0407 if (!service.is_default)
0408 {
0409 local range = {};
0410 local service_srvgrp = get_val(service, "SRVGRP", "");
0411
0412 //Service may have no group -> affect all
0413 if (service_srvgrp =="")
0414 {
0415 cnt++;
0416 }
0417 else
0418 {
0419 local group_lmid = get_val(
0420 M_sections["*GROUPS"].params[service_srvgrp[0]][0], "LMID", null)[0];
0421
0422 if (group_lmid == instance.lmid)
0423 {
0424 cnt++;
0425 }
0426
0427 }
0428 }
0429 }
0430 }
0431 print("Routing services: "+cnt);
0432 return cnt;
0433 }
0434
0435 /**
0436 * Reserve number of slots in free ranges
0437 * remove those ranges from next time use.
0438 * @param instance current machine instance we are generating
0439 * @slots number of slots required
0440 * @return start index that is guaranteed to have + (slots-1) free
0441 * places.
0442 */
0443 function reserve_range(instance, slots)
0444 {
0445 local ret = 0;
0446
0447 foreach(idx,range in instance.free_ranges)
0448 {
0449 if (range.min + (slots-1) <= range.max)
0450 {
0451 ret=range.min;
0452 //Reduce
0453 range.min=range.min+slots;
0454
0455 if (range.min>range.max)
0456 {
0457 instance.free_ranges.remove(idx);
0458 }
0459 break;
0460 }
0461 }
0462
0463 if (0==ret)
0464 {
0465 throw("Cannot find free binary range for number of "+slots+" slots");
0466 }
0467
0468 return ret;
0469 }
0470
0471 /**
0472 * Prepare free ID ranges for given instance. For upper number we take
0473 * number of total servers + 2000.
0474 * @param instance instance of interest
0475 */
0476 function prep_free_ranges(instance)
0477 {
0478 //We need to register all ranges
0479 //All ranges must be sorted
0480 //The free interval must be detected
0481 local start = 1;
0482 instance.ranges<-[];
0483
0484 if ("*SERVERS" in M_sections) foreach(idx,server in M_sections["*SERVERS"].order)
0485 {
0486 if (!server.is_default)
0487 {
0488 local range = {};
0489 local server_srvgrp = get_val(server, "SRVGRP", "");
0490 local group_lmid = get_val(
0491 M_sections["*GROUPS"].params[server_srvgrp[0]][0], "LMID", null)[0];
0492
0493 if (group_lmid == instance.lmid)
0494 {
0495 //OK this is our server
0496 range.min <-get_val(server, "MIN", ["1"]);
0497 range.max <-get_val(server, "MAX", range.min);
0498 range.srvid <-get_val(server, "SRVID", "");
0499
0500 if (range.max[0].tointeger() < range.min[0].tointeger())
0501 {
0502 throw(format("Invalid server %s range min=%d > max=%d",
0503 server.name, range.min[0].tointeger()
0504 , range.max[0].tointeger()));
0505 }
0506 range.min = range.min[0].tointeger()+range.srvid[0].tointeger();
0507 range.max = range.max[0].tointeger()+range.srvid[0].tointeger();
0508
0509 //print(format("Used rang srvid: %s %d - %d",
0510 // range.srvid[0], range.min[0], range.max[0]));
0511 instance.ranges.append(range);
0512 }
0513 }
0514 }
0515
0516 instance.ranges.sort(@(a,b) a.min <=> b.min);
0517 instance.free_ranges <- [];
0518
0519 foreach(idx,range in instance.ranges)
0520 {
0521 print(format("Used range %d - %d",
0522 range.min, range.max));
0523 }
0524
0525 //Add first range if any
0526 local len = instance.ranges.len();
0527 if (len > 0)
0528 {
0529 if (instance.ranges[0].min > 1)
0530 {
0531 local fr = {};
0532 fr.min <- 1;
0533 fr.max <- (instance.ranges[0].min - 1);
0534 instance.free_ranges.append(fr);
0535 }
0536
0537 local i=0;
0538 for (; i<len-1; i++)
0539 {
0540 if (instance.ranges[i].max+1< instance.ranges[i+1].min)
0541 {
0542 local fr = {};
0543 fr.min <- (instance.ranges[i].max + 1);
0544 fr.max <- (instance.ranges[i+1].min - 1);
0545 instance.free_ranges.append(fr);
0546 }
0547 }
0548
0549 //Add some free range+10K
0550 local fr = {};
0551 fr.min <- (instance.ranges[i].max + 1);
0552 fr.max <- (instance.ranges[i].max + 1 + 10000);
0553 instance.free_ranges.append(fr);
0554 }
0555 else
0556 {
0557 local fr = {};
0558 fr.min <- 1;
0559 fr.max <- 10000;
0560 instance.free_ranges.append(fr);
0561 }
0562
0563 foreach(idx,range in instance.free_ranges)
0564 {
0565 print(format("Free range %d - %d",
0566 range.min, range.max));
0567 }
0568
0569 }
0570
0571 /**
0572 * Prepare groups of interest for this particular node.
0573 * Mark the group is it used or not (used if routed or have xa).
0574 * Prepare open/close infos / xa infos, (extract xa settings).
0575 */
0576 function prep_groups(instance)
0577 {
0578 foreach(idx,igroup in M_sections["*GROUPS"].params)
0579 {
0580 //Single group only supported...
0581 local group = igroup[0];
0582 //Get our groups
0583 if ( !group.is_default && (get_val(group, "LMID", null)[0] == instance.lmid))
0584 {
0585 group.plot<-false;
0586 group.tmsrv_plotted<-false;
0587
0588 local ubb_openinfo = get_val(group, "OPENINFO", [""])[0];
0589 local ubb_tmsname = get_val(group, "TMSNAME", [""])[0];
0590
0591 if (ubb_tmsname!="")
0592 {
0593 //Mark that this instance uses xa.
0594 instance.xa_used<-true;
0595
0596 group.rmid <- get_val(group, "GRPNO", null)[0];
0597 local openinfo = ubb_openinfo;
0598
0599 //Get the Switch name
0600 local ex = regexp(@"^(.*):.*");
0601
0602 local res = {};
0603 if (openinfo!="")
0604 {
0605 local cap = ex.capture(openinfo);
0606 if (null!=cap)
0607 {
0608 local res = cap[1];
0609 group.xaswitchname <-openinfo.slice(res.begin, res.end);
0610 }
0611 else
0612 {
0613 group.xaswitchname <-"";
0614 }
0615 }
0616 else
0617 {
0618 group.xaswitchname <-"";
0619 }
0620 //Save TMS too..
0621 group.tmsname<-ubb_tmsname;
0622
0623 print("Got switch: ["+group.xaswitchname+"]");
0624
0625 //Support for null switch
0626 if (group.tmsname=="TMS")
0627 {
0628 group.openinfo <- "-";
0629 group.closeinfo <- "-";
0630 group.driverlib<-"libndrxxanulls."+M_wizzard.shared_lib_pfx;
0631 group.rmlib<-"-";
0632 }
0633 else if (group.xaswitchname== "TUXEDO/QM")
0634 {
0635 //Special case for MQ
0636 //We only need Qspace name, the data will be stored
0637 // in app_home/qdata/rm<rmid>
0638
0639 ex = regexp(@"^.*:.*:(.*)");
0640 res = ex.capture(openinfo)[1];
0641 local qspace = openinfo.slice(res.begin,res.end);
0642 group.openinfo <- "datadir=\"${NDRX_APPHOME}/qdata/"+qspace+"\",qspace=\""+qspace+"\"";
0643 group.closeinfo <- "${NDRX_XA_OPEN_STR}";
0644
0645 //Schedule folder for creation.
0646 if ( !(instance.app_home+"/qdata" in M_folder_gen))
0647 {
0648 //M_folder_gen[instance.app_home+"/qdata"]<-{};
0649 schedule_directory(instance.app_home+"/qdata");
0650 }
0651 group.qspace<-qspace;
0652 group.data_folder <- instance.app_home+"/qdata/"+qspace;
0653 //M_folder_gen[group.data_folder]<-{};
0654 schedule_directory(group.data_folder);
0655
0656 //Hashmap of auto-queues served by given queue space
0657 //internally may contains .trantime setting override.
0658 group.auto_queues<-{};
0659 group.workers<-0;
0660
0661 }
0662 else
0663 {
0664 ex = regexp(@"^.*:(.*)");
0665 res = ex.capture(openinfo)[1];
0666 //Extract the values, transform the "TUXEDO/QM"
0667 group.openinfo <- openinfo.slice(res.begin,res.end);
0668 local closeinfo = get_val(group, "CLOSEINFO", [""])[0];
0669
0670 if (closeinfo=="")
0671 {
0672 group.closeinfo <- "${NDRX_XA_OPEN_STR}";
0673 }
0674 else
0675 {
0676 ex = regexp(@"^.*:(.*)");
0677 res = ex.capture(closeinfo)[1];
0678 group.closeinfo <- closeinfo.slice(res.begin,res.end);
0679 }
0680 }
0681
0682 //Schedule folders to be created
0683 if ( !(instance.app_home+"/tmlogs" in M_folder_gen))
0684 {
0685 schedule_directory(instance.app_home+"/tmlogs");
0686 }
0687
0688 group.tmlogs<-instance.app_home+"/tmlogs/rm"+group.rmid;
0689 group.tmlogs_rel<-"${NDRX_APPHOME}/tmlogs/rm"+group.rmid;
0690 //M_folder_gen[group.tmlogs]<-{};
0691 schedule_directory(group.tmlogs);
0692
0693 }
0694
0695 //Load envfile, if any...
0696 prep_envfile(group, group);
0697
0698 }
0699 }
0700 }
0701
0702 /**
0703 * Merge defaults for current instance
0704 */
0705 function merge_defaults(instance)
0706 {
0707 local prev_server={};
0708 prev_server.is_default<-false;
0709 foreach (idx, server in instance.servers)
0710 {
0711 //If current one is default and previous is also de
0712 if (server.is_default && prev_server.is_default)
0713 {
0714 //Copy all current server attribs over prev_server
0715 //and delete curren entry.
0716 foreach (idx, keyword in server)
0717 {
0718 prev_server[idx]<-server[idx];
0719 }
0720 //Remove current entry...
0721 instance.servers[idx].deleted<-true;
0722 }
0723 else
0724 {
0725 //Ensure deleted tags
0726 if (!("deleted" in instance.servers[idx]))
0727 {
0728 instance.servers[idx].deleted<-false;
0729 }
0730 prev_server = server;
0731 }
0732
0733 //Mark group used...
0734 if ("SRVGRP" in server)
0735 {
0736 M_sections["*GROUPS"].params[server["SRVGRP"]][0].plot<-true;
0737 }
0738 }
0739
0740 }
0741
0742 /**
0743 * Escape string, includes escape for double quotes, and xml escape of
0744 * <>&
0745 * @param str string to escape
0746 * @param attr is this XML attribute value?
0747 * @return escaped string
0748 */
0749 function escape_clopt_xml(str, attr)
0750 {
0751 local ret = "";
0752 foreach (i, c in str)
0753 {
0754 local chr=format("%c", c);
0755
0756 switch (chr)
0757 {
0758 case "\"":
0759 if (attr)
0760 {
0761 chr = """;
0762 }
0763 else
0764 {
0765 chr = "\\"+chr;
0766 }
0767 break;
0768 case "&":
0769 chr = "&";
0770 break;
0771 case "<":
0772 chr = "<";
0773 break;
0774 case ">":
0775 chr = ">";
0776 break;
0777 }
0778
0779 ret=ret+chr;
0780 }
0781
0782 return ret;
0783 }
0784
0785 /**
0786 * Prepare binaries:
0787 * For this LMID:
0788 * Remove: WSL/TMSYSEVT/JSL/TMMETADATA/TMFAN/TMQFORWARD
0789 * Mark: if found TMSYSEVT or TMUSREVT -> tpevsrv needed.
0790 * Mark: If WSL or JSL used -> restincl needed (incl clients section) & cpmsrv
0791 * Extract: Group qspace shall be appended with automatic Qs from TMQFORWARD instances.
0792 * Transform: If for server -A is found, remove it. If -A is not found, add -N
0793 * as Enduro/X advertises all by default.
0794 * Transform: TMQUEUE -> tmqueue, replace min/max=1, update clopt.
0795 */
0796 function prep_servers(instance)
0797 {
0798 local server_optstring = "Aa:s:e:Ghl:n:o:Pp:rtv";
0799
0800 // phase 1. Get infos about the system
0801 if ("*SERVERS" in M_sections) foreach(idx,server in M_sections["*SERVERS"].order) if (!server.is_default)
0802 {
0803 local server_srvgrp = get_val(server, "SRVGRP", null);
0804 local group_lmid =get_val(M_sections["*GROUPS"].params[ server_srvgrp[0] ][0], "LMID", null)[0];
0805
0806 if (group_lmid == instance.lmid)
0807 {
0808 local remove = false;
0809 print("Processing binary: ["+server.name+"]");
0810
0811 /* Detect the type of the binary */
0812
0813 switch(server.name)
0814 {
0815 case "WSL":
0816 case "WSH":
0817 case "GWWS":
0818 case "JSL":
0819 case "JSH":
0820 instance.restin<-true;
0821 remove=true;
0822 break;
0823 case "TMSYSEVT":
0824 case "TMUSREVT":
0825 instance.events<-true;
0826 remove=true;
0827 break;
0828 case "TMMETADATA":
0829 case "TMFAN":
0830 remove=true;
0831 break;
0832 case "TMQUEUE":
0833 //Translate to tmqueue
0834 server.name="tmqueue";
0835 server.keywords["MIN"]<-["1"];
0836 server.keywords["MAX"]<-["1"];
0837 //Match the forwarders at plotting, by group lookup...
0838 //NOTE: -e will be generated afterwards
0839 server.keywords["CLOPT"]<-["-A -r -- -s1 -p10"];
0840
0841 break;
0842 case "TMQFORWARD":
0843
0844 remove=true;
0845
0846 local group = M_sections["*GROUPS"].params[server_srvgrp[0]][0];
0847 //Parse the group of the forward
0848 //and parse the clopt of forward, so that we get
0849 //queue name &
0850 //Extract min setting (used to set workers for the Q)
0851 //Extract -t from clopt second group
0852 //Extract -q Q1,Q2,etc. from clopt second group
0853
0854 local fwd_min = get_val(server, "MIN", ["1"])[0];
0855 local fwd_clopt = get_val(server, "CLOPT", ["-A"])[0];
0856
0857 //server opt string + tmqforward optstring
0858 local clopt_parsed = parseclopt2(fwd_clopt,
0859 server_optstring, "q:t:i:b:ednf:");
0860
0861 //assume -1 no special timeout used.
0862 local trantime = "-1";
0863
0864 foreach(idx,opt in clopt_parsed.args2)
0865 {
0866 if (opt.opt=="t")
0867 {
0868 trantime= opt.val;
0869 break;
0870 }
0871 }
0872
0873 foreach(idx,opt in clopt_parsed.args2)
0874 {
0875 if (opt.opt=="q")
0876 {
0877 //Split Q by ,
0878 //And load each Q
0879 local qs = split(opt.val, ",");
0880 foreach (idx, qq in qs)
0881 {
0882 //Must be loaded.
0883 print(format("Adding Q [%s] to group [%s] trantime: %s",
0884 qq, group.name, trantime));
0885 group.auto_queues[qq]<-{};
0886 group.auto_queues[qq].queue<-qq;
0887 group.auto_queues[qq].trantime<-trantime;
0888 group.auto_queues[qq].workers<-fwd_min;
0889 group.workers+=fwd_min.tointeger();
0890 }
0891 }
0892 }
0893 break;
0894 }
0895
0896 //do not process the deleted servers
0897 if (remove)
0898 {
0899 server.deleted<-true;
0900 }
0901
0902 }
0903 }
0904
0905
0906 // phase 2. Prepare binaries into instance.servers array, each element is hash with
0907 // key settings for the server or default to be generated.
0908 instance.servers<-[]
0909
0910 local new_srv = {};
0911
0912 //Add some reasonable defaults (mainly required by Enduro/X
0913 //and may be merged later if we get some further defaults
0914 new_srv.min<-1;
0915 new_srv.max<-1;
0916 new_srv.autokill<-1;
0917 new_srv.start_max<-10;
0918 new_srv.pingtime<-100;
0919 new_srv.ping_max<-800;
0920 new_srv.end_max<-10;
0921 new_srv.killtime<-1;
0922 new_srv.respawn<-"Y";
0923 new_srv.is_default<-true;
0924 instance.servers.append(new_srv);
0925
0926 //Add common configuration server
0927 new_srv = {};
0928 new_srv.bin<-"cconfsrv";
0929 new_srv.srvid <- reserve_range(instance, 2);
0930 new_srv.sysopt <- "-e ${NDRX_ULOG}/cconfsrv.${NDRX_SVSRVID}.log -r";
0931 new_srv.min<-2;
0932 new_srv.max<-2;
0933 new_srv.is_default<-false;
0934 instance.servers.append(new_srv);
0935
0936 //Add tpadmin server
0937 new_srv = {};
0938 new_srv.bin<-"tpadmsv";
0939 new_srv.srvid <- reserve_range(instance, 2);
0940 new_srv.sysopt <- "-e ${NDRX_ULOG}/tpadmsv.${NDRX_SVSRVID}.log -r";
0941 new_srv.min<-2;
0942 new_srv.max<-2;
0943 new_srv.is_default<-false;
0944 instance.servers.append(new_srv);
0945
0946 //Add event server if used.
0947 if ("events" in instance)
0948 {
0949 new_srv = {};
0950 new_srv.bin<-"tpevsrv";
0951 new_srv.srvid <- reserve_range(instance, 1);
0952 new_srv.sysopt <- "-e ${NDRX_ULOG}/tpevsrv.${NDRX_SVSRVID}.log -r";
0953 new_srv.min<-1;
0954 new_srv.max<-1;
0955 new_srv.mindispatchthreads<-5;
0956 new_srv.maxdispatchthreads<-5;
0957 new_srv.is_default<-false;
0958 instance.servers.append(new_srv);
0959 }
0960
0961 //Add networking if used.
0962 if ( instance.machine.networked)
0963 {
0964 //link this machine with other networked machines.
0965 //the order of the machines
0966 //lets without -f, add manually if different architecture hosts
0967 //have been found.
0968 foreach(idx,val in M_sections["*MACHINES"].params)
0969 {
0970 local machine = val[0];
0971
0972 if (machine.is_default || !machine.networked)
0973 {
0974 continue;
0975 }
0976
0977 local lmid = machine.keywords.LMID[0];
0978
0979 new_srv = {};
0980 new_srv.bin<-"tpbridge";
0981 new_srv.srvid <-reserve_range(instance, 1);
0982 new_srv.min<-1;
0983 new_srv.max<-1;
0984 new_srv.is_default<-false;
0985
0986 if (machine.nodeid < instance.nodeid)
0987 {
0988 //In this case we take passive role and accept incoming connections
0989 new_srv.sysopt<-("-e ${NDRX_ULOG}/tpbridge.${NDRX_SVSRVID}.log");
0990
0991 new_srv.appopt<-"-n"+machine.nodeid+" -r -i 0.0.0.0 -p "
0992 +(NET_BASE_PORT+instance.nodeid*M_port_mul+machine.nodeid)+" -tP -z30";
0993
0994 instance.servers.append(new_srv);
0995 }
0996 else if (machine.nodeid > instance.nodeid)
0997 {
0998
0999 //This is active role
1000 //port number shall match our node id
1001 //we shall build port number as 100*listening_node+active_node
1002 new_srv.sysopt<-"-e ${NDRX_ULOG}/tpbridge.${NDRX_SVSRVID}.log";
1003
1004 if ("ip" in machine)
1005 {
1006 new_srv.appopt<-"-n"+machine.nodeid+" -r -i "+machine.ip+" -p "
1007 +(NET_BASE_PORT+machine.nodeid*M_port_mul+instance.nodeid)+" -tA -z30";
1008 }
1009 else
1010 {
1011 new_srv.appopt<-"-n"+machine.nodeid+" -r -h "+machine.hostname+" -p "
1012 +(NET_BASE_PORT+machine.nodeid*M_port_mul+instance.nodeid)+" -tA -z30";
1013 }
1014
1015 instance.servers.append(new_srv);
1016 }
1017 }
1018 }
1019
1020 //loop over the binaries & defaults, if not deleted, add (for our instance)
1021 if ("*SERVERS" in M_sections) foreach(idx,server in M_sections["*SERVERS"].order) if (! ("deleted" in server))
1022 {
1023 local proceed=false;
1024 local server_srvgrp = "";
1025 local group = {};
1026 if (server.is_default)
1027 {
1028 //Any default is OK
1029 proceed=true;
1030 }
1031 else
1032 {
1033 server_srvgrp = get_val(server, "SRVGRP", null)[0];
1034 group = M_sections["*GROUPS"].params[server_srvgrp][0];
1035 local group_lmid =get_val(group, "LMID", null)[0];
1036 if (group_lmid==instance.lmid)
1037 {
1038 proceed=true;
1039 }
1040 }
1041
1042 if (proceed)
1043 {
1044 //Process the binary, add to arrays...
1045 new_srv = {};
1046
1047 if (server_srvgrp!="")
1048 {
1049 new_srv.srvgrp <-server_srvgrp;
1050 new_srv.group<-group;
1051 }
1052
1053 new_srv.bin <- server.name;
1054 new_srv.is_default <- server.is_default;
1055
1056 if (!new_srv.is_default)
1057 {
1058 //Load clopt & app opt
1059 local clopt = get_val(server, "CLOPT", ["-A"])[0];
1060 local clopt_parsed = parseclopt1(clopt, server_optstring);
1061 local have_A=false;
1062 local have_e=false;
1063 local sysopt = "";
1064 local appopt = "";
1065
1066 //Check is -A not present, if so then we need to set -B
1067 foreach(idx,opt in clopt_parsed.args1)
1068 {
1069 //opt -s in Enduro/X is opt -S
1070
1071 if (opt.opt=="s")
1072 {
1073 opt.opt="S";
1074 }
1075
1076 if (opt.opt=="A")
1077 {
1078 have_A=true;
1079 }
1080
1081 // works for both error and stdout
1082 if (opt.opt=="e" || opt.opt=="o")
1083 {
1084 if (opt.opt=="e")
1085 {
1086 have_e=true;
1087 }
1088
1089 //Detect the directory + schedule
1090 //I.e. part of $NDRX_ULOG
1091 //I.e. part of $NDRX_APPHOME with default under log
1092 //Not a part of anything. Also.. remember add prefix
1093
1094 local e_val = opt.val;
1095
1096 if (M_opt_P!="" && M_opt_P!="/")
1097 {
1098 e_val=M_opt_P+e_val;
1099 }
1100 local e_file = basename(e_val);
1101 local e_dir = dirname(e_val);
1102
1103 local ulog_common = dircommon(e_dir, instance.log_full);
1104 local ulog_diff = dirdiff(e_dir, instance.log_full);
1105
1106 if (ulog_common==instance.log_full)
1107 {
1108 schedule_directory(e_dir);
1109
1110 if (ulog_diff=="")
1111 {
1112 //OK this is ULOG folder, no schedule
1113 opt.val = "${NDRX_ULOG}/" + e_file;
1114 }
1115 else
1116 {
1117 //OK this is ULOG folder, no schedule
1118 opt.val = "${NDRX_ULOG}/" + ulog_diff + "/" + e_file;
1119 }
1120 }
1121 else
1122 {
1123 //Is it common to apphome, if so try to use
1124 //relative path to the output.
1125 local common = dircommon(e_dir, instance.app_home);
1126 local diff = dirdiff(e_dir, instance.app_home);
1127
1128 if (common==instance.app_home)
1129 {
1130 if (diff!="")
1131 {
1132 schedule_directory(e_dir);
1133 opt.val = "${NDRX_APPHOME}/"+diff+"/"+e_file;
1134 }
1135 else
1136 {
1137 //We go under default log folder...
1138 schedule_directory(instance.app_home+"/log");
1139 opt.val = "${NDRX_APPHOME}/log/"+e_file;
1140 }
1141 }
1142 else
1143 {
1144 schedule_directory(e_dir);
1145 opt.val=e_val;
1146 }
1147 }
1148 }
1149 }
1150
1151 if (!have_A)
1152 {
1153 //Do not advertise built services
1154 sysopt = "-B";
1155 }
1156
1157 //Add -e file to write the stderr to log files.
1158 if (!have_e)
1159 {
1160 if (sysopt!="")
1161 {
1162 sysopt= sysopt + " " + "-e ${NDRX_ULOG}/"+new_srv.bin+".${NDRX_SVSRVID}.log";
1163 }
1164 else
1165 {
1166 sysopt= "-e ${NDRX_ULOG}/"+new_srv.bin+".${NDRX_SVSRVID}.log";
1167 }
1168 }
1169
1170 //If string contains tab, space or newline - add quotes
1171 //For both sysopt and appopt.
1172
1173 //Now generate clopt...
1174 foreach(idx,opt in clopt_parsed.args1) if (opt.opt != "A")
1175 {
1176 if (sysopt=="")
1177 {
1178 sysopt="-"+opt.opt;
1179 }
1180 else
1181 {
1182 sysopt=sysopt + " -"+opt.opt;
1183 }
1184 if ("val" in opt)
1185 {
1186 //Escape " if found
1187 if (null!=opt.val.find(" ") || null!=opt.val.find("\n") || null!=opt.val.find("\t"))
1188 {
1189 sysopt=sysopt + " \"" + escape_clopt_xml(opt.val, false) + "\"";
1190 }
1191 else
1192 {
1193 sysopt=sysopt + " " + escape_clopt_xml(opt.val, false);
1194 }
1195 }
1196 }
1197
1198 //Generate app opts
1199 foreach(idx,opt in clopt_parsed.freeargs)
1200 {
1201 if (appopt=="")
1202 {
1203 appopt=opt;
1204 }
1205 else
1206 {
1207 if (null!=opt.find(" ") || null!=opt.find("\n") || null!=opt.find("\t"))
1208 {
1209 appopt=appopt + " \"" + escape_clopt_xml(opt, false) + "\"";
1210 }
1211 else
1212 {
1213 appopt=appopt + " " + escape_clopt_xml(opt, false);
1214 }
1215 }
1216 }
1217
1218 print("Built sysopt ["+sysopt+"]");
1219 print("Built appopt ["+appopt+"]");
1220 new_srv.sysopt <- sysopt;
1221 new_srv.appopt <- appopt;
1222 }
1223
1224 if ("SRVGRP" in server.keywords)
1225 {
1226 new_srv.cctag<-server.keywords.SRVGRP[0];
1227 }
1228
1229 if ("SRVID" in server.keywords)
1230 {
1231 new_srv.srvid<-server.keywords.SRVID[0];
1232 }
1233
1234 if ("MIN" in server.keywords)
1235 {
1236 new_srv.min<-server.keywords.MIN[0];
1237 }
1238
1239 if ("MAX" in server.keywords)
1240 {
1241 new_srv.max<-server.keywords.MAX[0];
1242 }
1243 else if ("min" in new_srv)
1244 {
1245 new_srv.max<-new_srv.min;
1246 }
1247
1248 if (!server.is_default)
1249 {
1250 //resolve min/max for final check
1251 new_srv.real_min <- get_val(server, "MIN", ["1"])[0].tointeger();
1252 //Default MAX is same as MIN
1253 new_srv.real_max <- get_val(server, "MAX", [new_srv.real_min])[0].tointeger();
1254 }
1255
1256 if ("ENVFILE" in server.keywords)
1257 {
1258 new_srv.env<-server.keywords.ENVFILE[0];
1259 }
1260
1261 //Not used on Linux
1262 if ("RQADDR" in server.keywords)
1263 {
1264 new_srv.rqaddr<-server.keywords.RQADDR[0];
1265 }
1266
1267 if ("RESTART" in server.keywords)
1268 {
1269 new_srv.respawn<-server.keywords.RESTART[0];
1270 }
1271
1272
1273 //If MAXDISPATCHTHREADS<=1, then no need for this setting at all
1274 //Except, we shall understand do we get something from previous defaults
1275 //If there is no previous default, then do not plot this
1276 //If there is previous default and it differs form this setting, then we need to plot
1277 //the thing, also if max=1, then min shall be set to 1 too...
1278
1279 //Get default
1280 local default_max=1;
1281 local default_min=1;
1282
1283 if ("keywords" in server.defaults)
1284 {
1285 default_max = get_val(server.defaults, "MAXDISPATCHTHREADS", ["1"])[0].tointeger();
1286 default_min = get_val(server.defaults, "MINDISPATCHTHREADS", ["1"])[0].tointeger();
1287 }
1288
1289 if ("MAXDISPATCHTHREADS" in server.keywords)
1290 {
1291 local thrds = server.keywords.MAXDISPATCHTHREADS[0].tointeger();
1292 //Set only if default was bigger than 1
1293 if (thrds<=1 && default_max>1 || thrds>1)
1294 {
1295 //Enduro/X default is 1
1296 if (thrds<1)
1297 {
1298 thrds=1;
1299 }
1300 new_srv.maxdispatchthreads<-thrds;
1301 }
1302 }
1303
1304 if ("MINDISPATCHTHREADS" in server.keywords)
1305 {
1306 local thrds = server.keywords.MINDISPATCHTHREADS[0].tointeger();
1307 //Set only if default was bigger than 1
1308 if (thrds<=1 && default_min>1 || thrds>1)
1309 {
1310 //Enduro/X default is 1
1311 if (thrds<1)
1312 {
1313 thrds=1;
1314 }
1315 new_srv.mindispatchthreads<-thrds;
1316 }
1317 }
1318
1319 //THREADSTACKSIZE this goes to process, thus read value from defaults...
1320 local stacksz = get_val(server, "THREADSTACKSIZE", "");
1321
1322 if (stacksz!="")
1323 {
1324 //Our stack is in KB
1325 new_srv.threadstacksize<- (stacksz[0].tointeger() / 1024);
1326 }
1327
1328 instance.servers.append(new_srv);
1329 }
1330
1331 }
1332
1333 if (instance.xa_used || instance.restin)
1334 {
1335 //Add new defaults, do not re-use cctag
1336 new_srv={};
1337 new_srv.min<-1;
1338 new_srv.max<-1;
1339 new_srv.respawn<-"Y";
1340 new_srv.is_default<-true;
1341 new_srv.cctag<-"/"; //Use root global settings
1342 instance.servers.append(new_srv);
1343
1344 if (instance.xa_used)
1345 {
1346 //Add new defaults, do not re-use cctag
1347 new_srv={};
1348 new_srv.bin <- "tmrecoversv";
1349 new_srv.srvid <- reserve_range(instance, 1);
1350 new_srv.real_min<-1;
1351 new_srv.real_max<-1;
1352 new_srv.is_default<-false;
1353 new_srv.sysopt <- "-e ${NDRX_ULOG}/tmrecoversv.${NDRX_SVSRVID}.log -r";
1354 new_srv.appopt <- "-p -s10";
1355 new_srv.cctag<-"/"; //Allow merge
1356 instance.servers.append(new_srv);
1357 }
1358
1359 if (instance.restin)
1360 {
1361 //Add new defaults, do not re-use cctag
1362 new_srv={};
1363 new_srv.bin <- "cpmsrv";
1364 new_srv.srvid <- reserve_range(instance, 1);
1365 new_srv.real_min<-1;
1366 new_srv.real_max<-1;
1367 new_srv.is_default<-false;
1368 new_srv.sysopt <- "-e ${NDRX_ULOG}/cpmsrv.${NDRX_SVSRVID}.log -r";
1369 new_srv.appopt <- "-k3 -i1";
1370 new_srv.cctag<-"/"; //Allow merge
1371 instance.servers.append(new_srv);
1372 }
1373 }
1374
1375
1376 //phase 4. Merge consecutive defaults, remove last default if no binaries follow
1377 merge_defaults(instance);
1378
1379 //OK Ready to plot debug, forward queues, server defaults + servers.
1380 local servers_final=[];
1381
1382 // Copy all servers to final list
1383 // in case if xa is used, add tmsrv firstly
1384 foreach (idx, server in instance.servers)
1385 {
1386 //Check does this depend on XA group?
1387 //srvgrp is for translated binaries. which basically pull in the tmsrv
1388 if (!server.is_default && "srvgrp" in server &&
1389 !server.deleted)
1390 {
1391 local group = M_sections["*GROUPS"].params[ server.srvgrp ][0];
1392 local tmscout =get_val(group, "TMSCOUNT", [""])[0];
1393
1394 //This is tmsrv group
1395 if ("tmsname" in group && !group.tmsrv_plotted)
1396 {
1397 local cnt = 1;
1398 if (tmscout!="")
1399 {
1400 cnt = tmscout.tointeger();
1401 }
1402 //Add tmsrv here
1403 //Keep the same group setting
1404 //either direct cctag (if set) or then it must come from
1405 //the default.
1406 new_srv = {};
1407
1408
1409 if (group.xaswitchname== "TUXEDO/QM" || group.tmsname=="TMS")
1410 {
1411 new_srv.bin<-"tmsrv";
1412 }
1413 else
1414 {
1415 new_srv.bin<-get_val(group, "TMSNAME", null)[0];
1416 }
1417
1418 new_srv.srvid <- reserve_range(instance, cnt);
1419 new_srv.sysopt <- "-e ${NDRX_ULOG}/"+new_srv.bin+".${NDRX_SVSRVID}.log -r";
1420 new_srv.appopt <- "-t1 -l "+group.tmlogs_rel;
1421 new_srv.min<-cnt;
1422 new_srv.max<-cnt;
1423 if ("cctag" in server)
1424 {
1425 new_srv.cctag<- server.cctag;
1426 }
1427 new_srv.is_default<-false;
1428 group.tmsrv_plotted=true;
1429 servers_final.append(new_srv);
1430 }
1431
1432 //perform correction on tmq so that forward thread pool
1433 if (server.bin=="tmqueue")
1434 {
1435 //get the group & forwarder count
1436 //workers
1437 local workers = server.group.workers;
1438
1439 if (workers<10)
1440 {
1441 workers=10;
1442 }
1443
1444 server.appopt+=" -f"+workers;
1445 }
1446 }
1447
1448 if (!server.deleted)
1449 {
1450 servers_final.append(server);
1451 }
1452 }
1453
1454 instance.servers = servers_final;
1455
1456 //Remove any un-needed defaults from the end
1457 local len = instance.servers.len();
1458
1459 for (local i=len-1; i>=0; i--)
1460 {
1461 local server = instance.servers[i];
1462 if (server.is_default)
1463 {
1464 instance.servers.remove(i);
1465 }
1466 else
1467 {
1468 break;
1469 }
1470 }
1471
1472 //mark groups to plot
1473 foreach (idx, server in instance.servers)
1474 {
1475 if ("group" in server)
1476 {
1477 server.group.plot=true;
1478 }
1479 }
1480
1481 //Merge from bellow to to up.
1482 //if all servers bellow default have the same group and default have also
1483 //group set, then set common group in the default and clear the cctag
1484 //setting from bellow servers
1485 local len = instance.servers.len();
1486 local last_cctag = "";
1487 local first = false;
1488 local continue_till_default = false;
1489 local matching_servers = [];
1490
1491 for (local i=len-1; i>=0; i--)
1492 {
1493 local server = instance.servers[i];
1494
1495 if (i==len-1)
1496 {
1497 first=true;
1498 }
1499
1500 if (server.is_default)
1501 {
1502 if (!continue_till_default && last_cctag!="")
1503 {
1504 //Merge the cctag to default
1505 server.cctag<-last_cctag;
1506 //Remove default from all linked servers
1507
1508 foreach (idx, sv in matching_servers)
1509 {
1510 delete sv.cctag;
1511 }
1512
1513 }
1514 else
1515 {
1516 //If and there is no default cctag for this default
1517 //then stop merging.
1518 //Because then further default merges might affect this particular
1519 //group and following binaries (i.e. get cctag from previous default)
1520 //Thus stop merging here.
1521 break;
1522 }
1523
1524 first = true;
1525 last_cctag="";
1526 matching_servers = [];
1527 }
1528 else if (continue_till_default)
1529 {
1530 continue;
1531 }
1532 else if ("cctag" in server)
1533 {
1534 if (first)
1535 {
1536 last_cctag = server.cctag;
1537 //Append in case if we want to merge the group...
1538 matching_servers.append(server);
1539 first=false;
1540 }
1541 else if (last_cctag != server.cctag)
1542 {
1543 last_cctag="";
1544 continue_till_default=true;
1545 }
1546 else
1547 {
1548 matching_servers.append(server);
1549 }
1550 }
1551 else
1552 {
1553 continue_till_default=true;
1554 }
1555 }
1556
1557 //Verify server IDs
1558 local id_check = {};
1559 local have_dup = false;
1560
1561 foreach (idx, server in instance.servers)
1562 {
1563 if (server.is_default)
1564 {
1565 continue;
1566 }
1567
1568 local min = 0;
1569 local max = 0;
1570
1571 if ("min" in server)
1572 {
1573 min = server.min;
1574 //For further processing
1575 server.real_min<-min;
1576 }
1577 else
1578 {
1579 min = server.real_min;
1580 }
1581
1582 if ("max" in server)
1583 {
1584 max = server.max;
1585 //For further processing
1586 server.real_max<-max;
1587 }
1588 else
1589 {
1590 max = server.real_max;
1591 }
1592
1593 print("BIN "+server.bin+" srvid="+server.srvid+" max="+max);
1594
1595 for (local i=server.srvid.tointeger(); i<server.srvid.tointeger()+max.tointeger(); i++)
1596 {
1597 print("Check: "+i);
1598 if (i in id_check)
1599 {
1600 error("Got duplicate id ["+i+"] -> fallback to new assign");
1601 have_dup = true;
1602 //break; keep looping needs to setup all real_max
1603 }
1604
1605 id_check[i]<-true;
1606 }
1607
1608 }
1609
1610 //IDFREE_SPACE
1611 //Loop over the all binaries
1612 local srvid_base=1;
1613 if (have_dup || "1"== M_opt_A)
1614 {
1615 print("Assigning new numbers...");
1616 foreach (idx, server in instance.servers)
1617 {
1618 if (server.is_default)
1619 {
1620 continue;
1621 }
1622
1623 server.srvid=srvid_base;
1624 srvid_base+=(server.real_max.tointeger()+IDFREE_SPACE);
1625 }
1626 }
1627
1628 }
1629
1630 /**
1631 * prepare networking. This will update each
1632 * As we need range of ports. we will extract only ip address from.
1633 * IPv6 recognition during migration currently is not supported (just add manually
1634 * to the tpbridge)
1635 * machine with LMID with ip/host/binding port information
1636 */
function prep_networking()
{
    //Nothing to do if no *NETWORK section is present in the UBB config
    if (!("*NETWORK" in M_sections))
    {
        return;
    }

    // Assign each ip addr/or hostname, and add networked param
    foreach(idx,net in M_sections["*NETWORK"].params) if (!net[0].is_default)
    {
        local net = net[0];
        local naddr = get_val(net, "NADDR", null)[0];
        print(format("parsing lmid=[%s] naddr=[%s]", net.name, naddr));
        M_lmidmachines[idx].networked<-true;

        //Dotted IPv4 form: //1.2.3.4:port
        if (regexp("^//[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}:.*").match(naddr))
        {
            local ex = regexp("^//([0-9]{1,3})\\.([0-9]{1,3})\\.([0-9]{1,3})\\.([0-9]{1,3}):.*");
            local res = ex.capture(naddr);
            local ip = naddr.slice(res[1].begin,res[1].end)+"."+
                naddr.slice(res[2].begin,res[2].end)+"."+
                naddr.slice(res[3].begin,res[3].end)+"."+
                naddr.slice(res[4].begin,res[4].end);
            print("Extract by ip: "+ip);
            M_lmidmachines[idx].ip <- ip;
        }
        //Hostname form: //host.name:port
        else if (regexp("^//.*:.*").match(naddr))
        {
            local ex = regexp(@"^//(.*):.*");
            local res = ex.capture(naddr);
            local hostname = naddr.slice(res[1].begin,res[1].end);
            print("Extract by hostname: "+hostname);
            M_lmidmachines[idx].hostname <- hostname;
        }
        /* extract by hex */
        else if (regexp("^\\\\[xX]0002[0-9A-Fa-f]{12}").match(naddr)
            || regexp("^0[xX]0002[0-9A-Fa-f]{12}").match(naddr))
        {
            //All accepted prefixes (\x0002, \X0002, 0x0002, 0X0002) are
            //6 characters long; the remainder is 12 hex digits:
            //4 digits of port followed by 8 digits of the IPv4 address
            local hex_str = substr(naddr, 6);

            print("Got hex ip: [" +hex_str+ "]");

            //Skip the 4 port digits, capture the 4 ip octets (2 hex digits each).
            //Capture is run on hex_str itself (previously it ran on naddr and
            //only produced correct offsets by coincidence of the prefix length)
            local ex = regexp("^....(..)(..)(..)(..)");
            local res = ex.capture(hex_str);
            local ip = hex2int(hex_str.slice(res[1].begin,res[1].end)) + "." +
                hex2int(hex_str.slice(res[2].begin,res[2].end)) + "." +
                hex2int(hex_str.slice(res[3].begin,res[3].end)) + "." +
                hex2int(hex_str.slice(res[4].begin,res[4].end));
            print("Extract by hex-ip: "+ip);
            M_lmidmachines[idx].ip <- ip;
        }
        else
        {
            //Unrecognized address format -> generate a default private address
            //derived from the machine's node id
            local ip_start = NET_DEFAULT_IP_START + M_lmidmachines[idx].nodeid;
            local ip = "192.168.88."+ip_start;
            print("Use default addresses: "+ip);
            M_lmidmachines[idx].ip <- ip;
        }
    }
}
1711
1712
1713 /**
1714 * Function parses envfile and performs few transformations
1715 * @param obj object to add envfile data
1716 * @param object onto which to resolve the data
1717 */
function prep_envfile(obj, resolv_obj)
{
    //Key is env name, value is value
    obj.envfile <-{};

    local envfile = get_val(resolv_obj, "ENVFILE", [""])[0];

    if (envfile!="")
    {
        print("Parsing env file ["+envfile+"]");
        local myfile = file(envfile,"r");

        //Read line by line, process it
        while (1)
        {
            local line = "";
            try
            {
                line = myfile.readline(1024*10);
            }
            catch (e)
            {
                //Do not leak the file handle on read failure
                myfile.close();
                print(e);
                throw e;
            }

            if (line=="" && myfile.eos())
            {
                break;
            }

            if (line=="")
            {
                continue;
            }

            //Skip comment lines
            if (regexp(@"^#.*|[ \t\r]+#.*").match(line))
            {
                continue;
            }

            //Process the line: split at the FIRST '=' so that values which
            //themselves contain '=' are kept intact (a greedy regexp split
            //at the last '=' previously, and crashed on lines without '=')
            local eqpos = line.find("=");

            if (null==eqpos || 0==eqpos)
            {
                error("Invalid envfile line ["+line+"] - ignoring");
                continue;
            }

            local envname = line.slice(0, eqpos);
            local envvalue = line.slice(eqpos+1);

            //Strip any trailing CR/LF left over by readline
            while (envvalue.len()>0 &&
                (envvalue[envvalue.len()-1]=='\n' || envvalue[envvalue.len()-1]=='\r'))
            {
                envvalue = envvalue.slice(0, envvalue.len()-1);
            }

            //Map Tuxedo 32-bit env names to the Enduro/X equivalents
            if (envname=="FIELDTBLS32")
            {
                envname="FIELDTBLS";
            }

            if (envname=="FLDTBLDIR32")
            {
                envname="FLDTBLDIR";
            }

            if (envname=="VIEWDIR32")
            {
                envname="VIEWDIR";
            }

            if (envname=="VIEWFILES32")
            {
                envname="VIEWFILES";
            }

            //Additional processing: ensure Enduro/X built-in field tables
            //are always reachable
            if (envname=="FIELDTBLS")
            {
                envvalue=envvalue+",Exfields";
            }

            if (envname=="FLDTBLDIR")
            {
                envvalue=envvalue+":${NDRX_HOME}/share/endurox/ubftab";
            }

            print(format("Adding env from envfile [%s] = [%s]", envname, envvalue));
            obj.envfile[envname]<-envvalue;
        }

        myfile.close();
    }
}
1815
1816 /**
1817 * Generate set file
1818 * @param instance current object of interest
1819 */
function gen_set_file(instance)
{
    //Prepare set file
    //Needs to set CCONFIG to the file, not the folder, if several configs
    //live there, it will load them all.
    //NOTE: shebang fixed to #!/bin/bash (was missing the '!')
    instance.set_text <-
/******************************************************************************/
@"#!/bin/bash
#
# @(#) Load this script in environment before Enduro/X start
#

# update to correspond actual Enduro/X installation path
export NDRX_HOME=/usr
export NDRX_APPHOME="+instance.app_home+@"
export NDRX_CCONFIG="+instance.ndrx_conf+"/"+"app."+instance.prefix+@".ini
export CDPATH=$CDPATH:.:${NDRX_APPHOME}
export PATH=$PATH:"+instance.ndrx_bin;
/******************************************************************************/

    //Add additional folders to the path, if exists ...
    foreach (idx, folder in instance.appdir_add)
    {
        if (folder.use_rel)
        {
            instance.set_text+=":${NDRX_APPHOME}/"+folder.path;
        }
        else
        {
            instance.set_text+=":"+folder.path;
        }
    }

    //To ensure that on newer version Go works correctly.
    if (instance.restin)
    {
        instance.set_text+="\n";
        instance.set_text+="export GODEBUG=\"asyncpreemptoff=1\"";
    }

    //Create the queue device directory on environment load, if requested
    if (M_wizzard.mk_mq_dev)
    {
        instance.set_text+="\n";
        instance.set_text=instance.set_text+@"
if [ ! -d """+M_wizzard.qpath+@""" ]; then
    mkdir """+M_wizzard.qpath+@"""
fi";
    }

    instance.set_text+="\n";

    print("set_file: [\n"+instance.set_text+"]");
}
1876
1877 /**
1878 * Generate ini file.
1879 * This contains sections:
1880 * - global env
1881 * - cctag/group envs
1882 * - debug settings
1883 * - forward queues, if any
1884 */
function gen_ini_file(instance)
{
    //TODO: Generate IPCKEY based on NDRX_NODEID, allow several instances on the same server
    instance.ini_text <-
/******************************************************************************/
@"[@global]
NDRX_CLUSTERISED=1
NDRX_CMDWAIT=1
NDRX_CONFIG="+instance.ndrx_conf+"/ndrxconfig."+instance.prefix+@".xml
NDRX_ULOG="+instance.log_rel+@"
NDRX_DMNLOG=${NDRX_ULOG}/ndrxd.log
NDRX_DPID=${NDRX_APPHOME}/tmp/ndrxd."+instance.prefix+@".pid
NDRX_DQMAX=100
NDRX_IPCKEY="+(instance.ipckey.tointeger()+(instance.machine.nodeid-1)*100)+@"
NDRX_LDBAL=0
NDRX_LEV=5
NDRX_LOG=${NDRX_ULOG}/xadmin.log
NDRX_MSGMAX=100
NDRX_MSGSIZEMAX=56000
NDRX_NODEID="+instance.machine.nodeid+@"
NDRX_QPATH="+M_wizzard.qpath+@"
NDRX_QPREFIX=/"+instance.prefix+@"
NDRX_RNDK="+rands(8)+@"
NDRX_SRVMAX="+ M_resources["MAXSERVERS"][0]+@"
NDRX_SVCMAX="+M_resources["MAXSERVICES"][0]+@"
NDRX_TOUT="+ M_resources["SCANUNIT"][0].tointeger()
    * M_resources["BLOCKTIME"][0].tointeger() +@"
NDRX_UBFMAXFLDS=16000
NDRX_LIBEXT="+M_wizzard.shared_lib_pfx+@"
# TODO, replace if not found already in env:
# If present in envfile, these will be imported bellow.
#FIELDTBLS=Exfields
#FLDTBLDIR=${NDRX_APPHOME}/ubftab
NDRX_RTSVCMAX="+instance.rtsvcmax+@"
NDRX_RTCRTMAX="+M_resources["MAXRTDATA"][0]+@"
";
    //Append env variables imported from the ENVFILE
    foreach(env, val in instance.envfile)
    {
        instance.ini_text+=format("%s=%s\n", env, val);
    }
/******************************************************************************/

    /* per group / cctag settings: */
    foreach(idx,igroup in M_sections["*GROUPS"].params)
    {
        local group = igroup[0];

        //Get our groups (only groups of this lmid that are marked for plot)
        if (!group.is_default &&
            get_val(group, "LMID", null)[0] == instance.lmid && group.plot)
        {
            local group_text ="[@global/"+group.name+"]";

            //Support for null switches...
            if ("tmsname" in group && "driverlib" in group)
            {
                group_text+=
@"
NDRX_XA_RES_ID="+group.rmid+@"
NDRX_XA_OPEN_STR="+group.openinfo+@"
NDRX_XA_CLOSE_STR="+group.closeinfo+@"
NDRX_XA_DRIVERLIB="+group.driverlib+@"
NDRX_XA_RMLIB="+group.rmlib+@"
NDRX_XA_LAZY_INIT=0";
/******************************************************************************/
            }
            else if ("xaswitchname" in group && "TUXEDO/QM"==group.xaswitchname)
            {
                group_text+=
@"
NDRX_XA_RES_ID="+group.rmid+@"
NDRX_XA_OPEN_STR="+group.openinfo+@"
NDRX_XA_CLOSE_STR="+group.closeinfo+@"
NDRX_XA_DRIVERLIB=libndrxxaqdisks."+M_wizzard.shared_lib_pfx+@"
NDRX_XA_RMLIB=libndrxxaqdisk."+M_wizzard.shared_lib_pfx+@"
NDRX_XA_LAZY_INIT=0";
/******************************************************************************/
            }
            else if ("xaswitchname" in group)
            {
                group_text+=
/******************************************************************************/
@"
NDRX_XA_RES_ID="+group.rmid+@"
NDRX_XA_OPEN_STR="+group.openinfo+@"
NDRX_XA_CLOSE_STR="+group.closeinfo+@"
# use built in switch resolver
NDRX_XA_DRIVERLIB=libndrxxatmsx."+M_wizzard.shared_lib_pfx+@"
NDRX_XA_RMLIB=-
NDRX_XA_LAZY_INIT=1
NDRX_XA_FLAGS=RECON:*:3:100";
/******************************************************************************/
            }

            if ("routed" in group)
            {
                group_text+="\nNDRX_RTGRP="+group.name;
            }

            //Plot any envfile imports...
            foreach(env, val in group.envfile)
            {
                group_text+=format("\n%s=%s", env, val);
            }

            instance.ini_text+="\n"+group_text+"\n";
        }
    }

    // Debugs per binary:
    // Also needs to decide is it relative path or full path...
    // And if it contains sub-folders (as with appdir), recursively we shall
    // create them.
    instance.ini_text+="\n"+
/******************************************************************************/
@"[@debug]
#* - goes for all binaries not listed bellow
*= ndrx=3 ubf=1 tp=3 threaded=l file=${NDRX_ULOG}/endurox.log
xadmin=file=${NDRX_ULOG}/xadmin.log
ndrxd=file=${NDRX_ULOG}/ndrxd.log
";
/******************************************************************************/

    //Plot server debug only once.
    local server_plotted={};
    foreach (idx, server in instance.servers) if (!server.is_default)
    {
        if (!(server.bin in server_plotted))
        {
            //Add debug entry.
            //TODO: no srvid for cpmsrv, tpevsrv
            instance.ini_text+=format("%s=file=${NDRX_ULOG}/%s.${NDRX_SVSRVID}.log\n",
                server.bin, server.bin);
            server_plotted[server.bin] <-true;
        }
    }

    if (instance.restin)
    {
        instance.ini_text+=format("restincl=file=${NDRX_ULOG}/restincl.rin1.log\n");
    }

    //Process the queues now
    foreach(idx,igroup in M_sections["*GROUPS"].params)
    {
        //Single group only supported...
        local group = igroup[0];

        //Get our groups
        if ( !group.is_default && (get_val(group, "LMID", null)[0] == instance.lmid)
            && "qspace" in group)
        {
            instance.ini_text+="\n"+
@"[@queue/"+group.name+@"]
# Review as necessary, see q.conf man page for details
@=svcnm=@,autoq=n,tries=3,waitinit=0,waitretry=30,waitretrymax=90,memonly=n,mode=fifo,workers=1
";
            //Add forward queues
            foreach (indx,qq in group.auto_queues)
            {
                instance.ini_text+=format("%s=autoq=y", indx);

                if (qq.workers!="1")
                {
                    instance.ini_text+=format(",workers=%s", qq.workers);
                }

                if (qq.trantime!="-1")
                {
                    instance.ini_text+=format(",txtout=%s", qq.trantime);
                }

                instance.ini_text+="\n";
            }
        }
    }

    //Add restin config, if WSL/JOLT seen
    if (instance.restin)
    {
        instance.ini_text+="\n"+
@"[@restin]
defaults={""errors"":""json2ubf"", ""conv"":""json2ubf""}

# Instance 1, see restincl manpage for the web service formats
[@restin/RIN1]
port=8080
ip=0.0.0.0
# invoke by: http://this.host:8080/SOME_SERVICE1
/SOME_SERVICE1={""svc"":""SOME_SERVICE1""}
/SOME_SERVICE2={""svc"":""SOME_SERVICE2""}
";
    }

    print("app.ini: [\n"+instance.ini_text+"]");
}
2078
2079 /**
2080 * Generate Enduro/X final XML configuration
2081 */
function gen_xml_file(instance)
{
    /* global settings */
    instance.xml_text <-
/******************************************************************************/
@"<?xml version=""1.0"" ?>
<endurox>
    <appconfig>
        <sanity>1</sanity>
        <brrefresh>5</brrefresh>
        <restart_min>1</restart_min>
        <restart_step>1</restart_step>
        <restart_max>5</restart_max>
        <restart_to_check>20</restart_to_check>
        <gather_pq_stats>Y</gather_pq_stats>
    </appconfig>
";
/******************************************************************************/

    //Process binary by binary...
    local servers_open = false;
    local tab="";
    foreach (idx, server in instance.servers)
    {
        if (server.is_default)
        {
            //Close the open <servers> section before a <defaults> block
            if (servers_open)
            {
                instance.xml_text+=
                    "    </servers>\n";
                tab="";
                servers_open=false;
            }

            instance.xml_text+=
                "    <defaults>\n";
        }
        else
        {
            if (!servers_open)
            {
                instance.xml_text+=
                    "    <servers>\n";
                servers_open=true;
                tab="    ";
            }

            instance.xml_text+=
                tab+"    <server name=\""+server.bin+"\">\n";
        }

        //Emit only the settings actually present on this server/defaults entry
        if ("autokill" in server)
        {
            instance.xml_text+=
                tab+"        <autokill>"+server.autokill+"</autokill>\n";
        }

        if ("start_max" in server)
        {
            instance.xml_text+=
                tab+"        <start_max>"+server.start_max+"</start_max>\n";
        }

        if ("pingtime" in server)
        {
            instance.xml_text+=
                tab+"        <pingtime>"+server.pingtime+"</pingtime>\n";
        }

        if ("ping_max" in server)
        {
            instance.xml_text+=
                tab+"        <ping_max>"+server.ping_max+"</ping_max>\n";
        }

        if ("end_max" in server)
        {
            instance.xml_text+=
                tab+"        <end_max>"+server.end_max+"</end_max>\n";
        }

        if ("killtime" in server)
        {
            instance.xml_text+=
                tab+"        <killtime>"+server.killtime+"</killtime>\n";
        }

        if ("cctag" in server)
        {
            //"/" denotes the root global settings -> empty cctag element
            if (server.cctag=="/")
            {
                instance.xml_text+=
                    tab+"        <cctag/>\n";
            }
            else
            {
                instance.xml_text+=
                    tab+"        <cctag>"+server.cctag+"</cctag>\n";
            }
        }

        if ("min" in server)
        {
            instance.xml_text+=
                tab+"        <min>"+server.min+"</min>\n";
        }

        if ("max" in server)
        {
            instance.xml_text+=
                tab+"        <max>"+server.max+"</max>\n";
        }

        if ("srvid" in server)
        {
            instance.xml_text+=
                tab+"        <srvid>"+server.srvid+"</srvid>\n";
        }

        if ("env" in server)
        {
            instance.xml_text+=
                tab+"        <env>"+server.env+"</env>\n";
        }

        if ("rqaddr" in server)
        {
            instance.xml_text+=
                tab+"        <rqaddr>"+server.rqaddr+"</rqaddr>\n";
        }

        if ("respawn" in server)
        {
            instance.xml_text+=
                tab+"        <respawn>"+server.respawn+"</respawn>\n";
        }

        if ("mindispatchthreads" in server)
        {
            instance.xml_text+=
                tab+"        <mindispatchthreads>"+server.mindispatchthreads+"</mindispatchthreads>\n";
        }

        if ("maxdispatchthreads" in server)
        {
            instance.xml_text+=
                tab+"        <maxdispatchthreads>"+server.maxdispatchthreads+"</maxdispatchthreads>\n";
        }

        if ("sysopt" in server)
        {
            instance.xml_text+=
                tab+"        <sysopt>"+server.sysopt+"</sysopt>\n";
        }

        if ("appopt" in server && server.appopt!="")
        {
            instance.xml_text+=
                tab+"        <appopt>"+server.appopt+"</appopt>\n";
        }

        //Per-server thread stack size is passed via env variable
        if (!server.is_default && "threadstacksize" in server)
        {
            instance.xml_text+=
                tab+@"        <envs><env name=""NDRX_THREADSTACKSIZE"">"+server.threadstacksize+"</env></envs>\n";
        }

        if (server.is_default)
        {
            instance.xml_text+=
                tab+"    </defaults>\n";
        }
        else
        {
            instance.xml_text+=
                tab+"    </server>\n";
        }
    }

    //Terminate the servers if was open..
    if (servers_open)
    {
        instance.xml_text+=
            "    </servers>\n";
        tab="";
        servers_open=false;
    }

    //Write client (restin) if needed.
    if (instance.restin)
    {
        instance.xml_text+=
@"    <clients>
        <client cmdline=""restincl"">
            <exec tag=""RESTIN"" autostart=""Y"" subsect=""RIN1"" cctag=""RIN1"" log=""${NDRX_ULOG}/restincl.rin1.log""/>
        </client>
    </clients>
";
    }

    //Prepare services.
    if (instance.rtservices > 0)
    {
        //Export each service entry only once
        local service_once={};

        /* global settings */
        instance.xml_text+="    <services>\n";

        //OK, plot services section.
        foreach(idx,service in M_sections["*SERVICES"].order)
        {
            local svcok = false;

            if (!service.is_default)
            {
                //NOTE: removed leftover dead code here
                //("if (service_once) local range = {};")
                local service_srvgrp = get_val(service, "SRVGRP", "");

                //Service may have no group -> affect all
                if (service_srvgrp =="")
                {
                    svcok=true;
                }
                else
                {
                    //Only export services of groups hosted by this lmid
                    local group_lmid = get_val(
                        M_sections["*GROUPS"].params[service_srvgrp[0]][0], "LMID", null)[0];

                    if (group_lmid == instance.lmid)
                    {
                        svcok=true;
                    }
                }
            }
            else
            {
                svcok=true;
            }

            if (svcok)
            {
                if (service.name in service_once)
                {
                    //Skip this, already exported
                    continue;
                }

                service_once[service.name]<-true;

                if (service.is_default)
                {
                    instance.xml_text+="        <defaults";
                }
                else
                {
                    instance.xml_text+="        <service svcnm=\""+service.name+"\"";
                }

                if ("PRIO" in service.keywords)
                {
                    instance.xml_text+=" prio=\""+service.keywords["PRIO"][0]+"\"";
                }

                if ("ROUTING" in service.keywords)
                {
                    instance.xml_text+=" routing=\""+service.keywords["ROUTING"][0]+"\"";
                }

                if ("AUTOTRAN" in service.keywords)
                {
                    instance.xml_text+=" autotran=\""+service.keywords["AUTOTRAN"][0]+"\"";
                }

                if ("TRANTIME" in service.keywords)
                {
                    instance.xml_text+=" trantime=\""+service.keywords["TRANTIME"][0]+"\"";
                }

                //close the tag
                instance.xml_text+="/>\n";
            }
        }

        instance.xml_text+="    </services>\n";
    }

    //Prepare routing
    if ("*ROUTING" in M_sections)
    {
        local rt_started=false;

        foreach(idx,route in M_sections["*ROUTING"].params)
        {
            //Single routing supported only (afaik Tuxedo also has requires single route)
            route = route[0];
            local field = get_val(route, "FIELD", null)[0];
            local fieldtype = get_val(route, "FIELDTYPE", ["STRING"])[0];
            local ranges = get_val(route, "RANGES", null)[0];
            local buftype = get_val(route, "BUFTYPE", null)[0];

            //Enduro/X routing works on UBF buffers only
            if (buftype!="FML32" && buftype!="FML")
            {
                error(format("Ignoring route [%s] as buffer type [%s] not supported",
                    route.name, buftype));
                continue;
            }

            if (!rt_started)
            {
                instance.xml_text+="    <routing>\n";
                rt_started=true;
            }

            instance.xml_text+=
@"        <route routing="""+route.name+@""">
            <field>"+field+@"</field>
            <ranges>"+ranges+@"</ranges>
            <buftype>UBF</buftype>
        </route>
";
        }

        if (rt_started)
        {
            instance.xml_text+="    </routing>\n";
        }
    }

    instance.xml_text+="</endurox>\n";

    print("ndrxconfig.ini: [\n"+instance.xml_text+"]");
}
2420
2421 /**
2422 * Write configuration files to the disk
2423 */
function write_files()
{
    //Write a single text file and register it in the generated files list;
    //throws with a descriptive message on failure
    local write_out = function(fname, text)
    {
        try
        {
            local out = file(fname,"w");
            out.writes(text);
            out.close();
        }
        catch (e)
        {
            print(e);
            throw(format("Cannot write [%s] file", fname));
        }

        M_files_gen.append(fname);
    };

    //Check does the given configuration file already exist, report if so
    local chk_exists = function(fname)
    {
        if (fileexists(fname))
        {
            //print file name
            print_stdout("Configuration file exists: ["+fname+"]\n");
            return true;
        }

        return false;
    };

    local config_exists = false;

    //Verify files (ask for overwrite, if any missing)
    foreach (idx, instance in M_instances)
    {
        if (chk_exists(instance.set_file))
        {
            config_exists = true;
        }

        if (chk_exists(instance.ini_file))
        {
            config_exists = true;
        }

        if (chk_exists(instance.xml_file))
        {
            config_exists = true;
        }
    }

    //Ask for confirmation, if needed (unless -y was given)
    if (config_exists
        && M_opt_y != "1"
        && 0==chk_confirm("Really overwrite Enduro/X configuration files?"))
    {
        print("Aborted configuration file generation");
        return;
    }

    //TODO: Create folders (if any missing, log created)
    //Firstly transfer the hashmap to array, sort the array, so that shortest
    //folders come first as we are going to create them (if not exists)
    local dirs = [];

    foreach (idx, dir in M_folder_gen)
    {
        dir.name<-idx;
        dirs.append(dir);
    }

    //sort: shortest paths (parents) first
    dirs.sort(@(a,b) a.name.len() <=> b.name.len());

    //Check dirs...
    foreach (idx, dir in dirs)
    {
        print("Creating directory ["+dir.name+"]...");
        mkdir(dir.name);
        dir.created<-true;
    }

    //Write files
    foreach (idx, instance in M_instances)
    {
        write_out(instance.set_file, instance.set_text);

        //Give chmod to execute the file
        chmod(instance.set_file, "755");

        write_out(instance.ini_file, instance.ini_text);
        write_out(instance.xml_file, instance.xml_text);
    }
}
2534
/**
 * General validation of the parsed UBB configuration.
 * Currently checks that every non-default *GROUPS entry carries
 * a unique GRPNO; throws a descriptive error on the first duplicate.
 */
function validate()
{
    //GRPNO values seen so far, used as a set
    local seen_grpnos = {};

    //Each real (non-default) group must have a unique group number
    foreach(idx, group in M_sections["*GROUPS"].order) if (!group.is_default)
    {
        local grpno = get_val(group, "GRPNO", null)[0];

        if (grpno in seen_grpnos)
        {
            throw("Duplicate GRPNO="+grpno+" for group ["+group.name+"]");
        }

        seen_grpnos[grpno]<-"1";
    }
}
/**
 * Generate Enduro/X configs. Callback from ubb2ex. At this point
 * all configs are parsed.
 *
 * For every non-default *MACHINES entry this: assigns node ids, resolves
 * application/config/log directories (absolute or relative against
 * ${NDRX_APPHOME}), applies minimum resource defaults, schedules directory
 * creation and renders the set/ini/xml file contents. Files are actually
 * written by write_files() unless M_opt_n=="1".
 *
 * Fix: the MAXSERVERS default block appeared twice verbatim; the second
 * copy was dead code (the first makes its condition false) and is removed.
 *
 * @param arg not used
 */
function ex_generate(arg)
{
    local nodeid=1;

    //Dump the effective command line options
    print(format("M_opt_n=[%s]", M_opt_n));
    print(format("M_opt_y=[%s]", M_opt_y));
    print(format("M_opt_L=[%s]", M_opt_L));
    print(format("M_opt_A=[%s]", M_opt_A));
    print(format("M_opt_P=[%s]", M_opt_P));
    print(format("M_opt_O=[%s]", M_opt_O));

    M_port_mul = M_opt_O.tointeger();

    //Normalize the M_opt_P path prefix to an absolute path
    if ("./"==substr(M_opt_P, 0, 2))
    {
        M_opt_P = getcwd() + "/" +substr(M_opt_P, 2);
    }
    else if (M_opt_P!="" && "/"!=substr(M_opt_P, 0, 1))
    {
        //This is relative too
        M_opt_P = getcwd() + "/" + M_opt_P;
    }

    print(format("M_opt_P=[%s]", M_opt_P));

    init();
    validate();

    //Open output objects for each of the machine
    if (!("*MACHINES" in M_sections))
    {
        print("No machines defined");
        return;
    }

    //Assign node ids and build the LMID -> machine mapping
    foreach(idx,val in M_sections["*MACHINES"].params) if (!val[0].is_default)
    {
        val[0].nodeid<-nodeid;
        val[0].networked<-false;
        nodeid++;

        //Map LMID:MACHINE
        local lmid = get_val(val[0], "LMID", null)[0];
        M_lmidmachines[lmid]<-val[0];
    }

    //Assign networking identifiers
    prep_networking();

    //Generate each node
    foreach(idx,val in M_sections["*MACHINES"].params)
    {
        local machine = val[0];

        if (machine.is_default)
        {
            continue;
        }

        local instance = {};
        instance.name <- machine.name;
        instance.lmid <- machine.keywords.LMID[0];

        //If M_opt_L is set, only that logical machine is processed
        if (M_opt_L!="" && instance.lmid!=M_opt_L)
        {
            print("Skipping node ["+instance.lmid+"] as -l present");
            continue;
        }

        M_instances[machine.name]<-instance;

        instance.restin <-false;
        instance.xa_used<-false;
        print("Machine: "+instance.name);

        local ubb_appdir = get_val(val[0], "APPDIR", null)[0];
        local ubb_tuxconfig = get_val(val[0], "TUXCONFIG", null)[0];

        //Apply path prefix to TUXCONFIG, if configured
        if (M_opt_P!="" && M_opt_P!="/")
        {
            ubb_tuxconfig = M_opt_P + ubb_tuxconfig;
        }

        local ubb_appdir_add = [];

        if (null!=ubb_appdir.find(":"))
        {
            //Split the ubb_appdir and use first part for ubb_appdir purposes.
            //the other parts later would be checked for relative/full type
            //and would be filled to ubb_appdir_add variable.

            local appdirs = split(ubb_appdir, ":");
            ubb_appdir = appdirs[0];

            local len = appdirs.len();

            for (local i=1; i<len; i++)
            {
                local add = {};

                if (M_opt_P!="" && M_opt_P!="/")
                {
                    add.orgpath<- (M_opt_P + appdirs[i]);
                }
                else
                {
                    add.orgpath<- appdirs[i];
                }

                ubb_appdir_add.append(add);
            }
        }

        //Apply path prefix to APPDIR, if configured
        if (M_opt_P!="" && M_opt_P!="/")
        {
            ubb_appdir = M_opt_P + ubb_appdir;
        }

        //Lowercase LMID is used as suffix for the generated file names
        instance.prefix <- machine.keywords["LMID"][0].tolower();

        instance.app_home <- dircommon(ubb_appdir, ubb_tuxconfig);
        instance.use_rel <- false;

        if (instance.app_home==M_opt_P)
        {
            //this is absolute path
            instance.app_home = ubb_appdir;
            instance.app_bin <- ubb_appdir;
            instance.app_conf <- dirname(ubb_tuxconfig);

            instance.ndrx_conf<- instance.app_conf;
            instance.ndrx_bin <- instance.app_bin;

            instance.set_file <-format("%s/set%s", instance.app_conf, instance.prefix);
            instance.xml_file <-format("%s/ndrxconfig.%s.xml", instance.app_conf, instance.prefix);
            instance.ini_file <-format("%s/app.%s.ini", instance.app_conf, instance.prefix);
        }
        else
        {
            if (instance.app_home=="")
            {
                //Exception case, not nice..
                instance.app_home="/";
            }

            //bin and conf folders become relative to the common app home
            instance.app_bin <- dirdiff(ubb_appdir, instance.app_home);

            instance.app_conf <- dirdiff(dirname(ubb_tuxconfig),
                instance.app_home);

            instance.use_rel = true;

            //Prepare further folders

            local conf_full = instance.app_home;
            if (instance.app_conf!="")
            {
                instance.ndrx_conf <- "${NDRX_APPHOME}/"+instance.app_conf;
                conf_full+="/"+instance.app_conf;
            }
            else
            {
                instance.ndrx_conf <- "${NDRX_APPHOME}";
            }

            if (instance.app_bin!="")
            {
                instance.ndrx_bin <- "${NDRX_APPHOME}/"+instance.app_bin;
            }
            else
            {
                instance.ndrx_bin <- "${NDRX_APPHOME}";
            }

            instance.set_file <-format("%s/set%s", conf_full, instance.prefix);
            instance.xml_file <-format("%s/ndrxconfig.%s.xml", conf_full, instance.prefix);
            instance.ini_file <-format("%s/app.%s.ini", conf_full, instance.prefix);
        }

        //Process additional folders
        foreach(dix, folder in ubb_appdir_add)
        {
            local common = dircommon(folder.orgpath, instance.app_home);

            //Schedule folder anyway
            schedule_directory(folder.orgpath);

            if (common!=instance.app_home)
            {
                //Use full path ..
                folder.path <- folder.orgpath;
                folder.use_rel <- false;
            }
            else
            {
                folder.path <- dirdiff(folder.orgpath, instance.app_home);
                folder.use_rel <- true;
            }
        }

        //Additional folders to be added
        instance.appdir_add <- ubb_appdir_add;
        instance.nodeid <- machine.nodeid;

        //Strip the 0x/0X prefix off IPCKEY, if present
        if (regexp("^0x.*|^0X.*").match(M_resources["IPCKEY"][0]))
        {
            instance.ipckey <- substr(M_resources["IPCKEY"][0], 2);
        }
        else
        {
            instance.ipckey <- M_resources["IPCKEY"][0];
        }

        //Resolve logging directory also... full or relative against the home
        //TODO: Add support for TLOGDEVICE
        //Move processing to the function, which would return full path
        //Or relative path, or preferred default.
        local log_dir = get_val(val[0], "ULOGPFX", [""])[0];

        if (log_dir=="")
        {
            instance.log_full <- instance.app_home + "/log";
            instance.log_rel <- "${NDRX_APPHOME}/log";
        }
        else
        {
            //Add prefix
            if (M_opt_P!="" && M_opt_P!="/")
            {
                log_dir = M_opt_P + log_dir;
            }

            //Just take dirname
            log_dir = dirname(log_dir);

            if (log_dir==instance.app_home)
            {
                //Use log anyway
                instance.log_full <- instance.app_home + "/log";
                instance.log_rel <- "${NDRX_APPHOME}/log";
            }
            else
            {
                local common = dircommon(log_dir, instance.app_home);
                local diff = dirdiff(log_dir, instance.app_home);

                if (common!=instance.app_home)
                {
                    //use full path to logs...
                    instance.log_full <- log_dir;
                    instance.log_rel <- log_dir;
                }
                else
                {
                    //Relative to app home with log suffix...
                    if (diff!="")
                    {
                        instance.log_full <- instance.app_home+"/"+diff;
                        instance.log_rel <- "${NDRX_APPHOME}/"+diff;
                    }
                    else
                    {
                        instance.log_full <- instance.app_home;
                        instance.log_rel <- "${NDRX_APPHOME}";
                    }
                }
            }
        }

        //Dump some config
        print("use_rel = "+instance.use_rel);
        print("nodeid = "+instance.nodeid);
        print("prefix = "+instance.prefix);
        print("app_home = "+instance.app_home);
        print("app_conf = "+instance.app_conf);
        print("app_bin = "+instance.app_bin);

        print("set_file = "+instance.set_file);
        print("ini_file = "+instance.ini_file);
        print("xml_file = "+instance.xml_file);

        //Load Tuxedo defaults
        if (!("BLOCKTIME" in M_resources))
        {
            M_resources["BLOCKTIME"]<-[];
            M_resources["BLOCKTIME"].append("6");
        }

        if (!("SCANUNIT" in M_resources))
        {
            M_resources["SCANUNIT"]<-[];
            M_resources["SCANUNIT"].append("10");
        }

        //Use Enduro/X default 20K
        if (!("MAXSERVICES" in M_resources) || M_resources["MAXSERVICES"][0].tointeger() < 20000)
        {
            M_resources["MAXSERVICES"]<-[];
            M_resources["MAXSERVICES"].append("20000");
        }

        //Use Enduro/X default 10K
        if (!("MAXSERVERS" in M_resources) || M_resources["MAXSERVERS"][0].tointeger() < 10000)
        {
            M_resources["MAXSERVERS"]<-[];
            M_resources["MAXSERVERS"].append("10000");
        }

        //Default transaction time
        if (!("MAXTRANTIME" in M_resources))
        {
            M_resources["MAXTRANTIME"]<-[];
            M_resources["MAXTRANTIME"].append("2147483647");
        }

        if (!("MAXRTDATA" in M_resources) || M_resources["MAXRTDATA"][0].tointeger() < 102400)
        {
            M_resources["MAXRTDATA"]<-[];
            M_resources["MAXRTDATA"].append("102400");
        }

        //Calculate number of routing services, if any.
        instance.rtservices<-get_routing_services(instance);

        //resources setting
        instance.rtsvcmax<-get_routing_services(instance);

        //Use default, if not any set.
        if (instance.rtsvcmax == 0)
        {
            instance.rtsvcmax = 1000;
        }
        else
        {
            //For performance reasons use more.
            instance.rtsvcmax *=2;
        }

        //Schedule system folders
        schedule_directory(instance.app_home);
        schedule_directory(instance.log_full);
        schedule_directory(instance.app_home+"/tmp");

        if (instance.use_rel)
        {
            schedule_directory(instance.app_home+"/"+instance.app_bin);
            schedule_directory(instance.app_home+"/"+instance.app_conf);
        }
        else
        {
            schedule_directory(instance.app_conf);
        }

        instance.machine<-machine;

        //Process groups
        prep_groups(instance);

        //Prepare ranges
        prep_free_ranges(instance);

        //Build server listings
        prep_servers(instance);

        //Load the instance environment
        prep_envfile(instance, instance.machine);

        //Generate file contents:
        gen_set_file(instance);
        gen_ini_file(instance);
        gen_xml_file(instance);
    }

    //Write files out only when M_opt_n is not set to "1"
    if (M_opt_n!="1")
    {
        write_files();
    }
}
2946
/**
 * Cleanup call when script have failed.
 * Deletes files recorded in M_files_gen, then removes directories from
 * M_folder_gen that were marked as created, deepest paths first so that
 * children are removed before their parents. Errors are reported only.
 * @param arg not used
 */
function ex_cleanup(arg)
{
    print("File cleanup...");

    //Remove the files generated so far
    foreach (idx, genfile in M_files_gen)
    {
        try
        {
            print("Removing ["+genfile+"]");
            unlink(genfile);
        }
        catch (e)
        {
            error(e);
        }
    }

    print("Folder cleanup...");

    //Collect generated folders into an array, keeping the path in .name
    local folders = [];

    foreach (path, folder in M_folder_gen)
    {
        folder.name<-path;
        folders.append(folder);
    }

    //Longest path first -> sub-folders go before their parents
    folders.sort(@(a,b) b.name.len() <=> a.name.len());

    foreach (idx, folder in folders)
    {
        //Only remove folders which this run actually created
        if (!("created" in folder))
        {
            continue;
        }

        print("Removing directory ["+folder.name+"]...");

        try
        {
            rmdir(folder.name);
        }
        catch (e)
        {
            //Just print error..
            error(e);
        }
    }
}