index
int64
0
0
repo_id
stringclasses
93 values
file_path
stringlengths
15
128
content
stringlengths
14
7.05M
0
ALLM/M3/octopus/src
ALLM/M3/octopus/src/species/set.hpp
#ifndef PSEUDO_SET_HPP #define PSEUDO_SET_HPP /* Copyright (C) 2018 Xavier Andrade This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <fstream> #include <map> #include <string> #include "element.hpp" #include "detect_format.hpp" #include "psml.hpp" #include "psp8.hpp" #include "qso.hpp" #include "upf1.hpp" #include "upf2.hpp" #include <dirent.h> #include <iostream> namespace pseudopotential { class set { private: struct element_values { std::string file_path_; int lmax_; int llocal_; }; typedef std::map<std::string, element_values> element_map; element_map map_; bool automatic_; public: set(const std::string &dirname) { DIR *dir = opendir(dirname.c_str()); struct dirent *ent; while ((ent = readdir(dir)) != NULL) { const std::string filename(ent->d_name); const std::string fullname = dirname + "/" + filename; if (filename == "." || filename == "..") continue; pseudopotential::format format = detect_format(fullname); if (format == pseudopotential::format::FILE_NOT_FOUND || format == pseudopotential::format::UNKNOWN) continue; // we open the pseudo just to get the species symbol, this could be done // in a better way pseudopotential::base *pseudo = NULL; std::string symbol; switch (format) { case pseudopotential::format::QSO: pseudo = new pseudopotential::qso(fullname); break; case pseudopotential::format::UPF1: pseudo = new pseudopotential::upf1(fullname, /*uniform_grid = */ true); break; case pseudopotential::format::UPF2: pseudo = new pseudopotential::upf2(fullname, /*uniform_grid = */ true); break; case pseudopotential::format::PSML: pseudo = new pseudopotential::psml(fullname, /*uniform_grid = */ true); break; case pseudopotential::format::PSP8: pseudo = new pseudopotential::psp8(fullname); break; default: // get the symbol from the name for (int ii = 0; ii < 3; ii++) { char cc = filename[ii]; bool is_letter = (cc >= 'a' && cc <= 'z') || (cc >= 'A' && cc <= 'Z'); if (!is_letter) break; symbol.push_back(cc); } } if (pseudo) symbol = pseudo->symbol(); delete pseudo; element_values vals; vals.file_path_ = fullname; vals.lmax_ = INVALID_L; vals.llocal_ = INVALID_L; map_[symbol] = vals; } std::ifstream defaults_file((dirname + "/set_defaults").c_str()); if (defaults_file) { std::string line; // first line are comments getline(defaults_file, line); while (true) { std::string symbol; defaults_file >> symbol; if (defaults_file.eof()) break; if (has(symbol)) { int z; std::string fname; defaults_file >> fname; defaults_file >> z; defaults_file >> map_[symbol].lmax_; defaults_file >> map_[symbol].llocal_; } getline(defaults_file, line); } defaults_file.close(); } closedir(dir); } bool has(const element &el) const { return map_.find(el.symbol()) != map_.end(); } const std::string &file_path(const element &el) const { return map_.at(el.symbol()).file_path_; } int lmax(const element &el) const { return map_.at(el.symbol()).lmax_; } int llocal(const element 
&el) const { return map_.at(el.symbol()).llocal_; } // Iterator interface class iterator { private: element_map::iterator map_it_; public: iterator(const element_map::iterator &map_it) : map_it_(map_it) {} iterator &operator++() { ++map_it_; return *this; } friend bool operator!=(const iterator &a, const iterator &b) { return a.map_it_ != b.map_it_; } element operator*() { return element(map_it_->first); } }; iterator begin() { return iterator(map_.begin()); } iterator end() { return iterator(map_.end()); } }; } // namespace pseudopotential #endif
0
ALLM/M3/octopus/src
ALLM/M3/octopus/src/species/share_directory.hpp
#ifndef PSEUDO_SHARE_DIRECTORY_HPP #define PSEUDO_SHARE_DIRECTORY_HPP /* Copyright (C) 2018 Xavier Andrade This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <string> #include "config.h" namespace pseudopotential { class share_directory { public: static void set(const std::string &dir) { directory() = dir; } static std::string get() { if (directory().size() != 0) return directory(); return SHARE_DIR; } private: static std::string &directory() { static std::string directory_; return directory_; } }; } // namespace pseudopotential #endif // Local Variables: // mode: c++ // coding: utf-8 // End:
0
ALLM/M3/octopus/src
ALLM/M3/octopus/src/species/share_directory_low.cc
#include "share_directory.hpp" #include "string_f.h" /* fortran <-> c string compatibility issues */ #include "fortran_types.h" extern "C" void FC_FUNC_(share_directory_set, SHARE_DIRECTORY_SET)(STR_F_TYPE dir_f STR_ARG1) { char *dir_c; TO_C_STR1(dir_f, dir_c); pseudopotential::share_directory::set(dir_c); free(dir_c); }
0
ALLM/M3/octopus/src
ALLM/M3/octopus/src/species/spline.cc
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2013, Lawrence Livermore National Security, LLC. // qb@ll: Qbox at Lawrence Livermore // // This file is part of qb@ll. // // Produced at the Lawrence Livermore National Laboratory. // Written by Erik Draeger (draeger1@llnl.gov) and Francois Gygi // (fgygi@ucdavis.edu). Based on the Qbox code by Francois Gygi Copyright (c) // 2008 LLNL-CODE-635376. All rights reserved. // // qb@ll is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details, in the file COPYING in the // root directory of this distribution or <http://www.gnu.org/licenses/>. // #include <config.h> /******************************************************************************* * * spline.c * ******************************************************************************/ #include "spline.h" #include <assert.h> // ewd DEBUG #include <iostream> void spline(const double *x, const double *y, int n, double yp1, double ypn, double *y2) { int i, k; double p, qn, sig, un, *u = new double[n]; if (yp1 >= 1.e30) { y2[0] = 0.0; u[0] = 0.0; } else { y2[0] = -0.5; assert(x[1] - x[0] > 0.0); u[0] = (3.0 / (x[1] - x[0])) * ((y[1] - y[0]) / (x[1] - x[0]) - yp1); } for (i = 1; i < n - 1; i++) { assert(x[i + 1] > x[i]); sig = (x[i] - x[i - 1]) / (x[i + 1] - x[i - 1]); p = sig * y2[i - 1] + 2.0; y2[i] = (sig - 1.0) / p; u[i] = (6.0 * ((y[i + 1] - y[i]) / (x[i + 1] - x[i]) - (y[i] - y[i - 1]) / (x[i] - x[i - 1])) / (x[i + 1] - x[i - 1]) - sig * u[i - 1]) / p; } if (ypn >= 1.e30) { qn = 0.0; un = 0.0; } else { qn = 0.5; un = (3.0 / (x[n - 1] - x[n - 2])) * (ypn - (y[n - 1] - y[n - 2]) / (x[n - 1] - x[n - 2])); } y2[n - 1] = (un - qn * u[n - 2]) / (qn * y2[n - 2] + 1.0); for (k = n - 2; k >= 0; k--) { y2[k] = y2[k] * y2[k + 1] + u[k]; } delete[] u; } void splint(const double *xa, const double *ya, const double *y2a, int n, double x, double *y) { int k, khi, klo; double a, b, h; klo = 0; khi = n - 1; while (khi - klo > 1) { k = (khi + klo) / 2; if (xa[k] > x) khi = k; else klo = k; } // ewd DEBUG if (khi > n - 1) { std::cout << "ERROR.SPLINT: khi = " << khi << ", n = " << n << std::endl; return; } if (klo > n - 1) { std::cout << "ERROR.SPLINT: klo = " << klo << ", n = " << n << std::endl; return; } h = xa[khi] - xa[klo]; assert(h > 0.0); a = (xa[khi] - x) / h; b = (x - xa[klo]) / h; *y = a * ya[klo] + b * ya[khi] + h * h * (1.0 / 6.0) * ((a * a * a - a) * y2a[klo] + (b * b * b - b) * y2a[khi]); } void splintd(const double *xa, const double *ya, const double *y2a, int n, double x, double *y, double *dy) { int k, khi, klo; double a, b, h; klo = 0; khi = n - 1; while (khi - klo > 1) { k = (khi + klo) / 2; if (xa[k] > x) khi = k; else klo = k; } h = xa[khi] - xa[klo]; assert(h > 0.0); a = (xa[khi] - x) / h; b = (x - xa[klo]) / h; *y = a * ya[klo] + b * ya[khi] + h * h * (1.0 / 6.0) * ((a * a * a - a) * y2a[klo] + (b * b * b - b) * y2a[khi]); *dy = (ya[khi] - ya[klo]) / h + h * (((1.0 / 6.0) - 0.5 * a * a) * y2a[klo] + (0.5 * b * b - (1.0 / 6.0)) * y2a[khi]); }
0
ALLM/M3/octopus/src
ALLM/M3/octopus/src/species/spline.h
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2013, Lawrence Livermore National Security, LLC. // qb@ll: Qbox at Lawrence Livermore // // This file is part of qb@ll. // // Produced at the Lawrence Livermore National Laboratory. // Written by Xavier Andrade (xavier@llnl.gov), Erik Draeger // (draeger1@llnl.gov) and Francois Gygi (fgygi@ucdavis.edu). // Based on the Qbox code by Francois Gygi Copyright (c) 2008 // LLNL-CODE-635376. All rights reserved. // // qb@ll is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details, in the file COPYING in the // root directory of this distribution or <http://www.gnu.org/licenses/>. // #include <config.h> /******************************************************************************* * * spline.h * ******************************************************************************/ #ifndef SPLINE_H #define SPLINE_H #include <vector> #define SPLINE_FLAT_BC 0.0 /* Flat boundary condition (y'=0) */ #define SPLINE_NATURAL_BC 1.e31 /* Natural boundary condition (Y"=0) */ void spline(const double *x, const double *y, int n, double yp1, double ypn, double *y2); void splint(const double *xa, const double *ya, const double *y2a, int n, double x, double *y); void splintd(const double *xa, const double *ya, const double *y2a, int n, double x, double *y, double *dy); class Spline { public: Spline() {} void fit(const double *x, double *y, int n, double yp1, double ypn) { x_.resize(n); y_.resize(n); y2_.resize(n); for (int ii = 0; ii < n; ii++) { x_[ii] = x[ii]; y_[ii] = y[ii]; } spline(x, y, n, yp1, ypn, &y2_[0]); } double value(const double &x) const { double y; splint(&x_[0], &y_[0], &y2_[0], x_.size(), x, &y); return y; } void derivative(const double &x, double &y, double &dy) const { splintd(&x_[0], &y_[0], &y2_[0], x_.size(), x, &y, &dy); } private: std::vector<double> x_; std::vector<double> y_; std::vector<double> y2_; }; #endif // Local Variables: // mode: c++ // End:
0
ALLM/M3/octopus/src
ALLM/M3/octopus/src/species/upf.hpp
#ifndef PSEUDO_UPF_HPP #define PSEUDO_UPF_HPP /* Copyright (C) 2018 Xavier Andrade This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <cassert> #include <cmath> #include <fstream> #include <iostream> #include <sstream> #include <vector> #include "anygrid.hpp" namespace pseudopotential { class upf : public pseudopotential::anygrid { public: upf(bool uniform_grid) : pseudopotential::anygrid(uniform_grid) {} double d_ij(int l, int i, int j) const { assert(l >= 0 && l <= lmax_); assert(i >= 0 && i <= nchannels()); assert(j >= 0 && j <= nchannels()); return dij_[l * nchannels() * nchannels() + i * nchannels() + j]; } protected: int llocal() const { return llocal_; } int nchannels() const { return nchannels_; } double &d_ij(int l, int i, int j) { assert(l >= 0 && l <= lmax_); assert(i >= 0 && i <= nchannels()); assert(j >= 0 && j <= nchannels()); return dij_[l * nchannels() * nchannels() + i * nchannels() + j]; } void extrapolate_first_point(std::vector<double> &function_) const { assert(function_.size() >= 4); assert(grid_.size() >= 4); double x1 = grid_[1]; double x2 = grid_[2]; double x3 = grid_[3]; double f1 = function_[1]; double f2 = function_[2]; double f3 = function_[3]; // obtained from: // http://www.wolframalpha.com/input/?i=solve+%7Bb*x1%5E2+%2B+c*x1+%2B+d+%3D%3D+f1,++b*x2%5E2+%2B+c*x2+%2B+d+%3D%3D+f2,+b*x3%5E2+%2B+c*x3+%2B+d+%3D%3D+f3+%7D++for+b,+c,+d function_[0] = f1 * x2 * x3 * (x2 - x3) + f2 * x1 * x3 * (x3 - x1) + f3 * x1 * x2 * (x1 - x2); function_[0] /= (x1 - x2) * (x1 - x3) * (x2 - x3); } std::vector<double> dij_; int llocal_; int start_point_; int nchannels_; }; } // namespace pseudopotential #endif
0
ALLM/M3/octopus/src
ALLM/M3/octopus/src/species/upf1.hpp
#ifndef PSEUDO_UPF1_HPP #define PSEUDO_UPF1_HPP /* Copyright (C) 2018 Xavier Andrade This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <cassert> #include <cmath> #include <fstream> #include <iostream> #include <sstream> #include <vector> #include "base.hpp" #include "upf.hpp" #include <rapidxml.hpp> #include "element.hpp" namespace pseudopotential { class upf1 : public pseudopotential::upf { public: upf1(const std::string &filename, bool uniform_grid = false) : pseudopotential::upf(uniform_grid), file_(filename.c_str()), buffer_((std::istreambuf_iterator<char>(file_)), std::istreambuf_iterator<char>()) { filename_ = filename; buffer_.push_back('\0'); doc_.parse<0>(&buffer_[0]); std::istringstream header(doc_.first_node("PP_HEADER")->value()); std::string line; int version_number; header >> version_number; getline(header, line); header >> symbol_; symbol_ = element::trim(symbol_); getline(header, line); std::string pseudo_type; header >> pseudo_type; getline(header, line); // nlcc tag getline(header, line); getline(header, xc_functional_); header >> zval_; getline(header, line); // total energy getline(header, line); // cutoff getline(header, line); // skip lmax getline(header, line); int size; header >> size; getline(header, line); header >> nwavefunctions_; header >> nprojectors_; getline(header, line); std::transform(pseudo_type.begin(), pseudo_type.end(), pseudo_type.begin(), ::tolower); if (pseudo_type == "nc" || pseudo_type == "sl") { type_ = pseudopotential::type::KLEINMAN_BYLANDER; } else if (pseudo_type == "uspp") { throw status::UNSUPPORTED_TYPE_ULTRASOFT; } else if (pseudo_type == "paw") { throw status::UNSUPPORTED_TYPE_PAW; } else { throw status::UNSUPPORTED_TYPE; } // Read the grid { rapidxml::xml_node<> *node = doc_.first_node("PP_MESH")->first_node("PP_R"); assert(node); std::istringstream stst(node->value()); // check whether the first point is zero or not double xmin; stst >> xmin; start_point_ = 0; if (xmin > 1.0e-10) start_point_ = 1; grid_.resize(size + start_point_); grid_[0] = 0.0; grid_[start_point_] = xmin; for (int ii = 0; ii < size - 1; ii++) stst >> grid_[1 + start_point_ + ii]; assert(fabs(grid_[0]) <= 1e-10); mesh_size_ = 0; for (double rr = 0.0; rr <= grid_[grid_.size() - 1]; rr += mesh_spacing()) mesh_size_++; } { rapidxml::xml_node<> *node = doc_.first_node("PP_MESH")->first_node("PP_R"); assert(node); std::istringstream stst(node->value()); grid_weights_.resize(size + start_point_); grid_weights_[0] = 0.5 * (grid_[1] - grid_[0]); for (int ii = 0; ii < size; ii++) stst >> grid_weights_[start_point_ + ii]; } // lmax and lloc { proj_l_.resize(nprojectors()); proj_c_.resize(nprojectors()); rapidxml::xml_node<> *node = doc_.first_node("PP_NONLOCAL")->first_node("PP_BETA"); std::vector<bool> has_l(MAX_L, false); lmax_ = 0; nchannels_ = 0; int iproj = 0; while (node) { std::string line; std::istringstream 
stst(node->value()); int read_i, read_l; stst >> read_i >> read_l; read_i--; assert(iproj == read_i); lmax_ = std::max(lmax_, read_l); has_l[read_l] = true; proj_l_[iproj] = read_l; proj_c_[iproj] = 0; for (int jproj = 0; jproj < iproj; jproj++) if (read_l == proj_l_[jproj]) proj_c_[iproj]++; nchannels_ = std::max(nchannels_, proj_c_[iproj] + 1); node = node->next_sibling("PP_BETA"); iproj++; } assert(lmax_ >= 0); llocal_ = -1; for (int l = 0; l <= lmax_; l++) if (!has_l[l]) llocal_ = l; } // Read dij { rapidxml::xml_node<> *node = doc_.first_node("PP_NONLOCAL")->first_node("PP_DIJ"); assert(node); dij_.resize(nchannels() * nchannels() * (lmax_ + 1)); for (unsigned kk = 0; kk < dij_.size(); kk++) dij_[kk] = 0.0; std::istringstream stst(node->value()); int nnonzero; stst >> nnonzero; getline(stst, line); for (int kk = 0; kk < nnonzero; kk++) { int ii, jj; double val; stst >> ii >> jj >> val; val *= 2.0; // convert from 1/Rydberg to 1/Hartree ii--; jj--; assert(proj_l_[ii] == proj_l_[jj]); d_ij(proj_l_[ii], proj_c_[ii], proj_c_[jj]) = val; } } } pseudopotential::format format() const { return pseudopotential::format::UPF2; } int size() const { return buffer_.size(); }; std::string description() const { return doc_.first_node("PP_INFO")->value(); } std::string symbol() const { return symbol_; } int atomic_number() const { element el(symbol()); return el.atomic_number(); } double mass() const { element el(symbol()); return el.mass(); } double valence_charge() const { return zval_; } pseudopotential::exchange exchange() const { if (xc_functional_ == "PBE") return pseudopotential::exchange::PBE; if (xc_functional_ == "PBESOL") return pseudopotential::exchange::PBE_SOL; if (xc_functional_ == "SLA PW NOGX NOGC") return pseudopotential::exchange::LDA; if (xc_functional_ == "BLYP") return pseudopotential::exchange::B88; return pseudopotential::exchange::UNKNOWN; } pseudopotential::correlation correlation() const { if (xc_functional_ == "PBE") return pseudopotential::correlation::PBE; if (xc_functional_ == "PBESOL") return pseudopotential::correlation::PBE_SOL; if (xc_functional_ == "SLA PW NOGX NOGC") return pseudopotential::correlation::LDA_PW; if (xc_functional_ == "BLYP") return pseudopotential::correlation::LYP; return pseudopotential::correlation::UNKNOWN; } void local_potential(std::vector<double> &potential) const { rapidxml::xml_node<> *node = doc_.first_node("PP_LOCAL"); assert(node); potential.resize(grid_.size()); std::istringstream stst(node->value()); for (unsigned ii = 0; ii < grid_.size() - start_point_; ii++) { stst >> potential[ii + start_point_]; potential[ii + start_point_] *= 0.5; // Convert from Rydberg to Hartree } if (start_point_ > 0) extrapolate_first_point(potential); interpolate(potential); } int nprojectors() const { return nprojectors_; } int nprojectors_per_l(int l) const { int nchannel = 0; for (int jproj = 0; jproj < nprojectors(); jproj++) if (proj_l_[jproj] == l) nchannel = std::max(proj_c_[jproj]+1, nchannel); return nchannel; } void projector(int l, int i, std::vector<double> &proj) const { proj.clear(); rapidxml::xml_node<> *node = doc_.first_node("PP_NONLOCAL")->first_node("PP_BETA"); assert(node); int iproj = 0; while (l != proj_l_[iproj] || i != proj_c_[iproj]) { iproj++; node = node->next_sibling("PP_BETA"); if (!node) return; } std::string line; std::istringstream stst(node->value()); int read_i, read_l, size; stst >> read_i >> read_l; getline(stst, line); assert(read_l == proj_l_[iproj]); stst >> size; getline(stst, line); assert(size >= 0); assert(size <= 
int(grid_.size())); proj.resize(grid_.size()); for (int ii = 0; ii < size; ii++) stst >> proj[ii + start_point_]; for (unsigned ii = size; ii < grid_.size() - start_point_; ii++) proj[ii + start_point_] = 0.0; // the projectors come in Rydberg and multiplied by r, so we have to divide // and fix the first point for (unsigned ii = 1; ii < proj.size(); ii++) proj[ii] /= 2.0 * grid_[ii]; extrapolate_first_point(proj); interpolate(proj); } bool has_radial_function(int l) const { return false; } void radial_function(int l, std::vector<double> &function) const { function.clear(); } void radial_potential(int l, std::vector<double> &function) const { function.clear(); } bool has_nlcc() const { return doc_.first_node("PP_NLCC"); } void nlcc_density(std::vector<double> &density) const { rapidxml::xml_node<> *node = doc_.first_node("PP_NLCC"); assert(node); std::istringstream stst(node->value()); density.resize(grid_.size()); for (unsigned ii = 0; ii < grid_.size() - start_point_; ii++) stst >> density[start_point_ + ii]; extrapolate_first_point(density); // this charge does not come multiplied by anything interpolate(density); } bool has_density() const { return doc_.first_node("PP_RHOATOM"); } void density(std::vector<double> &val) const { rapidxml::xml_node<> *node = doc_.first_node("PP_RHOATOM"); assert(node); val.resize(grid_.size()); std::istringstream stst(node->value()); for (unsigned ii = 0; ii < grid_.size() - start_point_; ii++) stst >> val[start_point_ + ii]; // the density comes multiplied by 4\pi r for (unsigned ii = 1; ii < val.size(); ii++) val[ii] /= 4.0 * M_PI * grid_[ii] * grid_[ii]; extrapolate_first_point(val); interpolate(val); } int nwavefunctions() const { return nwavefunctions_; } void wavefunction(int index, int &n, int &l, double &occ, std::vector<double> &proj) const { rapidxml::xml_node<> *node = doc_.first_node("PP_PSWFC"); assert(node); std::istringstream stst(node->value()); std::string line; // skip until the correct wavefunction for (int ii = 0; ii < index; ii++) { double tmp; stst >> line; getline(stst, line); for (unsigned ii = 0; ii < grid_.size() - start_point_; ii++) stst >> tmp; } std::string label; stst >> label >> l >> occ; getline(stst, line); if (label == "s") { n = 1; } else { n = atoi(label.substr(0, 1).c_str()); } proj.resize(grid_.size()); for (unsigned ii = 0; ii < grid_.size() - start_point_; ii++) stst >> proj[ii + start_point_]; // the wavefunctions come multiplied by r, so we have to divide and fix the // first point for (unsigned ii = 1; ii < grid_.size() - start_point_; ii++) proj[ii] /= grid_[ii]; extrapolate_first_point(proj); interpolate(proj); } bool has_total_angular_momentum() const { return doc_.first_node("PP_ADDINFO"); } int projector_2j(int l, int ic) const { if (l == 0) return 1; rapidxml::xml_node<> *node = doc_.first_node("PP_ADDINFO"); assert(node); std::istringstream stst(node->value()); for (int iwf = 0; iwf < nwavefunctions_; iwf++) { std::string line; stst >> line; getline(stst, line); } for (int iproj = 0; iproj < nprojectors_; iproj++) { int read_l; stst >> read_l; assert(read_l == proj_l_[iproj]); if (proj_l_[iproj] == l && proj_c_[iproj] == ic) { double read_j; stst >> read_j; return lrint(read_j * 2.0); } else { std::string line; getline(stst, line); } } assert(false); return 0; } int wavefunction_2j(int ii) const { assert(ii >= 0 && ii <= nwavefunctions_); rapidxml::xml_node<> *node = doc_.first_node("PP_ADDINFO"); assert(node); std::istringstream stst(node->value()); double j; for (int iwf = 0; iwf < ii; iwf++) { 
std::string label; int n, l; double occ; stst >> label >> n >> l >> j >> occ; } return lrint(j * 2.0); } private: std::ifstream file_; std::vector<char> buffer_; rapidxml::xml_document<> doc_; std::string symbol_; std::string xc_functional_; double zval_; int nwavefunctions_; int nprojectors_; std::vector<int> proj_l_; std::vector<int> proj_c_; }; } // namespace pseudopotential #endif
0
ALLM/M3/octopus/src
ALLM/M3/octopus/src/species/upf2.hpp
#ifndef PSEUDO_UPF2_HPP #define PSEUDO_UPF2_HPP /* Copyright (C) 2018 Xavier Andrade This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <cassert> #include <cmath> #include <fstream> #include <iostream> #include <sstream> #include <vector> #include "anygrid.hpp" #include "base.hpp" #include <rapidxml.hpp> #include "element.hpp" namespace pseudopotential { class upf2 : public pseudopotential::upf { public: upf2(const std::string &filename, bool uniform_grid = false) : pseudopotential::upf(uniform_grid), file_(filename.c_str()), buffer_((std::istreambuf_iterator<char>(file_)), std::istreambuf_iterator<char>()) { filename_ = filename; buffer_.push_back('\0'); doc_.parse<0>(&buffer_[0]); root_node_ = doc_.first_node("UPF"); if (!root_node_) throw status::FORMAT_NOT_SUPPORTED; if (root_node_->first_attribute("version")->value()[0] != '2') throw status::FORMAT_NOT_SUPPORTED; std::string pseudo_type = root_node_->first_node("PP_HEADER") ->first_attribute("pseudo_type") ->value(); if (pseudo_type == "NC" || pseudo_type == "SL") { type_ = pseudopotential::type::KLEINMAN_BYLANDER; } else if (pseudo_type == "USPP") { throw status::UNSUPPORTED_TYPE_ULTRASOFT; } else if (pseudo_type == "PAW") { throw status::UNSUPPORTED_TYPE_PAW; } else { throw status::UNSUPPORTED_TYPE; } assert(root_node_); // Read the grid { rapidxml::xml_base<> *xmin = root_node_->first_node("PP_MESH")->first_attribute("xmin"); start_point_ = 0; if (xmin && fabs(value<double>(xmin)) > 1.0e-10) start_point_ = 1; rapidxml::xml_node<> *node = root_node_->first_node("PP_MESH")->first_node("PP_R"); assert(node); rapidxml::xml_attribute<> *size_attr = node->first_attribute("size"); // some files seems to have this information elsewhere if (size_attr == NULL) size_attr = root_node_->first_node("PP_MESH")->first_attribute("mesh"); if (size_attr == NULL) throw status::FORMAT_NOT_SUPPORTED; int size = value<int>(size_attr); grid_.resize(size + start_point_); std::istringstream stst(node->value()); grid_[0] = 0.0; for (int ii = 0; ii < size; ii++) stst >> grid_[start_point_ + ii]; assert(fabs(grid_[0]) <= 1e-10); } { rapidxml::xml_node<> *node = root_node_->first_node("PP_MESH")->first_node("PP_RAB"); assert(node); int size = get_size(node); grid_weights_.resize(size + start_point_); std::istringstream stst(node->value()); grid_weights_[0] = 0.5 * (grid_[1] - grid_[0]); for (int ii = 0; ii < size; ii++) stst >> grid_weights_[start_point_ + ii]; mesh_size_ = 0; for (double rr = 0.0; rr <= grid_[grid_.size() - 1]; rr += mesh_spacing()) mesh_size_++; } // calculate lmax (we can't trust the one given by the file :-/) std::vector<bool> has_l(MAX_L, false); lmax_ = 0; nchannels_ = 0; proj_l_.resize(nprojectors()); proj_c_.resize(nprojectors()); // projector info for (int iproj = 0; iproj < nprojectors(); iproj++) { std::ostringstream tag; tag << "PP_BETA." 
<< iproj + 1; rapidxml::xml_node<> *node = root_node_->first_node("PP_NONLOCAL")->first_node(tag.str().c_str()); assert(node); int read_l = value<int>(node->first_attribute("angular_momentum")); lmax_ = std::max(lmax_, read_l); proj_l_[iproj] = read_l; has_l[read_l] = true; // now calculate the channel index, by counting previous projectors with // the same l proj_c_[iproj] = 0; for (int jproj = 0; jproj < iproj; jproj++) if (read_l == proj_l_[jproj]) proj_c_[iproj]++; nchannels_ = std::max(nchannels_, proj_c_[iproj] + 1); } assert(lmax_ >= 0); llocal_ = -1; for (int l = 0; l <= lmax_; l++) if (!has_l[l]) llocal_ = l; // Read dij once { rapidxml::xml_node<> *node = root_node_->first_node("PP_NONLOCAL")->first_node("PP_DIJ"); assert(node); dij_.resize((lmax_ + 1) * nchannels_ * nchannels_); for (unsigned kk = 0; kk < dij_.size(); kk++) dij_[kk] = 0.0; std::istringstream stst(node->value()); for (int ii = 0; ii < nprojectors(); ii++) { for (int jj = 0; jj < nprojectors(); jj++) { double val; stst >> val; if (proj_l_[ii] != proj_l_[jj]) { assert(fabs(val) < 1.0e-10); continue; } val *= 0.5; // convert from Rydberg to Hartree d_ij(proj_l_[ii], proj_c_[ii], proj_c_[jj]) = val; } } } } pseudopotential::format format() const { return pseudopotential::format::UPF2; } int size() const { return buffer_.size(); }; std::string description() const { return root_node_->first_node("PP_INFO")->value(); } std::string symbol() const { return element::trim(root_node_->first_node("PP_HEADER") ->first_attribute("element") ->value()); } int atomic_number() const { element el(symbol()); return el.atomic_number(); } double mass() const { element el(symbol()); return el.mass(); } double valence_charge() const { return value<double>( root_node_->first_node("PP_HEADER")->first_attribute("z_valence")); } pseudopotential::exchange exchange() const { std::string functional = root_node_->first_node("PP_HEADER") ->first_attribute("functional") ->value(); if (functional == "PBE") return pseudopotential::exchange::PBE; if (functional == "PBESOL") return pseudopotential::exchange::PBE_SOL; if (functional == "SLA PW NOGX NOGC") return pseudopotential::exchange::LDA; if (functional == "BLYP") return pseudopotential::exchange::B88; return pseudopotential::exchange::UNKNOWN; } pseudopotential::correlation correlation() const { std::string functional = root_node_->first_node("PP_HEADER") ->first_attribute("functional") ->value(); if (functional == "PBE") return pseudopotential::correlation::PBE; if (functional == "PBESOL") return pseudopotential::correlation::PBE_SOL; if (functional == "SLA PW NOGX NOGC") return pseudopotential::correlation::LDA_PW; if (functional == "BLYP") return pseudopotential::correlation::LYP; return pseudopotential::correlation::UNKNOWN; } void local_potential(std::vector<double> &potential) const { rapidxml::xml_node<> *node = root_node_->first_node("PP_LOCAL"); assert(node); int size = get_size(node); potential.resize(size + start_point_); std::istringstream stst(node->value()); for (int ii = 0; ii < size; ii++) { stst >> potential[ii + start_point_]; potential[ii + start_point_] *= 0.5; // Convert from Rydberg to Hartree } if (start_point_ > 0) extrapolate_first_point(potential); interpolate(potential); } int nprojectors() const { return value<int>( root_node_->first_node("PP_HEADER")->first_attribute("number_of_proj")); } int nprojectors_per_l(int l) const { int nchannel = 0; for (int jproj = 0; jproj < nprojectors(); jproj++) if (proj_l_[jproj] == l) nchannel = std::max(proj_c_[jproj]+1, nchannel); 
return nchannel; } void projector(int l, int i, std::vector<double> &proj) const { rapidxml::xml_node<> *node = NULL; int iproj = 0; while ((l != proj_l_[iproj] || i != proj_c_[iproj]) && iproj < nprojectors()) { iproj++; } std::stringstream tag; tag << "PP_BETA." << iproj+1; node = root_node_->first_node("PP_NONLOCAL")->first_node(tag.str().c_str()); assert(node); int size = get_size(node); proj.resize(size + start_point_); std::istringstream stst(node->value()); for (int ii = 0; ii < size; ii++) stst >> proj[ii + start_point_]; // the projectors come multiplied by r, so we have to divide and fix the // first point for (int ii = 1; ii < size + start_point_; ii++) proj[ii] /= grid_[ii]; extrapolate_first_point(proj); interpolate(proj); } bool has_radial_function(int l) const { return false; } void radial_function(int l, std::vector<double> &function) const { function.clear(); } void radial_potential(int l, std::vector<double> &function) const { function.clear(); } bool has_nlcc() const { return root_node_->first_node("PP_NLCC"); } void nlcc_density(std::vector<double> &density) const { rapidxml::xml_node<> *node = root_node_->first_node("PP_NLCC"); assert(node); int size = get_size(node); density.resize(size + start_point_); std::istringstream stst(node->value()); for (int ii = 0; ii < size; ii++) stst >> density[start_point_ + ii]; extrapolate_first_point(density); // this charge does not come multiplied by anything interpolate(density); } bool has_total_angular_momentum() const { return root_node_->first_node("PP_SPIN_ORB"); } void beta(int iproj, int &l, std::vector<double> &proj) const { rapidxml::xml_node<> *node = NULL; std::stringstream tag; tag << "PP_BETA." << iproj + 1; node = root_node_->first_node("PP_NONLOCAL")->first_node(tag.str().c_str()); assert(node); l = value<int>(node->first_attribute("angular_momentum")); int size = get_size(node); proj.resize(size + start_point_); std::istringstream stst(node->value()); for (int ii = 0; ii < size; ii++) stst >> proj[ii + start_point_]; // the projectors come multiplied by r, so we have to divide and fix the // first point for (int ii = 1; ii < size + start_point_; ii++) proj[ii] /= grid_[ii]; extrapolate_first_point(proj); interpolate(proj); } void dnm_zero(int nbeta, std::vector<std::vector<double>> &dnm) const { dnm.resize(nbeta); for (int i = 0; i < nbeta; i++) { dnm[i].resize(nbeta); for (int j = 0; j < nbeta; j++) { dnm[i][j] = dij_[i * nbeta + j]; } } } bool has_density() const { return root_node_->first_node("PP_RHOATOM"); } void density(std::vector<double> &val) const { rapidxml::xml_node<> *node = root_node_->first_node("PP_RHOATOM"); assert(node); int size = get_size(node); val.resize(size + start_point_); std::istringstream stst(node->value()); for (int ii = 0; ii < size; ii++) stst >> val[start_point_ + ii]; // the density comes multiplied by 4\pi r for (int ii = 1; ii < size + start_point_; ii++) val[ii] /= 4.0 * M_PI * grid_[ii] * grid_[ii]; extrapolate_first_point(val); interpolate(val); } int nwavefunctions() const { return value<int>( root_node_->first_node("PP_HEADER")->first_attribute("number_of_wfc")); } void wavefunction(int index, int &n, int &l, double &occ, std::vector<double> &proj) const { rapidxml::xml_node<> *node = NULL; std::stringstream tag; tag << "PP_CHI." 
<< index + 1; node = root_node_->first_node("PP_PSWFC")->first_node(tag.str().c_str()); assert(node); // not all files have "n", so we might have to parse the label if (node->first_attribute("n")) { n = value<int>(node->first_attribute("n")); } else { std::string label = node->first_attribute("label")->value(); n = atoi(label.substr(0, 1).c_str()); } l = value<int>(node->first_attribute("l")); occ = value<double>(node->first_attribute("occupation")); int size = get_size(node); proj.resize(size + start_point_); std::istringstream stst(node->value()); for (int ii = 0; ii < size; ii++) stst >> proj[ii + start_point_]; // the wavefunctions come multiplied by r, so we have to divide and fix the // first point for (int ii = 1; ii < size + start_point_; ii++) proj[ii] /= grid_[ii]; extrapolate_first_point(proj); interpolate(proj); } // Retreive the value of 2*j for relativistic projectors int projector_2j(int l, int ic) const { if (l == 0) return 1; for (int iproj = 0; iproj < nprojectors(); iproj++) { std::stringstream tag; tag << "PP_RELBETA." << iproj + 1; rapidxml::xml_node<> *node = root_node_->first_node("PP_SPIN_ORB")->first_node(tag.str().c_str()); assert(node); std::string labell = node->first_attribute("lll")->value(); if(atoi(labell.c_str()) == l && proj_c_[iproj] == ic){ std::string labelj = node->first_attribute("jjj")->value(); float j = atof(labelj.c_str()); return lrint(j * 2.0); } } return -1; } // Retreive the value of 2*j for relativitstic wavefunctions int wavefunction_2j(int ii) const { std::stringstream tag; tag << "PP_RELWFC." << ii; rapidxml::xml_node<> *node = root_node_->first_node("PP_SPIN_ORB")->first_node(tag.str().c_str()); assert(node); std::string label = node->first_attribute("jchi")->value(); float j = atof(label.c_str()); return lrint(j * 2.0); } private: int get_size(const rapidxml::xml_node<> *node) const { int size = grid_.size() - start_point_; rapidxml::xml_attribute<> *size_attr = node->first_attribute("size"); if (size_attr != NULL) size = value<int>(size_attr); return size; } std::ifstream file_; std::vector<char> buffer_; rapidxml::xml_document<> doc_; rapidxml::xml_node<> *root_node_; std::vector<int> proj_l_; std::vector<int> proj_c_; }; } // namespace pseudopotential #endif
0
ALLM/M3/octopus
ALLM/M3/octopus/test/fortuno_app.f90
!> @brief Register tests by providing a test suite instance containing all tests associated !! with an Octopus module. !! !! For example: !! ``` !! call execute_app(testitems = [testsuite_oct_module, testsuite_oct_module2]) !! ``` !! where a test_suite instance contains the module name, and an array of test_case !! instances for routines in that module (one test_case per unit test): !! ``` !! testsuite_sort = test_suite("basic/sort", & !! items = [test_case("test_sort", test_sort)], & !! ) !!``` !! Note: this routine does not return but stops the program with the right exit code. program test_suite use fortuno_interface_m, only : execute_cmd_app use testsuite_sort_oct_m implicit none call execute_cmd_app(& testitems=[& testsuite_sort()& ]& ) end program test_suite
0
ALLM/M3/octopus/test
ALLM/M3/octopus/test/basic/test_sort.f90
module testsuite_sort_oct_m use fortuno_interface_m use sort_oct_m implicit none private public :: testsuite_sort contains !> Returns a suite instance, wrapped as test_item function testsuite_sort() result(res) type(test_item), allocatable :: res res = test_suite("basic/sort", & items = [& test_case("test_sort", test_sort)& ]& ) end function testsuite_sort subroutine test_sort() integer :: list(3) list = [2, 1, 3] call sort(list) call check(all(list == [1, 2, 3])) end subroutine test_sort end module testsuite_sort_oct_m
0
ALLM/M3/octopus
ALLM/M3/octopus/testsuite/oct-run_regression_test.pl
#!/usr/bin/env perl # # Copyright (C) 2005-2020 H. Appel, M. Marques, X. Andrade, D. Strubbe, M. Lueders, H. Glawe # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. # use strict; use warnings; use Getopt::Std; use File::Basename; use File::Spec; use Fcntl qw(:mode :flock); use Time::HiRes qw(gettimeofday tv_interval); use Scalar::Util qw(looks_like_number); use File::Temp qw/tempdir/; sub usage { print <<EndOfUsage; Copyright (C) 2005-2020 H. Appel, M. Marques, X. Andrade, D. Strubbe, M. Lueders, H. Glawe Usage: oct-run_regression_test.pl [options] -n dry-run -v verbose -h this usage -D name of the directory where to look for the executables -s run everything serial -f filename of testsuite [required] -p preserve working directories -l copy output log to current directory -L PATH name of the output log [default: out.log] -m run matches only (assumes there are work directories) -r print a report into a YAML files -G deviceID offset for CUDA run -w preserve the runtime warning file Exit codes: 0 all tests passed 1..253 number of test failures 254 test skipped 255 internal error Report bugs to <octopus-devel\@tddft.org> EndOfUsage exit 0; } my $precnum; sub set_precision{ my $p = $_[0]; if($p ne "default"){ $precnum = 1.0*$p; } else { $precnum = 0.0001 } } # Check whether STDOUT is a terminal. If not, no ANSI sequences are # emitted. my %color_start; my %color_end; if(-t STDOUT) { $color_start{blue}="\033[34m"; $color_end{blue}="\033[0m"; $color_start{red}="\033[31m"; $color_end{red}="\033[0m"; $color_start{green}="\033[32m"; $color_end{green}="\033[0m"; } else { $color_start{blue}=""; $color_end{blue}=""; $color_start{red}=""; $color_end{red}=""; $color_start{green}=""; $color_end{green}=""; } if (not @ARGV) { usage; } our($opt_f, $opt_r, $opt_h, $opt_s, $opt_l, $opt_L, $opt_D, $opt_G, $opt_m, $opt_p, $opt_n, $opt_v, $opt_w); getopts("nlvhD:c:f:L:spmr:G:w"); my $out_log = ($opt_L ? 
$opt_L : "out.log"); my $output_file_warning = "runtime_warnings"; if(!$opt_w) { unlink($output_file_warning); } # Handle options $opt_h && usage; my $exec_directory; if($opt_D) { $exec_directory = $opt_D; if($exec_directory !~ /^\//){ $exec_directory = get_env("PWD")."/$exec_directory"; } } else { $exec_directory = "/usr/bin"; } if(length($opt_f) == 0) { die255("You must supply the name of a test file with the -f option."); } my $aexec = get_env("EXEC"); my $global_np = get_env("OCT_TEST_MPI_NPROCS"); # FIXME: all test files should declare Processors #$np = "serial"; my $is_parallel = 0; my $mpiexec; my $machinelist; my ($np, $nslots, $my_nslots, $specify_np); # FIXME: could bake in mpiexec at configure time if(!$opt_s) { # MPI stuff $mpiexec = get_env("MPIEXEC"); $machinelist = get_env("MACHINELIST"); if ("$mpiexec" eq "") { $mpiexec = `which mpiexec 2> /dev/null`; } chomp($mpiexec); if( "$mpiexec" eq "" ) { print "No mpiexec found: running in serial.\n\n"; } else { $np = 1; $is_parallel = 1; } } else { $mpiexec = ""; } # default number of processors for MPI runs is 2 $np = 2; my $enabled = ""; # FIXME: should Enabled be optional? my $expect_error = 0; # check for controlled failure my $error_match_done = 1; # check that at least one error-match has been done. my $command_env; # Handle GPU offset my $offset_GPU = defined $opt_G ? $opt_G : -1; if($offset_GPU >= 0) { $command_env = "OCT_PARSE_ENV=1 OCT_AccelDevice=$offset_GPU"; } else { $command_env = ""; } # This variable counts the number of failed testcases. my $failures = 0; my $tempdirpath = get_env("TEMPDIRPATH"); if ("$tempdirpath" eq "") { $tempdirpath = '/tmp'; } if (! -d $tempdirpath) { mkdir $tempdirpath; } set_precision("default"); # Define the parser for the if..elseif..else..endif structures: # Conditional elements are defined through the if..[elseif..else]..endif structure. # The conditions are specified as argument (in parenthesis) of the if [or elseif]. # # Conditions can be of the form: (avail[able] COND1 [(and|,) COND2 ...]) # global variables, defining the state of the parser: # array to hold a set of conditions: my @conditions= (); my $options_available; # recursion level of nested if blocks: my $if_level = 0; # array of flags, indicating whether an if..else..endif block has been satisfied. # The array index is the recursion level. # Once a condition in a if..elseif..else..endif structure has been met, the if_done # for this level is set to 1 and further blocks of the same level will be skipped. my @if_started = (); my @if_done = (); my $skip = 0; sub parse_condition { # This routine parses a string recursively to look for 'avail*' 'and' and ',' # and push found requirements to @($_[1]). my $condition = $_[0]; my @required = @{$_[1]}; if ($condition =~ /\s*avail\w*\s*(\w*)\s*$/i ) { parse_condition($1, $_[1]); } # parse comma separated options elsif ($condition =~ /\b(\w*)\b\s+and\s+(.*)$/i ) { push(@{$_[1]}, $1); parse_condition($2, $_[1]); } # parse 'and' separated options elsif ($condition =~ /\b(\w*)\b\s+,\s+(.*)$/i ) { push(@{$_[1]}, $1); parse_condition($2, $_[1]); } elsif ($condition =~ /^(\w*)$/ ) { push(@{$_[1]}, $1); } else { die255( "Ill-formed option condition.\n" ); } } sub check_conditions { # This is a combined test to determine whether a certain step in the test needs to be executed. # This check takes into account: # - the level of the if blocks # - whether a if-block already has been satisfied # - whether prerequisits for a run are fulfilled. 
my @required_options = (); my $result=1; if($if_level>0) { # collect required options in $_: foreach(@{$_[0]}) { parse_condition($_, \@required_options); } # check whether all required options are present: foreach(@required_options) { $result = $result * ($options_available =~ /$_/i); } } return ((not $if_done[$if_level]) and (not $skip) and $result); } # Set test_succeeded flag to 'TRUE' (=1). Only change to 'FALSE' (=0) if a test fails. my $test_succeeded = 1; $if_done[0] = 0; my $pwd = get_env("PWD"); my $workdir; my $scriptname; my $matchdir; if (!$opt_m) { my $name = $opt_f; $name =~ s/\.\.\///g; $name =~ s/\//-/g; $workdir = tempdir("$tempdirpath/octopus" . "-" . $name . ".XXXXXX"); chomp($workdir); system ("rm -rf $workdir"); mkdir $workdir; $scriptname = "$workdir/matches.sh"; open(SCRIPT, ">$scriptname") or die255("Could not create '$scriptname'."); print SCRIPT "#\!/usr/bin/env bash\n\n"; print SCRIPT "perl $0 -m -D $exec_directory -f $opt_f\n"; close(SCRIPT); chmod 0755, $scriptname; $matchdir = $workdir; } else { $workdir = $pwd; } # testsuite open(TESTSUITE, "<".$opt_f ) or die255("Cannot open testsuite file '$opt_f'."); my (%report, $r_match_report, $r_matches_array, $r_input_report); my %test; my ($test_start, $test_end); my ($basename, $basedir, $basecommand, $testname, $command, $command_line); my ($input_base, $input_file); my ($return_value, $cp_return); my $mode; my ($workfiles, $file_cp); my @wfiles; my $elapsed; my $value; my $name; my $line_num; while ($_ = <TESTSUITE>) { # remove trailing newline chomp; # remove leading whitespace $_ =~ s/^\s+//; # remove trailing whitespace $_ =~ s/\s+$//; # skip blank lines next if (length($_) == 0); # skip comments next if /^#/; if ( $_ =~ /^Test\s*:\s*(.*)\s*$/) { $test{"name"} = $1; if($test{"name"} eq "") { die255("No name was provided with Test tag."); } print "$color_start{blue} ***** $test{\"name\"} ***** $color_end{blue} \n\n"; print "Using workdir : $workdir\n"; if($opt_p) { print "Workdir will be saved.\n"; } print "Using test file : $opt_f \n"; $basename = basename($opt_f); $basedir = basename(dirname(File::Spec->rel2abs($opt_f))); $testname = "$basedir/$basename"; $report{$testname} = {"input" => {}}; } elsif ( $_ =~ /^Enabled\s*:\s*(.*)\s*$/) { %test = (); $enabled = $1; $enabled =~ s/^\s*//; $enabled =~ s/\s*$//; $test{"enabled"} = $enabled; $report{$testname}{"enabled"} = $enabled; if ( $enabled eq "No") { print STDERR "Test disabled: skipping test\n\n"; skip_exit(); } elsif ( $enabled eq "no-GPU") { if ($options_available =~ "cuda") { print STDERR "Test for GPU disabled: skipping test\n\n"; skip_exit(); } } elsif ( $enabled eq "no-GPU-MPI") { if ($options_available =~ "cuda" && $options_available =~ "mpi") { print STDERR "Test for GPU and MPI disabled: skipping test\n\n"; skip_exit(); } } elsif ( $enabled ne "Yes") { if (!$opt_p && !$opt_m) { system ("rm -rf $workdir"); } die255("Unknown option 'Enabled = $enabled' in testsuite file."); } } elsif ( $_ =~ /^Program\s*:\s*(.*)\s*$/) { $command = "$exec_directory/$1"; # FIXME: should we do this for a dry-run? if( ! -x "$command") { $command = "$exec_directory/../utils/$1"; } if( ! -x $command) { die255("Executable '$1' not available."); } $basecommand = basename($command); $report{$testname}{"command"} = $basecommand; $options_available = 'dummy ' . 
`$command -c`; chomp($options_available); if($is_parallel && $options_available !~ "mpi") { print "Running in serial since executable was not compiled with MPI.\n"; $is_parallel = 0; } # FIXME: import Options to BGW version } elsif ( $_ =~ /^TestGroups\s*:\s*(.*)\s*$/) { # handled by oct-run_testsuite.sh my @groups = split(/[;,]\s*/, $1); $report{$testname}{"testgroups"} = \@groups; } else { if ( $enabled eq "") { die255("Testsuite file must set Enabled tag before another (except Test, Program, Options, TestGroups)."); } if ( $_ =~ /^Util\s*:\s*(.*)\s*$/ || $_ =~ /^MPIUtil\s*:\s*(.*)\s*$/) { if( $_ =~ /^Util\s*:\s*(.*)\s*$/) {$np = "serial";} $command = "$exec_directory/$1"; if( ! -x "$command") { $command = "$exec_directory/../utils/$1"; } $report{$testname}{"util"} = $1; if( ! -x "$command") { die255("Cannot find utility '$1'."); } } elsif ( $_ =~ /^MPIUtil\s*:\s*(.*)\s*$/) { $command = "$exec_directory/$1"; if( ! -x "$command") { $command = "$exec_directory/../utils/$1"; } $report{$testname}{"util"} = $1; if( ! -x "$command") { die255("Cannot find utility '$1'."); } } elsif ( $_ =~ /^Processors\s*:\s*(.*)\s*$/) { # FIXME: enforce this is "serial" or numeric $np = $1; } elsif ( $_ =~ /^\s*if\s*\((.*)\)\s*;\s*then\s*$/i ) { # Entering an IF region if ( not $if_done[$if_level] ) { push(@conditions,$1); $if_level += 1; $if_started[$if_level] = 0; $if_done[$if_level] = 0; } else { $skip = 1; } } elsif ( $_ =~ /^\s*else\s*$/i ) { if (not $skip ) { $if_done[$if_level] = $if_started[$if_level]; # $if_started[$if_level] = 0; pop(@conditions); push(@conditions, "dummy"); } } elsif ( $_ =~ /^\s*endif\s*$/i ) { if ( not $skip ) { $if_done[$if_level] = $if_started[$if_level]; $if_started[$if_level] = 0; if ($if_level==0) { die255("Ill-formed test file (unpaired endif.)\n"); } # Exiting IF region pop(@conditions); $if_started[$if_level-1] = $if_done[$if_level]; $if_done[$if_level] = undef; $if_level -= 1; } } elsif ( $_ =~ /^\w*Input\s*:\s*(.*)\s*$/ ) { if( check_conditions(\@conditions, $options_available)) { check_error_resolved(); $input_base = $1; $input_file = dirname($opt_f) . "/" . $input_base; my %input_report; $r_input_report = \%input_report; $report{$testname}{"input"}{basename($input_file)} = \%input_report; # The FailingInput is not really necessary, but can be used to make it explicit in the test file that an error is expected due to deliberate input errors. if( $_ =~ /^FailingInput/) { $expect_error = 1; } $input_report{"expected_failure"} = $expect_error?"Yes":"No"; my @matches_array; $r_matches_array = \@matches_array; $input_report{"matches"} = \@matches_array; if($is_parallel) { $input_report{"processors"} = $np; } else { $input_report{"processors"} = 1; } if ( $opt_m ) { print "\n\nFor input file : $input_file\n\n"; $return_value = 0; # FIXME: this works from outer directory, but not in archived subdirectories. $matchdir = "$workdir/$input_base"; } else { if( -f $input_file ) { print "\nUsing input file : $input_file\n"; $cp_return = system("cp $input_file $workdir/inp"); if($cp_return != 0) { die255("Copy failed (cp $input_file $workdir/inp)\n"); } # Ensure that the input file is writable so that it can # be overwritten by the next test. $mode = (stat "$workdir/inp")[2]; chmod $mode|S_IWUSR, "$workdir/inp"; } else { die255("Could not find input file '$input_file'."); } # serial or MPI run? 
if ( $is_parallel && $np ne "serial") { if("$global_np" ne "") { $np = $global_np; } if ("$mpiexec" =~ /ibrun/) { # used by SGE parallel environment $specify_np = ""; $my_nslots = "MY_NSLOTS=$np"; } elsif ("$mpiexec" =~ /runjob/) { # used by BlueGene $specify_np = "--np $np --exe"; $my_nslots = ""; } elsif ("$mpiexec" =~ /poe/) { # used by IBM PE $specify_np = ""; $my_nslots = "MP_PROCS=$np"; } else { # for mpirun and Cray's aprun $specify_np = "-n $np"; $my_nslots = ""; } $command_line = "cd $workdir; $command_env $my_nslots $mpiexec $specify_np $machinelist $aexec $command "; } else { $command_line = "cd $workdir; $command_env $aexec $command "; } # MPI implementations generally permit using more tasks than actual cores, and running tests this way makes it likely for developers to find race conditions. if($np ne "serial") { if($np > 4) { print "Note: this run calls for more than the standard maximum of 4 MPI tasks.\n"; } } $command_line = $command_line." > out 2> err"; print "Executing: " . $command_line . "\n"; if ( !$opt_n ) { $test_start = [gettimeofday]; $return_value = system("$command_line"); $test_end = [gettimeofday]; $elapsed = tv_interval($test_start, $test_end); printf("\tElapsed time: %8.1f s\n\n", $elapsed); if($return_value == 0) { printf "%-40s%s", " Execution", ": \t [ $color_start{green} OK $color_end{green} ] \n"; $input_report{"execution"} = "success"; # Set $error_match_done to TRUE to indicate that no error match needs to be done. $error_match_done = 1; } else { # In case of non-zero return value, we will not immediately mark the run as failling, but set a flag that a test for # the correct error message is obligatory. # # If that match was successful (i.e. the correct error message has been printed), we count it as success (passed). # If that match was unsuccessful or no match has been performed, we mark it as failed. print "Test run failed with exit code $return_value.\n"; print "These are the last lines of output:\n\n"; print "----------------------------------------\n"; system("tail -20 $workdir/out"); print "----------------------------------------\n\n"; print "These are the last lines of stderr:\n\n"; print "----------------------------------------\n"; system("tail -500 $workdir/err"); print "----------------------------------------\n\n"; $error_match_done = 0; } $test{"run"} = 1; } # Check for runtine warnings # At the moment only GFortran and Ifort warnings are parsed if (open(my $err_fh, "$workdir/err")) { my %err_count = (); my $linecache; my $got_name; while (<$err_fh>) { chomp; # remove newline from $_ if (/Fortran runtime warning/) { my $key = (defined $linecache ? "$linecache: $_" : "NO_LINECACHE: $_"); $err_count{$key}++; } elsif(/forrtl: warning/) { my $key = (defined $linecache ? "$linecache: $_" : "NO_LINECACHE: $_"); $err_count{$key}++; } elsif(/^\s*got\s*:\s*(\S+)\s*$/) { $got_name = $1 } elsif(/^\s*expected\s*:\s*(\S+)\s*$/) { my $key = (defined $got_name ? 
"pop_sub error: $got_name ($1)" : "NO_GOT_NAME: $_"); $err_count{$key}++; } else { $linecache=$_; } } # there were runtime warnings if (%err_count) { if (open(my $output_warnings, '>>', $output_file_warning)) { print $output_warnings "$input_base\n"; foreach my $key (sort keys %err_count) { print $output_warnings "$key ($err_count{$key} times)\n"; } print $output_warnings "\n"; } } } # copy all files of this run to archive directory with the name of the # current input file mkdir "$workdir/$input_base"; @wfiles = `ls -d $workdir/* | grep -v inp`; $workfiles = join("",@wfiles); $workfiles =~ s/\n/ /g; $cp_return = system("cp -r $workfiles $workdir/inp $workdir/$input_base"); if($cp_return != 0) { die255("Copy failed (cp -r $workfiles $workdir/inp $workdir/$input_base)\n"); } } } } elsif ( $_ =~ /^Precision\s*:\s*(.*)\s*$/) { set_precision($1); } elsif ( $_ =~ /^ExtraFile\s*:\s*(.*)\s*$/) { $file_cp = dirname($opt_f)."/".$1; $cp_return = system("cp $file_cp $workdir/"); } elsif ( $_ =~ /^match/ ) { # matches results when execution was successful if( check_conditions(\@conditions, $options_available)) { my %match_report; $r_match_report = \%match_report; # Mark this match-line as error match if it contains "error" in the name. my $error_match = ($_ =~ /error/i); if (!$opt_n && ($error_match xor ($return_value == 0) ) ) { push( @{$r_matches_array}, $r_match_report); if(run_match_new($_)){ printf "%-40s%s", "$name", ":\t [ $color_start{green} OK $color_end{green} ] \t (Calculated value = $value) \n"; if ($opt_v) { print_hline(); } if ($error_match) { $error_match_done = 1; } } else { printf "%-40s%s", "$name", ":\t [ $color_start{red} FAIL $color_end{red} ] \n"; print_hline(); $test_succeeded = 0; $failures++; } } $if_started[$if_level]=1; } } else { die255("Unknown command '$_'."); } } } check_error_resolved(); if ($opt_l && !$opt_m && !$opt_n) { system ("cat $workdir/out >> $out_log"); } if (!$opt_p && !$opt_m && $test_succeeded) { system ("rm -rf $workdir"); } print "\n"; close(TESTSUITE); print "Status: ".$failures." 
failures\n"; if($opt_r) { require YAML; open(YML, ">>$opt_r" ) or die255("Could not create '$opt_r'."); flock(YML, LOCK_EX) or die "Cannot lock file - $opt_r!\n"; print YML YAML::Dump(\%report); close(YML); } exit $failures; sub run_match_new { die255("Have to run before matching.") if !$test{"run"} && !$opt_m; # parse match line my ($line, $match, $match_command, $shell_command, $ref_value, $off); $line = $_[0]; $line =~ s/\\;/_COLUMN_/g; ($match, $name, $match_command, $ref_value) = split(/;/, $line); $match_command =~ s/_COLUMN_/;/g; $ref_value =~ s/^\s*//; $ref_value =~ s/\s*$//; # parse command $match_command =~ /\s*(\w+)\s*\((.*)\)/; my $func = $1; my $params = $2; # parse parameters $params =~ s/\\,/_COMMA_/g; my @par = split(/,/, $params); for ($params=0; $params <= $#par; $params++) { $par[$params] =~ s/_COMMA_/\\,/g; $par[$params] =~ s/^\s*//; $par[$params] =~ s/\s*$//; } $r_match_report->{"type"} = $func; $r_match_report->{"arguments"} = \@par; if ($func eq "SHELL") { # function SHELL(shell code) check_num_args(1, 1, $#par, $func); $shell_command = $par[0]; } elsif ($func eq "LINE") { # function LINE(filename, line, column) check_num_args(3, 3, $#par, $func); if ($par[1] < 0) { # negative number means from end of file $line_num = "`wc -l $par[0] | awk '{print \$1}'`"; $shell_command = "awk -v n=$line_num '(NR==n+$par[1]+1)' $par[0]"; } else { $shell_command = "awk '(NR==$par[1])' $par[0]"; } $shell_command .= " | cut -b $par[2]-"; } elsif ($func eq "LINEFIELD") { # function LINE(filename, line, field) check_num_args(3, 3, $#par, $func); if ($par[1] < 0) { # negative number means from end of file $line_num = "`wc -l $par[0] | awk '{print \$1}'`"; $shell_command = "awk -v n=$line_num '(NR==n+$par[1]+1) {printf \$$par[2]}' $par[0]"; } else { $shell_command = "awk '(NR==$par[1]) {printf \$$par[2]}' $par[0]"; } } elsif ($func eq "LINEFIELD_ABS") { # function LINE(filename, line, field_re, field_im) check_num_args(4, 4, $#par, $func); if ($par[1] < 0) { # negative number means from end of file $line_num = "`wc -l $par[0] | awk '{print \$1}'`"; $shell_command = "awk -v n=$line_num '(NR==n+$par[1]+1) {printf sqrt(\$$par[2]*\$$par[2] + \$$par[3]*\$$par[3])}' $par[0]"; } else { $shell_command = "awk '(NR==$par[1]) {printf sqrt(\$$par[2]*\$$par[2] + \$$par[3]*\$$par[3]) }' $par[0]"; } } elsif ($func eq "GREP") { # function GREP(filename, 're', column <, [offset>]) check_num_args(3, 4, $#par, $func); if ($#par == 3) { $off = $par[3]; } else { $off = 0; } # -a means even if the file is considered binary due to a stray funny character, it will work $shell_command = "grep -a -A$off $par[1] $par[0] | awk '(NR==$off+1)'"; $shell_command .= " | cut -b $par[2]-"; } elsif ($func eq "GREPFIELD") { # function GREPFIELD(filename, 're', field <, [offset>]) check_num_args(3, 4, $#par, $func); if ($#par == 3) { $off = $par[3]; } else { $off = 0; } # -a means even if the file is considered binary due to a stray funny character, it will work $shell_command = "grep -a -A$off $par[1] $par[0]"; $shell_command .= " | awk '(NR==$off+1) {printf \$$par[2]}'"; # if there are multiple occurrences found by grep, we will only be taking the first one via awk } elsif ($func eq "GREPCOUNT") { # function GREPCOUNT(filename, 're') check_num_args(2, 2, $#par, $func); # unfortunately grep returns an error code if it finds zero matches, so we make sure the command always returns true $shell_command = "grep -c $par[1] $par[0] || :"; } elsif ($func eq "SIZE") { # function SIZE(filename) check_num_args(1, 1, $#par, $func); 
$shell_command = "ls -lt $par[0] | awk '{printf \$5}'"; } else { # error printf STDERR "ERROR: Unknown command '$func'\n"; return 0; } # 'set -e; set -o pipefail' (bash 3 only) would make the whole pipe series give an error if any step does; # otherwise the error comes only if the last step failed. $value = qx(cd $matchdir && $shell_command); # Perl gives error code shifted, for some reason. my $exit_code = $? >> 8; if ($exit_code) { print STDERR "ERROR: Match command failed: $shell_command\n"; return 0; } # extract numeric string (including possibility of NaN) if ($value =~ /([0-9\-+.eEdDnNaA]+)/) { $value = $1; chomp $value; } else { $value = ""; } $r_match_report->{"value"} = $value; $r_match_report->{"name"} = $name; $r_match_report->{"reference"} = $ref_value; $r_match_report->{"precision"} = $precnum; if (length($value) == 0) { print STDERR "ERROR: Match command returned nothing: $shell_command\n"; return 0; } if (!looks_like_number($value)) { print STDERR "ERROR: Match command returned non-numeric value '$value': $shell_command\n"; return 0; } if (!looks_like_number($ref_value)) { print STDERR "WARNING: Match command has non-numeric reference value '$value'.\n"; return 0; } # at this point, we know that the command was successful, and returned a number. my $success = (abs(($value)-($ref_value)) <= $precnum); if (!$success || $opt_v) { print_hline(); print "Match".$name.":\n\n"; print " Calculated value : ".$value."\n"; print " Reference value : ".$ref_value."\n"; print " Difference : ".abs($ref_value - $value)."\n"; if(abs($ref_value)>1e-10) { print " Deviation [%] : ".(abs($ref_value - $value)/abs($ref_value)*100.0)."\n"; } print " Tolerance : ".$precnum."\n"; if (abs($ref_value)>1e-10) { print " Tolerance [%] : ".($precnum/abs($ref_value)*100.0)."\n"; } print "\n"; } return $success; } sub print_hline { print "\n-----------------------------------------\n\n"; } # return value of environment variable (specified by string argument), or "" if not set sub get_env { if (exists($ENV{$_[0]})) { return $ENV{$_[0]}; } else { return ""; } } # args: min num args, max num args, args given, function name sub check_num_args { my $min_num_args = $_[0]; my $max_num_args = $_[1]; my $given_num_args = $_[2]+1; my $func_name = $_[3]; if ($given_num_args < $min_num_args) { die255("$func_name given $given_num_args argument(s) but needs at least $min_num_args."); } if ($given_num_args > $max_num_args) { die255("$func_name given $given_num_args argument(s) but can take no more than $max_num_args."); } } sub die255 { print STDERR "ERROR: " . $_[0] . "\n"; print "Status: error\n"; exit 255; } sub skip_exit { if (!$opt_p && !$opt_m && $test_succeeded) { system ("rm -rf $workdir"); } if ($failures == 0) { print "Status: skipped\n"; exit 254 } else { print "Status: ".$failures." failures\n"; exit $failures; # if a previous step has failed, mark as failed not skipped } } sub check_error_resolved { if (!$opt_n && !$error_match_done) { print "No error check performed!\n"; # $input_report{"execution"} = "fail"; $failures++; } } sub trim { my $s = shift; $s =~ s/^\s+|\s+$//g; return $s };
0
ALLM/M3/octopus/testsuite
ALLM/M3/octopus/testsuite/performance/compare_results.py
#!/usr/bin/env python3
import sys

import yaml

if len(sys.argv) != 3:
    print('Error: need two arguments, reference and current yaml files.')
    sys.exit(1)

with open(sys.argv[1]) as reference_file:
    reference_data = list(yaml.safe_load_all(reference_file))
with open(sys.argv[2]) as current_file:
    current_data = list(yaml.safe_load_all(current_file))

# Collect the total_time of every (run, key) pair from both data sets.
time_data = {}
for reference_test, current_test in zip(reference_data, current_data):
    index = reference_test['schema'].index('total_time')
    time_data[reference_test['run']] = {}
    for key in reference_test['data'].keys():
        reference_time = reference_test['data'][key][index]
        current_time = current_test['data'][key][index]
        time_data[reference_test['run']][key] = {
            'reference': reference_time, 'current': current_time}

# Flag every timing that takes longer than the reference.
num_slower_tests = 0
num_tests = 0
for run, times in time_data.items():
    for key, time in times.items():
        num_tests += 1
        if time['current'] > time['reference']:
            print('Run {}, key {} is slower than reference '
                  '(by {:.1%}, {} s vs {} s).'.format(
                      run, key, time['current']/time['reference'] - 1,
                      time['current'], time['reference']))
            num_slower_tests += 1
if num_slower_tests > 0:
    print('Error: {:d} runs of {:d} are slower than the reference.'.format(
        num_slower_tests, num_tests))
    sys.exit(1)
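The script only requires that each YAML document carry 'run', 'schema' and 'data' entries, which is inferred from the field accesses above. A hedged sketch that writes a hypothetical reference/current pair (run name, tag names and timings are all invented) that compare_results.py could then consume:

import yaml

example = [{'run': 'benchmark_example',            # hypothetical run name
            'schema': ['num_calls', 'total_time'],
            'data': {'TIME_STEP': [100, 12.5]}}]   # hypothetical timing tag

with open('reference.yaml', 'w') as ref:
    yaml.safe_dump_all(example, ref)

example[0]['data']['TIME_STEP'][1] = 14.0          # pretend the run got slower
with open('current.yaml', 'w') as cur:
    yaml.safe_dump_all(example, cur)

# ./compare_results.py reference.yaml current.yaml would then flag TIME_STEP.
# Both files must list the same runs in the same order, since the script zips
# the two document streams together.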
0
ALLM/M3/octopus/testsuite
ALLM/M3/octopus/testsuite/performance/create_combinations.py
#!/usr/bin/env python3 import sys import os from string import Template import yaml import json import hashlib import glob def get_hash(obj): return hashlib.sha1( json.dumps(obj, sort_keys=True).encode('ascii')).hexdigest() def mkdir_p(path): os.makedirs(path, exist_ok=True) def outer_product_from_dict(d): '''Generate list of dictionaries as outer product from d''' result = [[]] for key in d: result = [x+[y] for x in result for y in d[key]] return [{key: value for key, value in zip(d.keys(), r)} for r in result] def keep_combination(combination, test_name): """Decides if we want to keep a combination and also modifies it The function gets the combination with the relevant input parameters and also the name of the test to be able to adapt to it. """ if combination['spin_components'] == 'unpolarized': combination['electrons_per_state'] = 2 if combination['spin_components'] == 'spinors': combination['test_type'] = 'complex' if combination['periodic_dimensions'] == 0 and \ combination['number_kpoints'] > 1: return False if combination['dimensions'] == 2 and \ combination['periodic_dimensions'] not in [0, 2]: return False if combination['dimensions'] == 4 and \ combination['periodic_dimensions'] != 0: return False return True def modify_combinations(combinations, test_name): elements_to_keep = [] for index, combination in enumerate(combinations): if keep_combination(combination, test_name): elements_to_keep.append(index) combinations = [c for i, c in enumerate(combinations) if i in elements_to_keep] return combinations def get_combinations(test_name, path='tests'): filename = '{}.combinations.yaml'.format(test_name) with open(os.path.join(path, filename), 'r') as f_in: data = yaml.safe_load(f_in) combinations_dict = data['parameters'] for parameter in combinations_dict: # check environment variable to override variable = 'OPRT_' + parameter if variable in os.environ: print('Information: {} is overridden by environment variable' .format(parameter)) combinations_dict[parameter] = yaml.safe_load(os.environ[variable]) combinations_list = modify_combinations( outer_product_from_dict(combinations_dict), test_name) combinations = {} for combination in combinations_list: combinations[get_hash(combination)] = combination return combinations def get_timings_tags(test_name, path='tests'): filename = '{}.combinations.yaml'.format(test_name) with open(os.path.join(path, filename), 'r') as f_in: data = yaml.safe_load(f_in) return data['timings_tags'] def get_template(test_name, path='tests'): filename = '{}.inp'.format(test_name) with open(os.path.join(path, filename), 'r') as f_in: input_template = Template(f_in.read()) return input_template def write_input_files(combinations, test_name, template, test_path): timing_tags = get_timings_tags(test_name, test_path) test_path = os.path.join('runs', test_name) for hash, combination in combinations.items(): path = os.path.join(test_path, hash) mkdir_p(path) with open(os.path.join(path, 'inp'), 'w') as inputfile: inputfile.write(template.substitute(**combination)) timing_path = os.path.join(path, 'profiling') mkdir_p(timing_path) with open(os.path.join(timing_path, 'process_timings.sh'), 'w') as inputfile: inputfile.write('head -n2 time.000000.yaml > time.yaml\n') for tag in timing_tags: inputfile.write('grep {} time.000000.yaml >> time.yaml\n' .format(tag)) inputfile.write('echo > /dev/null\n') combination_list = [] for hash, combination in combinations.items(): combination_list.append({'hash': hash, **combination}) with open(os.path.join(test_path, 
'combinations.yaml'), 'w') as output: yaml.safe_dump(combination_list, output) def write_make_targets(combinations, test_name, filename='targets.inc'): test_path = os.path.join('runs', test_name) target_paths = [] for hash, combination in combinations.items(): path = os.path.join(test_path, hash) target_paths.append(os.path.join(path, 'profiling', 'time.yaml')) targets_string = ' '.join(target_paths) with open(os.path.join(test_path, filename), 'w') as make_targets: make_targets.write('targets += ' + targets_string + '\n') def create_test(test_name, test_path): combinations = get_combinations(test_name, test_path) template = get_template(test_name, test_path) write_input_files(combinations, test_name, template, test_path) write_make_targets(combinations, test_name) print('Created {} runs for test {}.'.format( len(combinations), test_name)) def get_test_names(path='tests'): test_files = glob.glob(os.path.join(path, '*.inp')) test_names = [] for full_filename in test_files: filename = os.path.split(full_filename)[1] test_name = os.path.splitext(filename)[0] test_names.append(test_name) return test_names def create_central_targets(): with open('targets.inc', 'w') as make_targets: make_targets.write('include runs/*/*inc\n') if __name__ == '__main__': if 'testsuite' in os.environ: test_path = os.path.join(os.environ['testsuite'], 'tests') else: test_path = 'tests' if not os.path.exists(test_path): raise FileNotFoundError('Directory containing tests not found. Please ' 'set the testsuite environment variable.') if len(sys.argv) == 1: test_names = get_test_names(path=test_path) elif sys.argv[1] == 'all': test_names = get_test_names(path=test_path) else: test_names = sys.argv[1:] for test_name in test_names: create_test(test_name, test_path) create_central_targets()
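The generator above hinges on outer_product_from_dict, which turns a dict of parameter lists into one dict per combination before keep_combination prunes and adjusts them. A short self-contained demo (the parameter values below are invented; only the key names appear in the script above):

def outer_product_from_dict(d):
    '''Generate list of dictionaries as outer product from d'''
    result = [[]]
    for key in d:
        result = [x + [y] for x in result for y in d[key]]
    return [{key: value for key, value in zip(d.keys(), r)} for r in result]

params = {'spin_components': ['unpolarized', 'spinors'],
          'periodic_dimensions': [0, 3]}
for combo in outer_product_from_dict(params):
    print(combo)
# -> four dictionaries, one per combination, e.g.
#    {'spin_components': 'unpolarized', 'periodic_dimensions': 0}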
0
ALLM/M3/octopus/testsuite
ALLM/M3/octopus/testsuite/periodic_systems/15-bandstructure.04-wannier90_u.mat
written on 20Jun2022 at 18:05:45 64 4 4 0.0000000000 +0.0000000000 +0.0000000000 0.4999999942 -0.0000000095 -0.8251250582 +0.0000000382 -0.0022072279 +0.0000000151 -0.2629900611 -0.0000000271 +0.4999999994 +0.0000000142 +0.5229998148 -0.0000000281 -0.0059614697 -0.0000000275 -0.6902431855 -0.0000000134 +0.5000000213 +0.0000000015 +0.1510183384 +0.0000000285 -0.7029963229 -0.0000000419 +0.4826899732 +0.0000000002 +0.4999999851 -0.0000000298 +0.1511068941 -0.0000000166 +0.7111650716 +0.0000000097 +0.4705432631 -0.0000000009 -0.2500000000 +0.0000000000 +0.0000000000 -0.4238973939 +0.2726426067 +0.0955696913 -0.2649092450 +0.5991083896 -0.5547392534 -0.0001326810 +0.0001228740 -0.1035147777 +0.4766715793 +0.3711645054 +0.7901302464 +0.0000000004 -0.0000001861 +0.0000000214 +0.0000000414 -0.4238974044 +0.2726425956 +0.0955695739 -0.2649089765 -0.2994409846 +0.2772616213 +0.5116656202 -0.4881865213 -0.4238973347 +0.2726425501 +0.0955696151 -0.2649089826 -0.2996675956 +0.2774777838 -0.5115330105 +0.4880636843 0.2500000000 +0.0000000000 +0.0000000000 -0.4238973965 -0.2726426116 +0.0955697093 +0.2649091983 +0.5991083647 +0.5547392951 -0.0001326600 -0.0001228786 -0.1035147176 -0.4766716221 +0.3711645110 -0.7901302258 -0.0000000018 +0.0000001192 -0.0000000316 -0.0000000360 -0.4238973708 -0.2726425988 +0.0955696505 +0.2649089935 -0.2994409534 -0.2772616453 +0.5116655779 +0.4881865742 -0.4238973406 -0.2726425288 +0.0955696342 +0.2649090249 -0.2996675831 -0.2774777773 -0.5115329771 -0.4880637108 0.0000000000 -0.2500000000 +0.0000000000 0.4994971309 +0.0672722694 -0.0921844873 +0.2661060158 +0.5989649987 +0.5184330534 +0.1500887041 +0.1288725999 +0.4994971478 +0.0672722951 -0.0921843637 +0.2661059177 -0.4292575801 -0.3710623881 +0.4463960037 +0.3813764061 +0.3878677617 -0.2957866556 -0.3812081018 -0.7853338379 +0.0000000520 -0.0000000547 -0.0000001250 +0.0000000515 +0.4994971427 +0.0672722365 -0.0921844849 +0.2661061053 -0.1697074650 -0.1473706547 -0.5964846486 -0.5102489026 0.0000000000 +0.0000000000 -0.2500000000 -0.4978841658 -0.0783218686 +0.0893114606 -0.2670839879 -0.5989586507 -0.5184109109 -0.1501472534 -0.1289228969 -0.4978841671 -0.0783219038 +0.0893114171 -0.2670839833 +0.4293068074 +0.3710929755 -0.4463538028 -0.3813405349 -0.4978841813 -0.0783219167 +0.0893112338 -0.2670839265 +0.1696518656 +0.1473179399 +0.5965011555 +0.5102634022 -0.3943255063 +0.2871210413 +0.3896492945 +0.7811800881 -0.0000000336 +0.0000000752 -0.0000000770 +0.0000001274 0.0000000000 +0.2500000000 +0.0000000000 0.4994971901 -0.0672723063 -0.0921844125 -0.2661058798 +0.5989649356 -0.5184331350 +0.1500887095 -0.1288726443 +0.4994971665 -0.0672723002 -0.0921843997 -0.2661058921 -0.4292575609 +0.3710624245 +0.4463960386 -0.3813763354 +0.3878676757 +0.2957865835 -0.3812081293 +0.7853338942 -0.0000000547 -0.0000000010 -0.0000001413 -0.0000000335 +0.4994971595 -0.0672723053 -0.0921845029 -0.2661060686 -0.1697073687 +0.1473706992 -0.5964846378 +0.5102489247 0.0000000000 +0.0000000000 +0.2500000000 -0.4978841207 +0.0783218799 +0.0893114184 +0.2670839779 -0.5989586240 +0.5184109816 -0.1501473199 +0.1289228763 -0.4978841984 +0.0783218811 +0.0893113143 +0.2670839942 +0.4293067770 -0.3710929857 -0.4463538444 +0.3813404908 -0.4978842200 +0.0783219076 +0.0893111946 +0.2670839469 +0.1696518528 -0.1473178772 +0.5965012061 -0.5102633253 -0.3943255438 -0.2871209522 +0.3896493045 -0.7811800969 -0.0000000544 -0.0000000936 -0.0000000696 -0.0000000988 -0.2500000000 -0.2500000000 +0.0000000000 0.2675206919 +0.4224128834 -0.4933843642 
-0.0810683137 +0.0260743077 +0.6949098102 -0.1146551656 -0.0572233529 +0.4878555986 +0.1095303513 +0.4062022547 -0.2915471847 -0.1116985143 -0.0627990161 -0.5898458579 +0.3683226492 +0.4878557280 +0.1095303432 +0.4062022041 -0.2915469885 +0.1116985779 +0.0627992588 +0.5898458676 -0.3683226151 +0.2675206850 +0.4224128645 -0.4933840258 -0.0810684345 -0.0260741876 -0.6949100871 +0.1146550304 +0.0572232350 0.2500000000 -0.2500000000 +0.0000000000 0.3255759518 +0.4162490333 -0.0000039649 -0.0000000047 -0.4331778351 +0.1819178020 -0.7071067223 +0.0000402358 -0.0569975153 +0.4663564304 +0.4999455233 -0.5000545046 +0.4892113408 +0.1998379175 -0.0000000307 -0.0000000338 +0.4663552284 +0.0570076025 +0.5000499516 +0.4999500096 +0.1998374879 -0.4892115559 -0.0000000559 +0.0000000249 +0.3255759482 +0.4162490133 -0.0000039452 +0.0000000069 -0.4331777028 +0.1819177205 +0.7071068378 -0.0000402271 -0.2500000000 +0.2500000000 +0.0000000000 0.3255759298 -0.4162491198 -0.0000039788 -0.0000001167 -0.4331778210 -0.1819177819 -0.7071066953 -0.0000400892 -0.0569975704 -0.4663564177 +0.4999455865 +0.5000544529 +0.4892113388 -0.1998379075 -0.0000000977 +0.0000000662 +0.4663551578 -0.0570076767 +0.5000498797 -0.4999500700 +0.1998374986 +0.4892116220 +0.0000000181 +0.0000000201 +0.3255758609 -0.4162490880 -0.0000039552 +0.0000000262 -0.4331776571 -0.1819177098 +0.7071068648 +0.0000400613 -0.2500000000 +0.0000000000 -0.2500000000 0.2622878685 +0.4256819076 -0.4937137628 -0.0790380369 -0.0271452957 -0.6948459139 -0.1147329550 -0.0573440016 +0.4864668591 +0.1155420209 +0.4049995056 -0.2932157915 +0.1119402058 +0.0626203586 -0.5898175149 +0.3683250149 +0.2622878348 +0.4256819818 -0.4937131168 -0.0790379467 +0.0271452529 +0.6948463562 +0.1147329404 +0.0573439824 +0.4864668749 +0.1155419649 +0.4049994035 -0.2932157118 -0.1119406053 -0.0626206620 +0.5898174875 -0.3683250583 0.0000000000 -0.2500000000 -0.2500000000 -0.1155442072 -0.4864663487 +0.1481992863 -0.4775324128 +0.7066880459 +0.0243268997 +0.0000417660 +0.0000014904 -0.1155442030 -0.4864664062 +0.1481991817 -0.4775319563 -0.7066883358 -0.0243269470 -0.0000416823 -0.0000013770 -0.4256830900 -0.2622859413 +0.2328690611 +0.4424612197 +0.0000280869 +0.0000308930 -0.4733877410 -0.5252657482 -0.4256831145 -0.2622858715 +0.2328690774 +0.4424613675 -0.0000278083 -0.0000311356 +0.4733876924 +0.5252656753 0.2500000000 +0.2500000000 +0.0000000000 0.2675206916 -0.4224130042 -0.4933842926 +0.0810683352 +0.0260743175 -0.6949098160 -0.1146550295 +0.0572232475 +0.4878556190 -0.1095303825 +0.4062022475 +0.2915472126 -0.1116984332 +0.0627990642 -0.5898458550 -0.3683226197 +0.4878556083 -0.1095303072 +0.4062022345 +0.2915470960 +0.1116984606 -0.0627992104 +0.5898458993 +0.3683226587 +0.2675206232 -0.4224128989 -0.4933839973 +0.0810684189 -0.0260742187 +0.6949101117 +0.1146550580 -0.0572231704 0.2500000000 +0.0000000000 -0.2500000000 0.3198352107 +0.4206762265 -0.0000038789 +0.0000001281 -0.4327261592 +0.1829895772 -0.7071066642 +0.0000402399 -0.0633897201 +0.4655306125 +0.4999334337 -0.5000666643 +0.4897045332 +0.1986261703 -0.0000000696 -0.0000000847 +0.3198351240 +0.4206761849 -0.0000038526 +0.0000000417 -0.4327259668 +0.1829893843 +0.7071068959 -0.0000401389 +0.4655292318 +0.0633996684 +0.5000620185 +0.4999378669 +0.1986259405 -0.4897048693 -0.0000000421 -0.0000000101 0.0000000000 +0.2500000000 -0.2500000000 -0.5284532785 -0.0004522087 -0.0000039764 -0.0000000255 +0.4698265498 -0.0002465036 +0.7071066965 +0.0000000030 -0.5284532507 -0.0004522074 -0.0000039627 +0.0000000297 
+0.4698263263 -0.0002464169 -0.7071068658 -0.0000000596 -0.3319367238 -0.3324980632 +0.4999892817 -0.5000107032 -0.3738688393 -0.3734771147 +0.0000000148 +0.0000000514 -0.3325052497 +0.3319295009 +0.5000061892 +0.4999938256 -0.3734767600 +0.3738691753 +0.0000001001 -0.0000000440 -0.2500000000 +0.0000000000 +0.2500000000 0.3198351157 -0.4206762436 -0.0000038645 -0.0000000904 -0.4327262452 -0.1829895308 -0.7071066564 -0.0000402322 -0.0633897677 -0.4655306205 +0.4999333803 +0.5000666693 +0.4897045210 -0.1986262882 -0.0000000643 +0.0000001582 +0.3198351029 -0.4206762249 -0.0000039273 -0.0000000198 -0.4327259619 -0.1829893104 +0.7071069037 +0.0000401715 +0.4655292284 -0.0633997936 +0.5000620364 -0.4999378974 +0.1986260220 +0.4897047739 -0.0000000639 -0.0000000588 0.2500000000 +0.0000000000 +0.2500000000 0.2622878801 -0.4256818607 -0.4937137827 +0.0790380495 -0.0271453481 +0.6948459167 -0.1147329840 +0.0573439920 +0.4864669337 -0.1155420075 +0.4049995004 +0.2932157363 +0.1119402565 -0.0626203799 -0.5898174665 -0.3683250287 +0.2622878375 -0.4256819568 -0.4937131517 +0.0790379321 +0.0271453211 -0.6948463250 +0.1147330217 -0.0573440575 +0.4864668429 -0.1155420316 +0.4049994128 +0.2932156695 -0.1119406817 +0.0626206757 +0.5898175165 +0.3683250309 0.0000000000 -0.2500000000 +0.2500000000 -0.5284532366 +0.0004522365 -0.0000039441 -0.0000000401 +0.4698265594 +0.0002465098 +0.7071067215 -0.0000001444 -0.5284532374 +0.0004522067 -0.0000039942 -0.0000000749 +0.4698263787 +0.0002465223 -0.7071068409 +0.0000001136 -0.3319367829 +0.3324981586 +0.4999893312 +0.5000105752 -0.3738688572 +0.3734770645 +0.0000000143 -0.0000000570 -0.3325052615 -0.3319294223 +0.5000060981 -0.4999939951 -0.3734767346 -0.3738691551 +0.0000000462 +0.0000000743 0.0000000000 +0.2500000000 +0.2500000000 -0.1155441381 +0.4864663842 +0.1481993220 +0.4775323533 +0.7066880612 -0.0243270249 +0.0000417575 -0.0000014759 -0.1155441524 +0.4864664183 +0.1481991357 +0.4775319995 -0.7066883151 +0.0243269777 -0.0000417294 +0.0000013958 -0.4256830835 +0.2622859844 +0.2328690750 -0.4424612770 +0.0000280566 -0.0000309067 -0.4733878413 +0.5252655872 -0.4256830211 +0.2622859552 +0.2328690475 -0.4424613396 -0.0000278668 +0.0000311487 +0.4733878213 -0.5252656297 -0.2500000000 -0.2500000000 -0.2500000000 -0.2292644840 +0.4305448672 -0.6113269280 +0.6231759789 -0.0000002459 +0.0000000038 +0.0000000082 -0.0000000028 +0.1470562735 +0.4820760816 -0.0027004120 -0.2816080988 +0.1497872795 +0.8026388225 +0.0002164007 +0.0011554970 +0.1470563641 +0.4820762319 -0.0027001577 -0.2816080027 -0.0747195960 -0.4003163393 -0.1213958502 -0.6972040106 +0.1470563612 +0.4820761110 -0.0027001664 -0.2816079258 -0.0750675787 -0.4023224584 +0.1211795186 +0.6960486746 0.2500000000 -0.2500000000 -0.2500000000 -0.3365607266 -0.4240192681 -0.0024708546 +0.0807280115 +0.3579689312 +0.7564850725 +0.0000000165 +0.0000000109 +0.0379886471 -0.3304379958 -0.6509432534 +0.6122126242 +0.1014802499 -0.2837920651 -0.0000000116 -0.0000000404 -0.5424567689 -0.0623836762 -0.2138874937 -0.2274310025 -0.3042077921 -0.1087834190 +0.6716841039 +0.2209987815 -0.5424567618 -0.0623836803 -0.2138874967 -0.2274310547 -0.3042077627 -0.1087833908 -0.6716841047 -0.2209987933 -0.2500000000 +0.2500000000 -0.2500000000 -0.3836445601 -0.3819453412 +0.0724145086 -0.0357672713 -0.2036132714 +0.8117592530 +0.0000001430 +0.0000001408 -0.5460308592 +0.0012049901 -0.1001941178 +0.2956922223 -0.1660016877 -0.2771636463 +0.7038489981 -0.0677970352 -0.0007462798 -0.3326135541 +0.8463466388 +0.2867555632 +0.2585609607 
-0.1548628759 +0.0000000896 -0.0000000403 -0.5460308335 +0.0012051091 -0.1001939028 +0.2956922142 -0.1660015333 -0.2771633344 -0.7038491906 +0.0677972481 0.2500000000 +0.2500000000 -0.2500000000 -0.3856075399 +0.3799635578 +0.0727267488 +0.0351277677 -0.1991622816 -0.8128626978 +0.0000001612 -0.0000000910 -0.5460173432 -0.0040183288 -0.1027959772 -0.2947978142 -0.1675178258 +0.2762499988 +0.7038490261 +0.0677968620 -0.5460173470 -0.0040184086 -0.1027958817 -0.2947977051 -0.1675177606 +0.2762496966 -0.7038492020 -0.0677970112 -0.0024600919 +0.3326053081 +0.8437867618 -0.2942028487 +0.2577085291 +0.1562771750 -0.0000000037 -0.0000000242 -0.2500000000 -0.2500000000 +0.2500000000 -0.3856074781 -0.3799635630 +0.0727267618 -0.0351276859 -0.1991624246 +0.8128626920 +0.0000000774 +0.0000001673 -0.5460173506 +0.0040183380 -0.1027960147 +0.2947977575 -0.1675178537 -0.2762499430 +0.7038490396 -0.0677970084 -0.5460173444 +0.0040184087 -0.1027960017 +0.2947977620 -0.1675176289 -0.2762497467 -0.7038491716 +0.0677970406 -0.0024601383 -0.3326053655 +0.8437867428 +0.2942028543 +0.2577084731 -0.1562772364 +0.0000000053 -0.0000000627 0.2500000000 -0.2500000000 +0.2500000000 -0.3836444661 +0.3819453171 +0.0724143977 +0.0357672288 -0.2036134980 -0.8117592637 +0.0000000014 -0.0000000358 -0.5460309321 -0.0012049478 -0.1001940996 -0.2956921498 -0.1660015634 +0.2771635689 +0.7038490320 +0.0677970606 -0.0007462632 +0.3326136090 +0.8463466650 -0.2867555742 +0.2585608783 +0.1548627316 +0.0000000945 +0.0000000775 -0.5460308105 -0.0012049259 -0.1001939850 -0.2956922115 -0.1660014398 +0.2771635019 -0.7038491703 -0.0677970812 -0.2500000000 +0.2500000000 +0.2500000000 -0.3365606686 +0.4240192872 -0.0024708755 -0.0807279861 +0.3579689372 -0.7564850874 -0.0000000635 -0.0000000128 +0.0379887909 +0.3304380470 -0.6509432508 -0.6122125900 +0.1014802927 +0.2837920504 +0.0000000234 -0.0000000113 -0.5424567576 +0.0623837733 -0.2138875561 +0.2274310327 -0.3042077569 +0.1087833806 +0.6716840930 -0.2209987911 -0.5424567312 +0.0623837720 -0.2138875516 +0.2274310221 -0.3042077331 +0.1087834858 -0.6716840960 +0.2209988432 0.2500000000 +0.2500000000 +0.2500000000 -0.2292645106 -0.4305448733 -0.6113270133 -0.6231758814 -0.0000001018 -0.0000000343 +0.0000001094 +0.0000000508 +0.1470563139 -0.4820761562 -0.0027002643 +0.2816081073 +0.1497871812 -0.8026387861 +0.0002163521 -0.0011555063 +0.1470563184 -0.4820761184 -0.0027002308 +0.2816080527 -0.0747195716 +0.4003163664 -0.1213960190 +0.6972040359 +0.1470563076 -0.4820761497 -0.0027001206 +0.2816078993 -0.0750675786 +0.4023225452 +0.1211795933 -0.6960486068 -0.5000000000 +0.0000000000 +0.0000000000 -0.5772644664 -0.0099563029 -0.0000054660 -0.0000001717 +0.8163751124 +0.0140803452 +0.0001663389 +0.0000028519 -0.0000095204 -0.0000000119 +0.9998498549 +0.0173282311 -0.0000000342 +0.0000000128 +0.0000000512 +0.0000000161 -0.5772643989 -0.0099562743 -0.0000054708 -0.0000001990 -0.4080435632 -0.0070376551 -0.7070847872 -0.0121947553 -0.5772643806 -0.0099563583 -0.0000055439 -0.0000001788 -0.4083316568 -0.0070427128 +0.7069184670 +0.0121920070 0.0000000000 -0.5000000000 +0.0000000000 -0.2831200133 -0.5031663612 -0.0000025376 -0.0000047055 +0.3903044769 +0.6936551700 -0.0893130480 -0.1587286074 -0.2831199537 -0.5031664003 -0.0000026658 -0.0000048143 -0.2724995686 -0.4842906076 -0.2933568704 -0.5213588059 -0.0000046839 -0.0000081429 +0.4903050486 +0.8715508931 -0.0000000082 -0.0000001171 -0.0000000369 -0.0000000190 -0.2831199963 -0.5031663780 -0.0000026396 -0.0000047359 -0.1178048366 -0.2093645945 
+0.3826699536 +0.6800873895 0.0000000000 +0.0000000000 -0.5000000000 0.2831198439 +0.5031663955 +0.0000026501 +0.0000047503 -0.3902948693 -0.6936386812 +0.0893539708 +0.1588014333 +0.2831198748 +0.5031664725 +0.0000026840 +0.0000047413 +0.2725302411 +0.4843453393 +0.2933283266 +0.5213079610 +0.2831199039 +0.5031664633 +0.0000027723 +0.0000047780 +0.1177645691 +0.2092932420 -0.3826823127 -0.6801093462 +0.0000046119 +0.0000082770 -0.4903051013 -0.8715508634 -0.0000000236 +0.0000000420 +0.0000000159 -0.0000000782 -0.5000000000 -0.2500000000 +0.0000000000 -0.2599098156 +0.4802062242 +0.1171518733 +0.2893927753 +0.2795361352 -0.1619748115 +0.6594248199 -0.2552621136 +0.2925108471 +0.1583345080 -0.8283174712 +0.3352932323 -0.1511020213 -0.2607764990 +0.0000000116 +0.0000001111 +0.1544321966 +0.5188603655 -0.0743642841 -0.0315132164 -0.2153452437 +0.8087261058 +0.0000000927 -0.0000002104 -0.2599097953 +0.4802061787 +0.1171519168 +0.2893927684 +0.2795357735 -0.1619745948 -0.6594250265 +0.2552622077 -0.2500000000 -0.5000000000 +0.0000000000 0.1866125605 -0.5131538586 -0.1298845674 -0.2839062285 -0.3043040264 -0.1085143382 +0.6588204275 -0.2568179598 -0.2289155654 -0.4905742787 +0.0756901004 +0.0281805068 +0.7561726127 -0.3586284826 +0.0000000365 -0.0000001841 -0.3125824609 -0.1136862907 +0.8126146238 -0.3717376619 -0.1012337310 +0.2838801649 -0.0000000445 +0.0000000716 +0.1866125965 -0.5131537605 -0.1298846181 -0.2839061291 -0.3043036979 -0.1085142034 -0.6588206665 +0.2568180468 0.2500000000 -0.5000000000 +0.0000000000 -0.0042341485 -0.5460157400 -0.2167695074 +0.2246858253 -0.3230705507 +0.0012998182 +0.5363034223 +0.4608454331 +0.3798111624 -0.3857574893 +0.0807527553 -0.0014546296 +0.5941616960 +0.5893923956 +0.0000000638 +0.0000002054 +0.3326045339 -0.0025913816 +0.6431187230 +0.6204269524 -0.0012149400 -0.3013880183 +0.0000001238 -0.0000001721 -0.0042340867 -0.5460156640 -0.2167696477 +0.2246859183 -0.3230701309 +0.0012998618 -0.5363035296 -0.4608455816 -0.5000000000 +0.2500000000 +0.0000000000 0.0826992607 +0.5397332349 -0.2071879216 +0.2335505744 +0.2094116300 +0.2460142152 -0.5362776405 -0.4608755030 -0.3287756000 +0.0503882362 +0.6684914315 +0.5930013638 -0.2295016760 +0.1953591558 -0.0000000861 +0.0000000456 -0.3203982023 +0.4363603499 +0.0806214790 -0.0048288708 +0.0670407532 -0.8342164189 -0.0000000180 -0.0000001476 +0.0826991914 +0.5397332476 -0.2071879468 +0.2335506300 +0.2094114374 +0.2460140440 +0.5362776673 +0.4608756087 -0.5000000000 +0.0000000000 -0.2500000000 -0.2564629560 +0.4820559574 +0.1159352820 +0.2898822488 +0.2794800755 -0.1620713546 +0.6594248544 -0.2552620676 +0.2936376124 +0.1562347367 -0.8297185632 +0.3318109924 -0.1511920864 -0.2607243445 +0.0000001008 +0.0000000590 -0.2564629411 +0.4820559313 +0.1159352386 +0.2898822712 +0.2794796471 -0.1620713327 -0.6594250119 +0.2552622023 +0.1581453014 +0.5177406381 -0.0742314530 -0.0318252119 -0.2150657641 +0.8088005067 -0.0000000167 -0.0000002414 0.0000000000 -0.5000000000 -0.2500000000 -0.5403972351 -0.0782424028 -0.1409165478 +0.2785953389 -0.1452136551 -0.2885988373 -0.5079991005 -0.4918706515 -0.5403972533 -0.0782425197 -0.1409164721 +0.2785951712 -0.1452134084 -0.2885984842 +0.5079991966 +0.4918709103 -0.0476734554 +0.3291803457 -0.7973923354 -0.4033568882 -0.2692310230 +0.1354654278 +0.0000000016 +0.0000000481 -0.4337015941 +0.3239881794 -0.0251786451 -0.0767408769 +0.7946242269 +0.2626476702 -0.0000002759 +0.0000000108 -0.2500000000 +0.0000000000 -0.5000000000 0.1830134748 -0.5144484487 -0.1255045023 -0.2858694866 
-0.3040237962 -0.1092970346 +0.6588205002 -0.2568177492 -0.2323461763 -0.4889587251 +0.0752479406 +0.0293408909 +0.7570929474 -0.3566815780 -0.0000000071 -0.0000003195 +0.1830134797 -0.5144484117 -0.1255044801 -0.2858694617 -0.3040233918 -0.1092966575 -0.6588207237 +0.2568179243 -0.3133711430 -0.1114939725 +0.8182336710 -0.3592005735 -0.1019638060 +0.2836187770 +0.0000000805 +0.0000000717 0.2500000000 +0.0000000000 -0.5000000000 -0.0003895343 -0.5460320330 -0.2132892395 +0.2279923027 -0.3230661948 +0.0021309028 +0.5363033874 +0.4608453614 +0.3825179741 -0.3830736944 +0.0807210906 -0.0026961283 +0.5956755791 +0.5878622160 -0.0000000619 +0.0000002838 -0.0003895747 -0.5460319862 -0.2132892133 +0.2279921211 -0.3230657094 +0.0021307955 -0.5363035928 -0.4608456205 +0.3326142920 -0.0002496445 +0.6525823946 +0.6104650432 -0.0019902440 -0.3013840715 -0.0000000280 -0.0000000497 0.0000000000 -0.2500000000 -0.5000000000 -0.5404114795 -0.0781444248 -0.1407028584 +0.2787031996 +0.1448540991 +0.2887795588 -0.5079989643 -0.4918707376 -0.5404114253 -0.0781443815 -0.1407027551 +0.2787030366 +0.1448539105 +0.2887791786 +0.5079992229 +0.4918709376 -0.4336428616 +0.3240669191 -0.0252373362 -0.0767216540 -0.7942964795 -0.2636370001 -0.0000002394 -0.0000000074 -0.0476136571 +0.3291888044 -0.7977016063 -0.4027450761 +0.2693993965 -0.1351302590 -0.0000000094 +0.0000000143 0.0000000000 +0.2500000000 -0.5000000000 -0.5441405829 -0.0454107099 +0.0745929602 +0.3031643785 -0.2058294824 +0.2490190608 -0.6052057961 +0.3656854103 -0.5441405712 -0.0454107655 +0.0745929308 +0.3031643990 -0.2058293117 +0.2490186989 +0.6052060010 -0.3656854131 -0.3496398499 -0.4133005864 +0.0418056954 -0.0691044689 +0.8331578592 -0.0791166128 -0.0000002102 -0.0000000919 +0.0276495728 -0.3314632655 +0.8677201188 -0.2135255100 -0.2323080159 -0.1920136520 +0.0000000365 +0.0000000860 -0.5000000000 +0.0000000000 +0.2500000000 0.0787892345 +0.5403177678 -0.2062047639 +0.2344192199 +0.2093268128 +0.2460865935 -0.5362775102 -0.4608755764 -0.3291318924 +0.0480065309 +0.6709773263 +0.5901870487 -0.2295691864 +0.1952798870 -0.0000000832 +0.0000000408 +0.0787893564 +0.5403177556 -0.2062046998 +0.2344192690 +0.2093264881 +0.2460863155 +0.5362777199 +0.4608756256 -0.3235491155 +0.4340293387 +0.0806003920 -0.0051676736 +0.0673285920 -0.8341931645 +0.0000000628 -0.0000002577 0.0000000000 -0.5000000000 +0.2500000000 -0.5441537110 -0.0452526467 +0.0747254644 +0.3031317891 -0.2061392857 +0.2487626953 +0.6052057980 -0.3656854092 -0.5441537781 -0.0452525881 +0.0747253971 +0.3031316909 -0.2061390843 +0.2487623694 -0.6052059441 +0.3656855053 +0.0275533208 -0.3314712629 +0.8676267637 -0.2139045966 -0.2320688225 -0.1923026273 -0.0000000237 +0.0000000192 -0.3497599428 -0.4131989891 +0.0417754999 -0.0691226917 +0.8332556702 -0.0780796200 +0.0000002333 +0.0000001175 -0.5000000000 -0.2500000000 -0.2500000000 -0.2670914383 +0.3865218801 +0.6087250531 +0.3597969655 +0.2023269134 -0.4881872317 +0.0000000072 +0.0000000613 +0.3865159341 +0.2670999029 -0.3598024462 +0.6087217369 -0.4881876181 -0.2023263965 -0.0000000467 +0.0000000106 +0.0949823820 +0.5198474874 -0.0000010442 -0.0000039011 +0.1797092296 +0.4340985685 -0.1817563846 -0.6833480660 +0.0949823960 +0.5198474350 -0.0000011435 -0.0000039475 +0.1797092338 +0.4340985402 +0.1817564244 +0.6833481101 -0.2500000000 -0.5000000000 -0.2500000000 0.4689464552 +0.0287455876 +0.4996586847 -0.5003412307 +0.0735712007 -0.5233068349 -0.0000001059 -0.0000000766 +0.3958393060 -0.3501059825 -0.0000039307 +0.0000002104 +0.2827311089 
+0.3752336046 -0.7071067153 -0.0000076351 +0.0287557196 -0.4689457549 +0.5003364425 +0.4996631824 -0.5233072163 -0.0735710086 -0.0000000672 -0.0000000591 +0.3958392276 -0.3501060577 -0.0000038207 +0.0000000870 +0.2827309993 +0.3752334515 +0.7071068470 +0.0000075512 0.2500000000 -0.5000000000 -0.2500000000 0.1317598218 -0.4829699562 +0.4991822139 -0.0140161526 +0.4082987376 +0.2950812763 -0.4432045672 -0.2231343281 +0.1320646536 -0.4821332950 +0.0135227964 +0.4999235860 +0.2895660408 -0.4088963723 +0.4478119781 +0.2200446599 +0.1312280539 -0.4818283039 -0.5004170004 +0.0142639742 -0.4033811599 -0.2889685978 -0.4509015465 -0.2246522487 +0.1309233157 -0.4826650524 -0.0147572999 -0.4996756886 -0.2944838238 +0.4027834565 +0.4462940064 +0.2277419274 -0.5000000000 +0.2500000000 -0.2500000000 0.4450595859 +0.2294609049 -0.4415619373 -0.2330077455 +0.4457888366 +0.2324098057 -0.4405407374 -0.2306121381 +0.4437633844 +0.2287874497 +0.4428561934 +0.2336855268 -0.4409246947 -0.2298767573 -0.4453980231 -0.2331577131 +0.4440746633 +0.2297726007 +0.2339935769 -0.4418701488 +0.2287113388 -0.4446232710 +0.4442421466 +0.2294563324 +0.4447481729 +0.2284760989 -0.2326995724 +0.4425478496 -0.2335752618 +0.4420904352 +0.4416964103 +0.2343138282 -0.2500000000 -0.2500000000 -0.5000000000 -0.4686416910 -0.0333468455 -0.4996088869 +0.5003909235 -0.0691416528 +0.5239105544 +0.0000000987 +0.0000001310 -0.3992565199 +0.3462040068 +0.0000038995 -0.0000002203 -0.2858952529 -0.3728283751 +0.7071067275 +0.0000076634 -0.3992565624 +0.3462040555 +0.0000040066 -0.0000001127 -0.2858951822 -0.3728281350 -0.7071068348 -0.0000075969 -0.0333569382 +0.4686408373 -0.5003863207 -0.4996132642 +0.5239109579 +0.0691411938 -0.0000000208 -0.0000000982 0.2500000000 -0.2500000000 -0.5000000000 0.1434623429 -0.4796217917 +0.4991657578 -0.0146673435 +0.4078359838 +0.2957181881 -0.4432051450 -0.2231359929 +0.1437480842 -0.4787821059 +0.0141746349 +0.4999032368 +0.2902055204 -0.4084444387 +0.4478102324 +0.2200452434 +0.1426228418 -0.4793357821 -0.0154045636 -0.4996583117 -0.2951100405 +0.4023233329 +0.4462959414 +0.2277413805 +0.1429085000 -0.4784963637 -0.5003959949 +0.0149120106 -0.4029317089 -0.2895971299 -0.4509008924 -0.2246503869 -0.2500000000 +0.2500000000 -0.5000000000 0.3922142841 +0.3111051781 +0.4181287429 -0.2730384336 -0.1649116579 +0.4760080623 -0.2354787194 +0.4367724053 +0.3919676884 +0.3102532773 -0.2738640012 -0.4184528204 -0.4718377255 -0.1685681574 +0.2410240279 -0.4368767975 +0.3913622652 +0.3113514963 +0.2727143570 +0.4189542237 +0.4723517419 +0.1607412653 +0.2353744348 -0.4423177750 +0.3911157760 +0.3104996483 -0.4192783053 +0.2735398595 +0.1643978291 -0.4681814159 -0.2409196682 +0.4424220782 0.2500000000 +0.2500000000 -0.5000000000 0.3150136207 +0.3485733717 -0.0861752634 +0.7018362064 +0.3699818003 -0.3773277806 -0.0000000228 -0.0000000480 -0.0266857672 +0.5277791581 -0.0000023698 -0.0000031727 -0.4698039203 +0.0046179861 -0.4357780722 -0.5568639444 -0.0266857774 +0.5277791565 -0.0000023775 -0.0000031373 -0.4698038815 +0.0046178875 +0.4357780316 +0.5568640107 -0.3485666716 +0.3150212058 +0.7018366481 +0.0861689193 +0.3773276777 +0.3699823991 +0.0000000216 +0.0000000021 -0.5000000000 -0.2500000000 +0.2500000000 0.4432348213 +0.2329662986 -0.4426724259 -0.2308913667 +0.4450071836 +0.2339027777 -0.4405393807 -0.2306145014 +0.4419397302 +0.2322906834 +0.4439653744 +0.2315707031 -0.4401542190 -0.2313491201 -0.4453991873 -0.2331554870 +0.4429249640 +0.2319809748 -0.2305846461 +0.4436587328 -0.2350525583 +0.4413037460 
+0.4416987970 +0.2343147984 +0.4422494674 +0.2332758503 +0.2318775614 -0.4429791217 +0.2301993725 -0.4438575140 +0.4442399669 +0.2294551993 -0.2500000000 -0.5000000000 +0.2500000000 -0.3845225123 -0.3205669542 -0.4177734280 +0.2735777436 +0.1656558203 -0.4757508824 -0.2354792356 +0.4367706758 -0.3842932888 -0.3197064263 +0.2744061101 +0.4181000328 +0.4715725867 +0.1693049521 +0.2410259173 -0.4368772760 -0.3834328601 -0.3199352516 +0.4189285872 -0.2740794269 -0.1651263989 +0.4679235042 -0.2409191643 +0.4424237856 -0.3836619187 -0.3207957969 -0.2732511131 -0.4186016796 -0.4721020805 -0.1614771436 +0.2353726404 -0.4423172274 0.2500000000 -0.5000000000 +0.2500000000 0.3115777976 +0.3516480394 -0.0861044750 +0.7018448067 +0.3667763241 -0.3804444076 +0.0000000333 +0.0000000620 -0.0318637652 +0.5274919134 -0.0000023353 -0.0000031958 -0.4697480010 +0.0085923706 -0.4357784721 -0.5568636204 -0.3516411749 +0.3115853381 +0.7018454293 +0.0860980841 +0.3804443194 +0.3667768999 -0.0000000297 -0.0000000335 -0.0318637899 +0.5274918712 -0.0000022948 -0.0000032580 -0.4697479777 +0.0085924678 +0.4357785347 +0.5568636281 -0.5000000000 +0.2500000000 +0.2500000000 0.2900481322 -0.3696068884 +0.5239720552 +0.4748193060 +0.4063248983 -0.3378798872 -0.0000000058 +0.0000000984 +0.3696129735 +0.2900399779 +0.4748145145 -0.5239762293 +0.3378797174 +0.4063255414 +0.0000000276 -0.0000000246 +0.5246506587 -0.0632821246 -0.0000039392 -0.0000000669 -0.4678521245 -0.0430289092 -0.7065180201 +0.0288487250 +0.5246506632 -0.0632821515 -0.0000040344 +0.0000000178 -0.4678520397 -0.0430288435 +0.7065180720 -0.0288487850 -0.5000000000 -0.5000000000 +0.0000000000 -0.1366995853 +0.4816008860 +0.1363579116 -0.4803960352 -0.1928335696 +0.6793644117 +0.0097661243 -0.0344065782 -0.1363575727 +0.4803961606 -0.1366999295 +0.4816008190 -0.0097661543 +0.0344065686 -0.1928335552 +0.6793643734 -0.1363575959 +0.4803960108 -0.1366998663 +0.4816008171 +0.0097660795 -0.0344065063 +0.1928335911 -0.6793644828 -0.1366995951 +0.4816009248 +0.1363579630 -0.4803959358 +0.1928336342 -0.6793644283 -0.0097660205 +0.0344065220 -0.5000000000 +0.0000000000 -0.5000000000 -0.1366986995 +0.4815968936 +0.1363590842 -0.4803999994 -0.1928321202 +0.6793585781 -0.0097982470 +0.0345199732 -0.1363590605 +0.4804001202 -0.1366987139 +0.4815968454 -0.0097982194 +0.0345198668 +0.1928321024 -0.6793585395 -0.1366986372 +0.4815967351 +0.1363589569 -0.4804000838 +0.1928320295 -0.6793586976 +0.0097983307 -0.0345198889 -0.1363588708 +0.4803999941 -0.1366986972 +0.4815967622 +0.0097983463 -0.0345199988 -0.1928320497 +0.6793587355 0.0000000000 -0.5000000000 -0.5000000000 0.4975617107 +0.0523158558 +0.4969559779 +0.0522519769 -0.7032301445 -0.0739408124 -0.0000488666 -0.0000051744 +0.4975615331 +0.0523159259 +0.4969559079 +0.0522520101 +0.7032303006 +0.0739409202 +0.0000488122 +0.0000050691 +0.4969559660 +0.0522521404 -0.4975616363 -0.0523156799 -0.0000488022 -0.0000051277 +0.7032301703 +0.0739411560 +0.4969558874 +0.0522521553 -0.4975616638 -0.0523155651 +0.0000488710 +0.0000051690 -0.7032302203 -0.0739410943 -0.5000000000 -0.5000000000 -0.2500000000 0.2263849126 -0.2436847210 -0.7864471366 +0.4243021549 +0.2451016988 +0.1753891901 +0.0000000882 -0.0000000043 -0.4000279554 -0.3716567228 -0.1482344158 -0.2747713432 +0.1880092259 -0.2627331327 -0.6810873469 -0.1900527297 -0.4000280004 -0.3716567976 -0.1482343053 -0.2747713635 +0.1880091048 -0.2627331402 +0.6810873225 +0.1900527425 -0.0198966440 -0.5409893628 -0.0231403935 +0.0773799736 -0.8256364374 +0.1368793922 -0.0000000912 
-0.0000000314 -0.5000000000 -0.2500000000 -0.5000000000 0.2289410716 -0.2412848484 -0.7893815027 +0.4188175717 +0.2460861230 +0.1740054957 +0.0000000917 +0.0000000658 -0.3960878716 -0.3758530053 -0.1463182256 -0.2757965736 +0.1865258060 -0.2637884537 -0.6810874312 -0.1900521763 -0.0141925764 -0.5411691111 -0.0236783286 +0.0772171403 -0.8248520386 +0.1415292491 -0.0000000853 -0.0000001186 -0.3960879197 -0.3758529604 -0.1463181144 -0.2757966117 +0.1865256008 -0.2637883037 +0.6810875519 +0.1900521721 -0.2500000000 -0.5000000000 -0.5000000000 -0.0480753471 -0.3291218669 +0.3062770376 -0.8394794689 -0.1743268148 +0.2458583270 -0.0000000653 -0.0000000620 -0.4340968123 -0.3234585261 -0.0340807815 -0.0732229957 -0.1404510330 -0.8250363265 -0.0000000363 -0.0000001344 -0.5403013812 +0.0789021015 +0.2932927672 +0.1070143659 +0.2635443329 +0.1868704166 +0.7037714273 +0.0685985725 -0.5403012333 +0.0789021565 +0.2932928267 +0.1070144237 +0.2635445151 +0.1868703629 -0.7037714578 -0.0685984632 0.2500000000 -0.5000000000 -0.5000000000 -0.2133530130 +0.2551724696 -0.1787716098 +0.8755409705 +0.0197426746 +0.3007431628 -0.0000000753 -0.0000000596 -0.5392102928 +0.0481419213 -0.0673832303 +0.0445266569 +0.5517447765 -0.6292767231 -0.0000000286 -0.0000000895 -0.4189132415 -0.3502324335 +0.3058965712 +0.0624507605 -0.3223789135 +0.0211655156 +0.6354588663 +0.3101483762 -0.4189131356 -0.3502325538 +0.3058965578 +0.0624506515 -0.3223789078 +0.0211655604 -0.6354588590 -0.3101484363 -0.5000000000 +0.2500000000 -0.5000000000 -0.1401775297 +0.3016334517 -0.0984563254 +0.8881653425 -0.2524005200 -0.1647124001 -0.0000001200 +0.0000000044 -0.4951809686 -0.2301020458 +0.3103064099 +0.0343901361 +0.1765599331 -0.2705600735 +0.4275659367 +0.5631938510 -0.5084620020 +0.1858271434 -0.0630575068 +0.0504665624 +0.1721774903 +0.8190033297 +0.0000000095 +0.0000000830 -0.4951809294 -0.2301019128 +0.3103065095 +0.0343900256 +0.1765599450 -0.2705599434 -0.4275660616 -0.5631938555 -0.5000000000 -0.5000000000 +0.2500000000 -0.1433480911 +0.3001396588 -0.1046365916 +0.8874584681 -0.2533243881 -0.1632878304 -0.0000000979 +0.0000000125 -0.4927288155 -0.2353070182 +0.3100594686 +0.0365494193 +0.1750330162 -0.2715503894 +0.4275661766 +0.5631937335 -0.4927287754 -0.2353070617 +0.3100595955 +0.0365493495 +0.1750329307 -0.2715504057 -0.4275661865 -0.5631936962 -0.5103918490 +0.1804592273 -0.0634072043 +0.0500263509 +0.1767888612 +0.8180203035 +0.0000000159 -0.0000000727 -0.5000000000 -0.5000000000 -0.5000000000 -0.0000095611 -0.0000012377 +0.9903762819 +0.1384009400 +0.0000001219 -0.0000001072 +0.0000000287 +0.0000000809 -0.5717906378 -0.0799297064 -0.0000054328 -0.0000007148 -0.8086325519 -0.1130450929 +0.0009455309 +0.0001321717 -0.5717907496 -0.0799297386 -0.0000055050 -0.0000009314 +0.4034974280 +0.0564080747 -0.7007689580 -0.0979663092 -0.5717906385 -0.0799296553 -0.0000055756 -0.0000008327 +0.4051350513 +0.0566369835 +0.6998235861 +0.0978340768
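The reference data above is a Wannier90 U-matrix file: a date line, then "num_kpts num_wann num_bands" (here 64 4 4), then, for every k-point, its fractional coordinates followed by num_wann*num_bands complex entries written as (re, im) pairs. A hedged Python reader sketch for that layout; whether the band or the Wannier index runs fastest is left to the Wannier90 manual rather than assumed here:

import numpy as np

def read_u_mat(filename):
    """Read k-points and the (still flat) complex U entries from a _u.mat file."""
    with open(filename) as f:
        f.readline()                                  # date/comment line
        tokens = f.read().split()
    num_kpts, num_wann, num_bands = (int(t) for t in tokens[:3])
    values = np.array(tokens[3:], dtype=float)
    per_kpt = 3 + 2 * num_wann * num_bands            # 3 coords + (re, im) pairs
    values = values.reshape(num_kpts, per_kpt)
    kpoints = values[:, :3]
    pairs = values[:, 3:].reshape(num_kpts, num_wann * num_bands, 2)
    u_flat = pairs[..., 0] + 1j * pairs[..., 1]
    # Reshaping u_flat into (num_bands, num_wann) matrices depends on which
    # index runs fastest in the file; consult the Wannier90 manual for that.
    return kpoints, u_flat

# kpoints, u = read_u_mat('15-bandstructure.04-wannier90_u.mat')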
0
ALLM/M3/boa/aegir/conf
ALLM/M3/boa/aegir/conf/hhvm/hhvm_intercept.php
<?php function __forbidden_function($name, $obj, $args, $data, &$done) { // for debugging only // print 'Calling ' . $name . ' is forbidden!<br>'; $intercepted = TRUE; } fb_intercept('shell_exec', '__forbidden_function'); fb_intercept('disk_free_space', '__forbidden_function'); fb_intercept('disk_total_space', '__forbidden_function'); fb_intercept('diskfreespace', '__forbidden_function'); fb_intercept('dl', '__forbidden_function'); fb_intercept('get_cfg_var', '__forbidden_function'); fb_intercept('get_current_user', '__forbidden_function'); fb_intercept('getlastmo', '__forbidden_function'); fb_intercept('getmygid', '__forbidden_function'); fb_intercept('getmyinode', '__forbidden_function'); fb_intercept('getmypid', '__forbidden_function'); fb_intercept('getmyuid', '__forbidden_function'); fb_intercept('ini_restore', '__forbidden_function'); fb_intercept('link', '__forbidden_function'); fb_intercept('pfsockopen', '__forbidden_function'); fb_intercept('posix_getlogin', '__forbidden_function'); fb_intercept('posix_getpwnam', '__forbidden_function'); fb_intercept('posix_getpwuid', '__forbidden_function'); // for debugging only fb_intercept('posix_getrlimit', '__forbidden_function'); fb_intercept('posix_kill', '__forbidden_function'); fb_intercept('posix_mkfifo', '__forbidden_function'); fb_intercept('posix_setpgid', '__forbidden_function'); fb_intercept('posix_setsid', '__forbidden_function'); fb_intercept('posix_setuid', '__forbidden_function'); fb_intercept('posix_ttyname', '__forbidden_function'); fb_intercept('posix_uname', '__forbidden_function'); fb_intercept('proc_nice', '__forbidden_function'); fb_intercept('proc_terminate', '__forbidden_function'); fb_intercept('show_source', '__forbidden_function'); fb_intercept('symlink', '__forbidden_function'); fb_intercept('opcache_compile_file', '__forbidden_function'); fb_intercept('opcache_get_configuration', '__forbidden_function'); fb_intercept('opcache_get_status', '__forbidden_function'); fb_intercept('opcache_reset', '__forbidden_function');
0
ALLM/M3/boa/aegir/conf/hhvm
ALLM/M3/boa/aegir/conf/hhvm/view/hhvminfo.php
<?php /* HHVMinfo - phpinfo page for HHVM HipHop Virtual Machine Author: _ck_ License: WTFPL, free for any kind of use or modification, I am not responsible for anything, please share your improvements Version: 0.0.6 * revision history 0.0.6 2014-08-02 display fix for empty vs zero 0.0.5 2014-07-31 try to determine config file from process command line (may not always work), style improvements 0.0.4 2014-07-30 calculate uptime from pid (may not work in all environments), fixed meta links 0.0.3 2014-07-29 display better interpretation of true, false, null and no value 0.0.2 2014-07-28 first public release * known HHVM limitation as of 3.2 - cannot use eval, preg_replace/e, or create_function in RepoAuthoritative mode - cannot disable or reduce file stat frequency (without RepoAuthoritative mode) - cannot custom format error log or use catch_workers_output - cannot use phpinfo, php_ini_loaded_file, get_extension_funcs - https://github.com/facebook/hhvm/labels/php5%20incompatibility */ ?><!DOCTYPE html> <html> <head> <title>HHVMinfo</title> <meta name="ROBOTS" content="NOINDEX,NOFOLLOW,NOARCHIVE" /> <style type="text/css"> body { background-color: #fff; color: #000; } body, td, th, h1, h2 { font-family: sans-serif; } pre { margin: 0px; font-family: monospace; } a:link,a:visited { color: #000099; text-decoration: none; } a:hover { text-decoration: underline; } table { border-collapse: collapse; border: 0; width: 934px; box-shadow: 1px 2px 3px #ccc; } .center { text-align: center; } .center table { margin: 1em auto; text-align: left; } .center th { text-align: center !important; } .middle { vertical-align:middle; } td, th { border: 1px solid #666; font-size: 75%; vertical-align: baseline; padding: 4px 5px; } h1 { font-size: 150%; } h2 { font-size: 125%; } .p { text-align: left; } .e { background-color: #ccccff; font-weight: bold; color: #000; width:300px; } .h { background-color: #9999cc; font-weight: bold; color: #000; } .v { background-color: #ddd; max-width: 300px; overflow-x: auto; } .v i { color: #777; } .vr { background-color: #cccccc; text-align: right; color: #000; white-space: nowrap; } .b { font-weight:bold; } .white, .white a { color:#fff; } hr { width: 934px; background-color: #cccccc; border: 0px; height: 1px; color: #000; } .meta, .small { font-size: 75%; } .meta { margin: 2em 0; } .meta a, th a { padding: 10px; white-space:nowrap; } .buttons { margin:0 0 1em; } .buttons a { margin:0 15px; background-color: #9999cc; color:#fff; text-decoration:none; padding:1px; border:1px solid #000; display:inline-block; width:6em; text-align:center; box-shadow: 1px 2px 3px #ccc; } .buttons a.active { background-color: #8888bb; box-shadow:none; } </style> </head> <body> <div class="center"> <h1><a href="?">HHVMinfo</a></h1> <div class="buttons"> <a href="?INI&EXTENSIONS&FUNCTIONS&CONSTANTS&GLOBALS">ALL</a> <a <?php echo isset($_GET['INI'])?'class="active"':'' ?>" href="?INI">ini</a> <a <?php echo isset($_GET['EXTENSIONS'])?'class="active"':'' ?> href="?EXTENSIONS">Extensions</a> <a <?php echo isset($_GET['FUNCTIONS'])?'class="active"':'' ?> href="?FUNCTIONS">Functions</a> <a <?php echo isset($_GET['CONSTANTS'])?'class="active"':'' ?> href="?CONSTANTS">Constants</a> <a <?php echo isset($_GET['GLOBALS'])?'class="active"':'' ?> href="?GLOBALS">Globals</a> </div> <?php $globals=array_keys( $GLOBALS ); if ( empty($_GET) || count($_GET)>4 || isset($_GET['SUMMARY']) ) { if ( ($pidfile=ini_get('pid')) || ($pidfile=ini_get('hhvm.pid_file')) ) { $uptime=($pidfile)&&($mtime=@filemtime($pidfile))?(new 
DateTime('@'.$mtime))->diff(new DateTime('NOW'))->format('%a days, %h hours, %i minutes'):'<i>unknown<i>'; if ( !($inifile=(function_exists('php_ini_loaded_file')?php_ini_loaded_file():'')) && ($pid=@file_get_contents($pidfile)) && ($cmdline=@file_get_contents("/proc/$pid/cmdline")) ) { $inifile=preg_match('@-?-c(onfig)?\s*([^ ]+?)($|\s|--)@',$cmdline,$match)?$match[2]:''; } } else { $uptime=$inifile='<i>unknown</i>'; } print_table( array( 'Host'=>function_exists('gethostname')?@gethostname():@php_uname('n'), 'System'=>php_uname(), 'PHP Version'=>phpversion(), 'HHVM Version'=>ini_get('hphp.compiler_version'), 'HHVM compiler id'=>ini_get('hphp.compiler_id'), 'SAPI'=>php_sapi_name().' '.ini_get('hhvm.server.type'), 'Loaded Configuration File'=>$inifile, 'Uptime'=>$uptime, )); } if ( isset($_GET['INI']) && $ini=ini_get_all() ) { ksort($ini); echo '<h2 id="ini">ini</h2>'; print_table($ini,array('Directive','Local Value','Master Value','Access'),false); echo '<h2>access level legend</h2>'; print_table(array('Entry can be set in user scripts, ini_set()'=>INI_USER,'Entry can be set in php.ini, .htaccess, httpd.conf'=>INI_PERDIR, 'Entry can be set in php.ini or httpd.conf'=>INI_SYSTEM,'<div style="width:865px">Entry can be set anywhere</div>'=>INI_ALL )); } if ( isset($_GET['EXTENSIONS']) && $extensions=get_loaded_extensions(true) ) { echo '<h2 id="extensions">extensions</h2>'; natcasesort( $extensions); print_table($extensions,false,true); } if ( isset($_GET['FUNCTIONS']) && $functions=get_defined_functions() ) { echo '<h2 id="functions">functions</h2>'; natcasesort( $functions['internal']); print_table($functions['internal'],false,true); } if ( isset($_GET['CONSTANTS']) && $constants=get_defined_constants(true) ) { ksort( $constants); foreach ( $constants as $key=>$value) { if (!empty($value)) { ksort( $value); echo '<h2 id="constants-',$key,'">Constants (',$key,')</h2>'; print_table($value); } } } if ( isset($_GET['GLOBALS']) ) { if (0) { $_SERVER; $_ENV; $_SESSION; $_COOKIE; $_GET; $_POST; $_REQUEST; $_FILES; } // PHP 5.4+ JIT $order=array_flip(array('_SERVER','_ENV','_COOKIE','_GET','_POST','_REQUEST','_FILES')); foreach ( $order as $key=>$ignore ) { if ( isset($GLOBALS[$key]) ) { echo '<h2 id="',$key,'">$',$key,'</h2>'; if ( empty($GLOBALS[$key]) ) { echo '<hr>'; } else { print_table( $GLOBALS[$key]); } } } natcasesort($globals); $globals=array_flip($globals); unset( $globals['GLOBALS'] ); foreach ( $globals as $key=>$ignore ) { if ( !isset($order[$key]) ) { echo '<h2 id="',$key,'">$',$key,'</h2>'; if ( empty($GLOBALS[$key]) ) { echo '<hr>'; } else { print_table( $GLOBALS[$key]); } } } } ?> <div class="meta"> <a href="http://hhvm.com/blog">HHVM blog</a> | <a href="https://github.com/facebook/hhvm/wiki">HHVM wiki</a> | <a href="https://github.com/facebook/hhvm/blob/master/hphp/NEWS">HHVM changelog</a> | <a href="https://github.com/facebook/hhvm/commits/master">HHVM commits</a> | <a href="http://webchat.freenode.net/?channels=hhvm">#HHVM irc chat</a> | <a href="https://gist.github.com/ck-on/67ca91f0310a695ceb65?hhvminfo.php">HHVMinfo latest</a> </div> </div></body></html> <?php function print_table( $array, $headers=false, $formatkeys=false, $formatnumeric=false ) { if ( empty($array) || !is_array($array) ) { return; } echo '<table border="0" cellpadding="3">'; if ( !empty($headers) ) { if ( !is_array( $headers) ) { $headers=array_keys( reset( $array) ); } echo '<tr class="h">'; foreach ( $headers as $value) { echo '<th>',$value,'</th>'; } echo '</tr>'; } foreach ( $array as $key=>$value ) { 
echo '<tr>'; if ( !is_numeric( $key) || !$formatkeys ) { echo '<td class="e">',($formatkeys?ucwords(str_replace('_',' ',$key)):$key),'</td>'; } if ( is_array($value) ) { foreach ($value as $column) { echo '<td class="v">',format_special($column,$formatnumeric),'</td>'; } } else { echo '<td class="v">',format_special($value,$formatnumeric),'</td>'; } echo '</tr>'; } echo '</table>'; } function format_special( $value, $formatnumeric ) { if ( is_array($value) ) { $value='<i>array</i>'; } elseif ( is_object($value) ) { $value='<i>object</i>'; } elseif ( $value===true ) { $value='<i>true</i>'; } elseif ( $value===false ) { $value='<i>false</i>'; } elseif ( $value===NULL ) { $value='<i>null</i>'; } elseif ( $value===0 || $value===0.0 || $value==='0' ) { $value='0'; } elseif ( empty($value) ) { $value='<i>no value</i>'; } elseif ( is_string($value) && strlen($value)>50 ) { $value=implode('&#8203;',str_split($value,45)); } elseif ( $formatnumeric && is_numeric($value) ) { if ( $value>1048576 ) { $value=round($value/1048576,1).'M'; } elseif ( is_float($value) ) { $value=round($value,1); } } return $value; }
0
ALLM/M3/boa/aegir
ALLM/M3/boa/aegir/helpers/challenge-dns-email-hook.sh
#!/usr/bin/env bash

function has_propagated {
  while [ "$#" -ge 2 ]; do
    local RECORD_NAME="${1}"; shift
    local TOKEN_VALUE="${1}"; shift

    if [ ${#AUTH_NS[@]} -eq 0 ]; then
      local RECORD_DOMAIN=$RECORD_NAME
      declare -a iAUTH_NS
      while [ -z "$iAUTH_NS" ]; do
        RECORD_DOMAIN=$(echo "${RECORD_DOMAIN}" | cut -d'.' -f 2-)
        iAUTH_NS=($(dig +short "${RECORD_DOMAIN}" IN CNAME))
        if [ -n "$iAUTH_NS" ]; then
          unset iAUTH_NS && declare -a iAUTH_NS
          continue
        fi
        iAUTH_NS=($(dig +short "${RECORD_DOMAIN}" IN NS))
      done
    else
      local iAUTH_NS=("${AUTH_NS[@]}")
    fi

    for NS in "${iAUTH_NS[@]}"; do
      dig +short @"${NS}" "${RECORD_NAME}" IN TXT | grep -q "\"${TOKEN_VALUE}\"" || return 1
    done
    unset iAUTH_NS
  done
  return 0
}

function ocsp_update {
  local DOMAIN="${1}" KEYFILE="${2}" CERTFILE="${3}" FULLCHAINFILE="${4}" CHAINFILE="${5}" TIMESTAMP="${6}"

  # Get the OCSP response and shove it into a file, used for OCSP stapling.
  #
  # You only need this for old versions of nginx that can't do this itself,
  # or if your server is behind a proxy (eg nginx can't do OCSP via HTTP proxy).
  #
  # Parameters:
  # - DOMAIN
  #   The primary domain name, i.e. the certificate common
  #   name (CN).
  # - KEYFILE
  #   The path of the file containing the private key.
  # - CERTFILE
  #   The path of the file containing the signed certificate.
  # - FULLCHAINFILE
  #   The path of the file containing the full certificate chain.
  # - CHAINFILE
  #   The path of the file containing the intermediate certificate(s).
  # - TIMESTAMP
  #   Timestamp when the specified certificate was created.

  if [ -n "${OCSP_RESPONSE_FILE}" ]; then
    if [ -z "${OCSP_HOST}" ]; then
      OCSP_HOST="${http_proxy}" # eg http://foo.bar:3128/
      # strip protocol and path:
      OCSP_HOST="$(echo "$OCSP_HOST" | sed -E 's/(\w+:\/\/)((\w|\.)+:[0-9]+?)\/?.*/\2/')" # eg foo.bar:3128
    fi
    if [ -n "$VERBOSE" ]; then
      echo "OCSP_HOST: $OCSP_HOST"
      echo "http_proxy: $http_proxy"
      echo "OCSP_RESPONSE_FILE: $OCSP_RESPONSE_FILE"
      echo "CHAINFILE: $CHAINFILE"
      echo "CERTFILE: $CERTFILE"
      echo "command: openssl ocsp -noverify -no_nonce -respout \"${OCSP_RESPONSE_FILE}\" -issuer \"${CHAINFILE}\" -cert \"${CERTFILE}\" -host \"${OCSP_HOST}\" -path \"\$(openssl x509 -noout -ocsp_uri -in \"${CERTFILE}\")\" -CApath \"/etc/ssl/certs\""
    fi
    if [ -n "${OCSP_HOST}" ]; then
      openssl ocsp -noverify -no_nonce -respout "${OCSP_RESPONSE_FILE}" -issuer "${CHAINFILE}" -cert "${CERTFILE}" -host "${OCSP_HOST}" -path "$(openssl x509 -noout -ocsp_uri -in "${CERTFILE}")" -CApath "/etc/ssl/certs"
    else
      openssl ocsp -noverify -no_nonce -respout "${OCSP_RESPONSE_FILE}" -issuer "${CHAINFILE}" -cert "${CERTFILE}" -path "$(openssl x509 -noout -ocsp_uri -in "${CERTFILE}")" -CApath "/etc/ssl/certs"
    fi
  fi
}

function oscp_update { # oops :)
  ocsp_update "$@"
}

function deploy_challenge {
  local RECORDS=() RECIPIENT=${RECIPIENT:-$(id -u -n)}
  local FIRSTDOMAIN="${1}"
  local SUBJECT="Let's Encrypt certificate renewal"

  while (( "$#" >= 3 )); do
    local DOMAIN="${1}"; shift
    local TOKEN_FILENAME="${1}"; shift
    local TOKEN_VALUE="${1}"; shift

    # This hook is called once for every domain that needs to be
    # validated, including any alternative names you may have listed.
    #
    # Parameters:
    # - DOMAIN
    #   The domain name (CN or subject alternative name) being
    #   validated.
    # - TOKEN_FILENAME
    #   The name of the file containing the token to be served for HTTP
    #   validation. Should be served by your web server as
    #   /.well-known/acme-challenge/${TOKEN_FILENAME}.
    # - TOKEN_VALUE
    #   The token value that needs to be served for validation. For DNS
    #   validation, this is what you want to put in the _acme-challenge
    #   TXT record. For HTTP validation it is the value that is expected
    #   to be found in the $TOKEN_FILENAME file.

    RECORD_NAME="_acme-challenge.${DOMAIN}"
    RECORDS+=( ${RECORD_NAME} )
    RECORDS+=( ${TOKEN_VALUE} )
  done

  read -d '' MESSAGE <<EOF
The Let's Encrypt certificate for ${FIRSTDOMAIN} is about to expire. Before it
can be renewed, ownership of the domain must be proven by responding to a
challenge.

Please deploy the following record(s) to validate ownership of ${FIRSTDOMAIN}:
EOF

  for (( i=0; i < "${#RECORDS[@]}"; i+=2 )); do
    MESSAGE="$(printf '%s\n %s. IN TXT %s\n' "$MESSAGE" "${RECORDS[$i]}" "${RECORDS[$(($i + 1))]}")"
  done

  echo "$MESSAGE" | mail -s "$SUBJECT" "$RECIPIENT"

  echo " + Settling down for 10s..."
  sleep 10

  while ! has_propagated "${RECORDS[@]}"; do
    echo " + DNS not propagated. Waiting 30s for record creation and replication..."
    sleep 30
  done
}

function clean_challenge {
  local RECORDS=() RECIPIENT=${RECIPIENT:-$(id -u -n)}
  local FIRSTDOMAIN="${1}"
  local SUBJECT="Let's Encrypt certificate renewal"

  while (( "$#" >= 3 )); do
    local DOMAIN="${1}"; shift
    local TOKEN_FILENAME="${1}"; shift
    local TOKEN_VALUE="${1}"; shift

    # This hook is called after attempting to validate each domain,
    # whether or not validation was successful. Here you can delete
    # files or DNS records that are no longer needed.
    #
    # The parameters are the same as for deploy_challenge.

    RECORD_NAME="_acme-challenge.${DOMAIN}"
    RECORDS+=( ${RECORD_NAME} )
    RECORDS+=( ${TOKEN_VALUE} )
  done

  read -d '' MESSAGE <<EOF
Propagation has completed for ${FIRSTDOMAIN}. The following record(s) can now
be deleted:
EOF

  while (( "${#RECORDS}" >= 2 )); do
    MESSAGE="$(printf '%s\n %s. IN TXT %s\n' "$MESSAGE" "${RECORDS[0]}" "${RECORDS[1]}")"
    RECORDS=( "${RECORDS[@]:2}" )
  done

  echo "$MESSAGE" | mail -s "$SUBJECT" "$RECIPIENT"
}

function deploy_cert {
  local DOMAIN="${1}" KEYFILE="${2}" CERTFILE="${3}" FULLCHAINFILE="${4}" CHAINFILE="${5}" TIMESTAMP="${6}"

  # This hook is called once for each certificate that has been
  # produced. Here you might, for instance, copy your new certificates
  # to service-specific locations and reload the service.
  #
  # Parameters:
  # - DOMAIN
  #   The primary domain name, i.e. the certificate common
  #   name (CN).
  # - KEYFILE
  #   The path of the file containing the private key.
  # - CERTFILE
  #   The path of the file containing the signed certificate.
  # - FULLCHAINFILE
  #   The path of the file containing the full certificate chain.
  # - CHAINFILE
  #   The path of the file containing the intermediate certificate(s).
  # - TIMESTAMP
  #   Timestamp when the specified certificate was created.

  oscp_update "$@"
}

function unchanged_cert {
  local DOMAIN="${1}" KEYFILE="${2}" CERTFILE="${3}" FULLCHAINFILE="${4}" CHAINFILE="${5}"

  # This hook is called once for each certificate that is still
  # valid and therefore wasn't reissued.
  #
  # Parameters:
  # - DOMAIN
  #   The primary domain name, i.e. the certificate common
  #   name (CN).
  # - KEYFILE
  #   The path of the file containing the private key.
  # - CERTFILE
  #   The path of the file containing the signed certificate.
  # - FULLCHAINFILE
  #   The path of the file containing the full certificate chain.
  # - CHAINFILE
  #   The path of the file containing the intermediate certificate(s).

  oscp_update "$@"
}

HANDLER=$1; shift
if [[ "${HANDLER}" =~ ^(deploy_challenge|clean_challenge|deploy_cert|unchanged_cert)$ ]]; then
  "$HANDLER" "$@"
fi
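A minimal sketch of how this hook is typically wired into dehydrated for dns-01 validation; the paths and domain below are placeholders, and the flag and config names should be checked against your dehydrated version:

# In /etc/dehydrated/config (hypothetical path):
#   CHALLENGETYPE="dns-01"
#   HOOK="/etc/dehydrated/challenge-dns-email-hook.sh"
# or, equivalently, on the command line:
dehydrated --cron --domain example.com \
  --challenge dns-01 \
  --hook /etc/dehydrated/challenge-dns-email-hook.sh
# deploy_challenge then mails the required TXT record(s) to $RECIPIENT and
# blocks in has_propagated until every authoritative name server returns them.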
0
ALLM/M3/boa/aegir
ALLM/M3/boa/aegir/helpers/dump_cdorked_config.c
// This program dumps the content of a shared memory block
// used by Linux/Cdorked.A into a file named httpd_cdorked_config.bin
// when the machine is infected.
//
// Some of the data is encrypted. If your server is infected and you
// would like to help, please send the httpd_cdorked_config.bin
// and your httpd executable to our lab for analysis. Thanks!
//
// Build with gcc -o dump_cdorked_config dump_cdorked_config.c
//
// Marc-Etienne M.Léveillé <leveille@eset.com>
//

#include <stdio.h>
#include <sys/shm.h>

#define CDORKED_SHM_SIZE (6118512)
#define CDORKED_OUTFILE "httpd_cdorked_config.bin"

int main (int argc, char *argv[]) {
    int maxkey, id, shmid, infected = 0;
    struct shm_info shm_info;
    struct shmid_ds shmds;
    void * cdorked_data;
    FILE * outfile;

    maxkey = shmctl(0, SHM_INFO, (void *) &shm_info);
    for(id = 0; id <= maxkey; id++) {
        shmid = shmctl(id, SHM_STAT, &shmds);
        if (shmid < 0)
            continue;
        if(shmds.shm_segsz == CDORKED_SHM_SIZE) {
            // We have a matching Cdorked memory segment
            infected++;
            printf("A shared memory matching Cdorked signature was found.\n");
            printf("You should check your HTTP server's executable file integrity.\n");
            cdorked_data = shmat(shmid, NULL, 0666);
            // shmat() returns (void *) -1 on failure, not NULL
            if(cdorked_data != (void *) -1) {
                outfile = fopen(CDORKED_OUTFILE, "wb");
                if(outfile == NULL) {
                    printf("Could not open file %s for writing.\n", CDORKED_OUTFILE);
                } else {
                    fwrite(cdorked_data, CDORKED_SHM_SIZE, 1, outfile);
                    fclose(outfile);
                    printf("The Cdorked configuration was dumped in the %s file.\n\n", CDORKED_OUTFILE);
                }
            }
        }
    }
    if(infected == 0) {
        printf("No shared memory matching Cdorked signature was found.\n");
        printf("To further verify your server, run \"ipcs -m -p\" and look");
        printf(" for a memory segment created by your http server.\n");
    } else {
        printf("If you would like to help us in our research on Cdorked, ");
        printf("please send the httpd_cdorked_config.bin and your httpd executable file ");
        printf("to our lab for analysis at leveille@eset.com. Thanks!\n");
    }
    return infected;
}
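A short usage sketch following the build instructions in the header comment; the ipcs cross-check mirrors the hint the program prints itself:

gcc -o dump_cdorked_config dump_cdorked_config.c
sudo ./dump_cdorked_config
# On a match (a segment of exactly 6118512 bytes) the dump is written to
# httpd_cdorked_config.bin in the current directory; to inspect shared memory
# segments and their creator PIDs by hand:
sudo ipcs -m -p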
0
ALLM/M3/boa/aegir
ALLM/M3/boa/aegir/helpers/le-hook.sh
#!/usr/bin/env bash

# https://github.com/lukas2511/dehydrated/blob/master/docs/examples/hook.sh

set -eu -o pipefail

deploy_challenge() {
    local DOMAIN="${1}" TOKEN_FILENAME="${2}" TOKEN_VALUE="${3}"
    echo ""
    echo "Add the following to the zone definition of ${1}:"
    echo "_acme-challenge.${1}. IN TXT \"${3}\""
    echo ""
    echo -n "Press enter to continue..."
    read tmp
    echo ""
}

clean_challenge() {
    local DOMAIN="${1}" TOKEN_FILENAME="${2}" TOKEN_VALUE="${3}"
    echo ""
    echo "Now you can remove the following from the zone definition of ${1}:"
    echo "_acme-challenge.${1}. IN TXT \"${3}\""
    echo ""
    echo -n "Press enter to continue..."
    read tmp
    echo ""
}

deploy_cert() {
    local DOMAIN="${1}" KEYFILE="${2}" CERTFILE="${3}" FULLCHAINFILE="${4}" CHAINFILE="${5}" TIMESTAMP="${6}"
    echo ""
    echo "deploy_cert()"
    echo ""
}

unchanged_cert() {
    local DOMAIN="${1}" KEYFILE="${2}" CERTFILE="${3}" FULLCHAINFILE="${4}" CHAINFILE="${5}"
    echo ""
    echo "unchanged_cert()"
    echo ""
}

invalid_challenge() {
    local DOMAIN="${1}" RESPONSE="${2}"
    echo ""
    echo "invalid_challenge()"
    echo "${1}"
    echo "${2}"
    echo ""
}

request_failure() {
    local STATUSCODE="${1}" REASON="${2}" REQTYPE="${3}"
    echo ""
    echo "request_failure()"
    echo "${1}"
    echo "${2}"
    echo "${3}"
    echo ""
}

exit_hook() {
    echo ""
    echo "done"
    echo ""
}

HANDLER="$1"; shift
if [[ "${HANDLER}" =~ ^(deploy_challenge|clean_challenge|deploy_cert|unchanged_cert|invalid_challenge|request_failure|exit_hook)$ ]]; then
    "$HANDLER" "$@"
fi
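Because the script simply dispatches on its first argument, each handler can be exercised directly from a shell when testing changes; the arguments below are placeholders:

./le-hook.sh deploy_challenge example.com token-filename token-value
./le-hook.sh unchanged_cert example.com privkey.pem cert.pem fullchain.pem chain.pem
# Handler names that do not match the regex guard are silently ignored.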
0
ALLM/M3/boa/aegir
ALLM/M3/boa/aegir/helpers/rvm-installer-root.sh
#!/usr/bin/env bash shopt -s extglob set -o errtrace set -o errexit set -o pipefail rvm_install_initialize() { DEFAULT_SOURCES=(github.com/rvm/rvm bitbucket.org/mpapis/rvm) BASH_MIN_VERSION="3.2.25" if [[ -n "${BASH_VERSION:-}" && "$(\printf "%b" "${BASH_VERSION:-}\n${BASH_MIN_VERSION}\n" | LC_ALL=C \sort -t"." -k1,1n -k2,2n -k3,3n | \head -n1)" != "${BASH_MIN_VERSION}" ]] then echo "BASH ${BASH_MIN_VERSION} required (you have $BASH_VERSION)" exit 1 fi export HOME PS4 export rvm_trace_flag rvm_debug_flag rvm_user_install_flag rvm_ignore_rvmrc rvm_prefix rvm_path PS4="+ \${BASH_SOURCE##\${rvm_path:-}} : \${FUNCNAME[0]:+\${FUNCNAME[0]}()} \${LINENO} > " } log() { printf "%b\n" "$*"; } debug(){ [[ ${rvm_debug_flag:-0} -eq 0 ]] || printf "%b\n" "$*" >&2; } warn() { log "WARN: $*" >&2 ; } fail() { fail_with_code 1 "$*" ; } fail_with_code() { code="$1" ; shift ; log "\nERROR: $*\n" >&2 ; exit "$code" ; } rvm_install_commands_setup() { \which which >/dev/null 2>&1 || fail "Could not find 'which' command, make sure it's available first before continuing installation." \which grep >/dev/null 2>&1 || fail "Could not find 'grep' command, make sure it's available first before continuing installation." if [[ -z "${rvm_tar_command:-}" ]] && builtin command -v gtar >/dev/null then rvm_tar_command=gtar elif ${rvm_tar_command:-tar} --help 2>&1 | GREP_OPTIONS="" \grep -- --strip-components >/dev/null then rvm_tar_command="${rvm_tar_command:-tar}" else case "$(uname)" in (OpenBSD) log "Trying to install GNU version of tar, might require sudo password" if (( UID )) then sudo pkg_add -z gtar-1 else pkg_add -z gtar-1 fi rvm_tar_command=gtar ;; (Darwin|FreeBSD|DragonFly) # it's not possible to autodetect on OSX, the help/man does not mention all flags rvm_tar_command=tar ;; (SunOS) case "$(uname -r)" in (5.10) log "Trying to install GNU version of tar, might require sudo password" if (( UID )) then if \which sudo >/dev/null 2>&1 then sudo_10=sudo elif \which /opt/csw/bin/sudo >/dev/null 2>&1 then sudo_10=/opt/csw/bin/sudo else fail "sudo is required but not found. You may install sudo from OpenCSW repository (https://www.opencsw.org/about)" fi pkginfo -q CSWpkgutil || $sudo_10 pkgadd -a $rvm_path/config/solaris/noask -d https://get.opencsw.org/now CSWpkgutil sudo /opt/csw/bin/pkgutil -iy CSWgtar -t https://mirror.opencsw.org/opencsw/unstable else pkginfo -q CSWpkgutil || pkgadd -a $rvm_path/config/solaris/noask -d https://get.opencsw.org/now CSWpkgutil /opt/csw/bin/pkgutil -iy CSWgtar -t https://mirror.opencsw.org/opencsw/unstable fi rvm_tar_command=/opt/csw/bin/gtar ;; (*) rvm_tar_command=tar ;; esac esac builtin command -v ${rvm_tar_command:-gtar} >/dev/null || fail "Could not find GNU compatible version of 'tar' command, make sure it's available first before continuing installation." fi if [[ " ${rvm_tar_options:-} " != *" --no-same-owner "* ]] && $rvm_tar_command --help 2>&1 | GREP_OPTIONS="" \grep -- --no-same-owner >/dev/null then rvm_tar_options="${rvm_tar_options:-}${rvm_tar_options:+ }--no-same-owner" fi } usage() { printf "%b" " Usage rvm-installer [options] [action] Options [[--]version] <version> The version or tag to install. Valid values are: latest - The latest tagged version. latest-minor - The latest minor version of the current major version. latest-<x> - The latest minor version of version x. latest-<x>.<y> - The latest patch version of version x.y. <x>.<y>.<z> - Major version x, minor version y and patch z. [--]branch <branch> The name of the branch from which RVM is installed. 
This option can be used with the following formats for <branch>: <account>/ If account is rvm or mpapis, installs from one of the following: https://github.com/rvm/rvm/archive/master.tar.gz https://bitbucket.org/mpapis/rvm/get/master.tar.gz Otherwise, installs from: https://github.com/<account>/rvm/archive/master.tar.gz <account>/<branch> If account is rvm or mpapis, installs from one of the following: https://github.com/rvm/rvm/archive/<branch>.tar.gz https://bitbucket.org/mpapis/rvm/get/<branch>.tar.gz Otherwise, installs from: https://github.com/<account>/rvm/archive/<branch>.tar.gz [/]<branch> Installs the branch from one of the following: https://github.com/rvm/rvm/archive/<branch>.tar.gz https://bitbucket.org/mpapis/rvm/get/<branch>.tar.gz [--]source <source> Defines the repository from which RVM is retrieved and installed in the format: <domain>/<account>/<repo> Where: <domain> - Is bitbucket.org, github.com or a github enterprise site serving an RVM repository. <account> - Is the user account in which the RVM repository resides. <repo> - Is the name of the RVM repository. Note that when using the [--]source option, one should only use the [/]branch format with the [--]branch option. Failure to do so will result in undefined behavior. --trace Provides debug logging for the installation script. Actions master - Installs RVM from the master branch at rvm/rvm on github or mpapis/rvm on bitbucket.org. stable - Installs RVM from the stable branch a rvm/rvm on github or mpapis/rvm on bitbucket.org. help - Displays this output. " } ## duplication marker 32fosjfjsznkjneuera48jae __rvm_curl_output_control() { if (( ${rvm_quiet_curl_flag:-0} == 1 )) then __flags+=( "--silent" "--show-error" ) elif [[ " $*" == *" -s"* || " $*" == *" --silent"* ]] then # make sure --show-error is used with --silent [[ " $*" == *" -S"* || " $*" == *" -sS"* || " $*" == *" --show-error"* ]] || { __flags+=( "--show-error" ) } fi } ## duplication marker 32fosjfjsznkjneuera48jae # -S is automatically added to -s __rvm_curl() ( __rvm_which curl >/dev/null || { rvm_error "RVM requires 'curl'. Install 'curl' first and try again." return 200 } typeset -a __flags __flags=( --fail --location --max-redirs 10 ) [[ "$*" == *"--max-time"* ]] || [[ "$*" == *"--connect-timeout"* ]] || __flags+=( --connect-timeout 30 --retry-delay 2 --retry 3 ) if [[ -n "${rvm_proxy:-}" ]] then __flags+=( --proxy "${rvm_proxy:-}" ) fi __rvm_curl_output_control unset curl __rvm_debug_command \curl "${__flags[@]}" "$@" || return $? ) rvm_error() { printf "ERROR: %b\n" "$*"; } __rvm_which(){ which "$@" || return $?; true; } __rvm_debug_command() { debug "Running($#): $*" "$@" || return $? true } rvm_is_a_shell_function() { [[ -t 0 && -t 1 ]] || return $? return ${rvm_is_not_a_shell_function:-0} } # Searches the tags for the highest available version matching a given pattern. # fetch_version (github.com/rvm/rvm bitbucket.org/mpapis/rvm) 1.10. -> 1.10.3 # fetch_version (github.com/rvm/rvm bitbucket.org/mpapis/rvm) 1.10. -> 1.10.3 # fetch_version (github.com/rvm/rvm bitbucket.org/mpapis/rvm) 1. 
-> 1.11.0 # fetch_version (github.com/rvm/rvm bitbucket.org/mpapis/rvm) "" -> 2.0.1 fetch_version() { typeset _account _domain _pattern _repo _sources _values _version _sources=(${!1}) _pattern=$2 for _source in "${_sources[@]}" do IFS='/' read -r _domain _account _repo <<< "${_source}" _version="$( fetch_versions ${_domain} ${_account} ${_repo} | GREP_OPTIONS="" \grep "^${_pattern:-}" | tail -n 1 )" if [[ -n ${_version} ]] then echo "${_version}" return 0 fi done fail_with_code 4 "Exhausted all sources trying to fetch version '$version' of RVM!" } # Returns a sorted list of most recent tags from a repository fetch_versions() { typeset _account _domain _repo _url _domain=$1 _account=$2 _repo=$3 case ${_domain} in (bitbucket.org) _url="https://api.${_domain}/2.0/repositories/${_account}/${_repo}/refs/tags?sort=-name&pagelen=20" ;; (github.com) _url=https://api.${_domain}/repos/${_account}/${_repo}/tags ;; (*) _url=https://${_domain}/api/v3/repos/${_account}/${_repo}/tags ;; esac { __rvm_curl -sS "${_url}" || warn "...the preceeding error with code $? occurred while fetching $_url" ; } | \awk -v RS=',|values":' -v FS='"' '$2=="name"&&$4!="rvm"{print $4}' | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n } install_release() { typeset _source _sources _url _version _verify_pgp _sources=(${!1}) _version=$2 debug "Downloading RVM version ${_version}" for _source in "${_sources[@]}" do case ${_source} in (bitbucket.org*) _url="https://${_source}/get/${_version}.tar.gz" _verify_pgp="https://${_source}/downloads/${_version}.tar.gz.asc" ;; (*) _url="https://${_source}/archive/${_version}.tar.gz" _verify_pgp="https://${_source}/releases/download/${_version}/${_version}.tar.gz.asc" ;; esac get_and_unpack "${_url}" "rvm-${_version}.tgz" "$_verify_pgp" && return done return $? } install_head() { typeset _branch _source _sources _url _sources=(${!1}) _branch=$2 debug "Selected RVM branch ${_branch}" for _source in "${_sources[@]}" do case ${_source} in (bitbucket.org*) _url=https://${_source}/get/${_branch}.tar.gz ;; (*) _url=https://${_source}/archive/${_branch}.tar.gz ;; esac get_and_unpack "${_url}" "rvm-${_branch//\//_}.tgz" && return done return $? } # duplication marker dfkjdjngdfjngjcszncv # Drop in cd which _doesn't_ respect cdpath __rvm_cd() { typeset old_cdpath ret ret=0 old_cdpath="${CDPATH}" CDPATH="." chpwd_functions="" builtin cd "$@" || ret=$? CDPATH="${old_cdpath}" return $ret } get_package() { typeset _url _file _url="$1" _file="$2" log "Downloading ${_url}" __rvm_curl -sS ${_url} > ${rvm_archives_path}/${_file} || { _return=$? case $_return in # duplication marker lfdgzkngdkjvnfjknkjvcnbjkncvjxbn (60) log " Could not download '${_url}', you can read more about it here: https://rvm.io/support/fixing-broken-ssl-certificates/ To continue in insecure mode run 'echo insecure >> ~/.curlrc'. " ;; # duplication marker lfdgzkngdkjvnfjknkjvcnbjkncvjxbn (77) log " It looks like you have old certificates, you can read more about it here: https://rvm.io/support/fixing-broken-ssl-certificates/ " ;; # duplication marker lfdgzkngdkjvnfjknkjvcnbjkncvjxbn (141) log " Curl returned 141 - it is result of a segfault which means it's Curls fault. Try again and if it crashes more than a couple of times you either need to reinstall Curl or consult with your distribution manual and contact support. " ;; (*) log " Could not download '${_url}'. curl returned status '$_return'. 
" ;; esac return $_return } } # duplication marker flnglfdjkngjndkfjhsbdjgfghdsgfklgg rvm_install_gpg_setup() { export rvm_gpg_command { rvm_gpg_command="$( \which gpg2 2>/dev/null )" && [[ ${rvm_gpg_command} != "/cygdrive/"* ]] } || { rvm_gpg_command="$( \which gpg 2>/dev/null )" && [[ ${rvm_gpg_command} != "/cygdrive/"* ]] } || rvm_gpg_command="" debug "Detected GPG program: '$rvm_gpg_command'" [[ -n "$rvm_gpg_command" ]] || return $? } # duplication marker rdjgndfnghdfnhgfdhbghdbfhgbfdhbn verify_package_pgp() { if "${rvm_gpg_command}" --verify "$2" "$1" then log "GPG verified '$1'" else typeset _return=$? log "\ GPG signature verification failed for '$1' - '$3'! Try to install GPG v2 and then fetch the public key: ${SUDO_USER:+sudo }${rvm_gpg_command##*/} --keyserver hkp://pool.sks-keyservers.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB or if it fails: command curl -k -sSL https://rvm.io/mpapis.asc | ${SUDO_USER:+sudo }${rvm_gpg_command##*/} --import - command curl -k -sSL https://rvm.io/pkuczynski.asc | ${SUDO_USER:+sudo }${rvm_gpg_command##*/} --import - In case of further problems with validation please refer to https://rvm.io/rvm/security " exit ${_return} fi } verify_pgp() { [[ -n "${1:-}" ]] || { debug "No PGP url given, skipping." return 0 } get_package "$1" "$2.asc" || { debug "PGP url given but does not exist: '$1'" return 0 } rvm_install_gpg_setup || { log "Found PGP signature at: '$1', but no GPG software exists to validate it, skipping." return 0 } verify_package_pgp "${rvm_archives_path}/$2" "${rvm_archives_path}/$2.asc" "$1" } get_and_unpack() { typeset _url _file _patern _return _verify_pgp _url="$1" _file="$2" _verify_pgp="$3" get_package "$_url" "$_file" || return $? verify_pgp "$_verify_pgp" "$_file" || return $? [[ -d "${rvm_src_path}/rvm" ]] || \mkdir -p "${rvm_src_path}/rvm" __rvm_cd "${rvm_src_path}/rvm" || { _return=$? log "Could not change directory '${rvm_src_path}/rvm'." return $_return } # Remove existing installation typeset _cleanup_cmd _cleanup_cmd="rm -rf ${rvm_src_path}/rvm/{,.[!.],..?}*" $_cleanup_cmd || { _return=$? log "Could not remove old RVM sources. Try:\n\n\tsudo $_cleanup_cmd\n\nThen retry your task again." return $_return } # Unpack sources __rvm_debug_command $rvm_tar_command xzf ${rvm_archives_path}/${_file} ${rvm_tar_options:-} --strip-components 1 || { _return=$? log "Could not extract RVM sources." return $_return } } rvm_install_default_settings() { # Tracing, if asked for. if [[ "$*" == *--trace* ]] || (( ${rvm_trace_flag:-0} > 0 )) then set -o xtrace rvm_trace_flag=1 fi # Variable initialization, remove trailing slashes if they exist on HOME true \ ${rvm_trace_flag:=0} ${rvm_debug_flag:=0}\ ${rvm_ignore_rvmrc:=0} HOME="${HOME%%+(\/)}" if (( rvm_ignore_rvmrc == 0 )) then for rvmrc in /etc/rvmrc "$HOME/.rvmrc" do if [[ -s "$rvmrc" ]] then if GREP_OPTIONS="" \grep '^\s*rvm .*$' "$rvmrc" >/dev/null 2>&1 then printf "%b" " Error: $rvmrc is for rvm settings only. rvm CLI may NOT be called from within $rvmrc. 
Skipping the loading of $rvmrc " exit 1 else source "$rvmrc" fi fi done fi if [[ -z "${rvm_path:-}" ]] then if (( UID == 0 )) then rvm_user_install_flag=0 rvm_prefix="/usr/local" rvm_path="${rvm_prefix}/rvm" else rvm_user_install_flag=1 rvm_prefix="$HOME" rvm_path="${rvm_prefix}/.rvm" fi fi if [[ -z "${rvm_prefix}" ]] then rvm_prefix=$( dirname $rvm_path ) fi # duplication marker kkdfkgnjfndgjkndfjkgnkfjdgn [[ -n "${rvm_user_install_flag:-}" ]] || case "$rvm_path" in (/usr/local/rvm) rvm_user_install_flag=0 ;; ($HOME/*|/${USER// /_}*) rvm_user_install_flag=1 ;; (*) rvm_user_install_flag=0 ;; esac } rvm_install_parse_params() { install_rubies=() install_gems=() flags=( ./scripts/install ) forwarded_flags=() while (( $# > 0 )) do token="$1" shift case "$token" in (--trace) set -o xtrace rvm_trace_flag=1 flags=( -x "${flags[@]}" "$token" ) forwarded_flags+=( "$token" ) ;; (--debug|--quiet-curl) flags+=( "$token" ) forwarded_flags+=( "$token" ) token=${token#--} token=${token//-/_} export "rvm_${token}_flag"=1 printf "%b" "Turning on ${token/_/ } mode.\n" ;; (--path) if [[ -n "${1:-}" ]] then rvm_path="$1" shift else fail "--path must be followed by a path." fi ;; (--branch|branch) # Install RVM from a given branch if [[ -n "${1:-}" ]] then case "$1" in (/*) branch=${1#/} ;; (*/) branch=master if [[ "${1%/}" -ne rvm ]] && [[ "${1%/}" -ne mpapis ]] then sources=(github.com/${1%/}/rvm) fi ;; (*/*) branch=${1#*/} if [[ "${1%%/*}" -ne rvm ]] && [[ "${1%%/*}" -ne mpapis ]] then sources=(github.com/${1%%/*}/rvm) fi ;; (*) branch="$1" ;; esac shift else fail "--branch must be followed by a branchname." fi ;; (--source|source) if [[ -n "${1:-}" ]] then if [[ "$1" = */*/* ]] then sources=($1) shift else fail "--source must be in the format <domain>/<account>/<repo>." fi else fail "--source must be followed by a source." fi ;; (--user-install|--ignore-dotfiles) token=${token#--} token=${token//-/_} export "rvm_${token}_flag"=1 printf "%b" "Turning on ${token/_/ } mode.\n" ;; (--auto-dotfiles) flags+=( "$token" ) export "rvm_auto_dotfiles_flag"=1 printf "%b" "Turning on auto dotfiles mode.\n" ;; (--auto) export "rvm_auto_dotfiles_flag"=1 printf "%b" "Warning, --auto is deprecated in favor of --auto-dotfiles.\n" ;; (--verify-downloads) if [[ -n "${1:-}" ]] then export rvm_verify_downloads_flag="$1" forwarded_flags+=( "$token" "$1" ) shift else fail "--verify-downloads must be followed by level(0|1|2)." 
fi ;; (--autolibs=*) flags+=( "$token" ) export rvm_autolibs_flag="${token#--autolibs=}" forwarded_flags+=( "$token" ) ;; (--without-gems=*|--with-gems=*|--with-default-gems=*) flags+=( "$token" ) value="${token#*=}" token="${token%%=*}" token="${token#--}" token="${token//-/_}" export "rvm_${token}"="${value}" printf "%b" "Installing RVM ${token/_/ }: ${value}.\n" ;; (--version|version) version="$1" shift ;; (head|master) version="head" branch="master" ;; (stable) version="latest" ;; (latest|latest-*|+([[:digit:]]).+([[:digit:]]).+([[:digit:]])) version="$token" ;; (--ruby) install_rubies+=( ruby ) ;; (--ruby=*) token=${token#--ruby=} install_rubies+=( ${token//,/ } ) ;; (--rails) install_gems+=( rails ) ;; (--gems=*) token=${token#--gems=} install_gems+=( ${token//,/ } ) ;; (--add-to-rvm-group) export rvm_add_users_to_rvm_group="$1" shift ;; (help) usage exit 0 ;; (*) usage exit 1 ;; esac done if (( ${#install_gems[@]} > 0 && ${#install_rubies[@]} == 0 )) then install_rubies=( ruby ) fi true "${version:=head}" true "${branch:=master}" if [[ -z "${sources[@]}" ]] then sources=("${DEFAULT_SOURCES[@]}") fi rvm_src_path="$rvm_path/src" rvm_archives_path="$rvm_path/archives" rvm_releases_url="https://rvm.io/releases" } rvm_install_validate_rvm_path() { case "$rvm_path" in (*[[:space:]]*) printf "%b" " It looks you are one of the happy *space* users (in home dir name), RVM is not yet fully ready for it, use this trick to fix it: sudo mkdir -p /${USER// /_}.rvm sudo chown -R \"$USER:\" /${USER// /_}.rvm echo \"export rvm_path=/${USER// /_}.rvm\" >> \"$HOME/.rvmrc\" and start installing again. " exit 2 ;; (/usr/share/ruby-rvm) printf "%b" " It looks you are one of the happy Ubuntu users, RVM packaged by Ubuntu is old and broken, follow this link for details how to fix: https://stackoverflow.com/a/9056395/497756 " [[ "${rvm_uses_broken_ubuntu_path:-no}" == "yes" ]] || exit 3 ;; esac if [[ "$rvm_path" != "/"* ]] then fail "The rvm install path must be fully qualified. Tried $rvm_path" fi } rvm_install_validate_volume_mount_mode() { \typeset path partition test_exec path=$rvm_path # Directory $rvm_path might not exists at this point so we need to traverse the tree upwards while [[ -n "$path" ]] do if [[ -d $path ]] then partition=`df -P $path | awk 'END{print $1}'` test_exec=$(mktemp $path/rvm-exec-test.XXXXXX) echo '#!/bin/sh' > "$test_exec" chmod +x "$test_exec" if ! "$test_exec" then rm -f "$test_exec" printf "%b" " It looks that scripts located in ${path}, which would be RVM destination ${rvm_path}, are not executable. One of the reasons might be that partition ${partition} holding this location is mounted in *noexec* mode, which prevents RVM from working correctly. Please verify your setup and re-mount partition ${partition} without the noexec option." 
exit 2 fi rm -f "$test_exec" break fi path=${path%/*} done } rvm_install_select_and_get_version() { typeset dir _version_release _version for dir in "$rvm_src_path" "$rvm_archives_path" do [[ -d "$dir" ]] || mkdir -p "$dir" done _version_release="${version}" case "${version}" in (head) _version_release="${branch}" install_head sources[@] ${branch:-master} ;; (latest) _version=$(fetch_version sources[@]) install_release sources[@] "$_version" ;; (latest-minor) version="$(<"$rvm_path/VERSION")" _version=$(fetch_version sources[@] ${version%.*}) install_release sources[@] "$_version" ;; (latest-*) _version=$(fetch_version sources[@] ${version#latest-}) install_release sources[@] "$_version" ;; (+([[:digit:]]).+([[:digit:]]).+([[:digit:]])) # x.y.z install_release sources[@] ${version} ;; (*) fail "Something went wrong, unrecognized version '$version'" ;; esac echo "${_version_release}" > "$rvm_path/RELEASE" } rvm_install_main() { [[ -f ./scripts/install ]] || { log "'./scripts/install' can not be found for installation, something went wrong, it usually means your 'tar' is broken, please report it here: https://github.com/rvm/rvm/issues" return 127 } # required flag - path to install flags+=( --path "$rvm_path" ) \command bash "${flags[@]}" } rvm_install_ruby_and_gems() ( if (( ${#install_rubies[@]} > 0 )) then source ${rvm_scripts_path:-${rvm_path}/scripts}/rvm source ${rvm_scripts_path:-${rvm_path}/scripts}/functions/version __rvm_print_headline for _ruby in ${install_rubies[@]} do command rvm "${forwarded_flags[@]}" install ${_ruby} done # set the first one as default, skip rest for _ruby in ${install_rubies[@]} do rvm "${forwarded_flags[@]}" alias create default ${_ruby} break done for _gem in ${install_gems[@]} do rvm "${forwarded_flags[@]}" all do gem install ${_gem} done printf "%b" " * To start using RVM you need to run \`source $rvm_path/scripts/rvm\` in all your open shell windows, in rare cases you need to reopen all shell windows. " if [[ "${install_gems[*]}" == *"rails"* ]] then printf "%b" " * To start using rails you need to run \`rails new <project_dir>\`. " fi fi ) rvm_install() { rvm_install_initialize rvm_install_commands_setup rvm_install_default_settings rvm_install_parse_params "$@" rvm_install_validate_rvm_path rvm_install_validate_volume_mount_mode rvm_install_select_and_get_version rvm_install_main rvm_install_ruby_and_gems } rvm_install "$@"
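A few illustrative invocations matching the usage text above, assuming the script is saved locally as rvm-installer; this root variant defaults to installing under /usr/local/rvm when run as root, and --path can override that:

bash rvm-installer stable                      # latest tagged release
bash rvm-installer --version 1.29.12           # a specific tag (example value)
bash rvm-installer --branch myaccount/mybranch # a branch of a fork on github.com
bash rvm-installer --path /opt/rvm stable      # custom install path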
0
ALLM/M3/boa/aegir
ALLM/M3/boa/aegir/helpers/rvm-installer.sh
#!/usr/bin/env bash shopt -s extglob set -o errtrace set -o errexit set -o pipefail rvm_install_initialize() { DEFAULT_SOURCES=(github.com/rvm/rvm bitbucket.org/mpapis/rvm) BASH_MIN_VERSION="3.2.25" if [[ -n "${BASH_VERSION:-}" && "$(\printf "%b" "${BASH_VERSION:-}\n${BASH_MIN_VERSION}\n" | LC_ALL=C \sort -t"." -k1,1n -k2,2n -k3,3n | \head -n1)" != "${BASH_MIN_VERSION}" ]] then echo "BASH ${BASH_MIN_VERSION} required (you have $BASH_VERSION)" exit 1 fi export HOME PS4 export rvm_trace_flag rvm_debug_flag rvm_user_install_flag rvm_ignore_rvmrc rvm_prefix rvm_path PS4="+ \${BASH_SOURCE##\${rvm_path:-}} : \${FUNCNAME[0]:+\${FUNCNAME[0]}()} \${LINENO} > " } log() { printf "%b\n" "$*"; } debug(){ [[ ${rvm_debug_flag:-0} -eq 0 ]] || printf "%b\n" "$*" >&2; } warn() { log "WARN: $*" >&2 ; } fail() { fail_with_code 1 "$*" ; } fail_with_code() { code="$1" ; shift ; log "\nERROR: $*\n" >&2 ; exit "$code" ; } rvm_install_commands_setup() { \which which >/dev/null 2>&1 || fail "Could not find 'which' command, make sure it's available first before continuing installation." \which grep >/dev/null 2>&1 || fail "Could not find 'grep' command, make sure it's available first before continuing installation." if [[ -z "${rvm_tar_command:-}" ]] && builtin command -v gtar >/dev/null then rvm_tar_command=gtar elif ${rvm_tar_command:-tar} --help 2>&1 | GREP_OPTIONS="" \grep -- --strip-components >/dev/null then rvm_tar_command="${rvm_tar_command:-tar}" else case "$(uname)" in (OpenBSD) log "Trying to install GNU version of tar, might require sudo password" if (( UID )) then sudo pkg_add -z gtar-1 else pkg_add -z gtar-1 fi rvm_tar_command=gtar ;; (Darwin|FreeBSD|DragonFly) # it's not possible to autodetect on OSX, the help/man does not mention all flags rvm_tar_command=tar ;; (SunOS) case "$(uname -r)" in (5.10) log "Trying to install GNU version of tar, might require sudo password" if (( UID )) then if \which sudo >/dev/null 2>&1 then sudo_10=sudo elif \which /opt/csw/bin/sudo >/dev/null 2>&1 then sudo_10=/opt/csw/bin/sudo else fail "sudo is required but not found. You may install sudo from OpenCSW repository (https://www.opencsw.org/about)" fi pkginfo -q CSWpkgutil || $sudo_10 pkgadd -a $rvm_path/config/solaris/noask -d https://get.opencsw.org/now CSWpkgutil sudo /opt/csw/bin/pkgutil -iy CSWgtar -t https://mirror.opencsw.org/opencsw/unstable else pkginfo -q CSWpkgutil || pkgadd -a $rvm_path/config/solaris/noask -d https://get.opencsw.org/now CSWpkgutil /opt/csw/bin/pkgutil -iy CSWgtar -t https://mirror.opencsw.org/opencsw/unstable fi rvm_tar_command=/opt/csw/bin/gtar ;; (*) rvm_tar_command=tar ;; esac esac builtin command -v ${rvm_tar_command:-gtar} >/dev/null || fail "Could not find GNU compatible version of 'tar' command, make sure it's available first before continuing installation." fi if [[ " ${rvm_tar_options:-} " != *" --no-same-owner "* ]] && $rvm_tar_command --help 2>&1 | GREP_OPTIONS="" \grep -- --no-same-owner >/dev/null then rvm_tar_options="${rvm_tar_options:-}${rvm_tar_options:+ }--no-same-owner" fi } usage() { printf "%b" " Usage rvm-installer [options] [action] Options [[--]version] <version> The version or tag to install. Valid values are: latest - The latest tagged version. latest-minor - The latest minor version of the current major version. latest-<x> - The latest minor version of version x. latest-<x>.<y> - The latest patch version of version x.y. <x>.<y>.<z> - Major version x, minor version y and patch z. [--]branch <branch> The name of the branch from which RVM is installed. 
This option can be used with the following formats for <branch>: <account>/ If account is rvm or mpapis, installs from one of the following: https://github.com/rvm/rvm/archive/master.tar.gz https://bitbucket.org/mpapis/rvm/get/master.tar.gz Otherwise, installs from: https://github.com/<account>/rvm/archive/master.tar.gz <account>/<branch> If account is rvm or mpapis, installs from one of the following: https://github.com/rvm/rvm/archive/<branch>.tar.gz https://bitbucket.org/mpapis/rvm/get/<branch>.tar.gz Otherwise, installs from: https://github.com/<account>/rvm/archive/<branch>.tar.gz [/]<branch> Installs the branch from one of the following: https://github.com/rvm/rvm/archive/<branch>.tar.gz https://bitbucket.org/mpapis/rvm/get/<branch>.tar.gz [--]source <source> Defines the repository from which RVM is retrieved and installed in the format: <domain>/<account>/<repo> Where: <domain> - Is bitbucket.org, github.com or a github enterprise site serving an RVM repository. <account> - Is the user account in which the RVM repository resides. <repo> - Is the name of the RVM repository. Note that when using the [--]source option, one should only use the [/]branch format with the [--]branch option. Failure to do so will result in undefined behavior. --trace Provides debug logging for the installation script. Actions master - Installs RVM from the master branch at rvm/rvm on github or mpapis/rvm on bitbucket.org. stable - Installs RVM from the stable branch a rvm/rvm on github or mpapis/rvm on bitbucket.org. help - Displays this output. " } ## duplication marker 32fosjfjsznkjneuera48jae __rvm_curl_output_control() { if (( ${rvm_quiet_curl_flag:-0} == 1 )) then __flags+=( "--silent" "--show-error" ) elif [[ " $*" == *" -s"* || " $*" == *" --silent"* ]] then # make sure --show-error is used with --silent [[ " $*" == *" -S"* || " $*" == *" -sS"* || " $*" == *" --show-error"* ]] || { __flags+=( "--show-error" ) } fi } ## duplication marker 32fosjfjsznkjneuera48jae # -S is automatically added to -s __rvm_curl() ( __rvm_which curl >/dev/null || { rvm_error "RVM requires 'curl'. Install 'curl' first and try again." return 200 } typeset -a __flags __flags=( --fail --location --max-redirs 10 ) [[ "$*" == *"--max-time"* ]] || [[ "$*" == *"--connect-timeout"* ]] || __flags+=( --connect-timeout 30 --retry-delay 2 --retry 3 ) if [[ -n "${rvm_proxy:-}" ]] then __flags+=( --proxy "${rvm_proxy:-}" ) fi __rvm_curl_output_control unset curl __rvm_debug_command \curl "${__flags[@]}" "$@" || return $? ) rvm_error() { printf "ERROR: %b\n" "$*"; } __rvm_which(){ which "$@" || return $?; true; } __rvm_debug_command() { debug "Running($#): $*" "$@" || return $? true } rvm_is_a_shell_function() { [[ -t 0 && -t 1 ]] || return $? return ${rvm_is_not_a_shell_function:-0} } # Searches the tags for the highest available version matching a given pattern. # fetch_version (github.com/rvm/rvm bitbucket.org/mpapis/rvm) 1.10. -> 1.10.3 # fetch_version (github.com/rvm/rvm bitbucket.org/mpapis/rvm) 1.10. -> 1.10.3 # fetch_version (github.com/rvm/rvm bitbucket.org/mpapis/rvm) 1. 
-> 1.11.0 # fetch_version (github.com/rvm/rvm bitbucket.org/mpapis/rvm) "" -> 2.0.1 fetch_version() { typeset _account _domain _pattern _repo _sources _values _version _sources=(${!1}) _pattern=$2 for _source in "${_sources[@]}" do IFS='/' read -r _domain _account _repo <<< "${_source}" _version="$( fetch_versions ${_domain} ${_account} ${_repo} | GREP_OPTIONS="" \grep "^${_pattern:-}" | tail -n 1 )" if [[ -n ${_version} ]] then echo "${_version}" return 0 fi done fail_with_code 4 "Exhausted all sources trying to fetch version '$version' of RVM!" } # Returns a sorted list of most recent tags from a repository fetch_versions() { typeset _account _domain _repo _url _domain=$1 _account=$2 _repo=$3 case ${_domain} in (bitbucket.org) _url="https://api.${_domain}/2.0/repositories/${_account}/${_repo}/refs/tags?sort=-name&pagelen=20" ;; (github.com) _url=https://api.${_domain}/repos/${_account}/${_repo}/tags ;; (*) _url=https://${_domain}/api/v3/repos/${_account}/${_repo}/tags ;; esac { __rvm_curl -sS "${_url}" || warn "...the preceeding error with code $? occurred while fetching $_url" ; } | \awk -v RS=',|values":' -v FS='"' '$2=="name"&&$4!="rvm"{print $4}' | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n } install_release() { typeset _source _sources _url _version _verify_pgp _sources=(${!1}) _version=$2 debug "Downloading RVM version ${_version}" for _source in "${_sources[@]}" do case ${_source} in (bitbucket.org*) _url="https://${_source}/get/${_version}.tar.gz" _verify_pgp="https://${_source}/downloads/${_version}.tar.gz.asc" ;; (*) _url="https://${_source}/archive/${_version}.tar.gz" _verify_pgp="https://${_source}/releases/download/${_version}/${_version}.tar.gz.asc" ;; esac get_and_unpack "${_url}" "rvm-${_version}.tgz" "$_verify_pgp" && return done return $? } install_head() { typeset _branch _source _sources _url _sources=(${!1}) _branch=$2 debug "Selected RVM branch ${_branch}" for _source in "${_sources[@]}" do case ${_source} in (bitbucket.org*) _url=https://${_source}/get/${_branch}.tar.gz ;; (*) _url=https://${_source}/archive/${_branch}.tar.gz ;; esac get_and_unpack "${_url}" "rvm-${_branch//\//_}.tgz" && return done return $? } # duplication marker dfkjdjngdfjngjcszncv # Drop in cd which _doesn't_ respect cdpath __rvm_cd() { typeset old_cdpath ret ret=0 old_cdpath="${CDPATH}" CDPATH="." chpwd_functions="" builtin cd "$@" || ret=$? CDPATH="${old_cdpath}" return $ret } get_package() { typeset _url _file _url="$1" _file="$2" log "Downloading ${_url}" __rvm_curl -sS ${_url} > ${rvm_archives_path}/${_file} || { _return=$? case $_return in # duplication marker lfdgzkngdkjvnfjknkjvcnbjkncvjxbn (60) log " Could not download '${_url}', you can read more about it here: https://rvm.io/support/fixing-broken-ssl-certificates/ To continue in insecure mode run 'echo insecure >> ~/.curlrc'. " ;; # duplication marker lfdgzkngdkjvnfjknkjvcnbjkncvjxbn (77) log " It looks like you have old certificates, you can read more about it here: https://rvm.io/support/fixing-broken-ssl-certificates/ " ;; # duplication marker lfdgzkngdkjvnfjknkjvcnbjkncvjxbn (141) log " Curl returned 141 - it is result of a segfault which means it's Curls fault. Try again and if it crashes more than a couple of times you either need to reinstall Curl or consult with your distribution manual and contact support. " ;; (*) log " Could not download '${_url}'. curl returned status '$_return'. 
" ;; esac return $_return } } # duplication marker flnglfdjkngjndkfjhsbdjgfghdsgfklgg rvm_install_gpg_setup() { export rvm_gpg_command { rvm_gpg_command="$( \which gpg2 2>/dev/null )" && [[ ${rvm_gpg_command} != "/cygdrive/"* ]] } || { rvm_gpg_command="$( \which gpg 2>/dev/null )" && [[ ${rvm_gpg_command} != "/cygdrive/"* ]] } || rvm_gpg_command="" debug "Detected GPG program: '$rvm_gpg_command'" [[ -n "$rvm_gpg_command" ]] || return $? } # duplication marker rdjgndfnghdfnhgfdhbghdbfhgbfdhbn verify_package_pgp() { if "${rvm_gpg_command}" --verify "$2" "$1" then log "GPG verified '$1'" else typeset _return=$? log "\ GPG signature verification failed for '$1' - '$3'! Try to install GPG v2 and then fetch the public key: ${SUDO_USER:+sudo }${rvm_gpg_command##*/} --keyserver hkp://pool.sks-keyservers.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB or if it fails: command curl -k -sSL https://rvm.io/mpapis.asc | ${SUDO_USER:+sudo }${rvm_gpg_command##*/} --import - command curl -k -sSL https://rvm.io/pkuczynski.asc | ${SUDO_USER:+sudo }${rvm_gpg_command##*/} --import - In case of further problems with validation please refer to https://rvm.io/rvm/security " exit ${_return} fi } verify_pgp() { [[ -n "${1:-}" ]] || { debug "No PGP url given, skipping." return 0 } get_package "$1" "$2.asc" || { debug "PGP url given but does not exist: '$1'" return 0 } rvm_install_gpg_setup || { log "Found PGP signature at: '$1', but no GPG software exists to validate it, skipping." return 0 } verify_package_pgp "${rvm_archives_path}/$2" "${rvm_archives_path}/$2.asc" "$1" } get_and_unpack() { typeset _url _file _patern _return _verify_pgp _url="$1" _file="$2" _verify_pgp="$3" get_package "$_url" "$_file" || return $? verify_pgp "$_verify_pgp" "$_file" || return $? [[ -d "${rvm_src_path}/rvm" ]] || \mkdir -p "${rvm_src_path}/rvm" __rvm_cd "${rvm_src_path}/rvm" || { _return=$? log "Could not change directory '${rvm_src_path}/rvm'." return $_return } # Remove existing installation typeset _cleanup_cmd _cleanup_cmd="rm -rf ${rvm_src_path}/rvm/{,.[!.],..?}*" $_cleanup_cmd || { _return=$? log "Could not remove old RVM sources. Try:\n\n\tsudo $_cleanup_cmd\n\nThen retry your task again." return $_return } # Unpack sources __rvm_debug_command $rvm_tar_command xzf ${rvm_archives_path}/${_file} ${rvm_tar_options:-} --strip-components 1 || { _return=$? log "Could not extract RVM sources." return $_return } } rvm_install_default_settings() { # Tracing, if asked for. if [[ "$*" == *--trace* ]] || (( ${rvm_trace_flag:-0} > 0 )) then set -o xtrace rvm_trace_flag=1 fi # Variable initialization, remove trailing slashes if they exist on HOME true \ ${rvm_trace_flag:=0} ${rvm_debug_flag:=0}\ ${rvm_ignore_rvmrc:=0} HOME="${HOME%%+(\/)}" if (( rvm_ignore_rvmrc == 0 )) then for rvmrc in /etc/rvmrc "$HOME/.rvmrc" do if [[ -s "$rvmrc" ]] then if GREP_OPTIONS="" \grep '^\s*rvm .*$' "$rvmrc" >/dev/null 2>&1 then printf "%b" " Error: $rvmrc is for rvm settings only. rvm CLI may NOT be called from within $rvmrc. 
Skipping the loading of $rvmrc " exit 1 else source "$rvmrc" fi fi done fi if [[ -z "${rvm_path:-}" ]] then if (( UID == 0 )) then rvm_user_install_flag=1 rvm_prefix="$HOME" rvm_path="${rvm_prefix}/.rvm" else rvm_user_install_flag=1 rvm_prefix="$HOME" rvm_path="${rvm_prefix}/.rvm" fi fi if [[ -z "${rvm_prefix}" ]] then rvm_prefix=$( dirname $rvm_path ) fi # duplication marker kkdfkgnjfndgjkndfjkgnkfjdgn [[ -n "${rvm_user_install_flag:-}" ]] || case "$rvm_path" in ($HOME/*|/${USER// /_}*) rvm_user_install_flag=1 ;; (*) rvm_user_install_flag=1 ;; esac } rvm_install_parse_params() { install_rubies=() install_gems=() flags=( ./scripts/install ) forwarded_flags=() while (( $# > 0 )) do token="$1" shift case "$token" in (--trace) set -o xtrace rvm_trace_flag=1 flags=( -x "${flags[@]}" "$token" ) forwarded_flags+=( "$token" ) ;; (--debug|--quiet-curl) flags+=( "$token" ) forwarded_flags+=( "$token" ) token=${token#--} token=${token//-/_} export "rvm_${token}_flag"=1 printf "%b" "Turning on ${token/_/ } mode.\n" ;; (--path) if [[ -n "${1:-}" ]] then rvm_path="$1" shift else fail "--path must be followed by a path." fi ;; (--branch|branch) # Install RVM from a given branch if [[ -n "${1:-}" ]] then case "$1" in (/*) branch=${1#/} ;; (*/) branch=master if [[ "${1%/}" -ne rvm ]] && [[ "${1%/}" -ne mpapis ]] then sources=(github.com/${1%/}/rvm) fi ;; (*/*) branch=${1#*/} if [[ "${1%%/*}" -ne rvm ]] && [[ "${1%%/*}" -ne mpapis ]] then sources=(github.com/${1%%/*}/rvm) fi ;; (*) branch="$1" ;; esac shift else fail "--branch must be followed by a branchname." fi ;; (--source|source) if [[ -n "${1:-}" ]] then if [[ "$1" = */*/* ]] then sources=($1) shift else fail "--source must be in the format <domain>/<account>/<repo>." fi else fail "--source must be followed by a source." fi ;; (--user-install|--ignore-dotfiles) token=${token#--} token=${token//-/_} export "rvm_${token}_flag"=1 printf "%b" "Turning on ${token/_/ } mode.\n" ;; (--auto-dotfiles) flags+=( "$token" ) export "rvm_auto_dotfiles_flag"=1 printf "%b" "Turning on auto dotfiles mode.\n" ;; (--auto) export "rvm_auto_dotfiles_flag"=1 printf "%b" "Warning, --auto is deprecated in favor of --auto-dotfiles.\n" ;; (--verify-downloads) if [[ -n "${1:-}" ]] then export rvm_verify_downloads_flag="$1" forwarded_flags+=( "$token" "$1" ) shift else fail "--verify-downloads must be followed by level(0|1|2)." 
fi ;; (--autolibs=*) flags+=( "$token" ) export rvm_autolibs_flag="${token#--autolibs=}" forwarded_flags+=( "$token" ) ;; (--without-gems=*|--with-gems=*|--with-default-gems=*) flags+=( "$token" ) value="${token#*=}" token="${token%%=*}" token="${token#--}" token="${token//-/_}" export "rvm_${token}"="${value}" printf "%b" "Installing RVM ${token/_/ }: ${value}.\n" ;; (--version|version) version="$1" shift ;; (head|master) version="head" branch="master" ;; (stable) version="latest" ;; (latest|latest-*|+([[:digit:]]).+([[:digit:]]).+([[:digit:]])) version="$token" ;; (--ruby) install_rubies+=( ruby ) ;; (--ruby=*) token=${token#--ruby=} install_rubies+=( ${token//,/ } ) ;; (--rails) install_gems+=( rails ) ;; (--gems=*) token=${token#--gems=} install_gems+=( ${token//,/ } ) ;; (--add-to-rvm-group) export rvm_add_users_to_rvm_group="$1" shift ;; (help) usage exit 0 ;; (*) usage exit 1 ;; esac done if (( ${#install_gems[@]} > 0 && ${#install_rubies[@]} == 0 )) then install_rubies=( ruby ) fi true "${version:=head}" true "${branch:=master}" if [[ -z "${sources[@]}" ]] then sources=("${DEFAULT_SOURCES[@]}") fi rvm_src_path="$rvm_path/src" rvm_archives_path="$rvm_path/archives" rvm_releases_url="https://rvm.io/releases" } rvm_install_validate_rvm_path() { case "$rvm_path" in (*[[:space:]]*) printf "%b" " It looks you are one of the happy *space* users (in home dir name), RVM is not yet fully ready for it, use this trick to fix it: sudo mkdir -p /${USER// /_}.rvm sudo chown -R \"$USER:\" /${USER// /_}.rvm echo \"export rvm_path=/${USER// /_}.rvm\" >> \"$HOME/.rvmrc\" and start installing again. " exit 2 ;; (/usr/share/ruby-rvm) printf "%b" " It looks you are one of the happy Ubuntu users, RVM packaged by Ubuntu is old and broken, follow this link for details how to fix: https://stackoverflow.com/a/9056395/497756 " [[ "${rvm_uses_broken_ubuntu_path:-no}" == "yes" ]] || exit 3 ;; esac if [[ "$rvm_path" != "/"* ]] then fail "The rvm install path must be fully qualified. Tried $rvm_path" fi } rvm_install_validate_volume_mount_mode() { \typeset path partition test_exec path=$rvm_path # Directory $rvm_path might not exists at this point so we need to traverse the tree upwards while [[ -n "$path" ]] do if [[ -d $path ]] then partition=`df -P $path | awk 'END{print $1}'` test_exec=$(mktemp $path/rvm-exec-test.XXXXXX) echo '#!/bin/sh' > "$test_exec" chmod +x "$test_exec" if ! "$test_exec" then rm -f "$test_exec" printf "%b" " It looks that scripts located in ${path}, which would be RVM destination ${rvm_path}, are not executable. One of the reasons might be that partition ${partition} holding this location is mounted in *noexec* mode, which prevents RVM from working correctly. Please verify your setup and re-mount partition ${partition} without the noexec option." 
exit 2 fi rm -f "$test_exec" break fi path=${path%/*} done } rvm_install_select_and_get_version() { typeset dir _version_release _version for dir in "$rvm_src_path" "$rvm_archives_path" do [[ -d "$dir" ]] || mkdir -p "$dir" done _version_release="${version}" case "${version}" in (head) _version_release="${branch}" install_head sources[@] ${branch:-master} ;; (latest) _version=$(fetch_version sources[@]) install_release sources[@] "$_version" ;; (latest-minor) version="$(<"$rvm_path/VERSION")" _version=$(fetch_version sources[@] ${version%.*}) install_release sources[@] "$_version" ;; (latest-*) _version=$(fetch_version sources[@] ${version#latest-}) install_release sources[@] "$_version" ;; (+([[:digit:]]).+([[:digit:]]).+([[:digit:]])) # x.y.z install_release sources[@] ${version} ;; (*) fail "Something went wrong, unrecognized version '$version'" ;; esac echo "${_version_release}" > "$rvm_path/RELEASE" } rvm_install_main() { [[ -f ./scripts/install ]] || { log "'./scripts/install' can not be found for installation, something went wrong, it usually means your 'tar' is broken, please report it here: https://github.com/rvm/rvm/issues" return 127 } # required flag - path to install flags+=( --path "$rvm_path" ) \command bash "${flags[@]}" } rvm_install_ruby_and_gems() ( if (( ${#install_rubies[@]} > 0 )) then source ${rvm_scripts_path:-${rvm_path}/scripts}/rvm source ${rvm_scripts_path:-${rvm_path}/scripts}/functions/version __rvm_print_headline for _ruby in ${install_rubies[@]} do command rvm "${forwarded_flags[@]}" install ${_ruby} done # set the first one as default, skip rest for _ruby in ${install_rubies[@]} do rvm "${forwarded_flags[@]}" alias create default ${_ruby} break done for _gem in ${install_gems[@]} do rvm "${forwarded_flags[@]}" all do gem install ${_gem} done printf "%b" " * To start using RVM you need to run \`source $rvm_path/scripts/rvm\` in all your open shell windows, in rare cases you need to reopen all shell windows. " if [[ "${install_gems[*]}" == *"rails"* ]] then printf "%b" " * To start using rails you need to run \`rails new <project_dir>\`. " fi fi ) rvm_install() { rvm_install_initialize rvm_install_commands_setup rvm_install_default_settings rvm_install_parse_params "$@" rvm_install_validate_rvm_path rvm_install_validate_volume_mount_mode rvm_install_select_and_get_version rvm_install_main rvm_install_ruby_and_gems } rvm_install "$@"
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/bin/fix-drupal-platform-ownership.sh
#!/bin/bash

# Help menu
print_help() {
cat <<-HELP
This script is used to fix the file ownership of a Drupal platform.
You need to provide the following arguments:
--root: Path to the root of your Drupal installation.
--script-user: Username of the user to whom you want to give file ownership (defaults to 'aegir').
--web-group: Web server group name (defaults to 'www-data').
Usage: (sudo) ${0##*/} --root=PATH --script-user=USER --web-group=GROUP
Example: (sudo) ${0##*/} --root=/var/aegir/platforms/drupal-7.50 --script-user=aegir --web-group=www-data
HELP
exit 0
}

if [ $(id -u) != 0 ]; then
  printf "Error: You must run this with sudo or root.\n"
  exit 1
fi

drupal_root=${1%/}
script_user=${2:-aegir}
web_group="${3:-www-data}"

# Parse Command Line Arguments
while [ "$#" -gt 0 ]; do
  case "$1" in
    --root=*)
      drupal_root="${1#*=}"
      ;;
    --script-user=*)
      script_user="${1#*=}"
      ;;
    --web-group=*)
      web_group="${1#*=}"
      ;;
    --help) print_help;;
    *)
      printf "Error: Invalid argument, run --help for valid arguments.\n"
      exit 1
  esac
  shift
done

if [ -z "${drupal_root}" ] \
  || [ ! -d "${drupal_root}/sites" ] \
  || [ ! -f "${drupal_root}/core/modules/system/system.module" ] \
  && [ ! -f "${drupal_root}/modules/system/system.module" ]; then
  printf "Error: Please provide a valid Drupal root directory.\n"
  exit 1
fi

if [ -z "${script_user}" ] \
  || [[ $(id -un "${script_user}" 2> /dev/null) != "${script_user}" ]]; then
  printf "Error: Please provide a valid user.\n"
  exit 1
fi

_TODAY=$(date +%y%m%d 2>&1)
_TODAY=${_TODAY//[^0-9]/}

if [ -e "${drupal_root}/sites/all/libraries/ownership-fixed-${_TODAY}.pid" ]; then
  exit 0
fi

cd ${drupal_root}

printf "Setting ownership of "${drupal_root}" to: user => "${script_user}" group => "users"\n"
chown ${script_user}:users ${drupal_root}
mkdir -p ${drupal_root}/sites/all/{modules,themes,libraries,drush}

### ctrl pid
rm -f ${drupal_root}/sites/all/libraries/ownership-fixed*.pid
touch ${drupal_root}/sites/all/libraries/ownership-fixed-${_TODAY}.pid

if [[ "${drupal_root}" =~ "/static/" ]] && [ -e "${drupal_root}/core" ]; then
  rm -f ${drupal_root}/../vendor/bin/drush*
  rm -f ${drupal_root}/vendor/bin/drush*
  rm -f ${drupal_root}/../drush/*
  rm -f ${drupal_root}/sites/development.services.yml
fi

chown -R ${script_user}:users \
  ${drupal_root}/sites/all/{modules,themes,libraries,includes,misc,profiles,core,vendor,drush}/*

if [[ "${drupal_root}" =~ "/static/" ]] && [ -e "${drupal_root}/core" ]; then
  chown -R ${script_user}:users ${drupal_root}/../vendor/*
  chown -R ${script_user}:users ${drupal_root}/../drush/*
  chown ${script_user}:users ${drupal_root}/../vendor
fi

chown ${script_user}:users \
  ${drupal_root}/sites/all/drush/drushrc.php \
  ${drupal_root}/sites \
  ${drupal_root}/sites/* \
  ${drupal_root}/sites/sites.php \
  ${drupal_root}/sites/all \
  ${drupal_root}/sites/all/{modules,themes,libraries,drush} \
  ${drupal_root}/{modules,themes,libraries,includes,misc,profiles,core,vendor}

### known exceptions
chown -R ${script_user}:www-data \
  ${drupal_root}/sites/all/libraries/tcpdf/cache &> /dev/null

echo "Done setting proper ownership of platform files and directories."
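An optional verification sketch, not part of the script: after it runs, anything still owned by a different user can be listed with find (the platform path and the 'aegir' user are the defaults assumed here):

find /var/aegir/platforms/drupal-7.50 ! -user aegir -ls | head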
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/bin/fix-drupal-platform-permissions.sh
#!/bin/bash

# Help menu
print_help() {
cat <<-HELP
This script is used to fix the file permissions of a Drupal platform.
You need to provide the following argument:
--root: Path to the root of your Drupal installation.
Usage: (sudo) ${0##*/} --root=PATH
Example: (sudo) ${0##*/} --root=/var/aegir/platforms/drupal-7.50
HELP
exit 0
}

if [ $(id -u) != 0 ]; then
  printf "Error: You must run this with sudo or root.\n"
  exit 1
fi

drupal_root=${1%/}

# Parse Command Line Arguments
while [ "$#" -gt 0 ]; do
  case "$1" in
    --root=*)
      drupal_root="${1#*=}"
      ;;
    --help) print_help;;
    *)
      printf "Error: Invalid argument, run --help for valid arguments.\n"
      exit 1
  esac
  shift
done

if [ -z "${drupal_root}" ] \
  || [ ! -d "${drupal_root}/sites" ] \
  || [ ! -f "${drupal_root}/core/modules/system/system.module" ] \
  && [ ! -f "${drupal_root}/modules/system/system.module" ]; then
  printf "Error: Please provide a valid Drupal root directory.\n"
  exit 1
fi

_TODAY=$(date +%y%m%d 2>&1)
_TODAY=${_TODAY//[^0-9]/}

if [ -e "${drupal_root}/sites/all/libraries/permissions-fixed-${_TODAY}.pid" ]; then
  exit 0
fi

cd ${drupal_root}

printf "Setting main permissions inside "${drupal_root}"...\n"
mkdir -p ${drupal_root}/sites/all/{modules,themes,libraries,drush}

### ctrl pid
rm -f ${drupal_root}/sites/all/libraries/permissions-fixed*.pid
touch ${drupal_root}/sites/all/libraries/permissions-fixed-${_TODAY}.pid

chmod 0644 ${drupal_root}/*.php
chmod 0664 ${drupal_root}/autoload.php
chmod 0751 ${drupal_root}/sites
chmod 0755 ${drupal_root}/sites/*
chmod 0644 ${drupal_root}/sites/*.php
chmod 0644 ${drupal_root}/sites/*.txt
chmod 0644 ${drupal_root}/sites/*.yml
chmod 0755 ${drupal_root}/sites/all/drush

printf "Setting permissions of all codebase directories inside "${drupal_root}/sites/all"...\n"
find ${drupal_root}/sites/all/{modules,themes,libraries} -type d -exec \
  chmod 02775 {} \;

printf "Setting permissions of all codebase directories inside "${drupal_root}"...\n"
find ${drupal_root}/{modules,themes,libraries,includes,misc,profiles,core,vendor} -type d -exec \
  chmod 02775 {} \;

if [[ "${drupal_root}" =~ "/static/" ]] && [ -e "${drupal_root}/core" ]; then
  printf "Setting permissions of all codebase directories inside "${drupal_root}/../vendor"...\n"
  find ${drupal_root}/../vendor -type d -exec \
    chmod 02775 {} \;
fi

printf "Setting permissions of all codebase files inside "${drupal_root}/sites/all"...\n"
find ${drupal_root}/sites/all/{modules,themes,libraries} -type f -exec \
  chmod 0664 {} \;

printf "Setting permissions of all codebase files inside "${drupal_root}"...\n"
find ${drupal_root}/{modules,themes,libraries,includes,misc,profiles,core,vendor} -type f -exec \
  chmod 0664 {} \;

if [[ "${drupal_root}" =~ "/static/" ]] && [ -e "${drupal_root}/core" ]; then
  printf "Setting permissions of all codebase files inside "${drupal_root}/../vendor"...\n"
  find ${drupal_root}/../vendor -type f -exec \
    chmod 0664 {} \;
fi

### known exceptions
chmod -R 775 ${drupal_root}/sites/all/libraries/tcpdf/cache &> /dev/null
chmod 0644 ${drupal_root}/.htaccess

echo "Done setting proper permissions on platform files and directories."
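A similar optional check for the permissions script: find's -perm test can spot directories that did not end up setgid 2775, or files that are not 0664 (the path is illustrative):

find /var/aegir/platforms/drupal-7.50/sites/all/modules -type d ! -perm 2775
find /var/aegir/platforms/drupal-7.50/sites/all/modules -type f ! -perm 0664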
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/bin/fix-drupal-site-ownership.sh
#!/bin/bash

# Help menu
print_help() {
cat <<-HELP
This script is used to fix the file ownership of a Drupal site.
You need to provide the following arguments:

  --site-path: Path to the Drupal site directory.
  --script-user: Username of the user to whom you want to give file
    ownership (defaults to 'aegir').
  --web-group: Web server group name (defaults to 'www-data').

Usage: (sudo) ${0##*/} --site-path=PATH --script-user=USER --web-group=GROUP
Example: (sudo) ${0##*/} --site-path=/var/aegir/platforms/drupal-7.50/sites/example.com --script-user=aegir --web-group=www-data
HELP
exit 0
}

if [ $(id -u) != 0 ]; then
  printf "Error: You must run this with sudo or root.\n"
  exit 1
fi

site_path=${1%/}
script_user=${2:-aegir}
web_group="${3:-www-data}"

# Parse Command Line Arguments
while [ "$#" -gt 0 ]; do
  case "$1" in
    --site-path=*)
      site_path="${1#*=}"
      ;;
    --script-user=*)
      script_user="${1#*=}"
      ;;
    --web-group=*)
      web_group="${1#*=}"
      ;;
    --help) print_help;;
    *)
      printf "Error: Invalid argument, run --help for valid arguments.\n"
      exit 1
  esac
  shift
done

if [ -z "${site_path}" ] || [ ! -f "${site_path}/settings.php" ] ; then
  printf "Error: Please provide a valid Drupal site directory.\n"
  exit 1
fi

if [ -z "${script_user}" ] \
  || [[ $(id -un "${script_user}" 2> /dev/null) != "${script_user}" ]]; then
  printf "Error: Please provide a valid user.\n"
  exit 1
fi

if [ -e "${site_path}/libraries/ownership-fixed.pid" ]; then
  rm -f ${site_path}/libraries/ownership-fixed.pid
fi

_TODAY=$(date +%y%m%d 2>&1)
_TODAY=${_TODAY//[^0-9]/}

if [ -e "${site_path}/../sites/default/default.services.yml" ]; then
  if [ ! -e "${site_path}/modules/default.services.yml" ] ; then
    cp -a ${site_path}/../sites/default/default.services.yml ${site_path}/modules/
  fi
fi

if [ -e "${site_path}/modules/services.yml" ] && [ ! -e "${site_path}/services.yml" ]; then
  ln -s ${site_path}/modules/services.yml ${site_path}/services.yml
fi

cd ${site_path}

printf "Setting ownership of key files and directories inside "${site_path}" to: user => "${script_user}"\n"

if [ ! -e "${site_path}/libraries" ]; then
  mkdir ${site_path}/libraries
fi

### directory and settings files - site level
chown ${script_user}:users ${site_path} &> /dev/null
chown ${script_user}:www-data \
  ${site_path}/{local.settings.php,settings.php,civicrm.settings.php,solr.php} &> /dev/null

### modules,themes,libraries - site level
chown -R ${script_user}:users \
  ${site_path}/{modules,themes,libraries}/* &> /dev/null
chown ${script_user}:users \
  ${site_path}/drushrc.php \
  ${site_path}/modules/*.yml \
  ${site_path}/{modules,themes,libraries} &> /dev/null

if [ ! -e "${site_path}/files/ownership-fixed-${_TODAY}.pid" ]; then

  ### ctrl pid
  rm -f ${site_path}/files/ownership-fixed*.pid
  touch ${site_path}/files/ownership-fixed-${_TODAY}.pid

  ### files - site level
  chown -L -R ${script_user}:www-data ${site_path}/files &> /dev/null
  chown ${script_user}:www-data ${site_path}/files &> /dev/null
  chown ${script_user}:www-data ${site_path}/files/{tmp,images,pictures,css,js} &> /dev/null
  chown ${script_user}:www-data ${site_path}/files/{advagg_css,advagg_js,ctools} &> /dev/null
  chown ${script_user}:www-data ${site_path}/files/{ctools/css,imagecache,locations} &> /dev/null
  chown ${script_user}:www-data ${site_path}/files/{xmlsitemap,deployment,styles,private} &> /dev/null
  chown ${script_user}:www-data ${site_path}/files/{civicrm,civicrm/templates_c} &> /dev/null
  chown ${script_user}:www-data ${site_path}/files/{civicrm/upload,civicrm/persist} &> /dev/null
  chown ${script_user}:www-data ${site_path}/files/{civicrm/custom,civicrm/dynamic} &> /dev/null

  ### private - site level
  chown -L -R ${script_user}:www-data ${site_path}/private &> /dev/null
  chown ${script_user}:www-data ${site_path}/private &> /dev/null
  chown ${script_user}:www-data ${site_path}/private/{files,temp} &> /dev/null
  chown ${script_user}:www-data ${site_path}/private/files/backup_migrate &> /dev/null
  chown ${script_user}:www-data ${site_path}/private/files/backup_migrate/{manual,scheduled} &> /dev/null
  chown -L -R ${script_user}:www-data ${site_path}/private/config &> /dev/null
fi

echo "Done setting proper ownership of site files and directories."
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/bin/fix-drupal-site-permissions.sh
#!/bin/bash

# Help menu
print_help() {
cat <<-HELP
This script is used to fix the file permissions of a Drupal site.
You need to provide the following argument:

  --site-path: Path to the Drupal site's directory.

Usage: (sudo) ${0##*/} --site-path=PATH
Example: (sudo) ${0##*/} --site-path=/var/aegir/platforms/drupal-7.50/sites/example.com
HELP
exit 0
}

if [ $(id -u) != 0 ]; then
  printf "Error: You must run this with sudo or root.\n"
  exit 1
fi

site_path=${1%/}

# Parse Command Line Arguments
while [ "$#" -gt 0 ]; do
  case "$1" in
    --site-path=*)
      site_path="${1#*=}"
      ;;
    --help) print_help;;
    *)
      printf "Error: Invalid argument, run --help for valid arguments.\n"
      exit 1
  esac
  shift
done

if [ -z "${site_path}" ] || [ ! -f "${site_path}/settings.php" ] ; then
  printf "Error: Please provide a valid Drupal site directory.\n"
  exit 1
fi

_TODAY=$(date +%y%m%d 2>&1)
_TODAY=${_TODAY//[^0-9]/}

if [ -e "${site_path}/libraries/permissions-fixed.pid" ]; then
  rm -f ${site_path}/libraries/permissions-fixed.pid
fi

cd ${site_path}

printf "Setting correct permissions on key files and directories inside "${site_path}"...\n"

### directory and settings files - site level
if [ -e "${site_path}/aegir.services.yml" ]; then
  rm -f ${site_path}/aegir.services.yml
fi
find ${site_path}/*.php -type f -exec chmod 0440 {} \; &> /dev/null
chmod 0640 ${site_path}/civicrm.settings.php &> /dev/null

### modules,themes,libraries - site level
find ${site_path}/{modules,themes,libraries} -type d -exec \
  chmod 02775 {} \; &> /dev/null
find ${site_path}/{modules,themes,libraries} -type f -exec \
  chmod 0664 {} \; &> /dev/null

if [ ! -e "${site_path}/files/permissions-fixed-${_TODAY}.pid" ]; then

  ### ctrl pid
  rm -f ${site_path}/files/permissions-fixed*.pid
  touch ${site_path}/files/permissions-fixed-${_TODAY}.pid

  ### files - site level
  find ${site_path}/files/ -type d -exec chmod 02775 {} \; &> /dev/null
  find ${site_path}/files/ -type f -exec chmod 0664 {} \; &> /dev/null
  chmod 02775 ${site_path}/files &> /dev/null

  ### private - site level
  find ${site_path}/private/ -type d -exec chmod 02775 {} \; &> /dev/null
  find ${site_path}/private/ -type f -exec chmod 0664 {} \; &> /dev/null

  ### known exceptions
  chmod 0644 ${site_path}/files/.htaccess
fi

echo "Done setting proper permissions on site files and directories."
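Both permission scripts rely on the 02775/0664 scheme; a small self-contained illustration of what those modes do, using a throwaway /tmp directory that is not part of the scripts:

  # 02775 on a directory = rwxrwsr-x: the setgid bit (leading 2) makes new
  # entries inherit the directory's group, so the web server group keeps access.
  # 0664 on a file = rw-rw-r--: owner and group can write, others read only.
  mkdir -p /tmp/perm-demo && chmod 02775 /tmp/perm-demo
  touch /tmp/perm-demo/example.txt && chmod 0664 /tmp/perm-demo/example.txt
  ls -ld /tmp/perm-demo /tmp/perm-demo/example.txt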
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/host/host-fire.sh
#!/bin/bash

HOME=/root
SHELL=/bin/bash
PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin
export PATH=${PATH}
export SHELL=${SHELL}
export HOME=${HOME}

csf_flood_guard() {
  thisCountCsf=`ps aux | grep -v "grep" | grep -v "null" | grep --count "/csf"`
  if [ ${thisCountCsf} -gt "4" ]; then
    echo "$(date 2>&1) Too many ${thisCountCsf} csf processes killed" >> \
      /var/log/csf-count.kill.log
    kill -9 $(ps aux | grep '[c]sf' | awk '{print $2}') &> /dev/null
    csf -tf
    wait
    csf -df
    wait
  fi
  thisCountFire=`ps aux | grep -v "grep" | grep -v "null" | grep --count "/fire.sh"`
  if [ ${thisCountFire} -gt "9" ]; then
    echo "$(date 2>&1) Too many ${thisCountFire} fire.sh processes killed and rules purged" >> \
      /var/log/fire-purge.kill.log
    csf -tf
    wait
    csf -df
    wait
    kill -9 $(ps aux | grep '[f]ire.sh' | awk '{print $2}') &> /dev/null
  elif [ ${thisCountFire} -gt "7" ]; then
    echo "$(date 2>&1) Too many ${thisCountFire} fire.sh processes killed" >> \
      /var/log/fire-count.kill.log
    csf -tf
    wait
    kill -9 $(ps aux | grep '[f]ire.sh' | awk '{print $2}') &> /dev/null
  fi
}
[ ! -e "/var/run/water.pid" ] && csf_flood_guard

guest_proc_monitor() {
  for i in `dir -d /vservers/*`; do
    _THIS_VM=`echo $i | cut -d'/' -f3 | awk '{ print $1}'`
    _VS_NAME=`echo ${_THIS_VM} | cut -d'/' -f3 | awk '{ print $1}'`
    if [ -e "${i}/var/xdrago/proc_num_ctrl.cgi" ] \
      && [ ! -e "${i}/var/run/fmp_wait.pid" ] \
      && [ ! -e "${i}/var/run/boa_wait.pid" ] \
      && [ ! -e "${i}/var/run/boa_run.pid" ] \
      && [ ! -e "${i}/var/run/mysql_restart_running.pid" ] \
      && [ -e "/usr/var/run${i}" ]; then
      vserver ${_VS_NAME} exec perl /var/xdrago/proc_num_ctrl.cgi
    fi
  done
}
###guest_proc_monitor

guest_guard() {
  if [ ! -e "/var/run/fire.pid" ] && [ ! -e "/var/run/water.pid" ]; then
    touch /var/run/fire.pid
    echo start `date`
    for i in `dir -d /vservers/*`; do
      if [ -e "${i}/var/xdrago/monitor/ssh.log" ] && [ -e "/usr/var/run${i}" ]; then
        for _IP in `cat ${i}/var/xdrago/monitor/ssh.log | cut -d '#' -f1 | sort`; do
          _FW_TEST=
          _FF_TEST=
          _FW_TEST=$(csf -g ${_IP} 2>&1)
          _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1)
          if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then
            echo "${_IP} already denied or allowed on port 22"
            if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then
              csf -dr ${_IP}
              csf -tr ${_IP}
            fi
          else
            echo "Deny ${_IP} on ports 21,22,443,80 in the next 1h"
            csf -td ${_IP} 3600 -p 21
            csf -td ${_IP} 3600 -p 22
            csf -td ${_IP} 3600 -p 443
            csf -td ${_IP} 3600 -p 80
          fi
        done
      fi
      if [ -e "${i}/var/xdrago/monitor/web.log" ] && [ -e "/usr/var/run${i}" ]; then
        for _IP in `cat ${i}/var/xdrago/monitor/web.log | cut -d '#' -f1 | sort`; do
          _FW_TEST=
          _FF_TEST=
          _FW_TEST=$(csf -g ${_IP} 2>&1)
          _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1)
          if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then
            echo "${_IP} already denied or allowed on port 80"
            if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then
              csf -dr ${_IP}
              csf -tr ${_IP}
            fi
          else
            echo "Deny ${_IP} on ports 21,22,443,80 in the next 1h"
            csf -td ${_IP} 3600 -p 21
            csf -td ${_IP} 3600 -p 22
            csf -td ${_IP} 3600 -p 443
            csf -td ${_IP} 3600 -p 80
          fi
        done
      fi
      if [ -e "${i}/var/xdrago/monitor/ftp.log" ] && [ -e "/usr/var/run${i}" ]; then
        for _IP in `cat ${i}/var/xdrago/monitor/ftp.log | cut -d '#' -f1 | sort`; do
          _FW_TEST=
          _FF_TEST=
          _FW_TEST=$(csf -g ${_IP} 2>&1)
          _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1)
          if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then
            echo "${_IP} already denied or allowed on port 21"
            if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then
              csf -dr ${_IP}
              csf -tr ${_IP}
            fi
          else
            echo "Deny ${_IP} on ports 21,22,443,80 in the next 1h"
            csf -td ${_IP} 3600 -p 21
            csf -td ${_IP} 3600 -p 22
            csf -td ${_IP} 3600 -p 443
            csf -td ${_IP} 3600 -p 80
          fi
        done
      fi
      echo Completed for $i `date`
    done
    echo fin `date`
    rm -f /var/run/fire.pid
  fi
}

if [ -e "/vservers" ] \
  && [ -e "/etc/csf/csf.deny" ] \
  && [ ! -e "/var/run/water.pid" ] \
  && [ -x "/usr/sbin/csf" ]; then
  [ ! -e "/var/run/water.pid" ] && guest_guard
  sleep 10
  [ ! -e "/var/run/water.pid" ] && guest_guard
  sleep 10
  [ ! -e "/var/run/water.pid" ] && guest_guard
  sleep 10
  [ ! -e "/var/run/water.pid" ] && guest_guard
  sleep 10
  [ ! -e "/var/run/water.pid" ] && guest_guard
  rm -f /var/run/fire.pid
fi
exit 0
###EOF2023###
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/host/host-water.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} whitelist_ip_pingdom() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing pingdom ips from csf.allow sed -i "s/.*pingdom.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS=$(curl -k -s https://my.pingdom.com/probes/feed \ | grep '<pingdom:ip>' \ | sed 's/.*::.*//g' \ | sed 's/[^0-9\.]//g' \ | sort \ | uniq 2>&1) echo _IPS pingdom list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow pingdom ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # pingdom ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done } whitelist_ip_cloudflare() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing cloudflare ips from csf.allow sed -i "s/.*cloudflare.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS=$(curl -k -s https://www.cloudflare.com/ips-v4 \ | sed 's/.*::.*//g' \ | sed 's/[^0-9\.\/]//g' \ | sort \ | uniq 2>&1) echo _IPS cloudflare list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow cloudflare ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # cloudflare ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done } whitelist_ip_imperva() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing imperva ips from csf.allow sed -i "s/.*imperva.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS=$(curl -k -s --data "resp_format=text" https://my.imperva.com/api/integration/v1/ips \ | sed 's/.*::.*//g' \ | sed 's/[^0-9\.\/]//g' \ | sort \ | uniq 2>&1) echo _IPS imperva list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow imperva ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # imperva ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done } whitelist_ip_googlebot() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing googlebot ips from csf.allow sed -i "s/.*googlebot.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS="66.249.64.0/19" echo _IPS googlebot list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow googlebot ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # googlebot ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done sed -i "s/66.249..*//g" /etc/csf/csf.deny wait } whitelist_ip_microsoft() { if [ ! 
-e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing microsoft ips from csf.allow sed -i "s/.*microsoft.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS="65.52.0.0/14 199.30.16.0/20" echo _IPS microsoft list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow microsoft ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # microsoft ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done sed -i "s/65.5.*//g" /etc/csf/csf.deny wait sed -i "s/199.30..*//g" /etc/csf/csf.deny wait } whitelist_ip_sucuri() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing sucuri ips from csf.allow sed -i "s/.*sucuri.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS="192.88.134.0/23 185.93.228.0/22 66.248.200.0/22 208.109.0.0/22" echo _IPS sucuri list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow sucuri ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # sucuri ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done } whitelist_ip_authzero() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing authzero ips from csf.allow sed -i "s/.*authzero.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS="35.167.77.121 35.166.202.113 35.160.3.103 54.183.64.135 54.67.77.38 54.67.15.170 54.183.204.205 35.171.156.124 18.233.90.226 3.211.189.167 52.28.56.226 52.28.45.240 52.16.224.164 52.16.193.66 34.253.4.94 52.50.106.250 52.211.56.181 52.213.38.246 52.213.74.69 52.213.216.142 35.156.51.163 35.157.221.52 52.28.184.187 52.28.212.16 52.29.176.99 52.57.230.214 54.76.184.103 52.210.122.50 52.208.95.174 52.210.122.50 52.208.95.174 54.76.184.103 52.64.84.177 52.64.111.197 54.153.131.0 13.210.52.131 13.55.232.24 13.54.254.182 52.62.91.160 52.63.36.78 52.64.120.184 54.66.205.24 54.79.46.4" echo _IPS authzero list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow authzero ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # authzero ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done } whitelist_ip_site24x7_extra() { _IPS="87.252.213.0/24 89.36.170.0/24 185.172.199.128/26 185.230.214.0/23 185.172.199.0/27" echo _IPS site24x7_extra list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow site24x7_extra ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # site24x7_extra ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done if [ -e "/root/.ignore.site24x7.firewall.cnf" ]; then for _IP in ${_IPS}; do echo checking csf.ignore site24x7_extra ${_IP} now... 
_IP_CHECK=$(cat /etc/csf/csf.ignore \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.ignore" echo "${_IP} # site24x7_extra ips" >> /etc/csf/csf.ignore else echo "${_IP} already listed in /etc/csf/csf.ignore" fi done fi } whitelist_ip_site24x7() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing site24x7 ips from csf.allow sed -i "s/.*site24x7.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait echo removing site24x7 ips from csf.ignore sed -i "s/.*site24x7.*//g" /etc/csf/csf.ignore wait sed -i "/^$/d" /etc/csf/csf.ignore wait fi _IPS=$(host site24x7.enduserexp.com 1.1.1.1 \ | grep 'has address' \ | cut -d ' ' -f4 \ | sed 's/[^0-9\.]//g' \ | sort \ | uniq 2>&1) if [ -z "${_IPS}" ] \ || [[ ! "${_IPS}" =~ "104.236.16.22" ]] \ || [[ "${_IPS}" =~ "HINFO" ]]; then _IPS=$(dig site24x7.enduserexp.com \ | grep 'IN.*A' \ | cut -d 'A' -f2 \ | sed 's/[^0-9\.]//g' \ | sort \ | uniq 2>&1) fi echo _IPS site24x7 list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow site24x7 ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # site24x7 ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done if [ -e "/root/.ignore.site24x7.firewall.cnf" ]; then for _IP in ${_IPS}; do echo checking csf.ignore site24x7 ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.ignore \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.ignore" echo "${_IP} # site24x7 ips" >> /etc/csf/csf.ignore else echo "${_IP} already listed in /etc/csf/csf.ignore" fi done fi if [ ! -e "/root/.whitelist.site24x7.cnf" ]; then csf -tf wait csf -df wait touch /root/.whitelist.site24x7.cnf fi } local_ip_rg() { if [ -e "/root/.local.IP.list" ]; then echo "the file /root/.local.IP.list already exists" for _IP in `hostname -I`; do _IP_CHECK=$(cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep ${_IP} 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /root/.local.IP.list" echo "${_IP} # local IP address" >> /root/.local.IP.list else echo "${_IP} already listed in /root/.local.IP.list" fi done for _IP in `cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s"`; do echo removing ${_IP} from d/t firewall rules csf -ar ${_IP} &> /dev/null csf -dr ${_IP} &> /dev/null csf -tr ${_IP} &> /dev/null if [ ! 
-e "/root/.local.IP.csf.listed" ]; then echo removing ${_IP} from csf.ignore sed -i "s/^${_IP} .*//g" /etc/csf/csf.ignore wait echo removing ${_IP} from csf.allow sed -i "s/^${_IP} .*//g" /etc/csf/csf.allow wait echo adding ${_IP} to csf.ignore echo "${_IP} # local.IP.list" >> /etc/csf/csf.ignore wait echo adding ${_IP} to csf.allow echo "${_IP} # local.IP.list" >> /etc/csf/csf.allow wait fi done touch /root/.local.IP.csf.listed else echo "the file /root/.local.IP.list does not exist" rm -f /root/.tmp.IP.list* rm -f /root/.local.IP.list* for _IP in `hostname -I`;do echo ${_IP} >> /root/.tmp.IP.list;done for _IP in `cat /root/.tmp.IP.list \ | sort \ | uniq`;do echo "${_IP} # local IP address" >> /root/.local.IP.list;done rm -f /root/.tmp.IP.list* fi sed -i "/^$/d" /etc/csf/csf.ignore &> /dev/null wait sed -i "/^$/d" /etc/csf/csf.allow &> /dev/null wait } guard_stats() { for i in `dir -d /vservers/*`; do if [ -e "/root/.local.IP.list" ]; then cp -af /root/.local.IP.list ${i}/root/.local.IP.list fi if [ ! -e "${i}/${_HX}" ] && [ -e "${i}/${_HA}" ]; then mv -f ${i}/${_HA} ${i}/${_HX} fi if [ ! -e "${i}/${_WX}" ] && [ -e "${i}/${_WA}" ]; then mv -f ${i}/${_WA} ${i}/${_WX} fi if [ ! -e "${i}/${_FX}" ] && [ -e "${i}/${_FA}" ]; then mv -f ${i}/${_FA} ${i}/${_FX} fi if [ -e "${i}/${_HA}" ] && [ -e "/usr/var/run${i}" ]; then for _IP in `cat ${i}/${_HA} | cut -d '#' -f1 | sort | uniq`; do _IP_RV= _NR_TEST="0" _NR_TEST=$(tr -s ' ' '\n' < ${i}/${_HA} | grep -c ${_IP} 2>&1) if [ -e "/root/.local.IP.list" ]; then _IP_CHECK=$(cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep ${_IP} 2>&1) if [ ! -z "${_IP_CHECK}" ]; then _NR_TEST="0" echo "${_IP} is a local IP address, ignoring ${i}/${_HA}" fi fi if [ ! -z "${_NR_TEST}" ] && [ "${_NR_TEST}" -ge "24" ]; then echo ${_IP} ${_NR_TEST} _FW_TEST= _FF_TEST= _FW_TEST=$(csf -g ${_IP} 2>&1) _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1) if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then echo "${_IP} already denied or allowed on port 22" if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then csf -dr ${_IP} csf -tr ${_IP} fi else _IP_RV=$(host -s ${_IP} 2>&1) if [ "${_NR_TEST}" -ge "64" ]; then echo "Deny ${_IP} permanently ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} do not delete Brute force SSH Server ${_NR_TEST} attacks ${_IP_RV} else echo "Deny ${_IP} until limits rotation ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} Brute force SSH Server ${_NR_TEST} attacks ${_IP_RV} fi fi fi done fi if [ -e "${i}/${_WA}" ] && [ -e "/usr/var/run${i}" ]; then for _IP in `cat ${i}/${_WA} | cut -d '#' -f1 | sort | uniq`; do _IP_RV= _NR_TEST="0" _NR_TEST=$(tr -s ' ' '\n' < ${i}/${_WA} | grep -c ${_IP} 2>&1) if [ -e "/root/.local.IP.list" ]; then _IP_CHECK=$(cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep ${_IP} 2>&1) if [ ! -z "${_IP_CHECK}" ]; then _NR_TEST="0" echo "${_IP} is a local IP address, ignoring ${i}/${_WA}" fi fi if [ ! 
-z "${_NR_TEST}" ] && [ "${_NR_TEST}" -ge "24" ]; then echo ${_IP} ${_NR_TEST} _FW_TEST= _FF_TEST= _FW_TEST=$(csf -g ${_IP} 2>&1) _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1) if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then echo "${_IP} already denied or allowed on port 80" if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then csf -dr ${_IP} csf -tr ${_IP} fi else _IP_RV=$(host -s ${_IP} 2>&1) if [ "${_NR_TEST}" -ge "64" ]; then echo "Deny ${_IP} permanently ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} do not delete Brute force Web Server ${_NR_TEST} attacks ${_IP_RV} else echo "Deny ${_IP} until limits rotation ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} Brute force Web Server ${_NR_TEST} attacks ${_IP_RV} fi fi fi done fi if [ -e "${i}/${_FA}" ] && [ -e "/usr/var/run${i}" ]; then for _IP in `cat ${i}/${_FA} | cut -d '#' -f1 | sort | uniq`; do _IP_RV= _NR_TEST="0" _NR_TEST=$(tr -s ' ' '\n' < ${i}/${_FA} | grep -c ${_IP} 2>&1) if [ -e "/root/.local.IP.list" ]; then _IP_CHECK=$(cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep ${_IP} 2>&1) if [ ! -z "${_IP_CHECK}" ]; then _NR_TEST="0" echo "${_IP} is a local IP address, ignoring ${i}/${_FA}" fi fi if [ ! -z "${_NR_TEST}" ] && [ "${_NR_TEST}" -ge "24" ]; then echo ${_IP} ${_NR_TEST} _FW_TEST= _FF_TEST= _FW_TEST=$(csf -g ${_IP} 2>&1) _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1) if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then echo "${_IP} already denied or allowed on port 21" if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then csf -dr ${_IP} csf -tr ${_IP} fi else _IP_RV=$(host -s ${_IP} 2>&1) if [ "${_NR_TEST}" -ge "64" ]; then echo "Deny ${_IP} permanently ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} do not delete Brute force FTP Server ${_NR_TEST} attacks ${_IP_RV} else echo "Deny ${_IP} until limits rotation ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} Brute force FTP Server ${_NR_TEST} attacks ${_IP_RV} fi fi fi done fi done } whitelist_ip_dns() { csf -tr 1.1.1.1 csf -tr 1.0.0.1 csf -dr 1.1.1.1 csf -dr 1.0.0.1 sed -i "s/.*1.1.1.1.*//g" /etc/csf/csf.allow sed -i "s/.*1.1.1.1.*//g" /etc/csf/csf.ignore sed -i "s/.*1.0.0.1.*//g" /etc/csf/csf.allow sed -i "s/.*1.0.0.1.*//g" /etc/csf/csf.ignore echo "tcp|out|d=53|d=1.1.1.1 # Cloudflare DNS" >> /etc/csf/csf.allow echo "tcp|out|d=53|d=1.0.0.1 # Cloudflare DNS" >> /etc/csf/csf.allow sed -i "s/.*8.8.8.8.*//g" /etc/csf/csf.allow sed -i "s/.*8.8.8.8.*//g" /etc/csf/csf.ignore sed -i "s/.*8.8.4.4.*//g" /etc/csf/csf.allow sed -i "s/.*8.8.4.4.*//g" /etc/csf/csf.ignore echo "tcp|out|d=53|d=8.8.8.8 # Google DNS" >> /etc/csf/csf.allow echo "tcp|out|d=53|d=8.8.4.4 # Google DNS" >> /etc/csf/csf.allow sed -i "/^$/d" /etc/csf/csf.ignore sed -i "/^$/d" /etc/csf/csf.allow } if [ -e "/vservers" ] \ && [ -e "/etc/csf/csf.deny" ] \ && [ -x "/usr/sbin/csf" ]; then if [ -e "/root/.local.IP.list" ]; then echo local dr/tr start `date` for _IP in `cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s"`; do csf -dr ${_IP} &> /dev/null csf -tr ${_IP} &> /dev/null done fi n=$((RANDOM%120+90)) touch /var/run/water.pid echo Waiting $n seconds... 
sleep $n whitelist_ip_dns whitelist_ip_pingdom whitelist_ip_cloudflare whitelist_ip_googlebot whitelist_ip_microsoft [ -e "/root/.extended.firewall.exceptions.cnf" ] && whitelist_ip_imperva [ -e "/root/.extended.firewall.exceptions.cnf" ] && whitelist_ip_sucuri [ -e "/root/.extended.firewall.exceptions.cnf" ] && whitelist_ip_authzero [ -e "/root/.extended.firewall.exceptions.cnf" ] && whitelist_ip_site24x7_extra [ -e "/root/.extended.firewall.exceptions.cnf" ] && whitelist_ip_site24x7 if [ -e "/root/.full.csf.cleanup.cnf" ]; then sed -i "s/.*do not delete.*//g" /etc/csf/csf.deny sed -i "/^$/d" /etc/csf/csf.deny fi kill -9 $(ps aux | grep '[C]onfigServer' | awk '{print $2}') &> /dev/null killall sleep &> /dev/null rm -f /etc/csf/csf.error service lfd restart wait csf -e wait csf -tf wait csf -q ### Linux kernel TCP SACK CVEs mitigation ### CVE-2019-11477 SACK Panic ### CVE-2019-11478 SACK Slowness ### CVE-2019-11479 Excess Resource Consumption Due to Low MSS Values if [ -x "/usr/sbin/csf" ] && [ -e "/etc/csf/csf.deny" ]; then _SACK_TEST=$(ip6tables --list | grep tcpmss 2>&1) if [[ ! "${_SACK_TEST}" =~ "tcpmss" ]]; then sysctl net.ipv4.tcp_mtu_probing=0 &> /dev/null iptables -A INPUT -p tcp -m tcpmss --mss 1:500 -j DROP &> /dev/null ip6tables -A INPUT -p tcp -m tcpmss --mss 1:500 -j DROP &> /dev/null fi fi echo local start `date` local_ip_rg _HA=var/xdrago/monitor/hackcheck.archive.log _HX=var/xdrago/monitor/hackcheck.archive.x3.log _WA=var/xdrago/monitor/scan_nginx.archive.log _WX=var/xdrago/monitor/scan_nginx.archive.x3.log _FA=var/xdrago/monitor/hackftp.archive.log _FX=var/xdrago/monitor/hackftp.archive.x3.log echo guard start `date` guard_stats rm -f /vservers/*/var/xdrago/monitor/ssh.log rm -f /vservers/*/var/xdrago/monitor/web.log rm -f /vservers/*/var/xdrago/monitor/ftp.log kill -9 $(ps aux | grep '[C]onfigServer' | awk '{print $2}') &> /dev/null killall sleep &> /dev/null rm -f /etc/csf/csf.error service lfd restart wait sed -i "s/.*DHCP.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow _DHCP_TEST=$(grep DHCPREQUEST /var/log/syslog | cut -d ' ' -f13 | sort | uniq 2>&1) if [[ "${_DHCP_TEST}" =~ "port" ]]; then for _IP in `grep DHCPREQUEST /var/log/syslog | cut -d ' ' -f12 | sort | uniq`;do echo "udp|out|d=67|d=${_IP} # Local DHCP out" >> /etc/csf/csf.allow;done else for _IP in `grep DHCPREQUEST /var/log/syslog | cut -d ' ' -f13 | sort | uniq`;do echo "udp|out|d=67|d=${_IP} # Local DHCP out" >> /etc/csf/csf.allow;done fi csf -e wait csf -q ### Linux kernel TCP SACK CVEs mitigation ### CVE-2019-11477 SACK Panic ### CVE-2019-11478 SACK Slowness ### CVE-2019-11479 Excess Resource Consumption Due to Low MSS Values if [ -x "/usr/sbin/csf" ] && [ -e "/etc/csf/csf.deny" ]; then _SACK_TEST=$(ip6tables --list | grep tcpmss 2>&1) if [[ ! "${_SACK_TEST}" =~ "tcpmss" ]]; then sysctl net.ipv4.tcp_mtu_probing=0 &> /dev/null iptables -A INPUT -p tcp -m tcpmss --mss 1:500 -j DROP &> /dev/null ip6tables -A INPUT -p tcp -m tcpmss --mss 1:500 -j DROP &> /dev/null fi fi rm -f /var/run/water.pid echo guard fin `date` fi ntpdate pool.ntp.org _IF_CDP=$(ps aux | grep '[c]dp_io' | awk '{print $2}') if [ -z "${_IF_CDP}" ] && [ ! -e "/root/.no.swap.clear.cnf" ]; then swapoff -a swapon -a fi exit 0 ###EOF2023###
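The whitelist_ip_* functions in host-water.sh all share one append-if-missing idiom against /etc/csf/csf.allow; a condensed sketch with a hypothetical helper name (198.51.100.7 is a documentation address):

  add_allow_once() {
    local _IP="$1" _TAG="$2"
    _IP_CHECK=$(cut -d '#' -f1 /etc/csf/csf.allow | grep "${_IP}" 2>&1)
    if [ -z "${_IP_CHECK}" ]; then
      echo "tcp|in|d=80|s=${_IP} # ${_TAG} ips" >> /etc/csf/csf.allow
    fi
  }
  add_allow_once 198.51.100.7 pingdom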
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/clear.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} aptYesUnth="-y --allow-unauthenticated" check_root() { if [ `whoami` = "root" ]; then if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _B_NICE=${_B_NICE//[^0-9]/} fi if [ -z "${_B_NICE}" ]; then _B_NICE=10 fi chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi } check_root os_detection_minimal() { _THIS_RV=$(lsb_release -sc 2>&1) if [ "${_THIS_RV}" = "chimaera" ] \ || [ "${_THIS_RV}" = "beowulf" ] \ || [ "${_THIS_RV}" = "bullseye" ] \ || [ "${_THIS_RV}" = "buster" ]; then _APT_UPDATE="apt-get update --allow-releaseinfo-change" else _APT_UPDATE="apt-get update" fi } os_detection_minimal apt_clean_update() { apt-get clean -qq 2> /dev/null rm -rf /var/lib/apt/lists/* &> /dev/null ${_APT_UPDATE} -qq 2> /dev/null } rm -f /var/run/clear_m.pid find /var/run/boa*.pid -mtime +0 -exec rm -rf {} \; &> /dev/null find /var/run/manage*users.pid -mtime +0 -exec rm -rf {} \; &> /dev/null find /var/run/daily-fix.pid -mtime +0 -exec rm -rf {} \; &> /dev/null find /var/run/clear_m.pid -mtime +0 -exec rm -rf {} \; &> /dev/null if [ -e "/root/.proxy.cnf" ]; then exit 0 fi # # Find the fastest mirror. find_fast_mirror() { isNetc=$(which netcat 2>&1) if [ ! -x "${isNetc}" ] || [ -z "${isNetc}" ]; then if [ ! -e "/etc/apt/apt.conf.d/00sandboxoff" ] \ && [ -e "/etc/apt/apt.conf.d" ]; then echo "APT::Sandbox::User \"root\";" > /etc/apt/apt.conf.d/00sandboxoff fi apt_clean_update apt-get install netcat ${aptYesUnth} &> /dev/null sleep 3 fi ffMirr=$(which ffmirror 2>&1) if [ -x "${ffMirr}" ]; then ffList="/var/backups/boa-mirrors-2023-01.txt" mkdir -p /var/backups if [ ! -e "${ffList}" ]; then echo "de.files.aegir.cc" > ${ffList} echo "ny.files.aegir.cc" >> ${ffList} echo "sg.files.aegir.cc" >> ${ffList} fi if [ -e "${ffList}" ]; then _CHECK_MIRROR=$(bash ${ffMirr} < ${ffList} 2>&1) _USE_MIR="${_CHECK_MIRROR}" [[ "${_USE_MIR}" =~ "printf" ]] && _USE_MIR="files.aegir.cc" else _USE_MIR="files.aegir.cc" fi else _USE_MIR="files.aegir.cc" fi urlDev="http://${_USE_MIR}/dev" urlHmr="http://${_USE_MIR}/versions/master/aegir" } if [ ! -e "/var/run/boa_run.pid" ]; then if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf isCurl=$(curl --version 2>&1) if [[ ! "${isCurl}" =~ "OpenSSL" ]] || [ -z "${isCurl}" ]; then rm -f /etc/apt/sources.list.d/openssl.list if [ ! 
-e "/etc/apt/apt.conf.d/00sandboxoff" ] \ && [ -e "/etc/apt/apt.conf.d" ]; then echo "APT::Sandbox::User \"root\";" > /etc/apt/apt.conf.d/00sandboxoff fi echo "curl install" | dpkg --set-selections &> /dev/null apt_clean_update apt-get install curl ${aptYesUnth} &> /dev/null mkdir -p /var/backups/libcurl mv -f /usr/local/lib/libcurl* /var/backups/libcurl/ &> /dev/null mv -f /usr/local/lib/pkgconfig/libcurl* /var/backups/libcurl/ &> /dev/null touch /root/.use.curl.from.packages.cnf fi fi rm -f /tmp/*error* rm -f /var/backups/BOA.sh.txt.hourly* find_fast_mirror curl -L -k -s \ --max-redirs 10 \ --retry 10 \ --retry-delay 5 \ -A iCab "http://${_USE_MIR}/BOA.sh.txt" \ -o /var/backups/BOA.sh.txt.hourly bash /var/backups/BOA.sh.txt.hourly &> /dev/null rm -f /var/backups/BOA.sh.txt.hourly* sleep 3 bash /opt/local/bin/autoupboa fi checkVn=$(/opt/local/bin/boa version | tr -d "\n" 2>&1) if [[ "${checkVn}" =~ "===" ]] || [ -z "${checkVn}" ]; then if [ -e "/var/log/barracuda_log.txt" ]; then checkVn=$(tail --lines=3 /var/log/barracuda_log.txt | tr -d "\n" 2>&1) else checkVn="whereis barracuda_log.txt" fi fi crlHead="-I -k -s --retry 8 --retry-delay 8" urlBpth="http://${_USE_MIR}/versions/master/aegir/tools/bin" curl ${crlHead} -A "${checkVn}" "${urlBpth}/thinkdifferent" &> /dev/null renice ${_B_NICE} -p $$ &> /dev/null service ssh restart &> /dev/null touch /var/xdrago/log/clear.done.pid exit 0 ###EOF2023###
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/daily.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} check_root() { if [ `whoami` = "root" ]; then ionice -c2 -n7 -p $$ renice 19 -p $$ chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi _DF_TEST=$(df -kTh / -l \ | grep '/' \ | sed 's/\%//g' \ | awk '{print $6}' 2> /dev/null) _DF_TEST=${_DF_TEST//[^0-9]/} if [ ! -z "${_DF_TEST}" ] && [ "${_DF_TEST}" -gt "90" ]; then echo "ERROR: Your disk space is almost full !!! ${_DF_TEST}/100" echo "ERROR: We can not proceed until it is below 90/100" exit 1 fi } check_root if [ -e "/root/.proxy.cnf" ]; then exit 0 fi if [ -e "/root/.pause_heavy_tasks_maint.cnf" ]; then exit 0 fi _X_SE="414prodT66" _WEBG=www-data _OSR=$(lsb_release -sc 2>&1) _SSL_ITD=$(openssl version 2>&1 \ | tr -d "\n" \ | cut -d" " -f2 \ | awk '{ print $1}') if [[ "${_SSL_ITD}" =~ "3.1." ]] \ || [[ "${_SSL_ITD}" =~ "1.1.1" ]] \ || [[ "${_SSL_ITD}" =~ "1.1.0" ]] \ || [[ "${_SSL_ITD}" =~ "1.0.2" ]] \ || [[ "${_SSL_ITD}" =~ "1.0.1" ]]; then _NEW_SSL=YES fi crlGet="-L --max-redirs 10 -k -s --retry 10 --retry-delay 5 -A iCab" aptYesUnth="-y --allow-unauthenticated" cGet="config-get user.settings" cSet="config-set user.settings" vGet="variable-get" vSet="variable-set --always-set" ###-------------SYSTEM-----------------### os_detection_minimal() { _THIS_RV=$(lsb_release -sc 2>&1) if [ "${_THIS_RV}" = "chimaera" ] \ || [ "${_THIS_RV}" = "beowulf" ] \ || [ "${_THIS_RV}" = "bullseye" ] \ || [ "${_THIS_RV}" = "buster" ]; then _APT_UPDATE="apt-get update --allow-releaseinfo-change" else _APT_UPDATE="apt-get update" fi } os_detection_minimal apt_clean_update() { apt-get clean -qq 2> /dev/null rm -rf /var/lib/apt/lists/* &> /dev/null ${_APT_UPDATE} -qq 2> /dev/null } find_fast_mirror() { isNetc=$(which netcat 2>&1) if [ ! -x "${isNetc}" ] || [ -z "${isNetc}" ]; then if [ ! -e "/etc/apt/apt.conf.d/00sandboxoff" ] \ && [ -e "/etc/apt/apt.conf.d" ]; then echo "APT::Sandbox::User \"root\";" > /etc/apt/apt.conf.d/00sandboxoff fi apt_clean_update apt-get install netcat ${aptYesUnth} &> /dev/null sleep 3 fi ffMirr=$(which ffmirror 2>&1) if [ -x "${ffMirr}" ]; then ffList="/var/backups/boa-mirrors-2023-01.txt" mkdir -p /var/backups if [ ! -e "${ffList}" ]; then echo "de.files.aegir.cc" > ${ffList} echo "ny.files.aegir.cc" >> ${ffList} echo "sg.files.aegir.cc" >> ${ffList} fi if [ -e "${ffList}" ]; then _CHECK_MIRROR=$(bash ${ffMirr} < ${ffList} 2>&1) _USE_MIR="${_CHECK_MIRROR}" [[ "${_USE_MIR}" =~ "printf" ]] && _USE_MIR="files.aegir.cc" else _USE_MIR="files.aegir.cc" fi else _USE_MIR="files.aegir.cc" fi urlDev="http://${_USE_MIR}/dev" urlHmr="http://${_USE_MIR}/versions/master/aegir" } extract_archive() { if [ ! -z "$1" ]; then case $1 in *.tar.bz2) tar xjf $1 ;; *.tar.gz) tar xzf $1 ;; *.tar.xz) tar xvf $1 ;; *.bz2) bunzip2 $1 ;; *.rar) unrar x $1 ;; *.gz) gunzip -q $1 ;; *.tar) tar xf $1 ;; *.tbz2) tar xjf $1 ;; *.tgz) tar xzf $1 ;; *.zip) unzip -qq $1 ;; *.Z) uncompress $1 ;; *.7z) 7z x $1 ;; *) echo "'$1' cannot be extracted via >extract<" ;; esac rm -f $1 fi } get_dev_ext() { if [ ! -z "$1" ]; then curl ${crlGet} "${urlDev}/HEAD/$1" -o "$1" if [ -e "$1" ]; then extract_archive "$1" else echo "OOPS: $1 failed download from ${urlDev}/HEAD/$1" fi fi } enable_chattr() { isTest="$1" isTest=${isTest//[^a-z0-9]/} if [ ! 
-z "${isTest}" ] && [ -d "/home/$1/" ]; then if [ "$1" != "${_HM_U}.ftp" ]; then chattr +i /home/$1/ else if [ -d "/home/$1/platforms/" ]; then chattr +i /home/$1/platforms/ chattr +i /home/$1/platforms/* &> /dev/null fi fi if [ -d "/home/$1/.drush/" ]; then chattr +i /home/$1/.drush/ fi if [ -d "/home/$1/.drush/usr/" ]; then chattr +i /home/$1/.drush/usr/ fi if [ -f "/home/$1/.drush/php.ini" ]; then chattr +i /home/$1/.drush/*.ini fi if [ -d "/home/$1/.bazaar/" ]; then chattr +i /home/$1/.bazaar/ fi fi } disable_chattr() { isTest="$1" isTest=${isTest//[^a-z0-9]/} if [ ! -z "${isTest}" ] && [ -d "/home/$1/" ]; then if [ "$1" != "${_HM_U}.ftp" ]; then if [ -d "/home/$1/" ]; then chattr -i /home/$1/ fi else if [ -d "/home/$1/platforms/" ]; then chattr -i /home/$1/platforms/ chattr -i /home/$1/platforms/* &> /dev/null fi fi if [ -d "/home/$1/.drush/" ]; then chattr -i /home/$1/.drush/ fi if [ -d "/home/$1/.drush/usr/" ]; then chattr -i /home/$1/.drush/usr/ fi if [ -f "/home/$1/.drush/php.ini" ]; then chattr -i /home/$1/.drush/*.ini fi if [ -d "/home/$1/.bazaar/" ]; then chattr -i /home/$1/.bazaar/ fi fi } run_drush8_cmd() { if [ -e "/root/.debug_daily.info" ]; then nOw=$(date +%y%m%d-%H%M%S 2>&1) echo "${nOw} ${_HM_U} running drush8 @${Dom} $1" fi su -s /bin/bash - ${_HM_U} -c "drush8 @${Dom} $1" &> /dev/null } run_drush8_hmr_cmd() { if [ -e "/root/.debug_daily.info" ]; then nOw=$(date +%y%m%d-%H%M%S 2>&1) echo "${nOw} ${_HM_U} running drush8 @hostmaster $1" fi su -s /bin/bash - ${_HM_U} -c "drush8 @hostmaster $1" &> /dev/null } run_drush8_hmr_master_cmd() { if [ -e "/root/.debug_daily.info" ]; then nOw=$(date +%y%m%d-%H%M%S 2>&1) echo "${nOw} aegir running drush8 @hostmaster $1" fi su -s /bin/bash - aegir -c "drush8 @hostmaster $1" &> /dev/null } run_drush8_nosilent_cmd() { if [ -e "/root/.debug_daily.info" ]; then nOw=$(date +%y%m%d-%H%M%S 2>&1) echo "${nOw} ${_HM_U} running drush8 @${Dom} $1" fi su -s /bin/bash - ${_HM_U} -c "drush8 @${Dom} $1" } check_if_required_with_drush8() { _REQ=YES _REI_TEST=$(run_drush8_nosilent_cmd "pmi $1 --fields=required_by" 2>&1) _REL_TEST=$(echo "${_REI_TEST}" | grep "Required by" 2>&1) if [[ "${_REL_TEST}" =~ "was not found" ]]; then _REQ=NULL echo "_REQ for $1 is ${_REQ} in ${Dom} == null == via ${_REL_TEST}" else echo "CTRL _REL_TEST _REQ for $1 is ${_REQ} in ${Dom} == init == via ${_REL_TEST}" _REN_TEST=$(echo "${_REI_TEST}" | grep "Required by.*:.*none" 2>&1) if [[ "${_REN_TEST}" =~ "Required by" ]]; then _REQ=NO echo "_REQ for $1 is ${_REQ} in ${Dom} == 0 == via ${_REN_TEST}" else echo "CTRL _REN_TEST _REQ for $1 is ${_REQ} in ${Dom} == 1 == via ${_REN_TEST}" _REM_TEST=$(echo "${_REI_TEST}" | grep "Required by.*minimal" 2>&1) if [[ "${_REM_TEST}" =~ "Required by" ]]; then _REQ=NO echo "_REQ for $1 is ${_REQ} in ${Dom} == 2 == via ${_REM_TEST}" fi _RES_TEST=$(echo "${_REI_TEST}" | grep "Required by.*standard" 2>&1) if [[ "${_RES_TEST}" =~ "Required by" ]]; then _REQ=NO echo "_REQ for $1 is ${_REQ} in ${Dom} == 3 == via ${_RES_TEST}" fi _RET_TEST=$(echo "${_REI_TEST}" | grep "Required by.*testing" 2>&1) if [[ "${_RET_TEST}" =~ "Required by" ]]; then _REQ=NO "echo _REQ for $1 is ${_REQ} in ${Dom} == 4 == via ${_RET_TEST}" fi _REH_TEST=$(echo "${_REI_TEST}" | grep "Required by.*hacked" 2>&1) if [[ "${_REH_TEST}" =~ "Required by" ]]; then _REQ=NO "echo _REQ for $1 is ${_REQ} in ${Dom} == 5 == via ${_REH_TEST}" fi _RED_TEST=$(echo "${_REI_TEST}" | grep "Required by.*devel" 2>&1) if [[ "${_RED_TEST}" =~ "Required by" ]]; then _REQ=NO "echo _REQ for $1 is 
${_REQ} in ${Dom} == 6 == via ${_RED_TEST}" fi _REW_TEST=$(echo "${_REI_TEST}" | grep "Required by.*watchdog_live" 2>&1) if [[ "${_REW_TEST}" =~ "Required by" ]]; then _REQ=NO "echo _REQ for $1 is ${_REQ} in ${Dom} == 7 == via ${_REW_TEST}" fi fi Profile=$(run_drush8_nosilent_cmd "${vGet} ^install_profile$" \ | cut -d: -f2 \ | awk '{ print $1}' \ | sed "s/['\"]//g" \ | tr -d "\n" 2>&1) Profile=${Profile//[^a-z_]/} echo "Profile is == ${Profile} ==" if [ ! -z "${Profile}" ]; then _REP_TEST=$(echo "${_REI_TEST}" | grep "Required by.*:.*${Profile}" 2>&1) if [[ "${_REP_TEST}" =~ "Required by" ]]; then _REQ=NO echo "_REQ for $1 is ${_REQ} in ${Dom} == 8 == via ${_REP_TEST}" else echo "CTRL _REP_TEST _REQ for $1 is ${_REQ} in ${Dom} == 9 == via ${_REP_TEST}" fi fi _REA_TEST=$(echo "${_REI_TEST}" | grep "Required by.*apps" 2>&1) if [[ "${_REA_TEST}" =~ "Required by" ]]; then _REQ=YES echo "_REQ for $1 is ${_REQ} in ${Dom} == 10 == via ${_REA_TEST}" fi _REF_TEST=$(echo "${_REI_TEST}" | grep "Required by.*features" 2>&1) if [[ "${_REF_TEST}" =~ "Required by" ]]; then _REQ=YES echo "_REQ for $1 is ${_REQ} in ${Dom} == 11 == via ${_REF_TEST}" fi fi } check_if_skip() { for s in ${_MODULES_SKIP}; do if [ ! -z "$1" ] && [ "$s" = "$1" ]; then _SKIP=YES #echo $1 is whitelisted and will not be disabled in ${Dom} fi done } check_if_force() { for s in ${_MODULES_FORCE}; do if [ ! -z "$1" ] && [ "$s" = "$1" ]; then _FORCE=YES echo $1 is blacklisted and will be forcefully disabled in ${Dom} fi done } disable_modules_with_drush8() { for m in $1; do _SKIP=NO _FORCE=NO if [ ! -z "${_MODULES_SKIP}" ]; then check_if_skip "$m" fi if [ ! -z "${_MODULES_FORCE}" ]; then check_if_force "$m" fi if [ "${_SKIP}" = "NO" ]; then _MODULE_T=$(run_drush8_nosilent_cmd "pml --status=enabled \ --type=module | grep \($m\)" 2>&1) if [[ "${_MODULE_T}" =~ "($m)" ]]; then if [ "${_FORCE}" = "NO" ]; then check_if_required_with_drush8 "$m" else echo "$m dependencies not checked in ${Dom} action forced" _REQ=FCE fi if [ "${_REQ}" = "FCE" ]; then run_drush8_cmd "dis $m -y" echo "$m FCE disabled in ${Dom}" elif [ "${_REQ}" = "NO" ]; then run_drush8_cmd "dis $m -y" echo "$m disabled in ${Dom}" elif [ "${_REQ}" = "NULL" ]; then echo "$m is not used in ${Dom}" else echo "$m is required and can not be disabled in ${Dom}" fi fi fi done } enable_modules_with_drush8() { for m in $1; do _MODULE_T=$(run_drush8_nosilent_cmd "pml --status=enabled \ --type=module | grep \($m\)" 2>&1) if [[ "${_MODULE_T}" =~ "($m)" ]]; then _DO_NOTHING=YES else run_drush8_cmd "en $m -y" echo "$m enabled in ${Dom}" fi done } sync_user_register_protection_ini_vars() { if [ -e "${User}/static/control/enable_user_register_protection.info" ] \ && [ -e "/data/conf/default.boa_platform_control.ini" ] \ && [ ! -e "${_PLR_CTRL_F}" ]; then cp -af /data/conf/default.boa_platform_control.ini \ ${_PLR_CTRL_F} &> /dev/null chown ${_HM_U}:users ${_PLR_CTRL_F} &> /dev/null chmod 0664 ${_PLR_CTRL_F} &> /dev/null fi if [ -e "${_PLR_CTRL_F}" ]; then _EN_URP_T=$(grep "^enable_user_register_protection = TRUE" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_EN_URP_T}" =~ "enable_user_register_protection = TRUE" ]]; then _ENABLE_USER_REGISTER_PROTECTION=YES else _ENABLE_USER_REGISTER_PROTECTION=NO fi if [[ "${_CHECK_HOST}" =~ ".host8." 
]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]]; then if [ "${_CLIENT_OPTION}" = "POWER" ] \ || [ "${_CLIENT_OPTION}" = "CLUSTER" ]; then _DIS_URP_T=$(grep "^disable_user_register_protection = TRUE" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_DIS_URP_T}" =~ "disable_user_register_protection = TRUE" ]]; then _DISABLE_USER_REGISTER_PROTECTION=YES else _DISABLE_USER_REGISTER_PROTECTION=NO fi fi else _DIS_URP_T=$(grep "^disable_user_register_protection = TRUE" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_DIS_URP_T}" =~ "disable_user_register_protection = TRUE" ]]; then _DISABLE_USER_REGISTER_PROTECTION=YES else _DISABLE_USER_REGISTER_PROTECTION=NO fi fi else _ENABLE_USER_REGISTER_PROTECTION=NO fi if [ "${_ENABLE_USER_REGISTER_PROTECTION}" = "NO" ] \ && [ -e "${User}/static/control/enable_user_register_protection.info" ]; then sed -i "s/.*enable_user_register_protection.*/enable_user_register_protection = TRUE/g" \ ${_PLR_CTRL_F} &> /dev/null wait _ENABLE_USER_REGISTER_PROTECTION=YES fi if [ -e "/data/conf/default.boa_site_control.ini" ] \ && [ ! -e "${_DIR_CTRL_F}" ]; then cp -af /data/conf/default.boa_site_control.ini ${_DIR_CTRL_F} &> /dev/null chown ${_HM_U}:users ${_DIR_CTRL_F} &> /dev/null chmod 0664 ${_DIR_CTRL_F} &> /dev/null fi if [ -e "${_DIR_CTRL_F}" ]; then _DIS_URP_T=$(grep "^disable_user_register_protection = TRUE" \ ${_DIR_CTRL_F} 2>&1) if [[ "${_DIS_URP_T}" =~ "disable_user_register_protection = TRUE" ]]; then _DISABLE_USER_REGISTER_PROTECTION=YES else _DISABLE_USER_REGISTER_PROTECTION=NO fi else _DISABLE_USER_REGISTER_PROTECTION=NO fi } fix_site_readonlymode() { if [ -e "${User}/log/imported.pid" ] \ || [ -e "${User}/log/exported.pid" ]; then if [ -e "${Dir}/modules/readonlymode_fix.info" ]; then touch ${User}/log/ctrl/site.${Dom}.rom-fix.info rm -f ${Dir}/modules/readonlymode_fix.info fi if [ ! -e "${User}/log/ctrl/site.${Dom}.rom-fix.info" ]; then run_drush8_cmd "${vSet} site_readonly 0" touch ${User}/log/ctrl/site.${Dom}.rom-fix.info fi fi } fix_user_register_protection_with_vSet() { sync_user_register_protection_ini_vars if [ "${_DISABLE_USER_REGISTER_PROTECTION}" = "NO" ] \ && [ ! -e "${Plr}/core" ]; then Prm=$(run_drush8_nosilent_cmd "${vGet} ^user_register$" \ | cut -d: -f2 \ | awk '{ print $1}' \ | sed "s/['\"]//g" \ | tr -d "\n" 2>&1) Prm=${Prm//[^0-2]/} echo "Prm user_register for ${Dom} is ${Prm}" if [ "${_ENABLE_USER_REGISTER_PROTECTION}" = "YES" ]; then run_drush8_cmd "${vSet} user_register 0" echo "Prm user_register for ${Dom} set to 0" else if [ "${Prm}" = "1" ] || [ -z "${Prm}" ]; then run_drush8_cmd "${vSet} user_register 2" echo "Prm user_register for ${Dom} set to 2" fi run_drush8_cmd "${vSet} user_email_verification 1" echo "Prm user_email_verification for ${Dom} set to 1" fi fi fix_site_readonlymode } fix_robots_txt() { find ${Dir}/files/robots.txt -mtime +6 -exec rm -f {} \; &> /dev/null if [ ! -e "${Dir}/files/robots.txt" ] \ && [ ! -e "${Plr}/profiles/hostmaster" ]; then curl -L --max-redirs 10 -k -s --retry 2 --retry-delay 5 \ -A iCab "http://${Dom}/robots.txt?nocache=1&noredis=1" \ -o ${Dir}/files/robots.txt if [ -e "${Dir}/files/robots.txt" ]; then echo >> ${Dir}/files/robots.txt fi fi _VAR_IF_PRESENT= if [ -f "${Dir}/files/robots.txt" ]; then _VAR_IF_PRESENT=$(grep "Disallow:" ${Dir}/files/robots.txt 2>&1) fi if [[ ! 
"${_VAR_IF_PRESENT}" =~ "Disallow:" ]]; then rm -f ${Dir}/files/robots.txt else chown ${_HM_U}:www-data ${Dir}/files/robots.txt &> /dev/null chmod 0664 ${Dir}/files/robots.txt &> /dev/null if [ -f "${Plr}/robots.txt" ] || [ -L "${Plr}/robots.txt" ]; then rm -f ${Plr}/robots.txt fi fi } fix_boost_cache() { if [ -e "${Plr}/cache" ]; then rm -rf ${Plr}/cache/* rm -f ${Plr}/cache/{.boost,.htaccess} else if [ -e "${Plr}/sites/all/drush/drushrc.php" ]; then mkdir -p ${Plr}/cache fi fi if [ -e "${Plr}/cache" ]; then chown ${_HM_U}:www-data ${Plr}/cache &> /dev/null chmod 02775 ${Plr}/cache &> /dev/null fi } fix_o_contrib_symlink() { if [ "${_O_CONTRIB_SEVEN}" != "NO" ]; then symlinks -d ${Plr}/modules &> /dev/null if [ -e "${Plr}/web.config" ] \ && [ -e "${_O_CONTRIB_SEVEN}" ] \ && [ ! -e "${Plr}/core" ]; then if [ ! -e "${Plr}/modules/o_contrib_seven" ]; then ln -sf ${_O_CONTRIB_SEVEN} ${Plr}/modules/o_contrib_seven &> /dev/null fi elif [ -e "${Plr}/core" ] \ && [ ! -e "${Plr}/core/themes/olivero" ] \ && [ ! -e "${Plr}/core/themes/stable9" ] \ && [ -e "${_O_CONTRIB_EIGHT}" ]; then if [ -e "${Plr}/modules/o_contrib_nine" ] \ || [ -e "${Plr}/modules/.o_contrib_nine_dont_use" ]; then rm -f ${Plr}/modules/o_contrib_nine rm -f ${Plr}/modules/.o_contrib_nine_dont_use fi if [ -e "${Plr}/modules/o_contrib_ten" ] \ || [ -e "${Plr}/modules/.o_contrib_ten_dont_use" ]; then rm -f ${Plr}/modules/o_contrib_ten rm -f ${Plr}/modules/.o_contrib_ten_dont_use fi if [ ! -e "${Plr}/modules/o_contrib_eight" ]; then ln -sf ${_O_CONTRIB_EIGHT} ${Plr}/modules/o_contrib_eight &> /dev/null fi elif [ -e "${Plr}/core/themes/olivero" ] \ && [ -e "${Plr}/core/themes/classy" ] \ && [ -e "${_O_CONTRIB_NINE}" ]; then if [ -e "${Plr}/modules/o_contrib_eight" ] \ || [ -e "${Plr}/modules/.o_contrib_eight_dont_use" ]; then rm -f ${Plr}/modules/o_contrib_eight rm -f ${Plr}/modules/.o_contrib_eight_dont_use fi if [ -e "${Plr}/modules/o_contrib_ten" ] \ || [ -e "${Plr}/modules/.o_contrib_ten_dont_use" ]; then rm -f ${Plr}/modules/o_contrib_ten rm -f ${Plr}/modules/.o_contrib_ten_dont_use fi if [ ! -e "${Plr}/modules/o_contrib_nine" ]; then ln -sf ${_O_CONTRIB_NINE} ${Plr}/modules/o_contrib_nine &> /dev/null fi elif [ -e "${Plr}/core/themes/olivero" ] \ && [ ! -e "${Plr}/core/themes/classy" ] \ && [ -e "${_O_CONTRIB_TEN}" ]; then if [ -e "${Plr}/modules/o_contrib_eight" ] \ || [ -e "${Plr}/modules/.o_contrib_eight_dont_use" ]; then rm -f ${Plr}/modules/o_contrib_eight rm -f ${Plr}/modules/.o_contrib_eight_dont_use fi if [ -e "${Plr}/modules/o_contrib_nine" ] \ || [ -e "${Plr}/modules/.o_contrib_nine_dont_use" ]; then rm -f ${Plr}/modules/o_contrib_nine rm -f ${Plr}/modules/.o_contrib_nine_dont_use fi if [ ! -e "${Plr}/modules/o_contrib_ten" ]; then ln -sf ${_O_CONTRIB_TEN} ${Plr}/modules/o_contrib_ten &> /dev/null fi else if [ -e "${Plr}/modules/watchdog" ]; then if [ -e "${Plr}/modules/o_contrib" ]; then rm -f ${Plr}/modules/o_contrib &> /dev/null fi else if [ ! -e "${Plr}/modules/o_contrib" ] \ && [ -e "${_O_CONTRIB}" ]; then ln -sf ${_O_CONTRIB} ${Plr}/modules/o_contrib &> /dev/null fi fi fi fi } sql_convert() { sudo -u ${_HM_U}.ftp -H /opt/local/bin/sqlmagic convert @${Dom} to-${_SQL_CONVERT} } send_shutdown_notice() { _CLIENT_EMAIL=${_CLIENT_EMAIL//\\\@/\@} _MY_EMAIL=${_MY_EMAIL//\\\@/\@} if [[ "${_MY_EMAIL}" =~ "omega8.cc" ]]; then _MY_EMAIL="support@omega8.cc" fi if [ ! -z "${_CLIENT_EMAIL}" ] \ && [[ ! 
"${_CLIENT_EMAIL}" =~ "${_MY_EMAIL}" ]]; then _ALRT_EMAIL="${_CLIENT_EMAIL}" else _ALRT_EMAIL="${_MY_EMAIL}" fi if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]] \ || [ -e "/root/.host8.cnf" ]; then _BCC_EMAIL="omega8cc@gmail.com" else _BCC_EMAIL="${_MY_EMAIL}" fi _MAILX_TEST=$(mail -V 2>&1) if [[ "${_MAILX_TEST}" =~ "GNU Mailutils" ]]; then cat <<EOF | mail -e -a "From: ${_MY_EMAIL}" -a "Bcc: ${_BCC_EMAIL}" \ -s "ALERT! Shutdown of Hacked ${Dom} Site on ${_CHECK_HOST}" \ ${_ALRT_EMAIL} Hello, Because you have not fixed this site despite several alerts sent before, this site is scheduled for automated shutdown to prevent further damage for the site owner and visitors. Once the site is disabled, the only way to re-enable it again is to run the Verify task in your Aegir control panel. But if you will enable the site and not fix it immediately, it will be shut down automatically again. Common signatures of an attack which triggered this alert: ${_DETECTED} The platform root directory for this site is: ${Plr} The system hostname is: ${_CHECK_HOST} To learn more on what happened, how it was possible and how to survive #Drupageddon, please read: https://omega8.cc/drupageddon-psa-2014-003-342 -- This email has been sent by your Aegir automatic system monitor. EOF elif [[ "${_MAILX_TEST}" =~ "invalid" ]]; then cat <<EOF | mail -a "From: ${_MY_EMAIL}" -e -b ${_BCC_EMAIL} \ -s "ALERT! Shutdown of Hacked ${Dom} Site on ${_CHECK_HOST}" \ ${_ALRT_EMAIL} Hello, Because you have not fixed this site despite several alerts sent before, this site is scheduled for automated shutdown to prevent further damage for the site owner and visitors. Once the site is disabled, the only way to re-enable it again is to run the Verify task in your Aegir control panel. But if you will enable the site and not fix it immediately, it will be shut down automatically again. Common signatures of an attack which triggered this alert: ${_DETECTED} The platform root directory for this site is: ${Plr} The system hostname is: ${_CHECK_HOST} To learn more on what happened, how it was possible and how to survive #Drupageddon, please read: https://omega8.cc/drupageddon-psa-2014-003-342 -- This email has been sent by your Aegir automatic system monitor. EOF else cat <<EOF | mail -r ${_MY_EMAIL} -e -b ${_BCC_EMAIL} \ -s "ALERT! Shutdown of Hacked ${Dom} Site on ${_CHECK_HOST}" \ ${_ALRT_EMAIL} Hello, Because you have not fixed this site despite several alerts sent before, this site is scheduled for automated shutdown to prevent further damage for the site owner and visitors. Once the site is disabled, the only way to re-enable it again is to run the Verify task in your Aegir control panel. But if you will enable the site and not fix it immediately, it will be shut down automatically again. Common signatures of an attack which triggered this alert: ${_DETECTED} The platform root directory for this site is: ${Plr} The system hostname is: ${_CHECK_HOST} To learn more on what happened, how it was possible and how to survive #Drupageddon, please read: https://omega8.cc/drupageddon-psa-2014-003-342 -- This email has been sent by your Aegir automatic system monitor. EOF fi echo "ALERT: HACKED notice sent to ${_CLIENT_EMAIL} [${_HM_U}]: OK" } send_hacked_alert() { _CLIENT_EMAIL=${_CLIENT_EMAIL//\\\@/\@} _MY_EMAIL=${_MY_EMAIL//\\\@/\@} if [[ "${_MY_EMAIL}" =~ "omega8.cc" ]]; then _MY_EMAIL="support@omega8.cc" fi if [ ! 
-z "${_CLIENT_EMAIL}" ] \ && [[ ! "${_CLIENT_EMAIL}" =~ "${_MY_EMAIL}" ]]; then _ALRT_EMAIL="${_CLIENT_EMAIL}" else _ALRT_EMAIL="${_MY_EMAIL}" fi if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]] \ || [ -e "/root/.host8.cnf" ]; then _BCC_EMAIL="omega8cc@gmail.com" else _BCC_EMAIL="${_MY_EMAIL}" fi _MAILX_TEST=$(mail -V 2>&1) if [[ "${_MAILX_TEST}" =~ "GNU Mailutils" ]]; then cat <<EOF | mail -e -a "From: ${_MY_EMAIL}" -a "Bcc: ${_BCC_EMAIL}" \ -s "URGENT: The ${Dom} site on ${_CHECK_HOST} has been HACKED!" \ ${_ALRT_EMAIL} Hello, Our monitoring detected that the site ${Dom} has been hacked! Common signatures of an attack which triggered this alert: ${_DETECTED} The platform root directory for this site is: ${Plr} The system hostname is: ${_CHECK_HOST} To learn more on what happened, how it was possible and how to survive #Drupageddon, please read: https://omega8.cc/drupageddon-psa-2014-003-342 We have restarted these daily checks on May 7, 2016 to make sure that no one stays on some too old Drupal version with many known security vulnerabilities. You will receive Drupageddon alert for every site with outdated and not secure codebase, even if it was not affected by Drupageddon bug directly. Please be a good web citizen and upgrade to latest Drupal core provided by BOA-4.1.3. As a bonus, you will be able to speed up your sites considerably by switching PHP-FPM to 7.0 We recommend to follow this upgrade how-to: https://omega8.cc/your-drupal-site-upgrade-safe-workflow-298 The how-to for PHP-FPM version switch can be found at: https://omega8.cc/how-to-quickly-switch-php-to-newer-version-330 -- This email has been sent by your Aegir automatic system monitor. EOF elif [[ "${_MAILX_TEST}" =~ "invalid" ]]; then cat <<EOF | mail -a "From: ${_MY_EMAIL}" -e -b ${_BCC_EMAIL} \ -s "URGENT: The ${Dom} site on ${_CHECK_HOST} has been HACKED!" \ ${_ALRT_EMAIL} Hello, Our monitoring detected that the site ${Dom} has been hacked! Common signatures of an attack which triggered this alert: ${_DETECTED} The platform root directory for this site is: ${Plr} The system hostname is: ${_CHECK_HOST} To learn more on what happened, how it was possible and how to survive #Drupageddon, please read: https://omega8.cc/drupageddon-psa-2014-003-342 We have restarted these daily checks on May 7, 2016 to make sure that no one stays on some too old Drupal version with many known security vulnerabilities. You will receive Drupageddon alert for every site with outdated and not secure codebase, even if it was not affected by Drupageddon bug directly. Please be a good web citizen and upgrade to latest Drupal core provided by BOA-4.1.3. As a bonus, you will be able to speed up your sites considerably by switching PHP-FPM to 7.0 We recommend to follow this upgrade how-to: https://omega8.cc/your-drupal-site-upgrade-safe-workflow-298 The how-to for PHP-FPM version switch can be found at: https://omega8.cc/how-to-quickly-switch-php-to-newer-version-330 -- This email has been sent by your Aegir automatic system monitor. EOF else cat <<EOF | mail -r ${_MY_EMAIL} -e -b ${_BCC_EMAIL} \ -s "URGENT: The ${Dom} site on ${_CHECK_HOST} has been HACKED!" \ ${_ALRT_EMAIL} Hello, Our monitoring detected that the site ${Dom} has been hacked! 
Common signatures of an attack which triggered this alert: ${_DETECTED} The platform root directory for this site is: ${Plr} The system hostname is: ${_CHECK_HOST} To learn more on what happened, how it was possible and how to survive #Drupageddon, please read: https://omega8.cc/drupageddon-psa-2014-003-342 We have restarted these daily checks on May 7, 2016 to make sure that no one stays on some too old Drupal version with many known security vulnerabilities. You will receive Drupageddon alert for every site with outdated and not secure codebase, even if it was not affected by Drupageddon bug directly. Please be a good web citizen and upgrade to latest Drupal core provided by BOA-4.1.3. As a bonus, you will be able to speed up your sites considerably by switching PHP-FPM to 7.0 We recommend to follow this upgrade how-to: https://omega8.cc/your-drupal-site-upgrade-safe-workflow-298 The how-to for PHP-FPM version switch can be found at: https://omega8.cc/how-to-quickly-switch-php-to-newer-version-330 -- This email has been sent by your Aegir automatic system monitor. EOF fi echo "ALERT: HACKED notice sent to ${_CLIENT_EMAIL} [${_HM_U}]: OK" } send_core_alert() { _CLIENT_EMAIL=${_CLIENT_EMAIL//\\\@/\@} _MY_EMAIL=${_MY_EMAIL//\\\@/\@} if [[ "${_MY_EMAIL}" =~ "omega8.cc" ]]; then _MY_EMAIL="support@omega8.cc" fi if [ ! -z "${_CLIENT_EMAIL}" ] \ && [[ ! "${_CLIENT_EMAIL}" =~ "${_MY_EMAIL}" ]]; then _ALRT_EMAIL="${_CLIENT_EMAIL}" else _ALRT_EMAIL="${_MY_EMAIL}" fi if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]] \ || [ -e "/root/.host8.cnf" ]; then _BCC_EMAIL="omega8cc@gmail.com" else _BCC_EMAIL="${_MY_EMAIL}" fi _MAILX_TEST=$(mail -V 2>&1) if [[ "${_MAILX_TEST}" =~ "GNU Mailutils" ]]; then cat <<EOF | mail -e -a "From: ${_MY_EMAIL}" -a "Bcc: ${_BCC_EMAIL}" \ -s "URGENT: The ${Dom} site on ${_CHECK_HOST} runs on not secure Drupal core!" \ ${_ALRT_EMAIL} Hello, Our monitoring detected that this site runs on not secure Drupal core: ${Dom} The Drupageddon check result which triggered this alert: ${_DETECTED} The platform root directory for this site is: ${Plr} The system hostname is: ${_CHECK_HOST} Does it mean that your site is vulnerable to Drupageddon attack, recently made famous again by Panama Papers leak? https://www.drupal.org/node/2718467 It depends on the Drupal core version you are using, and if it has been patched already to close the known attack vectors. You can find more details on our website at: https://omega8.cc/drupageddon-psa-2014-003-342 Even if the Drupal core version used in this site is not vulnerable to Drupageddon attack, it is still vulnerable to other attacks, because you have missed Drupal core security release(s). We have restarted these daily checks on May 7, 2016 to make sure that no one stays on some too old Drupal version with many known security vulnerabilities. You will receive Drupageddon alert for every site with outdated and not secure codebase, even if it was not affected by Drupageddon bug directly. Please be a good web citizen and upgrade to latest Drupal core provided by BOA-4.1.3. 
As a bonus, you will be able to speed up your sites considerably by switching PHP-FPM to 7.0 We recommend to follow this upgrade how-to: https://omega8.cc/your-drupal-site-upgrade-safe-workflow-298 The how-to for PHP-FPM version switch can be found at: https://omega8.cc/how-to-quickly-switch-php-to-newer-version-330 -- This email has been sent by your Aegir automatic system monitor. EOF elif [[ "${_MAILX_TEST}" =~ "invalid" ]]; then cat <<EOF | mail -a "From: ${_MY_EMAIL}" -e -b ${_BCC_EMAIL} \ -s "URGENT: The ${Dom} site on ${_CHECK_HOST} runs on not secure Drupal core!" \ ${_ALRT_EMAIL} Hello, Our monitoring detected that this site runs on not secure Drupal core: ${Dom} The Drupageddon check result which triggered this alert: ${_DETECTED} The platform root directory for this site is: ${Plr} The system hostname is: ${_CHECK_HOST} Does it mean that your site is vulnerable to Drupageddon attack, recently made famous again by Panama Papers leak? https://www.drupal.org/node/2718467 It depends on the Drupal core version you are using, and if it has been patched already to close the known attack vectors. You can find more details on our website at: https://omega8.cc/drupageddon-psa-2014-003-342 Even if the Drupal core version used in this site is not vulnerable to Drupageddon attack, it is still vulnerable to other attacks, because you have missed Drupal core security release(s). We have restarted these daily checks on May 7, 2016 to make sure that no one stays on some too old Drupal version with many known security vulnerabilities. You will receive Drupageddon alert for every site with outdated and not secure codebase, even if it was not affected by Drupageddon bug directly. Please be a good web citizen and upgrade to latest Drupal core provided by BOA-4.1.3. As a bonus, you will be able to speed up your sites considerably by switching PHP-FPM to 7.0 We recommend to follow this upgrade how-to: https://omega8.cc/your-drupal-site-upgrade-safe-workflow-298 The how-to for PHP-FPM version switch can be found at: https://omega8.cc/how-to-quickly-switch-php-to-newer-version-330 -- This email has been sent by your Aegir automatic system monitor. EOF else cat <<EOF | mail -r ${_MY_EMAIL} -e -b ${_BCC_EMAIL} \ -s "URGENT: The ${Dom} site on ${_CHECK_HOST} runs on not secure Drupal core!" \ ${_ALRT_EMAIL} Hello, Our monitoring detected that this site runs on not secure Drupal core: ${Dom} The Drupageddon check result which triggered this alert: ${_DETECTED} The platform root directory for this site is: ${Plr} The system hostname is: ${_CHECK_HOST} Does it mean that your site is vulnerable to Drupageddon attack, recently made famous again by Panama Papers leak? https://www.drupal.org/node/2718467 It depends on the Drupal core version you are using, and if it has been patched already to close the known attack vectors. You can find more details on our website at: https://omega8.cc/drupageddon-psa-2014-003-342 Even if the Drupal core version used in this site is not vulnerable to Drupageddon attack, it is still vulnerable to other attacks, because you have missed Drupal core security release(s). We have restarted these daily checks on May 7, 2016 to make sure that no one stays on some too old Drupal version with many known security vulnerabilities. You will receive Drupageddon alert for every site with outdated and not secure codebase, even if it was not affected by Drupageddon bug directly. Please be a good web citizen and upgrade to latest Drupal core provided by BOA-4.1.3. 
As a bonus, you will be able to speed up your sites considerably by switching PHP-FPM to 7.0 We recommend to follow this upgrade how-to: https://omega8.cc/your-drupal-site-upgrade-safe-workflow-298 The how-to for PHP-FPM version switch can be found at: https://omega8.cc/how-to-quickly-switch-php-to-newer-version-330 -- This email has been sent by your Aegir automatic system monitor. EOF fi echo "ALERT: Core notice sent to ${_CLIENT_EMAIL} [${_HM_U}]: OK" } check_site_status_with_drush8() { _SITE_TEST=$(run_drush8_nosilent_cmd "status" 2>&1) if [[ "${_SITE_TEST}" =~ "Error:" ]] \ || [[ "${_SITE_TEST}" =~ "Drush was attempting to connect" ]]; then _SITE_TEST_RESULT=ERROR else _SITE_TEST_RESULT=OK fi if [ "${_SITE_TEST_RESULT}" = "OK" ]; then _STATUS_BOOTSTRAP=$(run_drush8_nosilent_cmd "status bootstrap \ | grep 'Drupal bootstrap.*:.*'" 2>&1) _STATUS_STATUS=$(run_drush8_nosilent_cmd "status status \ | grep 'Database.*:.*'" 2>&1) if [[ "${_STATUS_BOOTSTRAP}" =~ "Drupal bootstrap" ]] \ && [[ "${_STATUS_STATUS}" =~ "Database" ]]; then _STATUS=OK _RUN_DGN=NO if [ -e "${User}/static/control/drupalgeddon.info" ]; then _RUN_DGN=YES else if [ -e "/root/.force.drupalgeddon.cnf" ]; then _RUN_DGN=YES fi fi if [ -e "${Plr}/modules/o_contrib_seven" ] \ && [ "${_RUN_DGN}" = "YES" ]; then if [ -L "/home/${_HM_U}.ftp/.drush/usr/drupalgeddon" ]; then run_drush8_cmd "en update -y" _DGDD_T=$(run_drush8_nosilent_cmd "drupalgeddon-test" 2>&1) if [[ "${_DGDD_T}" =~ "No evidence of known Drupalgeddon" ]]; then _DO_NOTHING=YES elif [[ "${_DGDD_T}" =~ "The drush command" ]] \ && [[ "${_DGDD_T}" =~ "could not be found" ]]; then _DO_NOTHING=YES elif [[ "${_DGDD_T}" =~ "has a uid that is" ]] \ && [[ ! "${_DGDD_T}" =~ "has security vulnerabilities" ]] \ && [[ "${_DGDD_T}" =~ "higher than" ]]; then _DO_NOTHING=YES elif [[ "${_DGDD_T}" =~ "has a created timestamp before" ]] \ && [[ ! "${_DGDD_T}" =~ "has security vulnerabilities" ]]; then _DO_NOTHING=YES elif [ -z "${_DGDD_T}" ]; then _DO_NOTHING=YES elif [[ "${_DGDD_T}" =~ "Drush command terminated" ]]; then echo "ALERT: THIS SITE IS PROBABLY BROKEN! ${Dir}" echo "${_DGDD_T}" else echo "ALERT: THIS SITE HAS BEEN HACKED! ${Dir}" _DETECTED="${_DGDD_T}" if [ ! -z "${_MY_EMAIL}" ]; then if [[ "${_DGDD_T}" =~ "Role \"megauser\" discovered" ]] \ || [[ "${_DGDD_T}" =~ "User \"drupaldev\" discovered" ]] \ || [[ "${_DGDD_T}" =~ "User \"owned\" discovered" ]] \ || [[ "${_DGDD_T}" =~ "User \"system\" discovered" ]] \ || [[ "${_DGDD_T}" =~ "User \"configure\" discovered" ]] \ || [[ "${_DGDD_T}" =~ "User \"drplsys\" discovered" ]]; then if [ -e "${User}/config/server_master/nginx/vhost.d/${Dom}" ]; then ### mv -f ${User}/config/server_master/nginx/vhost.d/${Dom} ${User}/config/server_master/nginx/vhost.d/.${Dom} send_shutdown_notice fi else if [[ "${_DGDD_T}" =~ "has security vulnerabilities" ]]; then send_core_alert else send_hacked_alert fi fi fi fi else _DGMR_TEST=$(run_drush8_nosilent_cmd \ "sqlq \"SELECT * FROM menu_router WHERE access_callback \ = 'file_put_contents'\" | grep 'file_put_contents'" 2>&1) if [[ "${_DGMR_TEST}" =~ "file_put_contents" ]]; then echo "ALERT: THIS SITE HAS BEEN HACKED! ${Dir}" _DETECTED="file_put_contents as access_callback detected \ in menu_router table" if [ ! -z "${_MY_EMAIL}" ]; then send_hacked_alert fi fi _DGMR_TEST=$(run_drush8_nosilent_cmd \ "sqlq \"SELECT * FROM menu_router WHERE access_callback \ = 'assert'\" | grep 'assert'" 2>&1) if [[ "${_DGMR_TEST}" =~ "assert" ]]; then echo "ALERT: THIS SITE HAS BEEN HACKED! 
${Dir}" _DETECTED="assert as access_callback detected in menu_router table" if [ ! -z "${_MY_EMAIL}" ]; then send_hacked_alert fi fi fi fi else _STATUS=BROKEN echo "WARNING: THIS SITE IS BROKEN! ${Dir}" fi else _STATUS=UNKNOWN echo "WARNING: THIS SITE IS PROBABLY BROKEN? ${Dir}" fi } check_file_with_wildcard_path() { _WILDCARD_TEST=$(ls $1 2>&1) if [ -z "${_WILDCARD_TEST}" ]; then _FILE_EXISTS=NO else _FILE_EXISTS=YES fi } fix_modules() { _AUTO_CONFIG_ADVAGG=NO if [ -e "${Plr}/modules/o_contrib/advagg" ] \ || [ -e "${Plr}/modules/o_contrib_seven/advagg" ]; then _MODULE_T=$(run_drush8_nosilent_cmd "pml --status=enabled \ --type=module | grep \(advagg\)" 2>&1) if [[ "${_MODULE_T}" =~ "(advagg)" ]]; then _AUTO_CONFIG_ADVAGG=YES fi fi if [ "${_AUTO_CONFIG_ADVAGG}" = "YES" ]; then if [ -e "/data/conf/default.boa_site_control.ini" ] \ && [ ! -e "${_DIR_CTRL_F}" ]; then cp -af /data/conf/default.boa_site_control.ini \ ${_DIR_CTRL_F} &> /dev/null chown ${_HM_U}:users ${_DIR_CTRL_F} &> /dev/null chmod 0664 ${_DIR_CTRL_F} &> /dev/null fi if [ -e "${_DIR_CTRL_F}" ]; then _AGG_P=$(grep "advagg_auto_configuration" ${_DIR_CTRL_F} 2>&1) _AGG_T=$(grep "^advagg_auto_configuration = TRUE" ${_DIR_CTRL_F} 2>&1) if [[ "${_AGG_T}" =~ "advagg_auto_configuration = TRUE" ]]; then _DO_NOTHING=YES else ### ### Do this only for the site level ini file. ### if [[ "${_AGG_P}" =~ "advagg_auto_configuration" ]]; then sed -i "s/.*advagg_auto_c.*/advagg_auto_configuration = TRUE/g" \ ${_DIR_CTRL_F} &> /dev/null wait else echo "advagg_auto_configuration = TRUE" >> ${_DIR_CTRL_F} fi fi fi else if [ -e "/data/conf/default.boa_site_control.ini" ] \ && [ ! -e "${_DIR_CTRL_F}" ]; then cp -af /data/conf/default.boa_site_control.ini \ ${_DIR_CTRL_F} &> /dev/null chown ${_HM_U}:users ${_DIR_CTRL_F} &> /dev/null chmod 0664 ${_DIR_CTRL_F} &> /dev/null fi if [ -e "${_DIR_CTRL_F}" ]; then _AGG_P=$(grep "advagg_auto_configuration" ${_DIR_CTRL_F} 2>&1) _AGG_T=$(grep "^advagg_auto_configuration = FALSE" \ ${_DIR_CTRL_F} 2>&1) if [[ "${_AGG_T}" =~ "advagg_auto_configuration = FALSE" ]]; then _DO_NOTHING=YES else if [[ "${_AGG_P}" =~ "advagg_auto_configuration" ]]; then sed -i "s/.*advagg_auto_c.*/advagg_auto_configuration = FALSE/g" \ ${_DIR_CTRL_F} &> /dev/null wait else echo ";advagg_auto_configuration = FALSE" >> ${_DIR_CTRL_F} fi fi fi fi if [ -e "${Plr}/modules/o_contrib_seven" ] \ && [ ! -e "${Plr}/core" ]; then _PRIV_TEST=$(run_drush8_nosilent_cmd "${vGet} ^file_default_scheme$" 2>&1) if [[ "${_PRIV_TEST}" =~ "No matching variable" ]]; then _PRIV_TEST_RESULT=NONE else _PRIV_TEST_RESULT=OK fi _AUTO_CNF_PF_DL=NO if [ "${_PRIV_TEST_RESULT}" = "OK" ]; then Pri=$(run_drush8_nosilent_cmd "${vGet} ^file_default_scheme$" \ | cut -d: -f2 \ | awk '{ print $1}' \ | sed "s/['\"]//g" \ | tr -d "\n" 2>&1) Pri=${Pri//[^a-z]/} if [ "$Pri" = "private" ] || [ "$Pri" = "public" ]; then echo Pri file_default_scheme for ${Dom} is $Pri fi if [ "$Pri" = "private" ]; then _AUTO_CNF_PF_DL=YES fi fi if [ "${_AUTO_CNF_PF_DL}" = "YES" ]; then if [ -e "/data/conf/default.boa_site_control.ini" ] \ && [ ! -e "${_DIR_CTRL_F}" ]; then cp -af /data/conf/default.boa_site_control.ini \ ${_DIR_CTRL_F} &> /dev/null chown ${_HM_U}:users ${_DIR_CTRL_F} &> /dev/null chmod 0664 ${_DIR_CTRL_F} &> /dev/null fi if [ -e "${_DIR_CTRL_F}" ]; then _AC_PFD_T=$(grep "^allow_private_file_downloads = TRUE" \ ${_DIR_CTRL_F} 2>&1) if [[ "${_AC_PFD_T}" =~ "allow_private_file_downloads = TRUE" ]]; then _DO_NOTHING=YES else ### ### Do this only for the site level ini file. 
### sed -i "s/.*allow_private_f.*/allow_private_file_downloads = TRUE/g" \ ${_DIR_CTRL_F} &> /dev/null wait fi fi else if [ -e "/data/conf/default.boa_site_control.ini" ] \ && [ ! -e "${_DIR_CTRL_F}" ]; then cp -af /data/conf/default.boa_site_control.ini \ ${_DIR_CTRL_F} &> /dev/null chown ${_HM_U}:users ${_DIR_CTRL_F} &> /dev/null chmod 0664 ${_DIR_CTRL_F} &> /dev/null fi if [ -e "${_DIR_CTRL_F}" ]; then _AC_PFD_T=$(grep "^allow_private_file_downloads = FALSE" \ ${_DIR_CTRL_F} 2>&1) if [[ "${_AC_PFD_T}" =~ "allow_private_file_downloads = FALSE" ]]; then _DO_NOTHING=YES else sed -i "s/.*allow_private_f.*/allow_private_file_downloads = FALSE/g" \ ${_DIR_CTRL_F} &> /dev/null wait fi fi fi fi _AUTO_DT_FB_INT=NO if [ -e "${Plr}/sites/all/modules/fb/fb_settings.inc" ] \ || [ -e "${Plr}/sites/all/modules/contrib/fb/fb_settings.inc" ]; then _AUTO_DT_FB_INT=YES else check_file_with_wildcard_path "${Plr}/profiles/*/modules/fb/fb_settings.inc" if [ "${_FILE_EXISTS}" = "YES" ]; then _AUTO_DT_FB_INT=YES else check_file_with_wildcard_path "${Plr}/profiles/*/modules/contrib/fb/fb_settings.inc" if [ "${_FILE_EXISTS}" = "YES" ]; then _AUTO_DT_FB_INT=YES fi fi fi if [ "${_AUTO_DT_FB_INT}" = "YES" ]; then if [ -e "/data/conf/default.boa_platform_control.ini" ] \ && [ ! -e "${_PLR_CTRL_F}" ]; then cp -af /data/conf/default.boa_platform_control.ini \ ${_PLR_CTRL_F} &> /dev/null chown ${_HM_U}:users ${_PLR_CTRL_F} &> /dev/null chmod 0664 ${_PLR_CTRL_F} &> /dev/null fi if [ -e "${_PLR_CTRL_F}" ]; then _AD_FB_T=$(grep "^auto_detect_facebook_integration = TRUE" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_AD_FB_T}" =~ "auto_detect_facebook_integration = TRUE" ]]; then _DO_NOTHING=YES else ### ### Do this only for the platform level ini file, so the site ### level ini file can disable this check by setting it ### explicitly to auto_detect_facebook_integration = FALSE ### sed -i "s/.*auto_detect_face.*/auto_detect_facebook_integration = TRUE/g" \ ${_PLR_CTRL_F} &> /dev/null wait fi fi else if [ -e "/data/conf/default.boa_platform_control.ini" ] \ && [ ! -e "${_PLR_CTRL_F}" ]; then cp -af /data/conf/default.boa_platform_control.ini \ ${_PLR_CTRL_F} &> /dev/null chown ${_HM_U}:users ${_PLR_CTRL_F} &> /dev/null chmod 0664 ${_PLR_CTRL_F} &> /dev/null fi if [ -e "${_PLR_CTRL_F}" ]; then _AD_FB_T=$(grep "^auto_detect_facebook_integration = FALSE" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_AD_FB_T}" =~ "auto_detect_facebook_integration = FALSE" ]]; then _DO_NOTHING=YES else sed -i "s/.*auto_detect_face.*/auto_detect_facebook_integration = FALSE/g" \ ${_PLR_CTRL_F} &> /dev/null wait fi fi fi _AUTO_DETECT_DOMAIN_ACCESS_INTEGRATION=NO if [ -e "${Plr}/sites/all/modules/domain/settings.inc" ] \ || [ -e "${Plr}/sites/all/modules/contrib/domain/settings.inc" ]; then _AUTO_DETECT_DOMAIN_ACCESS_INTEGRATION=YES else check_file_with_wildcard_path "${Plr}/profiles/*/modules/domain/settings.inc" if [ "${_FILE_EXISTS}" = "YES" ]; then _AUTO_DETECT_DOMAIN_ACCESS_INTEGRATION=YES else check_file_with_wildcard_path "${Plr}/profiles/*/modules/contrib/domain/settings.inc" if [ "${_FILE_EXISTS}" = "YES" ]; then _AUTO_DETECT_DOMAIN_ACCESS_INTEGRATION=YES fi fi fi if [ "${_AUTO_DETECT_DOMAIN_ACCESS_INTEGRATION}" = "YES" ]; then if [ -e "/data/conf/default.boa_platform_control.ini" ] \ && [ ! 
-e "${_PLR_CTRL_F}" ]; then cp -af /data/conf/default.boa_platform_control.ini \ ${_PLR_CTRL_F} &> /dev/null chown ${_HM_U}:users ${_PLR_CTRL_F} &> /dev/null chmod 0664 ${_PLR_CTRL_F} &> /dev/null fi if [ -e "${_PLR_CTRL_F}" ]; then _AD_DA_T=$(grep "^auto_detect_domain_access_integration = TRUE" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_AD_DA_T}" =~ "auto_detect_domain_access_integration = TRUE" ]]; then _DO_NOTHING=YES else ### ### Do this only for the platform level ini file, so the site ### level ini file can disable this check by setting it ### explicitly to auto_detect_domain_access_integration = FALSE ### sed -i "s/.*auto_detect_domain.*/auto_detect_domain_access_integration = TRUE/g" \ ${_PLR_CTRL_F} &> /dev/null wait fi fi else if [ -e "/data/conf/default.boa_platform_control.ini" ] \ && [ ! -e "${_PLR_CTRL_F}" ]; then cp -af /data/conf/default.boa_platform_control.ini \ ${_PLR_CTRL_F} &> /dev/null chown ${_HM_U}:users ${_PLR_CTRL_F} &> /dev/null chmod 0664 ${_PLR_CTRL_F} &> /dev/null fi if [ -e "${_PLR_CTRL_F}" ]; then _AD_DA_T=$(grep "^auto_detect_domain_access_integration = FALSE" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_AD_DA_T}" =~ "auto_detect_domain_access_integration = FALSE" ]]; then _DO_NOTHING=YES else sed -i "s/.*auto_detect_domain.*/auto_detect_domain_access_integration = FALSE/g" \ ${_PLR_CTRL_F} &> /dev/null wait fi fi fi ### ### Add new INI variables if missing ### if [ -e "${_PLR_CTRL_F}" ]; then _VAR_IF_PRESENT=$(grep "session_cookie_ttl" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "session_cookie_ttl" ]]; then _DO_NOTHING=YES else echo ";session_cookie_ttl = 86400" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "session_gc_eol" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "session_gc_eol" ]]; then _DO_NOTHING=YES else echo ";session_gc_eol = 86400" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "enable_newrelic_integration" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "enable_newrelic_integration" ]]; then _DO_NOTHING=YES else echo ";enable_newrelic_integration = FALSE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_old_nine_mode" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_old_nine_mode" ]]; then _DO_NOTHING=YES else echo ";redis_old_nine_mode = FALSE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_old_eight_mode" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_old_eight_mode" ]]; then _DO_NOTHING=YES else echo ";redis_old_eight_mode = FALSE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_use_modern" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_use_modern" ]]; then _DO_NOTHING=YES else echo ";redis_use_modern = TRUE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_flush_forced_mode" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_flush_forced_mode" ]]; then _DO_NOTHING=YES else echo ";redis_flush_forced_mode = TRUE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_lock_enable" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_lock_enable" ]]; then _DO_NOTHING=YES else echo ";redis_lock_enable = TRUE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_path_enable" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_path_enable" ]]; then _DO_NOTHING=YES else echo ";redis_path_enable = TRUE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_scan_enable" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_scan_enable" ]]; then _DO_NOTHING=YES else echo ";redis_scan_enable = FALSE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_exclude_bins" ${_PLR_CTRL_F} 2>&1) if [[ 
"${_VAR_IF_PRESENT}" =~ "redis_exclude_bins" ]]; then _DO_NOTHING=YES else echo ";redis_exclude_bins = FALSE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "speed_booster_anon_cache_ttl" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "speed_booster_anon_cache_ttl" ]]; then _DO_NOTHING=YES else echo ";speed_booster_anon_cache_ttl = 10" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "disable_drupal_page_cache" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "disable_drupal_page_cache" ]]; then _DO_NOTHING=YES else echo ";disable_drupal_page_cache = FALSE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "allow_private_file_downloads" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "allow_private_file_downloads" ]]; then _DO_NOTHING=YES else echo ";allow_private_file_downloads = FALSE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "entitycache_dont_enable" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "entitycache_dont_enable" ]]; then _DO_NOTHING=YES else echo ";entitycache_dont_enable = FALSE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "views_cache_bully_dont_enable" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "views_cache_bully_dont_enable" ]]; then _DO_NOTHING=YES else echo ";views_cache_bully_dont_enable = FALSE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "views_content_cache_dont_enable" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "views_content_cache_dont_enable" ]]; then _DO_NOTHING=YES else echo ";views_content_cache_dont_enable = FALSE" >> ${_PLR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "set_composer_manager_vendor_dir" ${_PLR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "set_composer_manager_vendor_dir" ]]; then _DO_NOTHING=YES else echo ";set_composer_manager_vendor_dir = FALSE" >> ${_PLR_CTRL_F} fi fi if [ -e "${_DIR_CTRL_F}" ]; then _VAR_IF_PRESENT=$(grep "session_cookie_ttl" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "session_cookie_ttl" ]]; then _DO_NOTHING=YES else echo ";session_cookie_ttl = 86400" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "session_gc_eol" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "session_gc_eol" ]]; then _DO_NOTHING=YES else echo ";session_gc_eol = 86400" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "enable_newrelic_integration" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "enable_newrelic_integration" ]]; then _DO_NOTHING=YES else echo ";enable_newrelic_integration = FALSE" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_old_nine_mode" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_old_nine_mode" ]]; then _DO_NOTHING=YES else echo ";redis_old_nine_mode = FALSE" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_old_eight_mode" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_old_eight_mode" ]]; then _DO_NOTHING=YES else echo ";redis_old_eight_mode = FALSE" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_use_modern" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_use_modern" ]]; then _DO_NOTHING=YES else echo ";redis_use_modern = TRUE" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_flush_forced_mode" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_flush_forced_mode" ]]; then _DO_NOTHING=YES else echo ";redis_flush_forced_mode = TRUE" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_lock_enable" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_lock_enable" ]]; then _DO_NOTHING=YES else echo ";redis_lock_enable = TRUE" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_path_enable" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_path_enable" ]]; then 
_DO_NOTHING=YES else echo ";redis_path_enable = TRUE" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_scan_enable" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_scan_enable" ]]; then _DO_NOTHING=YES else echo ";redis_scan_enable = FALSE" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "redis_exclude_bins" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "redis_exclude_bins" ]]; then _DO_NOTHING=YES else echo ";redis_exclude_bins = FALSE" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "speed_booster_anon_cache_ttl" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "speed_booster_anon_cache_ttl" ]]; then _DO_NOTHING=YES else echo ";speed_booster_anon_cache_ttl = 10" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "disable_drupal_page_cache" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "disable_drupal_page_cache" ]]; then _DO_NOTHING=YES else echo ";disable_drupal_page_cache = FALSE" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "allow_private_file_downloads" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "allow_private_file_downloads" ]]; then _DO_NOTHING=YES else echo ";allow_private_file_downloads = FALSE" >> ${_DIR_CTRL_F} fi _VAR_IF_PRESENT=$(grep "set_composer_manager_vendor_dir" ${_DIR_CTRL_F} 2>&1) if [[ "${_VAR_IF_PRESENT}" =~ "set_composer_manager_vendor_dir" ]]; then _DO_NOTHING=YES else echo ";set_composer_manager_vendor_dir = FALSE" >> ${_DIR_CTRL_F} fi fi if [ -e "${_PLR_CTRL_F}" ]; then _EC_DE_T=$(grep "^entitycache_dont_enable = TRUE" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_EC_DE_T}" =~ "entitycache_dont_enable = TRUE" ]] \ || [ -e "${Plr}/profiles/commons" ]; then _ENTITYCACHE_DONT_ENABLE=YES else _ENTITYCACHE_DONT_ENABLE=NO fi else _ENTITYCACHE_DONT_ENABLE=NO fi if [ -e "${_PLR_CTRL_F}" ]; then _VCB_DE_T=$(grep "^views_cache_bully_dont_enable = TRUE" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_VCB_DE_T}" =~ "views_cache_bully_dont_enable = TRUE" ]]; then _VIEWS_CACHE_BULLY_DONT_ENABLE=YES else _VIEWS_CACHE_BULLY_DONT_ENABLE=NO fi else _VIEWS_CACHE_BULLY_DONT_ENABLE=NO fi if [ -e "${_PLR_CTRL_F}" ]; then _VCC_DE_T=$(grep "^views_content_cache_dont_enable = TRUE" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_VCC_DE_T}" =~ "views_content_cache_dont_enable = TRUE" ]]; then _VIEWS_CONTENT_CACHE_DONT_ENABLE=YES else _VIEWS_CONTENT_CACHE_DONT_ENABLE=NO fi else _VIEWS_CONTENT_CACHE_DONT_ENABLE=NO fi if [ -e "${Plr}/modules/o_contrib" ]; then if [ ! -e "${Plr}/modules/user" ] \ || [ ! -e "${Plr}/sites/all/modules" ] \ || [ ! -e "${Plr}/profiles" ]; then echo "WARNING: THIS PLATFORM IS BROKEN! ${Plr}" elif [ ! -e "${Plr}/modules/path_alias_cache" ]; then echo "WARNING: THIS PLATFORM IS NOT A VALID PRESSFLOW PLATFORM! ${Plr}" elif [ -e "${Plr}/modules/path_alias_cache" ] \ && [ -e "${Plr}/modules/user" ]; then _MODX=ON if [ ! -z "${_MODULES_OFF_SIX}" ]; then disable_modules_with_drush8 "${_MODULES_OFF_SIX}" fi if [ ! -z "${_MODULES_ON_SIX}" ]; then enable_modules_with_drush8 "${_MODULES_ON_SIX}" fi run_drush8_cmd "sqlq \"UPDATE system SET weight = '-1' \ WHERE type = 'module' AND name = 'path_alias_cache'\"" fi elif [ -e "${Plr}/modules/o_contrib_seven" ]; then if [ ! -e "${Plr}/modules/user" ] \ || [ ! -e "${Plr}/sites/all/modules" ] \ || [ ! -e "${Plr}/profiles" ]; then echo "WARNING: THIS PLATFORM IS BROKEN! ${Plr}" else _MODX=ON if [ ! -z "${_MODULES_OFF_SEVEN}" ]; then disable_modules_with_drush8 "${_MODULES_OFF_SEVEN}" fi if [ "${_ENTITYCACHE_DONT_ENABLE}" = "NO" ]; then enable_modules_with_drush8 "entitycache" fi if [ ! 
-z "${_MODULES_ON_SEVEN}" ]; then enable_modules_with_drush8 "${_MODULES_ON_SEVEN}" fi fi fi } if_site_db_conversion() { ### ### Detect db conversion mode, if set per platform or per site. ### if [ -e "${_PLR_CTRL_F}" ]; then _SQL_INDB_P=$(grep "sql_conversion_mode" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_SQL_INDB_P}" =~ "sql_conversion_mode" ]]; then _DO_NOTHING=YES else echo ";sql_conversion_mode = NO" >> ${_PLR_CTRL_F} fi _SQL_INDB_T=$(grep "^sql_conversion_mode = innodb" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_SQL_INDB_T}" =~ "sql_conversion_mode = innodb" ]]; then _SQL_CONVERT=innodb fi _SQL_MYSM_T=$(grep "^sql_conversion_mode = myisam" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_SQL_MYSM_T}" =~ "sql_conversion_mode = myisam" ]]; then _SQL_CONVERT=myisam fi fi if [ -e "${_DIR_CTRL_F}" ]; then _SQL_INDB_P=$(grep "sql_conversion_mode" \ ${_DIR_CTRL_F} 2>&1) if [[ "${_SQL_INDB_P}" =~ "sql_conversion_mode" ]]; then _DO_NOTHING=YES else echo ";sql_conversion_mode = NO" >> ${_DIR_CTRL_F} fi _SQL_INDB_T=$(grep "^sql_conversion_mode = innodb" \ ${_DIR_CTRL_F} 2>&1) if [[ "${_SQL_INDB_T}" =~ "sql_conversion_mode = innodb" ]]; then _SQL_CONVERT=innodb fi _SQL_MYSM_T=$(grep "^sql_conversion_mode = myisam" \ ${_DIR_CTRL_F} 2>&1) if [[ "${_SQL_MYSM_T}" =~ "sql_conversion_mode = myisam" ]]; then _SQL_CONVERT=myisam fi fi if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]]; then _DENY_SQL_CONVERT=YES _SQL_CONVERT= fi if [ -z "${_DENY_SQL_CONVERT}" ] \ && [ ! -z "${_SQL_CONVERT}" ] \ && [ "${_DOW}" = "2" ]; then if [ "${_SQL_CONVERT}" = "YES" ]; then _SQL_CONVERT=innodb elif [ "${_SQL_CONVERT}" = "NO" ]; then _SQL_CONVERT= fi if [ "${_SQL_CONVERT}" = "myisam" ] \ || [ "${_SQL_CONVERT}" = "innodb" ]; then _TIMP=$(date +%y%m%d-%H%M%S 2>&1) echo "${_TIMP} sql conversion to-${_SQL_CONVERT} \ for ${Dom} started" sql_convert _TIMP=$(date +%y%m%d-%H%M%S 2>&1) echo "${_TIMP} sql conversion to-${_SQL_CONVERT} \ for ${Dom} completed" fi fi } cleanup_ghost_platforms() { if [ -e "${Plr}" ]; then if [ ! -e "${Plr}/index.php" ] || [ ! -e "${Plr}/profiles" ]; then if [ ! -e "${Plr}/vendor" ]; then mkdir -p ${User}/undo ### mv -f ${Plr} ${User}/undo/ &> /dev/null echo "GHOST platform ${Plr} detected and moved to ${User}/undo/" fi fi fi } fix_seven_core_patch() { if [ ! -f "${Plr}/profiles/SA-CORE-2014-005-D7-fix.info" ]; then _PATCH_TEST=$(grep "foreach (array_values(\$data)" \ ${Plr}/includes/database/database.inc 2>&1) if [[ "${_PATCH_TEST}" =~ "array_values" ]]; then echo fixed > ${Plr}/profiles/SA-CORE-2014-005-D7-fix.info else cd ${Plr} patch -p1 < /var/xdrago/conf/SA-CORE-2014-005-D7.patch chown ${_HM_U}:users ${Plr}/includes/database/*.inc &> /dev/null chmod 0664 ${Plr}/includes/database/*.inc &> /dev/null echo fixed > ${Plr}/profiles/SA-CORE-2014-005-D7-fix.info fi chown ${_HM_U}:users ${Plr}/profiles/*-fix.info &> /dev/null chmod 0664 ${Plr}/profiles/*-fix.info &> /dev/null fi } fix_static_permissions() { cleanup_ghost_platforms if [ -e "${Plr}/profiles" ]; then if [ -e "${Plr}/web.config" ] && [ ! -e "${Plr}/core" ]; then fix_seven_core_patch fi if [ ! -e "${User}/static/control/unlock.info" ] \ && [ ! -e "${Plr}/skip.info" ]; then if [ ! -e "${User}/log/ctrl/plr.${PlrID}.ctm-lock-${_NOW}.info" ]; then chown -R ${_HM_U} ${Plr} &> /dev/null touch ${User}/log/ctrl/plr.${PlrID}.ctm-lock-${_NOW}.info fi elif [ -e "${User}/static/control/unlock.info" ] \ && [ ! -e "${Plr}/skip.info" ]; then if [ ! 
-e "${User}/log/ctrl/plr.${PlrID}.ctm-unlock-${_NOW}.info" ]; then chown -R ${_HM_U}.ftp ${Plr} &> /dev/null touch ${User}/log/ctrl/plr.${PlrID}.ctm-unlock-${_NOW}.info fi fi if [ ! -f "${User}/log/ctrl/plr.${PlrID}.perm-fix-${_NOW}.info" ]; then find ${Plr} -type d -exec chmod 0775 {} \; &> /dev/null find ${Plr} -type f -exec chmod 0664 {} \; &> /dev/null fi fi } fix_expected_symlinks() { if [ ! -e "${Plr}/js.php" ] && [ -e "${Plr}" ]; then if [ -e "${Plr}/modules/o_contrib_seven" ] \ && [ -e "${_O_CONTRIB_SEVEN}/js/js.php" ]; then ln -s ${_O_CONTRIB_SEVEN}/js/js.php ${Plr}/js.php &> /dev/null elif [ -e "${Plr}/modules/o_contrib" ] \ && [ -e "${_O_CONTRIB}/js/js.php" ]; then ln -s ${_O_CONTRIB}/js/js.php ${Plr}/js.php &> /dev/null fi fi } fix_permissions() { ### modules,themes,libraries - profile level in ~/static searchStringT="/static/" case ${Plr} in *"$searchStringT"*) fix_static_permissions ;; esac ### modules,themes,libraries - platform level if [ -f "${Plr}/profiles/core-permissions-update-fix.info" ]; then rm -f ${Plr}/profiles/*permissions*.info rm -f ${Plr}/sites/all/permissions-fix* fi if [ ! -f "${User}/log/ctrl/plr.${PlrID}.perm-fix-${_NOW}.info" ] \ && [ -e "${Plr}" ]; then mkdir -p ${Plr}/sites/all/{modules,themes,libraries,drush} find ${Plr}/sites/all/{modules,themes,libraries,drush}/*{.tar,.tar.gz,.zip} \ -type f -exec rm -f {} \; &> /dev/null if [ ! -e "${User}/static/control/unlock.info" ] \ && [ ! -e "${Plr}/skip.info" ]; then if [ ! -e "${User}/log/ctrl/plr.${PlrID}.lock-${_NOW}.info" ]; then chown -R ${_HM_U}:users \ ${Plr}/sites/all/{modules,themes,libraries}/* &> /dev/null touch ${User}/log/ctrl/plr.${PlrID}.lock-${_NOW}.info fi elif [ -e "${User}/static/control/unlock.info" ] \ && [ ! -e "${Plr}/skip.info" ]; then if [ ! -e "${User}/log/ctrl/plr.${PlrID}.unlock-${_NOW}.info" ]; then chown -R ${_HM_U}.ftp:users \ ${Plr}/sites/all/{modules,themes,libraries}/* &> /dev/null touch ${User}/log/ctrl/plr.${PlrID}.unlock-${_NOW}.info fi fi chown ${_HM_U}:users \ ${Plr}/sites/all/drush/drushrc.php \ ${Plr}/sites \ ${Plr}/sites/* \ ${Plr}/sites/sites.php \ ${Plr}/sites/all \ ${Plr}/sites/all/{modules,themes,libraries,drush} &> /dev/null chmod 0751 ${Plr}/sites &> /dev/null chmod 0755 ${Plr}/sites/* &> /dev/null chmod 0644 ${Plr}/sites/*.php &> /dev/null chmod 0664 ${Plr}/autoload.php &> /dev/null chmod 0644 ${Plr}/sites/*.txt &> /dev/null chmod 0644 ${Plr}/sites/*.yml &> /dev/null chmod 0755 ${Plr}/sites/all/drush &> /dev/null find ${Plr}/sites/all/{modules,themes,libraries} -type d -exec \ chmod 02775 {} \; &> /dev/null find ${Plr}/sites/all/{modules,themes,libraries} -type f -exec \ chmod 0664 {} \; &> /dev/null ### expected symlinks fix_expected_symlinks ### known exceptions chmod -R 775 ${Plr}/sites/all/libraries/tcpdf/cache &> /dev/null chown -R ${_HM_U}:www-data \ ${Plr}/sites/all/libraries/tcpdf/cache &> /dev/null touch ${User}/log/ctrl/plr.${PlrID}.perm-fix-${_NOW}.info fi if [ -e "${Dir}" ] \ && [ -e "${Dir}/drushrc.php" ] \ && [ -e "${Dir}/files" ] \ && [ -e "${Dir}/private" ]; then ### directory and settings files - site level if [ ! 
-e "${Dir}/modules" ]; then mkdir ${Dir}/modules fi if [ -e "${Dir}/aegir.services.yml" ]; then rm -f ${Dir}/aegir.services.yml fi chown ${_HM_U}:users ${Dir} &> /dev/null chown ${_HM_U}:www-data \ ${Dir}/{local.settings.php,settings.php,civicrm.settings.php,solr.php} &> /dev/null find ${Dir}/*.php -type f -exec chmod 0440 {} \; &> /dev/null chmod 0640 ${Dir}/civicrm.settings.php &> /dev/null ### modules,themes,libraries - site level find ${Dir}/{modules,themes,libraries}/*{.tar,.tar.gz,.zip} -type f -exec \ rm -f {} \; &> /dev/null rm -f ${Dir}/modules/local-allow.info if [ ! -e "${User}/static/control/unlock.info" ] \ && [ ! -e "${Plr}/skip.info" ]; then chown -R ${_HM_U}:users \ ${Dir}/{modules,themes,libraries}/* &> /dev/null elif [ -e "${User}/static/control/unlock.info" ] \ && [ ! -e "${Plr}/skip.info" ]; then chown -R ${_HM_U}.ftp:users \ ${Dir}/{modules,themes,libraries}/* &> /dev/null fi chown ${_HM_U}:users \ ${Dir}/drushrc.php \ ${Dir}/{modules,themes,libraries} &> /dev/null find ${Dir}/{modules,themes,libraries} -type d -exec \ chmod 02775 {} \; &> /dev/null find ${Dir}/{modules,themes,libraries} -type f -exec \ chmod 0664 {} \; &> /dev/null ### files - site level chown -L -R ${_HM_U}:www-data ${Dir}/files &> /dev/null find ${Dir}/files/ -type d -exec chmod 02775 {} \; &> /dev/null find ${Dir}/files/ -type f -exec chmod 0664 {} \; &> /dev/null chmod 02775 ${Dir}/files &> /dev/null chown ${_HM_U}:www-data ${Dir}/files &> /dev/null chown ${_HM_U}:www-data ${Dir}/files/{tmp,images,pictures,css,js} &> /dev/null chown ${_HM_U}:www-data ${Dir}/files/{advagg_css,advagg_js,ctools} &> /dev/null chown ${_HM_U}:www-data ${Dir}/files/{ctools/css,imagecache,locations} &> /dev/null chown ${_HM_U}:www-data ${Dir}/files/{xmlsitemap,deployment,styles,private} &> /dev/null chown ${_HM_U}:www-data ${Dir}/files/{civicrm,civicrm/templates_c} &> /dev/null chown ${_HM_U}:www-data ${Dir}/files/{civicrm/upload,civicrm/persist} &> /dev/null chown ${_HM_U}:www-data ${Dir}/files/{civicrm/custom,civicrm/dynamic} &> /dev/null ### private - site level chown -L -R ${_HM_U}:www-data ${Dir}/private &> /dev/null find ${Dir}/private/ -type d -exec chmod 02775 {} \; &> /dev/null find ${Dir}/private/ -type f -exec chmod 0664 {} \; &> /dev/null chown ${_HM_U}:www-data ${Dir}/private &> /dev/null chown ${_HM_U}:www-data ${Dir}/private/{files,temp} &> /dev/null chown ${_HM_U}:www-data ${Dir}/private/files/backup_migrate &> /dev/null chown ${_HM_U}:www-data ${Dir}/private/files/backup_migrate/{manual,scheduled} &> /dev/null chown -L -R ${_HM_U}:www-data ${Dir}/private/config &> /dev/null _DB_HOST_PRESENT=$(grep "^\$_SERVER\['db_host'\] = \$options\['db_host'\];" \ ${Dir}/drushrc.php 2>&1) if [[ "${_DB_HOST_PRESENT}" =~ "db_host" ]]; then if [ "${_FORCE_SITES_VERIFY}" = "YES" ]; then run_drush8_hmr_cmd "hosting-task @${Dom} verify --force" fi else echo "\$_SERVER['db_host'] = \$options['db_host'];" >> ${Dir}/drushrc.php run_drush8_hmr_cmd "hosting-task @${Dom} verify --force" fi fi } convert_controls_orig() { if [ -e "${_CTRL_DIR}/$1.info" ] \ || [ -e "${User}/static/control/$1.info" ]; then if [ ! -e "${_CTRL_F}" ] && [ -e "${_CTRL_F_TPL}" ]; then cp -af ${_CTRL_F_TPL} ${_CTRL_F} fi sed -i "s/.*$1.*/$1 = TRUE/g" ${_CTRL_F} &> /dev/null wait rm -f ${_CTRL_DIR}/$1.info fi } convert_controls_orig_no_global() { if [ -e "${_CTRL_DIR}/$1.info" ]; then if [ ! 
-e "${_CTRL_F}" ] && [ -e "${_CTRL_F_TPL}" ]; then cp -af ${_CTRL_F_TPL} ${_CTRL_F} fi sed -i "s/.*$1.*/$1 = TRUE/g" ${_CTRL_F} &> /dev/null wait rm -f ${_CTRL_DIR}/$1.info fi } convert_controls_value() { if [ -e "${_CTRL_DIR}/$1.info" ] \ || [ -e "${User}/static/control/$1.info" ]; then if [ ! -e "${_CTRL_F}" ] && [ -e "${_CTRL_F_TPL}" ]; then cp -af ${_CTRL_F_TPL} ${_CTRL_F} fi if [ "$1" = "nginx_cache_day" ]; then _TTL=86400 elif [ "$1" = "nginx_cache_hour" ]; then _TTL=3600 elif [ "$1" = "nginx_cache_quarter" ]; then _TTL=900 fi sed -i "s/.*speed_booster_anon.*/speed_booster_anon_cache_ttl = ${_TTL}/g" \ ${_CTRL_F} &> /dev/null wait rm -f ${_CTRL_DIR}/$1.info fi } convert_controls_renamed() { if [ -e "${_CTRL_DIR}/$1.info" ]; then if [ ! -e "${_CTRL_F}" ] && [ -e "${_CTRL_F_TPL}" ]; then cp -af ${_CTRL_F_TPL} ${_CTRL_F} fi if [ "$1" = "cookie_domain" ]; then sed -i "s/.*server_name_cookie.*/server_name_cookie_domain = TRUE/g" \ ${_CTRL_F} &> /dev/null wait fi rm -f ${_CTRL_DIR}/$1.info fi } fix_control_settings() { _CTRL_NAME_ORIG="redis_lock_enable \ redis_cache_disable \ disable_admin_dos_protection \ allow_anon_node_add \ allow_private_file_downloads" _CTRL_NAME_VALUE="nginx_cache_day \ nginx_cache_hour \ nginx_cache_quarter" _CTRL_NAME_RENAMED="cookie_domain" for ctrl in ${_CTRL_NAME_ORIG}; do convert_controls_orig "$ctrl" done for ctrl in ${_CTRL_NAME_VALUE}; do convert_controls_value "$ctrl" done for ctrl in ${_CTRL_NAME_RENAMED}; do convert_controls_renamed "$ctrl" done } fix_platform_system_control_settings() { _CTRL_NAME_ORIG="enable_user_register_protection \ entitycache_dont_enable \ views_cache_bully_dont_enable \ views_content_cache_dont_enable" for ctrl in ${_CTRL_NAME_ORIG}; do convert_controls_orig "$ctrl" done } fix_site_system_control_settings() { _CTRL_NAME_ORIG="disable_user_register_protection" for ctrl in ${_CTRL_NAME_ORIG}; do convert_controls_orig_no_global "$ctrl" done } cleanup_ini() { if [ -e "${_CTRL_F}" ]; then sed -i "s/^;;.*//g" ${_CTRL_F} &> /dev/null wait sed -i "/^$/d" ${_CTRL_F} &> /dev/null wait sed -i "s/^\[/\n\[/g" ${_CTRL_F} &> /dev/null wait fi } add_note_platform_ini() { if [ -e "${_CTRL_F}" ]; then echo "" >> ${_CTRL_F} echo ";;" >> ${_CTRL_F} echo ";; This is a platform level ACTIVE INI file which can be used to modify" >> ${_CTRL_F} echo ";; default BOA system behaviour for all sites hosted on this platform." >> ${_CTRL_F} echo ";;" >> ${_CTRL_F} echo ";; Please review complete documentation included in this file TEMPLATE:" >> ${_CTRL_F} echo ";; default.boa_platform_control.ini, since this ACTIVE INI file" >> ${_CTRL_F} echo ";; may not include all options available after upgrade to BOA-${_X_SE}" >> ${_CTRL_F} echo ";;" >> ${_CTRL_F} echo ";; Note that it takes ~60 seconds to see any modification results in action" >> ${_CTRL_F} echo ";; due to opcode caching enabled in PHP-FPM for all non-dev sites." >> ${_CTRL_F} echo ";;" >> ${_CTRL_F} fi } add_note_site_ini() { if [ -e "${_CTRL_F}" ]; then echo "" >> ${_CTRL_F} echo ";;" >> ${_CTRL_F} echo ";; This is a site level ACTIVE INI file which can be used to modify" >> ${_CTRL_F} echo ";; default BOA system behaviour for this site only." 
>> ${_CTRL_F} echo ";;" >> ${_CTRL_F} echo ";; Please review complete documentation included in this file TEMPLATE:" >> ${_CTRL_F} echo ";; default.boa_site_control.ini, since this ACTIVE INI file" >> ${_CTRL_F} echo ";; may not include all options available after upgrade to BOA-${_X_SE}" >> ${_CTRL_F} echo ";;" >> ${_CTRL_F} echo ";; Note that it takes ~60 seconds to see any modification results in action" >> ${_CTRL_F} echo ";; due to opcode caching enabled in PHP-FPM for all non-dev sites." >> ${_CTRL_F} echo ";;" >> ${_CTRL_F} fi } fix_platform_control_files() { if [ -e "/data/conf/default.boa_platform_control.ini" ]; then if [ ! -e "${Plr}/sites/all/modules/default.boa_platform_control.ini" ] \ || [ "${_CTRL_TPL_FORCE_UPDATE}" = "YES" ]; then cp -af /data/conf/default.boa_platform_control.ini \ ${Plr}/sites/all/modules/ &> /dev/null chown ${_HM_U}:users ${Plr}/sites/all/modules/default.boa_platform_control.ini &> /dev/null chmod 0664 ${Plr}/sites/all/modules/default.boa_platform_control.ini &> /dev/null fi _CTRL_F_TPL="${Plr}/sites/all/modules/default.boa_platform_control.ini" _CTRL_F="${Plr}/sites/all/modules/boa_platform_control.ini" _CTRL_DIR="${Plr}/sites/all/modules" fix_control_settings fix_platform_system_control_settings cleanup_ini add_note_platform_ini fi } fix_site_control_files() { if [ -e "/data/conf/default.boa_site_control.ini" ]; then if [ ! -e "${Dir}/modules/default.boa_site_control.ini" ] \ || [ "${_CTRL_TPL_FORCE_UPDATE}" = "YES" ]; then cp -af /data/conf/default.boa_site_control.ini ${Dir}/modules/ &> /dev/null chown ${_HM_U}:users ${Dir}/modules/default.boa_site_control.ini &> /dev/null chmod 0664 ${Dir}/modules/default.boa_site_control.ini &> /dev/null fi _CTRL_F_TPL="${Dir}/modules/default.boa_site_control.ini" _CTRL_F="${Dir}/modules/boa_site_control.ini" _CTRL_DIR="${Dir}/modules" fix_control_settings fix_site_system_control_settings cleanup_ini add_note_site_ini fi } cleanup_ghost_vhosts() { for Site in `find ${User}/config/server_master/nginx/vhost.d -maxdepth 1 \ -mindepth 1 -type f | sort`; do Dom=$(echo $Site | cut -d'/' -f9 | awk '{ print $1}' 2>&1) if [[ "${Dom}" =~ ".restore"($) ]]; then mkdir -p ${User}/undo ### mv -f ${User}/.drush/${Dom}.alias.drushrc.php ${User}/undo/ &> /dev/null ### mv -f ${User}/config/server_master/nginx/vhost.d/${Dom} ${User}/undo/ &> /dev/null echo "GHOST vhost for ${Dom} detected and moved to ${User}/undo/" fi if [ -e "${User}/config/server_master/nginx/vhost.d/${Dom}" ]; then Plx=$(cat ${User}/config/server_master/nginx/vhost.d/${Dom} \ | grep "root " \ | cut -d: -f2 \ | awk '{ print $2}' \ | sed "s/[\;]//g" 2>&1) if [[ "$Plx" =~ "aegir/distro" ]] \ || [[ "${Dom}" =~ (^)"https." ]] \ || [[ "${Dom}" =~ "--CDN"($) ]]; then _SKIP_VHOST=YES else if [ ! -e "${User}/.drush/${Dom}.alias.drushrc.php" ]; then mkdir -p ${User}/undo ### mv -f $Site ${User}/undo/ &> /dev/null echo "GHOST vhost for ${Dom} with no drushrc detected and moved to ${User}/undo/" fi fi fi done } cleanup_ghost_drushrc() { for Alias in `find ${User}/.drush/*.alias.drushrc.php -maxdepth 1 -type f \ | sort`; do AliasName=$(echo "${Alias}" | cut -d'/' -f6 | awk '{ print $1}' 2>&1) AliasName=$(echo "${AliasName}" \ | sed "s/.alias.drushrc.php//g" \ | awk '{ print $1}' 2>&1) if [[ "${AliasName}" =~ (^)"server_" ]] \ || [[ "${AliasName}" =~ (^)"hostmaster" ]]; then _IS_SITE=NO elif [[ "${AliasName}" =~ (^)"platform_" ]]; then Plm=$(cat ${Alias} \ | grep "root'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) if [ -d "${Plm}" ]; then if [ ! 
-e "${Plm}/index.php" ] || [ ! -e "${Plm}/profiles" ]; then if [ ! -e "${Plm}/vendor" ]; then mkdir -p ${User}/undo ### mv -f ${Plm} ${User}/undo/ &> /dev/null echo "GHOST broken platform dir ${Plm} detected and moved to ${User}/undo/" ### mv -f ${Alias} ${User}/undo/ &> /dev/null echo "GHOST broken platform alias ${Alias} detected and moved to ${User}/undo/" fi fi else mkdir -p ${User}/undo ### mv -f ${Alias} ${User}/undo/ &> /dev/null echo "GHOST nodir platform alias ${Alias} detected and moved to ${User}/undo/" fi else _T_SITE_NAME="${AliasName}" if [[ "${_T_SITE_NAME}" =~ ".restore"($) ]]; then _IS_SITE=NO mkdir -p ${User}/undo ### mv -f ${User}/.drush/${_T_SITE_NAME}.alias.drushrc.php ${User}/undo/ &> /dev/null ### mv -f ${User}/config/server_master/nginx/vhost.d/${_T_SITE_NAME} ${User}/undo/ &> /dev/null echo "GHOST drushrc and vhost for ${_T_SITE_NAME} detected and moved to ${User}/undo/" else _T_SITE_FDIR=$(cat ${Alias} \ | grep "site_path'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) if [ -e "${_T_SITE_FDIR}/drushrc.php" ] \ && [ -e "${_T_SITE_FDIR}/files" ] \ && [ -e "${_T_SITE_FDIR}/private" ]; then if [ ! -e "${Dir}/modules" ]; then mkdir ${Dir}/modules fi _IS_SITE=YES else mkdir -p ${User}/undo ### mv -f ${User}/.drush/${_T_SITE_NAME}.alias.drushrc.php ${User}/undo/ &> /dev/null echo "GHOST drushrc for ${_T_SITE_NAME} detected and moved to ${User}/undo/" if [[ ! "${_T_SITE_FDIR}" =~ "aegir/distro" ]]; then ### mv -f ${User}/config/server_master/nginx/vhost.d/${_T_SITE_NAME} ${User}/undo/ghost-vhost-${_T_SITE_NAME} &> /dev/null echo "GHOST vhost for ${_T_SITE_NAME} detected and moved to ${User}/undo/" fi if [ -d "${_T_SITE_FDIR}" ]; then ### mv -f ${_T_SITE_FDIR} ${User}/undo/ghost-site-${_T_SITE_NAME} &> /dev/null echo "GHOST site dir for ${_T_SITE_NAME} detected and moved from ${_T_SITE_FDIR} to ${User}/undo/" fi fi fi fi done } le_hm_ssl_check_update() { exeLe="${User}/tools/le/dehydrated" if [ -e "${User}/log/domain.txt" ]; then hmFront=$(cat ${User}/log/domain.txt 2>&1) hmFront=$(echo -n ${hmFront} | tr -d "\n" 2>&1) fi if [ -e "${User}/log/extra_domain.txt" ]; then hmFrontExtra=$(cat ${User}/log/extra_domain.txt 2>&1) hmFrontExtra=$(echo -n ${hmFrontExtra} | tr -d "\n" 2>&1) fi if [ -z "${hmFront}" ]; then if [ -e "${User}/.drush/hostmaster.alias.drushrc.php" ]; then hmFront=$(cat ${User}/.drush/hostmaster.alias.drushrc.php \ | grep "uri'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) fi fi if [ -x "${exeLe}" ] \ && [ ! -z "${hmFront}" ] \ && [ -e "${User}/tools/le/certs/${hmFront}/fullchain.pem" ]; then _DOM=$(date +%e 2>&1) _DOM=${_DOM//[^0-9]/} _RDM=$((RANDOM%25+6)) if [ "${_DOM}" = "${_RDM}" ] || [ -e "${User}/static/control/force-ssl-certs-rebuild.info" ]; then if [ ! -e "${User}/log/ctrl/site.${hmFront}.cert-x1-rebuilt.info" ]; then leParams="--cron --ipv4 --preferred-chain 'ISRG Root X1' --force" mkdir -p ${User}/log/ctrl touch ${User}/log/ctrl/site.${hmFront}.cert-x1-rebuilt.info else leParams="--cron --ipv4 --preferred-chain 'ISRG Root X1'" fi else leParams="--cron --ipv4 --preferred-chain 'ISRG Root X1'" fi if [ ! 
-z "${hmFrontExtra}" ]; then echo "Running LE cert check directly for hostmaster ${_HM_U} with ${hmFrontExtra}" su -s /bin/bash - ${_HM_U} -c "${exeLe} ${leParams} --domain ${hmFront} --domain ${hmFrontExtra}" else echo "Running LE cert check directly for hostmaster ${_HM_U}" su -s /bin/bash - ${_HM_U} -c "${exeLe} ${leParams} --domain ${hmFront}" fi sleep 3 fi } le_ssl_check_update() { exeLe="${User}/tools/le/dehydrated" Vht="${User}/config/server_master/nginx/vhost.d/${Dom}" if [ -x "${exeLe}" ] && [ -e "${Vht}" ]; then _SSL_ON_TEST=$(cat ${Vht} | grep "443 ssl http2" 2>&1) if [[ "${_SSL_ON_TEST}" =~ "443 ssl http2" ]]; then if [ -e "${User}/tools/le/certs/${Dom}/fullchain.pem" ]; then echo "Running LE cert check directly for ${Dom}" useAliases="" siteAliases=`cat ${Vht} \ | grep "server_name" \ | sed "s/server_name//g; s/;//g" \ | sort | uniq \ | tr -d "\n" \ | sed "s/ / /g; s/ / /g; s/ / /g" \ | sort | uniq` for alias in `echo "${siteAliases}"`; do if [ -e "${User}/static/control/wildcard-enable-${Dom}.info" ]; then Dom=$(echo ${Dom} | sed 's/^www.//g' 2>&1) if [ -z "${useAliases}" ] \ && [ ! -z "${alias}" ] \ && [[ ! "${alias}" =~ ".nodns." ]] \ && [[ ! "${alias}" =~ "${Dom}" ]]; then useAliases="--domain ${alias}" echo "--domain ${alias}" else if [ ! -z "${alias}" ] \ && [[ ! "${alias}" =~ ".nodns." ]] \ && [[ ! "${alias}" =~ "${Dom}" ]]; then useAliases="${useAliases} --domain ${alias}" echo "--domain ${alias}" fi fi else if [[ ! "${alias}" =~ ".nodns." ]]; then echo "--domain ${alias}" if [ -z "${useAliases}" ] && [ ! -z "${alias}" ]; then useAliases="--domain ${alias}" else if [ ! -z "${alias}" ]; then useAliases="${useAliases} --domain ${alias}" fi fi else echo "ignored alias ${alias}" fi fi done _DOM=$(date +%e 2>&1) _DOM=${_DOM//[^0-9]/} _RDM=$((RANDOM%25+6)) if [ "${_DOM}" = "${_RDM}" ] || [ -e "${User}/static/control/force-ssl-certs-rebuild.info" ]; then if [ ! -e "${User}/log/ctrl/site.${Dom}.cert-x1-rebuilt.info" ]; then leParams="--cron --ipv4 --preferred-chain 'ISRG Root X1' --force" mkdir -p ${User}/log/ctrl touch ${User}/log/ctrl/site.${Dom}.cert-x1-rebuilt.info else leParams="--cron --ipv4 --preferred-chain 'ISRG Root X1'" fi else leParams="--cron --ipv4 --preferred-chain 'ISRG Root X1'" fi dhArgs="--domain ${Dom} ${useAliases}" if [ -e "${User}/static/control/wildcard-enable-${Dom}.info" ]; then Dom=$(echo ${Dom} | sed 's/^www.//g' 2>&1) echo "--domain *.${Dom}" if [ ! 
-e "${User}/tools/le/hooks/cloudflare/hook.py" ]; then mkdir -p ${User}/tools/le/hooks cd ${User}/tools/le git clone https://github.com/kappataumu/letsencrypt-cloudflare-hook hooks/cloudflare pip install -r hooks/cloudflare/requirements.txt fi if [ -e "${User}/tools/le/hooks/cloudflare/hook.py" ]; then if [ -e "${User}/tools/le/config" ]; then dhArgs="--alias ${Dom} --domain *.${Dom} --domain ${Dom} ${useAliases}" dhArgs=" ${dhArgs} --challenge dns-01 --hook '${User}/tools/le/hooks/cloudflare/hook.py'" fi fi fi echo "leParams is ${leParams}" echo "dhArgs is ${dhArgs}" su -s /bin/bash - ${_HM_U} -c "${exeLe} ${leParams} ${dhArgs}" if [ -e "${User}/static/control/wildcard-enable-${Dom}.info" ]; then sleep 30 else sleep 3 fi echo ${_MOMENT} >> /var/xdrago/log/le/${Dom} fi fi fi } if_gen_goaccess() { PrTestPower=$(grep "POWER" /root/.${_HM_U}.octopus.cnf 2>&1) PrTestCluster=$(grep "CLUSTER" /root/.${_HM_U}.octopus.cnf 2>&1) if [[ "${PrTestPower}" =~ "POWER" ]] \ || [[ "${PrTestCluster}" =~ "CLUSTER" ]]; then isWblgx=$(which weblogx 2>&1) if [ -x "${isWblgx}" ]; then ${isWblgx} --site="${1}" --env="${_HM_U}" wait if [ ! -e "/data/disk/${_HM_U}/static/goaccess" ]; then mkdir -p /data/disk/${_HM_U}/static/goaccess fi if [ -e "/var/www/adminer/access/${_HM_U}/${1}/index.html" ]; then cp -af /var/www/adminer/access/${_HM_U}/${1} /data/disk/${_HM_U}/static/goaccess/ else rm -rf /var/www/adminer/access/${_HM_U}/${1} fi fi fi } process() { cleanup_ghost_vhosts cleanup_ghost_drushrc for Site in `find ${User}/config/server_master/nginx/vhost.d \ -maxdepth 1 -mindepth 1 -type f | sort`; do _MOMENT=$(date +%y%m%d-%H%M%S 2>&1) echo ${_MOMENT} Start Counting Site $Site Dom=$(echo $Site | cut -d'/' -f9 | awk '{ print $1}' 2>&1) Dan= if [ -e "${User}/config/server_master/nginx/vhost.d/${Dom}" ]; then Plx=$(cat ${User}/config/server_master/nginx/vhost.d/${Dom} \ | grep "root " \ | cut -d: -f2 \ | awk '{ print $2}' \ | sed "s/[\;]//g" 2>&1) if [[ "$Plx" =~ "aegir/distro" ]]; then Dan=hostmaster else Dan="${Dom}" fi fi _STATUS_DISABLED=NO _STATUS_TEST=$(grep "Do not reveal Aegir front-end URL here" \ ${User}/config/server_master/nginx/vhost.d/${Dom} 2>&1) if [[ "${_STATUS_TEST}" =~ "Do not reveal Aegir front-end URL here" ]]; then _STATUS_DISABLED=YES echo "${Dom} site is DISABLED" fi if [ -e "${User}/.drush/${Dan}.alias.drushrc.php" ] \ && [ "${_STATUS_DISABLED}" = "NO" ]; then echo "Dom is ${Dom}" Dir=$(cat ${User}/.drush/${Dan}.alias.drushrc.php \ | grep "site_path'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) _DIR_CTRL_F="${Dir}/modules/boa_site_control.ini" Plr=$(cat ${User}/.drush/${Dan}.alias.drushrc.php \ | grep "root'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) _PLR_CTRL_F="${Plr}/sites/all/modules/boa_platform_control.ini" if [ -e "${Plr}" ]; then if [ "${_NEW_SSL}" = "YES" ] \ || [ "${_OSR}" = "chimaera" ] \ || [ "${_OSR}" = "beowulf" ] \ || [ "${_OSR}" = "bullseye" ] \ || [ "${_OSR}" = "buster" ] \ || [ "${_OSR}" = "stretch" ] \ || [ "${_OSR}" = "jessie" ] \ || [ "${_OSR}" = "trusty" ] \ || [ "${_OSR}" = "precise" ]; then PlrID=$(echo ${Plr} \ | openssl md5 \ | awk '{ print $2}' \ | tr -d "\n" 2>&1) else PlrID=$(echo ${Plr} \ | openssl md5 \ | tr -d "\n" 2>&1) fi fix_platform_control_files fix_o_contrib_symlink if [ -e "${Dir}/drushrc.php" ]; then cd ${Dir} if [ "${Dan}" = "hostmaster" ]; then _STATUS=OK if [ ! 
-f "${User}/log/ctrl/plr.${PlrID}.hm-fix-${_NOW}.info" ]; then su -s /bin/bash - ${_HM_U} -c "drush8 cc drush" &> /dev/null rm -rf ${User}/.tmp/cache run_drush8_hmr_cmd "dis update syslog dblog -y" run_drush8_hmr_cmd "cron" run_drush8_hmr_cmd "cache-clear all" run_drush8_hmr_cmd "cache-clear all" run_drush8_hmr_cmd "utf8mb4-convert-databases -y" touch ${User}/log/ctrl/plr.${PlrID}.hm-fix-${_NOW}.info fi else if [ -e "${Plr}/modules/o_contrib_seven" ] \ || [ -e "${Plr}/modules/o_contrib" ]; then check_site_status_with_drush8 fi fi if [ ! -z "${Dan}" ] \ && [ "${Dan}" != "hostmaster" ]; then if_site_db_conversion searchStringB=".dev." searchStringC=".devel." searchStringD=".temp." searchStringE=".tmp." searchStringF=".temporary." searchStringG=".test." searchStringH=".testing." case ${Dom} in *"$searchStringB"*) ;; *"$searchStringC"*) ;; *"$searchStringD"*) ;; *"$searchStringE"*) ;; *"$searchStringF"*) ;; *"$searchStringG"*) ;; *"$searchStringH"*) ;; *) if [ "${_MODULES_FIX}" = "YES" ]; then _CHECK_IS=OFF #if [ "${_STATUS}" = "OK" ]; then fix_modules #fi fix_robots_txt fi le_ssl_check_update if_gen_goaccess ${Dom} ;; esac fix_site_control_files if [ -e "${Plr}/modules/o_contrib_seven" ] \ || [ -e "${Plr}/modules/o_contrib" ]; then if [ "${_CLEAR_BOOST}" = "YES" ]; then fix_boost_cache fi fix_user_register_protection_with_vSet if [[ "${_X_SE}" =~ "OFF" ]]; then run_drush8_cmd "advagg-force-new-aggregates" run_drush8_cmd "cache-clear all" run_drush8_cmd "cache-clear all" fi fi fi fi ### ### Detect permissions fix overrides, if set per platform. ### _DONT_TOUCH_PERMISSIONS=NO if [ -e "${_PLR_CTRL_F}" ]; then _FIX_PERMISSIONS_PRESENT=$(grep "fix_files_permissions_daily" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_FIX_PERMISSIONS_PRESENT}" =~ "fix_files_permissions_daily" ]]; then _DO_NOTHING=YES else echo ";fix_files_permissions_daily = TRUE" >> ${_PLR_CTRL_F} fi _FIX_PERMISSIONS_TEST=$(grep "^fix_files_permissions_daily = FALSE" \ ${_PLR_CTRL_F} 2>&1) if [[ "${_FIX_PERMISSIONS_TEST}" =~ "fix_files_permissions_daily = FALSE" ]]; then _DONT_TOUCH_PERMISSIONS=YES fi fi if [ -e "${Plr}/profiles" ] \ && [ -e "${Plr}/web.config" ] \ && [ ! -e "${Plr}/core" ] \ && [ ! -f "${Plr}/profiles/SA-CORE-2014-005-D7-fix.info" ]; then _PATCH_TEST=$(grep "foreach (array_values(\$data)" \ ${Plr}/includes/database/database.inc 2>&1) if [[ "${_PATCH_TEST}" =~ "array_values" ]]; then _DONT_TOUCH_PERMISSIONS="${_DONT_TOUCH_PERMISSIONS}" else _DONT_TOUCH_PERMISSIONS=NO fi fi if [ "${_DONT_TOUCH_PERMISSIONS}" = "NO" ] \ && [ "${_PERMISSIONS_FIX}" = "YES" ]; then fix_permissions fi fi _MOMENT=$(date +%y%m%d-%H%M%S 2>&1) echo ${_MOMENT} End Counting Site $Site fi done } delete_this_empty_hostmaster_platform() { run_drush8_hmr_master_cmd "hosting-task @platform_${_T_PFM_NAME} delete --force" echo "Old empty platform_${_T_PFM_NAME} will be deleted" } check_old_empty_hostmaster_platforms() { if [ "${_DEL_OLD_EMPTY_PLATFORMS}" -gt "0" ] \ && [ ! -z "${_DEL_OLD_EMPTY_PLATFORMS}" ]; then _DO_NOTHING=YES else if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]]; then _DEL_OLD_EMPTY_PLATFORMS="7" else _DEL_OLD_EMPTY_PLATFORMS="30" fi fi if [ ! 
-z "${_DEL_OLD_EMPTY_PLATFORMS}" ]; then if [ "${_DEL_OLD_EMPTY_PLATFORMS}" -gt "0" ]; then echo "_DEL_OLD_EMPTY_PLATFORMS is set to \ ${_DEL_OLD_EMPTY_PLATFORMS} days on /var/aegir instance" for Platform in `find /var/aegir/.drush/platform_* -maxdepth 1 -mtime \ +${_DEL_OLD_EMPTY_PLATFORMS} -type f | sort`; do _T_PFM_NAME=$(echo "${Platform}" \ | sed "s/.*platform_//g; s/.alias.drushrc.php//g" \ | awk '{ print $1}' 2>&1) _T_PFM_ROOT=$(cat ${Platform} \ | grep "root'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) _T_PFM_SITE=$(grep "${_T_PFM_ROOT}/sites/" \ /var/aegir/.drush/*.drushrc.php \ | grep site_path 2>&1) if [ ! -e "${_T_PFM_ROOT}/sites/all" ] \ || [ ! -e "${_T_PFM_ROOT}/index.php" ]; then mkdir -p /var/aegir/undo ### mv -f /var/aegir/.drush/platform_${_T_PFM_NAME}.alias.drushrc.php /var/aegir/undo/ &> /dev/null echo "GHOST platform ${_T_PFM_ROOT} detected and moved to /var/aegir/undo/" fi if [[ "${_T_PFM_SITE}" =~ ".restore" ]]; then echo "WARNING: ghost site leftover found: ${_T_PFM_SITE}" fi if [ -z "${_T_PFM_SITE}" ] \ && [ -e "${_T_PFM_ROOT}/sites/all" ]; then delete_this_empty_hostmaster_platform fi done fi fi } delete_this_platform() { run_drush8_hmr_cmd "hosting-task @platform_${_T_PFM_NAME} delete --force" echo "Old empty platform_${_T_PFM_NAME} will be deleted" } check_old_empty_platforms() { if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]] \ || [ -e "/root/.host8.cnf" ]; then if [[ "${_CHECK_HOST}" =~ "demo.aegir.cc" ]] \ || [ -e "${User}/static/control/platforms.info" ] \ || [ -e "/root/.debug.cnf" ]; then _DO_NOTHING=YES else if [ "${_DEL_OLD_EMPTY_PLATFORMS}" -gt "0" ] \ && [ ! -z "${_DEL_OLD_EMPTY_PLATFORMS}" ]; then _DO_NOTHING=YES else if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]]; then _DEL_OLD_EMPTY_PLATFORMS="60" else _DEL_OLD_EMPTY_PLATFORMS="60" fi fi fi fi if [ ! -z "${_DEL_OLD_EMPTY_PLATFORMS}" ]; then if [ "${_DEL_OLD_EMPTY_PLATFORMS}" -gt "0" ]; then echo "_DEL_OLD_EMPTY_PLATFORMS is set to \ ${_DEL_OLD_EMPTY_PLATFORMS} days on ${_HM_U} instance" for Platform in `find ${User}/.drush/platform_* -maxdepth 1 -mtime \ +${_DEL_OLD_EMPTY_PLATFORMS} -type f | sort`; do _T_PFM_NAME=$(echo "${Platform}" \ | sed "s/.*platform_//g; s/.alias.drushrc.php//g" \ | awk '{ print $1}' 2>&1) _T_PFM_ROOT=$(cat ${Platform} \ | grep "root'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) _T_PFM_SITE=$(grep "${_T_PFM_ROOT}/sites/" \ ${User}/.drush/*.drushrc.php \ | grep site_path 2>&1) if [ ! -e "${_T_PFM_ROOT}/sites/all" ] \ || [ ! -e "${_T_PFM_ROOT}/index.php" ]; then if [ ! -e "${_T_PFM_ROOT}/vendor" ]; then mkdir -p ${User}/undo ### mv -f ${User}/.drush/platform_${_T_PFM_NAME}.alias.drushrc.php ${User}/undo/ &> /dev/null echo "GHOST platform ${_T_PFM_ROOT} detected and moved to ${User}/undo/" fi fi if [[ "${_T_PFM_SITE}" =~ ".restore" ]]; then echo "WARNING: ghost site leftover found: ${_T_PFM_SITE}" fi if [ -z "${_T_PFM_SITE}" ] \ && [ -e "${_T_PFM_ROOT}/sites/all" ]; then delete_this_platform fi done fi fi } purge_cruft_machine() { if [ ! -z "${_DEL_OLD_BACKUPS}" ] && [ "${_DEL_OLD_BACKUPS}" -gt "0" ]; then _PURGE_BACKUPS="${_DEL_OLD_BACKUPS}" else _PURGE_BACKUPS="30" fi if [ ! 
-z "${_DEL_OLD_TMP}" ] && [ "${_DEL_OLD_TMP}" -gt "0" ]; then _PURGE_TMP="${_DEL_OLD_TMP}" else _PURGE_TMP="0" fi _LOW_NR="2" if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]] \ || [ -e "/root/.host8.cnf" ]; then _PURGE_BACKUPS="3" _PURGE_TMP="0" fi _PURGE_CTRL="14" find ${User}/log/ctrl/*cert-x1-rebuilt.info \ -mtime +${_PURGE_CTRL} -type f -exec rm -rf {} \; &> /dev/null find ${User}/log/ctrl/plr* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/log/ctrl/*rom-fix.info \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/backups/* -mtime +${_PURGE_BACKUPS} -exec \ rm -rf {} \; &> /dev/null find ${User}/backup-exports/* -mtime +${_PURGE_TMP} -type f -exec \ rm -rf {} \; &> /dev/null find ${User}/distro/*/*/sites/*/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/distro/*/*/sites/*/private/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/*/*/sites/*/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/*/sites/*/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/sites/*/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/sites/*/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/sites/*/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/*/*/sites/*/private/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/*/sites/*/private/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/sites/*/private/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/sites/*/private/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/sites/*/private/files/backup_migrate/*/* \ -mtime +${_PURGE_BACKUPS} -type f -exec rm -rf {} \; &> /dev/null find ${User}/distro/*/*/sites/*/files/tmp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/distro/*/*/sites/*/private/temp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/*/*/sites/*/files/tmp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/*/*/sites/*/private/temp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/*/sites/*/files/tmp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/*/sites/*/private/temp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/sites/*/files/tmp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/*/sites/*/private/temp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/sites/*/files/tmp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/*/sites/*/private/temp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null 
find ${User}/static/*/sites/*/files/tmp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find ${User}/static/*/sites/*/private/temp/* \ -mtime +${_PURGE_TMP} -type f -exec rm -rf {} \; &> /dev/null find /home/${_HM_U}.ftp/.tmp/* \ -mtime +${_PURGE_TMP} -exec rm -rf {} \; &> /dev/null find /home/${_HM_U}.ftp/tmp/* \ -mtime +${_PURGE_TMP} -exec rm -rf {} \; &> /dev/null find ${User}/.tmp/* \ -mtime +${_PURGE_TMP} -exec rm -rf {} \; &> /dev/null find ${User}/tmp/* \ -mtime +${_PURGE_TMP} -exec rm -rf {} \; &> /dev/null chown -R ${_HM_U}:users ${User}/tools/le mkdir -p ${User}/static/trash chown ${_HM_U}.ftp:users ${User}/static/trash &> /dev/null find ${User}/static/trash/* \ -mtime +${_PURGE_TMP} -exec rm -rf {} \; &> /dev/null for i in `dir -d /home/${_HM_U}.ftp/platforms/*`; do if [ -e "${i}" ]; then RevisionTest=$(ls ${i} \ | wc -l \ | tr -d "\n" 2>&1) if [ "${RevisionTest}" -lt "${_LOW_NR}" ] \ && [ ! -z "${RevisionTest}" ]; then if [ -d "/home/${_HM_U}.ftp/platforms" ]; then chattr -i /home/${_HM_U}.ftp/platforms chattr -i /home/${_HM_U}.ftp/platforms/* &> /dev/null fi rm -rf ${i} fi fi done for i in `dir -d ${User}/distro/*`; do if [ -d "${i}" ]; then if [ ! -d "${i}/keys" ]; then mkdir -p ${i}/keys fi RevisionTest=$(ls ${i} | wc -l 2>&1) if [ "${RevisionTest}" -lt "2" ] && [ ! -z "${RevisionTest}" ]; then _NOW=$(date +%y%m%d-%H%M%S 2>&1) mkdir -p ${User}/undo/dist/${_NOW} ### mv -f ${i} ${User}/undo/dist/${_NOW}/ &> /dev/null echo "GHOST revision ${i} detected and moved to ${User}/undo/dist/${_NOW}/" fi fi done _REVISIONS="001 002 003 004 005 006 007 008 009 010 011 012 013 014 015 \ 016 017 018 019 020 021 022 023 024 025 026 027 028 029 030 031 032 033 \ 034 035 036 037 038 039 040 041 042 043 044 045 046 047 048 049 050 051 \ 052 053 054 055 056 057 058 059 060 061 062 063 064 065 066 067 068 069 \ 070 071 072 073 074 075 076 077 078 079 080 081 082 083 084 085 086 087 \ 088 089 090 091 092 093 094 095 096 097 098 099 100 101 102 103 104 105" for i in ${_REVISIONS}; do if [ -e "${User}/distro/${i}" ] \ && [ ! -e "/home/${_HM_U}.ftp/platforms/${i}" ]; then if [ -d "/home/${_HM_U}.ftp/platforms" ]; then chattr -i /home/${_HM_U}.ftp/platforms chattr -i /home/${_HM_U}.ftp/platforms/* &> /dev/null fi mkdir -p /home/${_HM_U}.ftp/platforms/${i} mkdir -p ${User}/distro/${i}/keys chown ${_HM_U}.ftp:${_WEBG} ${User}/distro/${i}/keys &> /dev/null chmod 02775 ${User}/distro/${i}/keys &> /dev/null ln -sf ${User}/distro/${i}/keys /home/${_HM_U}.ftp/platforms/${i}/keys for Codebase in `find ${User}/distro/${i}/* \ -maxdepth 1 \ -mindepth 1 \ -type d \ | grep "/sites$" 2>&1`; do CodebaseName=$(echo ${Codebase} \ | cut -d'/' -f7 \ | awk '{ print $1}' 2> /dev/null) ln -sf ${Codebase} /home/${_HM_U}.ftp/platforms/${i}/${CodebaseName} echo "Fixed symlink to ${Codebase} for ${_HM_U}.ftp" done fi done } count_cpu() { _CPU_INFO=$(grep -c processor /proc/cpuinfo 2>&1) _CPU_INFO=${_CPU_INFO//[^0-9]/} _NPROC_TEST=$(which nproc 2>&1) if [ -z "${_NPROC_TEST}" ]; then _CPU_NR="${_CPU_INFO}" else _CPU_NR=$(nproc 2>&1) fi _CPU_NR=${_CPU_NR//[^0-9]/} if [ ! -z "${_CPU_NR}" ] \ && [ ! 
-z "${_CPU_INFO}" ] \ && [ "${_CPU_NR}" -gt "${_CPU_INFO}" ] \ && [ "${_CPU_INFO}" -gt "0" ]; then _CPU_NR="${_CPU_INFO}" fi if [ -z "${_CPU_NR}" ] || [ "${_CPU_NR}" -lt "1" ]; then _CPU_NR=1 fi echo ${_CPU_NR} > /data/all/cpuinfo chmod 644 /data/all/cpuinfo &> /dev/null } load_control() { if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _CPU_MAX_RATIO=${_CPU_MAX_RATIO//[^0-9]/} fi if [ -z "${_CPU_MAX_RATIO}" ]; then _CPU_MAX_RATIO=6 fi if [ -e "/root/.force.sites.verify.cnf" ]; then _CPU_MAX_RATIO=88 fi _O_LOAD=$(awk '{print $1*100}' /proc/loadavg 2>&1) _O_LOAD=$(( _O_LOAD / _CPU_NR )) _O_LOAD_MAX=$(( 100 * _CPU_MAX_RATIO )) } shared_codebases_cleanup() { if [ -L "/data/all" ]; then _CLD="/data/disk/codebases-cleanup" else _CLD="/var/backups/codebases-cleanup" fi _REVISIONS="001 002 003 004 005 006 007 008 009 010 011 012 013 014 015 \ 016 017 018 019 020 021 022 023 024 025 026 027 028 029 030 031 032 033 \ 034 035 036 037 038 039 040 041 042 043 044 045 046 047 048 049 050 051 \ 052 053 054 055 056 057 058 059 060 061 062 063 064 065 066 067 068 069 \ 070 071 072 073 074 075 076 077 078 079 080 081 082 083 084 085 086 087 \ 088 089 090 091 092 093 094 095 096 097 098 099 100 101 102 103 104 105" for i in ${_REVISIONS}; do if [ -d "/data/all/${i}/o_contrib" ]; then for Codebase in `find /data/all/${i}/* -maxdepth 1 -mindepth 1 -type d \ | grep "/profiles$" 2>&1`; do CodebaseDir=$(echo ${Codebase} \ | sed 's/\/profiles//g' \ | awk '{print $1}' 2> /dev/null) CodebaseTest=$(find /data/disk/*/distro/*/*/ -maxdepth 1 -mindepth 1 \ -type l -lname ${Codebase} | sort 2>&1) if [[ "${CodebaseTest}" =~ "No such file or directory" ]] \ || [ -z "${CodebaseTest}" ]; then mkdir -p ${_CLD}/${i} echo "Moving no longer used ${CodebaseDir} to ${_CLD}/${i}/" ### mv -f ${CodebaseDir} ${_CLD}/${i}/ sleep 1 fi done fi done } ghost_codebases_cleanup() { _CLD="/var/backups/ghost-codebases-cleanup" _REVISIONS="001 002 003 004 005 006 007 008 009 010 011 012 013 014 015 \ 016 017 018 019 020 021 022 023 024 025 026 027 028 029 030 031 032 033 \ 034 035 036 037 038 039 040 041 042 043 044 045 046 047 048 049 050 051 \ 052 053 054 055 056 057 058 059 060 061 062 063 064 065 066 067 068 069 \ 070 071 072 073 074 075 076 077 078 079 080 081 082 083 084 085 086 087 \ 088 089 090 091 092 093 094 095 096 097 098 099 100 101 102 103 104 105" for i in ${_REVISIONS}; do CodebaseTest=$(find /data/disk/*/distro/${i}/*/ -maxdepth 1 -mindepth 1 \ -type d -name vendor | sort 2>&1) for vendor in ${CodebaseTest}; do ParentDir=`echo ${vendor} | sed "s/\/vendor//g"` if [ -d "${ParentDir}/docroot/sites/all" ] \ || [ -d "${ParentDir}/html/sites/all" ] \ || [ -d "${ParentDir}/web/sites/all" ]; then _CLEAN_THIS=SKIP else _CLEAN_THIS="${ParentDir}" _TSTAMP=`date +%y%m%d-%H%M%S` mkdir -p ${_CLD}/${i}/${_TSTAMP} echo "Moving ghost ${_CLEAN_THIS} to ${_CLD}/${i}/${_TSTAMP}/" ### mv -f ${_CLEAN_THIS} ${_CLD}/${i}/${_TSTAMP}/ sleep 1 fi done done } prepare_weblogx() { _ARCHLOGS=/var/www/adminer/access/archive mkdir -p ${_ARCHLOGS}/unzip echo "[+] SYNCING LOGS TO: ${_ARCHLOGS}" rsync -rlvz --size-only --progress /var/log/nginx/access* ${_ARCHLOGS}/ echo "[+] COPYING LOGS TO: ${_ARCHLOGS}/unzip/" cp -af ${_ARCHLOGS}/access* ${_ARCHLOGS}/unzip/ echo "[+] DECOMPRESSING GZ FILES" find ${_ARCHLOGS}/unzip -name "*.gz" -exec gunzip -f {} \; echo "[+] RENAMING RAW FILES" for _log in `find ${_ARCHLOGS}/unzip \ -maxdepth 1 -mindepth 1 -type f | sort`; do mv -f ${_log} ${_log}.txt; done rm -f ${_ARCHLOGS}/unzip/*.txt.txt* touch 
${_ARCHLOGS}/unzip/.global.pid } cleanup_weblogx() { _ARCHLOGS=/var/www/adminer/access/archive if [ -e "${_ARCHLOGS}/unzip" ]; then rm -f ${_ARCHLOGS}/unzip/access* rm -f ${_ARCHLOGS}/unzip/.global.pid fi } action() { prepare_weblogx for User in `find /data/disk/ -maxdepth 1 -mindepth 1 | sort`; do count_cpu load_control if [ -e "${User}/config/server_master/nginx/vhost.d" ] \ && [ ! -e "${User}/log/proxied.pid" ] \ && [ ! -e "${User}/log/CANCELLED" ]; then if [ "${_O_LOAD}" -lt "${_O_LOAD_MAX}" ]; then _HM_U=$(echo ${User} | cut -d'/' -f4 | awk '{ print $1}' 2>&1) _THIS_HM_SITE=$(cat ${User}/.drush/hostmaster.alias.drushrc.php \ | grep "site_path'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) echo "load is ${_O_LOAD} while maxload is ${_O_LOAD_MAX}" echo "User ${User}" mkdir -p ${User}/log/ctrl su -s /bin/bash ${_HM_U} -c "drush8 cc drush" &> /dev/null rm -rf ${User}/.tmp/cache su -s /bin/bash - ${_HM_U}.ftp -c "drush8 cc drush" &> /dev/null rm -rf /home/${_HM_U}.ftp/.tmp/cache _SQL_CONVERT=NO _DEL_OLD_EMPTY_PLATFORMS="0" if [ -e "/root/.${_HM_U}.octopus.cnf" ]; then if [ -x "/usr/bin/drush10-bin" ]; then su -s /bin/bash - ${_HM_U} -c "rm -f ~/.drush/sites/*.yml" su -s /bin/bash - ${_HM_U} -c "rm -f ~/.drush/sites/.checksums/*.md5" su -s /bin/bash - ${_HM_U} -c "drush10-bin core:init --yes" &> /dev/null su -s /bin/bash - ${_HM_U} -c "drush10-bin site:alias-convert ~/.drush/sites --yes" &> /dev/null fi source /root/.${_HM_U}.octopus.cnf _DEL_OLD_EMPTY_PLATFORMS=${_DEL_OLD_EMPTY_PLATFORMS//[^0-9]/} _CLIENT_EMAIL=${_CLIENT_EMAIL//\\\@/\@} _MY_EMAIL=${_MY_EMAIL//\\\@/\@} if [ -e "${User}/log/email.txt" ]; then _F_CLIENT_EMAIL=$(cat ${User}/log/email.txt 2>&1) _F_CLIENT_EMAIL=$(echo -n ${_F_CLIENT_EMAIL} | tr -d "\n" 2>&1) _F_CLIENT_EMAIL=${_F_CLIENT_EMAIL//\\\@/\@} fi if [ ! -z "${_F_CLIENT_EMAIL}" ]; then _CLIENT_EMAIL_TEST=$(grep "^_CLIENT_EMAIL=\"${_F_CLIENT_EMAIL}\"" \ /root/.${_HM_U}.octopus.cnf 2>&1) if [[ "${_CLIENT_EMAIL_TEST}" =~ "${_F_CLIENT_EMAIL}" ]]; then _DO_NOTHING=YES else sed -i "s/^_CLIENT_EMAIL=.*/_CLIENT_EMAIL=\"${_F_CLIENT_EMAIL}\"/g" \ /root/.${_HM_U}.octopus.cnf wait _CLIENT_EMAIL=${_F_CLIENT_EMAIL} fi fi fi disable_chattr ${_HM_U}.ftp rm -rf /home/${_HM_U}.ftp/drush-backups if [ -e "${_THIS_HM_SITE}" ]; then cd ${_THIS_HM_SITE} su -s /bin/bash ${_HM_U} -c "drush8 cc drush" &> /dev/null rm -rf ${User}/.tmp/cache run_drush8_hmr_cmd "${vSet} hosting_cron_default_interval 3600" run_drush8_hmr_cmd "${vSet} hosting_queue_cron_frequency 1" run_drush8_hmr_cmd "${vSet} hosting_civicrm_cron_queue_frequency 60" run_drush8_hmr_cmd "${vSet} hosting_queue_task_gc_frequency 300" if [ -e "${User}/log/hosting_cron_use_backend.txt" ]; then run_drush8_hmr_cmd "${vSet} hosting_cron_use_backend 1" else run_drush8_hmr_cmd "${vSet} hosting_cron_use_backend 0" fi run_drush8_hmr_cmd "${vSet} hosting_ignore_default_profiles 0" run_drush8_hmr_cmd "${vSet} hosting_queue_tasks_frequency 1" run_drush8_hmr_cmd "${vSet} hosting_queue_tasks_items 1" run_drush8_hmr_cmd "${vSet} hosting_delete_force 0" run_drush8_hmr_cmd "${vSet} aegir_backup_export_path ${User}/backup-exports" run_drush8_hmr_cmd "fr hosting_custom_settings -y" run_drush8_hmr_cmd "cache-clear all" run_drush8_hmr_cmd "cache-clear all" if [ -e "${User}/log/imported.pid" ] \ || [ -e "${User}/log/exported.pid" ]; then if [ ! 
-e "${User}/log/hosting_context.pid" ]; then _HM_NID=$(run_drush8_hmr_cmd "sqlq \ \"SELECT site.nid FROM hosting_site site JOIN \ hosting_package_instance pkgi ON pkgi.rid=site.nid JOIN \ hosting_package pkg ON pkg.nid=pkgi.package_id \ WHERE pkg.short_name='hostmaster'\" 2>&1") _HM_NID=${_HM_NID//[^0-9]/} if [ ! -z "${_HM_NID}" ]; then run_drush8_hmr_cmd "sqlq \"UPDATE hosting_context \ SET name='hostmaster' WHERE nid='${_HM_NID}'\"" echo ${_HM_NID} > ${User}/log/hosting_context.pid fi fi fi fi process run_drush8_hmr_cmd "sqlq \"DELETE FROM hosting_task \ WHERE task_type='delete' AND task_status='-1'\"" run_drush8_hmr_cmd "sqlq \"DELETE FROM hosting_task \ WHERE task_type='delete' AND task_status='0' AND executed='0'\"" run_drush8_hmr_cmd "${vSet} hosting_delete_force 0" run_drush8_hmr_cmd "sqlq \"UPDATE hosting_platform \ SET status=1 WHERE publish_path LIKE '%/aegir/distro/%'\"" check_old_empty_platforms run_drush8_hmr_cmd "${vSet} hosting_delete_force 0" run_drush8_hmr_cmd "sqlq \"UPDATE hosting_platform \ SET status=-2 WHERE publish_path LIKE '%/aegir/distro/%'\"" _THIS_HM_PLR=$(cat ${User}/.drush/hostmaster.alias.drushrc.php \ | grep "root'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) run_drush8_hmr_cmd "sqlq \"UPDATE hosting_platform \ SET status=1 WHERE publish_path LIKE '${_THIS_HM_PLR}'\"" purge_cruft_machine if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]]; then rm -rf ${User}/clients/admin &> /dev/null rm -rf ${User}/clients/omega8ccgmailcom &> /dev/null rm -rf ${User}/clients/nocomega8cc &> /dev/null fi rm -rf ${User}/clients/*/backups &> /dev/null symlinks -dr ${User}/clients &> /dev/null if [ -d "/home/${_HM_U}.ftp" ]; then symlinks -dr /home/${_HM_U}.ftp &> /dev/null rm -f /home/${_HM_U}.ftp/{.profile,.bash_logout,.bash_profile,.bashrc} fi le_hm_ssl_check_update ${_HM_U} ### if_gen_goaccess "ALL" echo "Done for ${User}" enable_chattr ${_HM_U}.ftp else echo "load is ${_O_LOAD} while maxload is ${_O_LOAD_MAX}" echo "...we have to wait..." fi echo echo fi done shared_codebases_cleanup ghost_codebases_cleanup check_old_empty_hostmaster_platforms cleanup_weblogx } ###--------------------### echo "INFO: Daily maintenance start" until [ ! -e "/var/run/boa_wait.pid" ]; do echo "Waiting for BOA queue availability..." 
sleep 5 done # _NOW=$(date +%y%m%d-%H%M%S 2>&1) _NOW=${_NOW//[^0-9-]/} _DOW=$(date +%u 2>&1) _DOW=${_DOW//[^1-7]/} _CHECK_HOST=$(uname -n 2>&1) _VM_TEST=$(uname -a 2>&1) if [[ "${_VM_TEST}" =~ "-beng" ]]; then _VMFAMILY="VS" else _VMFAMILY="XEN" fi if [ -e "/root/.force.sites.verify.cnf" ]; then _FORCE_SITES_VERIFY=YES else _FORCE_SITES_VERIFY=NO fi # if [ "${_VMFAMILY}" = "VS" ]; then _MODULES_FORCE="automated_cron \ backup_migrate \ coder \ cookie_cache_bypass \ hacked \ poormanscron \ security_review \ site_audit \ syslog \ watchdog_live \ xhprof" fi # if [ "${_DOW}" = "2" ]; then _MODULES_ON_SEVEN="redis" _MODULES_ON_SIX="redis" _MODULES_OFF_SEVEN="coder \ devel \ filefield_nginx_progress \ hacked \ l10n_update \ linkchecker \ performance \ security_review \ site_audit \ watchdog_live \ xhprof" _MODULES_OFF_SIX="coder \ cookie_cache_bypass \ devel \ hacked \ l10n_update \ linkchecker \ performance \ poormanscron \ security_review \ supercron \ watchdog_live \ xhprof" else _MODULES_ON_SEVEN="robotstxt redis" _MODULES_ON_SIX="path_alias_cache robotstxt redis" _MODULES_OFF_SEVEN="dblog syslog backup_migrate" _MODULES_OFF_SIX="dblog syslog backup_migrate" fi # _CTRL_TPL_FORCE_UPDATE=YES # # Check for last all nr if [ -e "/data/all" ]; then cd /data/all listl=([0-9]*) _LAST_ALL=${listl[@]: -1} _O_CONTRIB="/data/all/${_LAST_ALL}/o_contrib" _O_CONTRIB_SEVEN="/data/all/${_LAST_ALL}/o_contrib_seven" _O_CONTRIB_EIGHT="/data/all/${_LAST_ALL}/o_contrib_eight" _O_CONTRIB_NINE="/data/all/${_LAST_ALL}/o_contrib_nine" _O_CONTRIB_TEN="/data/all/${_LAST_ALL}/o_contrib_ten" elif [ -e "/data/disk/all" ]; then cd /data/disk/all listl=([0-9]*) _LAST_ALL=${listl[@]: -1} _O_CONTRIB="/data/disk/all/${_LAST_ALL}/o_contrib" _O_CONTRIB_SEVEN="/data/disk/all/${_LAST_ALL}/o_contrib_seven" _O_CONTRIB_EIGHT="/data/disk/all/${_LAST_ALL}/o_contrib_eight" _O_CONTRIB_NINE="/data/disk/all/${_LAST_ALL}/o_contrib_nine" _O_CONTRIB_TEN="/data/disk/all/${_LAST_ALL}/o_contrib_ten" else _O_CONTRIB=NO _O_CONTRIB_SEVEN=NO _O_CONTRIB_EIGHT=NO _O_CONTRIB_NINE=NO _O_CONTRIB_TEN=NO fi # mkdir -p /var/xdrago/log/daily mkdir -p /var/xdrago/log/le # if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf fi # find_fast_mirror # ###--------------------### if [ -z "${_SKYNET_MODE}" ] || [ "${_SKYNET_MODE}" = "ON" ]; then echo "INFO: Checking BARRACUDA version" rm -f /opt/tmp/barracuda-release.txt* curl -L -k -s \ --max-redirs 10 \ --retry 3 \ --retry-delay 15 -A iCab \ "${urlHmr}/conf/barracuda-release.txt" \ -o /opt/tmp/barracuda-release.txt else rm -f /opt/tmp/barracuda-release.txt* fi if [ -e "/opt/tmp/barracuda-release.txt" ]; then _X_VERSION=$(cat /opt/tmp/barracuda-release.txt 2>&1) _VERSIONS_TEST=$(cat /var/log/barracuda_log.txt 2>&1) if [ ! -z "${_X_VERSION}" ]; then _MY_EMAIL=${_MY_EMAIL//\\\@/\@} if [[ "${_MY_EMAIL}" =~ "omega8.cc" ]]; then _MY_EMAIL="notify@omega8.cc" fi if [[ "${_VERSIONS_TEST}" =~ "${_X_VERSION}" ]]; then _VERSIONS_TEST_RESULT=OK echo "INFO: Version test result: OK" else sT="Newer BOA available" cat <<EOF | mail -e -s "New ${_X_VERSION} ${sT}" ${_MY_EMAIL} There is new ${_X_VERSION} version available. Please review the changelog and upgrade as soon as possible to receive all security updates and new features. Changelog: https://github.com/omega8cc/boa/commits/master -- This email has been sent by your Barracuda server upgrade monitor. 
EOF echo "INFO: Update notice sent: OK" fi fi fi # if [ -e "/var/run/daily-fix.pid" ]; then touch /var/xdrago/log/wait-for-daily exit 1 elif [ -e "/root/.wbhd.clstr.cnf" ]; then exit 1 else touch /var/run/daily-fix.pid if [ "${_VMFAMILY}" = "VS" ]; then n=$((RANDOM%180+80)) echo "waiting $n sec" sleep $n fi if [ -z "${_PERMISSIONS_FIX}" ]; then _PERMISSIONS_FIX=YES fi if [ -z "${_MODULES_FIX}" ]; then _MODULES_FIX=YES fi if [ -z "${_CLEAR_BOOST}" ]; then _CLEAR_BOOST=YES fi if [ -e "/data/all" ]; then if [ ! -e "/data/all/permissions-fix-post-up-${_X_SE}.info" ]; then rm -f /data/all/permissions-fix* find /data/disk/*/distro/*/*/sites/all/{libraries,modules,themes} \ -type d -exec chmod 02775 {} \; &> /dev/null find /data/disk/*/distro/*/*/sites/all/{libraries,modules,themes} \ -type f -exec chmod 0664 {} \; &> /dev/null echo fixed > /data/all/permissions-fix-post-up-${_X_SE}.info fi elif [ -e "/data/disk/all" ]; then if [ ! -e "/data/disk/all/permissions-fix-post-up-${_X_SE}.info" ]; then rm -f /data/disk/all/permissions-fix* find /data/disk/*/distro/*/*/sites/all/{libraries,modules,themes} \ -type d -exec chmod 02775 {} \; &> /dev/null find /data/disk/*/distro/*/*/sites/all/{libraries,modules,themes} \ -type f -exec chmod 0664 {} \; &> /dev/null echo fixed > /data/disk/all/permissions-fix-post-up-${_X_SE}.info fi fi su -s /bin/bash - aegir -c "drush8 cc drush" &> /dev/null rm -rf /var/aegir/.tmp/cache su -s /bin/bash - aegir -c "drush8 @hostmaster dis update syslog dblog -y" &> /dev/null su -s /bin/bash - aegir -c "drush8 @hostmaster cron" &> /dev/null su -s /bin/bash - aegir -c "drush8 @hostmaster cache-clear all" &> /dev/null su -s /bin/bash - aegir -c "drush8 @hostmaster cache-clear all" &> /dev/null su -s /bin/bash - aegir -c "drush8 @hostmaster utf8mb4-convert-databases -y" &> /dev/null action >/var/xdrago/log/daily/daily-${_NOW}.log 2>&1 dhpWildPath="/etc/ssl/private/nginx-wild-ssl.dhp" if [ -e "/etc/ssl/private/4096.dhp" ]; then dhpPath="/etc/ssl/private/4096.dhp" _DIFF_T=$(diff -w -B ${dhpPath} ${dhpWildPath} 2>&1) if [ ! -z "${_DIFF_T}" ]; then cp -af ${dhpPath} ${dhpWildPath} fi fi if [ "${_NGINX_FORWARD_SECRECY}" = "YES" ]; then if [ ! -e "/etc/ssl/private/4096.dhp" ]; then echo "Generating 4096.dhp -- it may take a very long time..." openssl dhparam -out /etc/ssl/private/4096.dhp 4096 > /dev/null 2>&1 & fi for f in `find /etc/ssl/private/*.crt -type f`; do sslName=$(echo ${f} | cut -d'/' -f5 | awk '{ print $1}' | sed "s/.crt//g") sslFile="/etc/ssl/private/${sslName}.dhp" sslFileZ=${sslFile//\//\\\/} if [ -e "${f}" ] && [ ! -z "${sslName}" ]; then if [ ! -e "${sslFile}" ]; then openssl dhparam -out ${sslFile} 2048 &> /dev/null else _PFS_TEST=$(grep "DH PARAMETERS" ${sslFile} 2>&1) if [[ ! 
"${_PFS_TEST}" =~ "DH PARAMETERS" ]]; then openssl dhparam -out ${sslFile} 2048 &> /dev/null fi sslRootd="/var/aegir/config/server_master/nginx/pre.d" sslFileX="${sslRootd}/z_${sslName}_ssl_proxy.conf" sslFileY="${sslRootd}/${sslName}_ssl_proxy.conf" if [ -e "${sslFileX}" ]; then _DHP_TEST=$(grep "sslFile" ${sslFileX} 2>&1) if [[ "${_DHP_TEST}" =~ "sslFile" ]]; then sed -i "s/.*sslFile.*//g" ${sslFileX} &> /dev/null wait sed -i "s/ *$//g; /^$/d" ${sslFileX} &> /dev/null wait fi fi if [ -e "${sslFileY}" ]; then _DHP_TEST=$(grep "sslFile" ${sslFileY} 2>&1) if [[ "${_DHP_TEST}" =~ "sslFile" ]]; then sed -i "s/.*sslFile.*//g" ${sslFileY} &> /dev/null wait sed -i "s/ *$//g; /^$/d" ${sslFileY} &> /dev/null wait fi fi if [ -e "${sslFileX}" ]; then _DHP_TEST=$(grep "ssl_dhparam" ${sslFileX} 2>&1) if [[ ! "${_DHP_TEST}" =~ "ssl_dhparam" ]]; then sed -i "s/ssl_session_timeout .*/ssl_session_timeout 5m;\n ssl_dhparam ${sslFileZ};/g" ${sslFileX} &> /dev/null wait sed -i "s/ *$//g; /^$/d" ${sslFileX} &> /dev/null wait fi fi if [ -e "${sslFileY}" ]; then _DHP_TEST=$(grep "ssl_dhparam" ${sslFileY} 2>&1) if [[ ! "${_DHP_TEST}" =~ "ssl_dhparam" ]]; then sed -i "s/ssl_session_timeout .*/ssl_session_timeout 5m;\n ssl_dhparam ${sslFileZ};/g" ${sslFileY} &> /dev/null wait sed -i "s/ *$//g; /^$/d" ${sslFileY} &> /dev/null wait fi fi fi fi done sed -i "s/.*ssl_stapling .*//g" /var/aegir/config/server_*/nginx/pre.d/*ssl_proxy.conf &> /dev/null wait sed -i "s/.*ssl_stapling_verify .*//g" /var/aegir/config/server_*/nginx/pre.d/*ssl_proxy.conf &> /dev/null wait sed -i "s/.*resolver .*//g" /var/aegir/config/server_*/nginx/pre.d/*ssl_proxy.conf &> /dev/null wait sed -i "s/.*resolver_timeout .*//g" /var/aegir/config/server_*/nginx/pre.d/*ssl_proxy.conf &> /dev/null wait sed -i "s/ssl_prefer_server_ciphers .*/ssl_prefer_server_ciphers on;\n ssl_stapling on;\n ssl_stapling_verify on;\n resolver 1.1.1.1 1.0.0.1 valid=300s;\n resolver_timeout 5s;/g" /var/aegir/config/server_*/nginx/pre.d/*ssl_proxy.conf &> /dev/null wait sed -i "s/ *$//g; /^$/d" /var/aegir/config/server_*/nginx/pre.d/*ssl_proxy.conf &> /dev/null wait sed -i "s/TLSv1.1 TLSv1.2 TLSv1.3;/TLSv1.2 TLSv1.3;/g" /data/disk/*/config/server_*/nginx/vhost.d/* sed -i "s/TLSv1.1 TLSv1.2 TLSv1.3;/TLSv1.2 TLSv1.3;/g" /var/aegir/config/server_*/nginx.conf sed -i "s/TLSv1.1 TLSv1.2 TLSv1.3;/TLSv1.2 TLSv1.3;/g" /var/aegir/config/server_*/nginx/vhost.d/* sed -i "s/TLSv1.1 TLSv1.2 TLSv1.3;/TLSv1.2 TLSv1.3;/g" /var/aegir/config/server_*/nginx/pre.d/*.conf service nginx reload fi fi if [ "${_PERMISSIONS_FIX}" = "YES" ] \ && [ ! -z "${_X_VERSION}" ] \ && [ -e "/opt/tmp/barracuda-release.txt" ] \ && [ ! -e "/data/all/permissions-fix-${_X_VERSION}-fixed-dz.info" ]; then echo "INFO: Fixing permissions in the /data/all tree..." 
find /data/conf -type d -exec chmod 0755 {} \; &> /dev/null find /data/conf -type f -exec chmod 0644 {} \; &> /dev/null chown -R root:root /data/conf &> /dev/null if [ -e "/data/all" ]; then find /data/all -type d -exec chmod 0755 {} \; &> /dev/null find /data/all -type f -exec chmod 0644 {} \; &> /dev/null chmod 02775 /data/all/*/*/sites/all/{modules,libraries,themes} &> /dev/null chmod 02775 /data/all/000/core/*/sites/all/{modules,libraries,themes} &> /dev/null chown -R root:root /data/all &> /dev/null chown -R root:users /data/all/*/*/sites &> /dev/null chown -R root:users /data/all/000/core/*/sites &> /dev/null elif [ -e "/data/disk/all" ]; then find /data/disk/all -type d -exec chmod 0755 {} \; &> /dev/null find /data/disk/all -type f -exec chmod 0644 {} \; &> /dev/null chmod 02775 /data/disk/all/*/*/sites/all/{modules,libraries,themes} &> /dev/null chmod 02775 /data/disk/all/000/core/*/sites/all/{modules,libraries,themes} &> /dev/null chown -R root:root /data/disk/all &> /dev/null chown -R root:users /data/disk/all/*/*/sites &> /dev/null chown -R root:users /data/disk/all/000/core/*/sites &> /dev/null fi chmod 02775 /data/disk/*/distro/*/*/sites/all/{modules,libraries,themes} &> /dev/null echo fixed > /data/all/permissions-fix-${_X_VERSION}-fixed-dz.info fi if [ ! -e "/var/backups/fix-sites-all-permsissions-${_X_SE}.txt" ]; then chmod 0751 /data/disk/*/distro/*/*/sites &> /dev/null chmod 0755 /data/disk/*/distro/*/*/sites/all &> /dev/null chmod 02775 /data/disk/*/distro/*/*/sites/all/{modules,libraries,themes} &> /dev/null echo FIXED > /var/backups/fix-sites-all-permsissions-${_X_SE}.txt echo "Permissions in sites/all tree just fixed" fi find /var/backups/old-sql* -mtime +1 -exec rm -rf {} \; &> /dev/null find /var/backups/ltd/*/* -mtime +0 -type f -exec rm -rf {} \; &> /dev/null find /var/backups/solr/*/* -mtime +0 -type f -exec rm -rf {} \; &> /dev/null find /var/backups/jetty* -mtime +0 -exec rm -rf {} \; &> /dev/null find /var/backups/dragon/* -mtime +7 -exec rm -rf {} \; &> /dev/null if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]] \ || [ -e "/root/.host8.cnf" ]; then if [ -d "/var/backups/codebases-cleanup" ]; then find /var/backups/codebases-cleanup/* -mtime +7 -exec rm -rf {} \; &> /dev/null elif [ -d "/data/disk/codebases-cleanup" ]; then find /data/disk/codebases-cleanup/* -mtime +7 -exec rm -rf {} \; &> /dev/null fi fi rm -f /tmp/.cron.*.pid rm -f /tmp/.busy.*.pid rm -f /data/disk/*/.tmp/.cron.*.pid rm -f /data/disk/*/.tmp/.busy.*.pid ### ### Delete duplicity ghost pid file if older than 2 days ### find /var/run/*_backup.pid -mtime +1 -exec rm -rf {} \; &> /dev/null rm -f /var/run/daily-fix.pid echo "INFO: Daily maintenance complete" exit 0 ###EOF2023###
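A note on the locking used above: the daily job serializes itself with /var/run/daily-fix.pid, touching it at the start and removing it only on the normal exit path. Below is a minimal sketch of the same pidfile-guard idea with a trap so the lock is also released when the job exits early; the file name and workload are illustrative placeholders, not the ones BOA uses.

#!/bin/bash
# Pidfile-guard sketch (illustrative only; lockFile is a hypothetical path).
lockFile="/var/run/example-maintenance.pid"
if [ -e "${lockFile}" ]; then
  echo "Another maintenance run is already in progress, exiting"
  exit 1
fi
touch "${lockFile}"
# Remove the lock whenever the shell exits, not only on the happy path.
trap 'rm -f "${lockFile}"' EXIT
echo "INFO: Maintenance work starts"
sleep 1   # placeholder for the real workload
echo "INFO: Maintenance work complete"
exit 0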
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/graceful.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} check_root() { if [ `whoami` = "root" ]; then if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _B_NICE=${_B_NICE//[^0-9]/} fi if [ -z "${_B_NICE}" ]; then _B_NICE=10 fi chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi } check_root if [ -e "/root/.proxy.cnf" ]; then exit 0 fi if [ -e "/root/.pause_heavy_tasks_maint.cnf" ]; then exit 0 fi action() { # # Clean up postfix queue to get rid of bounced emails. # See also: https://omega8.cc/never-send-mailings-from-aegir-server-322 sudo postsuper -d ALL &> /dev/null if [ -e "/etc/init.d/rsyslog" ]; then killall -9 rsyslogd &> /dev/null service rsyslog start &> /dev/null elif [ -e "/etc/init.d/sysklogd" ]; then killall -9 sysklogd &> /dev/null service sysklogd start &> /dev/null elif [ -e "/etc/init.d/inetutils-syslogd" ]; then killall -9 syslogd &> /dev/null service inetutils-syslogd start &> /dev/null fi rm -f /var/backups/.auth.IP.list* find /var/xdrago/log/*.pid -mtime +3 -type f -exec rm -rf {} \; &> /dev/null find /var/xdrago/log/*.log -mtime +30 -type f -exec rm -rf {} \; &> /dev/null find /var/xdrago/log/*.txt -mtime +30 -type f -exec rm -rf {} \; &> /dev/null find /var/xdrago/log/last* -mtime +30 -type f -exec rm -rf {} \; &> /dev/null find /var/xdrago/log/wait* -mtime +30 -type f -exec rm -rf {} \; &> /dev/null find /var/xdrago/log/lshe* -mtime +30 -type f -exec rm -rf {} \; &> /dev/null find /var/xdrago/log/ngin* -mtime +30 -type f -exec rm -rf {} \; &> /dev/null find /var/xdrago/log/grac* -mtime +30 -type f -exec rm -rf {} \; &> /dev/null find /var/xdrago/log/purg* -mtime +30 -type f -exec rm -rf {} \; &> /dev/null find /var/xdrago/log/clea* -mtime +30 -type f -exec rm -rf {} \; &> /dev/null find /var/xdrago/log/proc* -mtime +30 -type f -exec rm -rf {} \; &> /dev/null find /var/xdrago/log/redi* -mtime +30 -type f -exec rm -rf {} \; &> /dev/null if [ -d "/dev/disk" ]; then _IF_CDP=$(ps aux | grep '[c]dp_io' | awk '{print $2}') if [ -z "${_IF_CDP}" ] && [ ! -e "/root/.no.swap.clear.cnf" ]; then swapoff -a swapon -a fi fi mkdir -p /usr/share/GeoIP chmod 755 /usr/share/GeoIP mkdir -p /opt/tmp cd /opt/tmp # For GeoIP2 City database: # wget -q -U iCab \ # wget -N http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz # gunzip GeoLite2-City.mmdb.gz &> /dev/null # cp -af GeoLite2-City.mmdb /usr/share/GeoIP/ # For GeoIP2 Country database: # wget -q -U iCab \ # wget -N http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.mmdb.gz # gunzip GeoLite2-Country.mmdb.gz &> /dev/null # cp -af GeoLite2-Country.mmdb /usr/share/GeoIP/ chmod 644 /usr/share/GeoIP/* rm -rf /opt/tmp mkdir -p /opt/tmp chmod 777 /opt/tmp rm -f /opt/tmp/sess* if [[ "${_CHECK_HOST}" =~ ".host8." 
]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]] \ || [ -e "/root/.host8.cnf" ]; then rm -f /tmp/* fi rm -f /root/ksplice-archive.asc rm -f /root/install-uptrack find /tmp/{.ICE-unix,.X11-unix,.webmin} -mtime +0 -type f -exec rm -rf {} \; if [ -e "/var/log/newrelic" ]; then echo rotate > /var/log/newrelic/nrsysmond.log echo rotate > /var/log/newrelic/php_agent.log echo rotate > /var/log/newrelic/newrelic-daemon.log fi ionice -c2 -n2 -p $$ renice ${_B_NICE} -p $$ &> /dev/null service nginx reload if [ ! -e "/root/.giant_traffic.cnf" ] \ && [ ! -e "/root/.high_traffic.cnf" ]; then echo "INFO: Solr and Jetty servers will be restarted in 60 seconds" touch /var/run/boa_wait.pid sleep 60 if [ -x "/etc/init.d/solr7" ] && [ -e "/etc/default/solr7.in.sh" ]; then service solr7 stop #kill -9 $(ps aux | grep '[j]ava-8-openjdk' | awk '{print $2}') &> /dev/null service solr7 start fi kill -9 $(ps aux | grep '[j]etty' | awk '{print $2}') &> /dev/null rm -rf /tmp/{drush*,pear,jetty*} rm -f /var/log/jetty{7,8,9}/* if [ -e "/etc/default/jetty9" ] && [ -e "/etc/init.d/jetty9" ]; then service jetty9 start fi if [ -e "/etc/default/jetty8" ] && [ -e "/etc/init.d/jetty8" ]; then service jetty8 start fi if [ -e "/etc/default/jetty7" ] && [ -e "/etc/init.d/jetty7" ]; then service jetty7 start fi rm -f /var/run/boa_wait.pid echo "INFO: Solr and Jetty servers restarted OK" fi _IF_BCP=$(ps aux | grep '[d]uplicity' | awk '{print $2}') if [ -z "${_IF_BCP}" ] \ && [ ! -e "/var/run/speed_cleanup.pid" ] \ && [ ! -e "/root/.giant_traffic.cnf" ]; then touch /var/run/speed_cleanup.pid echo " " >> /var/log/nginx/speed_cleanup.log sed -i "s/levels=2:2:2/levels=2:2/g" /var/aegir/config/server_master/nginx.conf service nginx reload &> /dev/null echo "speed_purge start `date`" >> /var/log/nginx/speed_cleanup.log nice -n19 ionice -c2 -n7 find /var/lib/nginx/speed/* -mtime +1 -exec rm -rf {} \; &> /dev/null echo "speed_purge complete `date`" >> /var/log/nginx/speed_cleanup.log service nginx reload &> /dev/null rm -f /var/run/speed_cleanup.pid fi touch /var/xdrago/log/graceful.done.pid } ###--------------------### _NOW=$(date +%y%m%d-%H%M%S 2>&1) _NOW=${_NOW//[^0-9-]/} _CHECK_HOST=$(uname -n 2>&1) _VM_TEST=$(uname -a 2>&1) if [[ "${_VM_TEST}" =~ "-beng" ]]; then _VMFAMILY="VS" else _VMFAMILY="XEN" fi if [ -e "/var/run/boa_run.pid" ] || [ -e "/root/.skip_cleanup.cnf" ]; then exit 0 else touch /var/run/boa_wait.pid sleep 60 action rm -f /var/run/boa_wait.pid exit 0 fi ###EOF2023###
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/guest-fire.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} second_flood_guard() { thisCountSec=`ps aux | grep -v "grep" | grep -v "null" | grep --count "/second.sh"` if [ ${thisCountSec} -gt "4" ]; then echo "$(date 2>&1) Too many ${thisCountSec} second.sh processes killed" >> \ /var/log/sec-count.kill.log kill -9 $(ps aux | grep '[s]econd.sh' | awk '{print $2}') &> /dev/null fi } [ ! -e "/var/run/boa_run.pid" ] && second_flood_guard csf_flood_guard() { thisCountCsf=`ps aux | grep -v "grep" | grep -v "null" | grep --count "/csf"` if [ ! -e "/var/run/boa_run.pid" ] && [ ${thisCountCsf} -gt "4" ]; then echo "$(date 2>&1) Too many ${thisCountCsf} csf processes killed" >> \ /var/log/csf-count.kill.log kill -9 $(ps aux | grep '[c]sf' | awk '{print $2}') &> /dev/null csf -tf wait csf -df wait fi thisCountFire=`ps aux | grep -v "grep" | grep -v "null" | grep --count "/fire.sh"` if [ ! -e "/var/run/boa_run.pid" ] && [ ${thisCountFire} -gt "9" ]; then echo "$(date 2>&1) Too many ${thisCountFire} fire.sh processes killed and rules purged" >> \ /var/log/fire-purge.kill.log csf -tf wait csf -df wait kill -9 $(ps aux | grep '[f]ire.sh' | awk '{print $2}') &> /dev/null elif [ ! -e "/var/run/boa_run.pid" ] && [ ${thisCountFire} -gt "7" ]; then echo "$(date 2>&1) Too many ${thisCountFire} fire.sh processes killed" >> \ /var/log/fire-count.kill.log csf -tf wait kill -9 $(ps aux | grep '[f]ire.sh' | awk '{print $2}') &> /dev/null fi } [ ! -e "/var/run/water.pid" ] && csf_flood_guard guest_guard() { if [ ! -e "/var/run/fire.pid" ] && [ ! -e "/var/run/water.pid" ]; then touch /var/run/fire.pid if [ -e "/var/xdrago/monitor/ssh.log" ]; then for _IP in `cat /var/xdrago/monitor/ssh.log | cut -d '#' -f1 | sort`; do _FW_TEST= _FF_TEST= _FW_TEST=$(csf -g ${_IP} 2>&1) _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1) if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then echo "${_IP} already denied or allowed on port 22" if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then csf -dr ${_IP} csf -tr ${_IP} fi else echo "Deny ${_IP} on ports 21,22,443,80 in the next 1h" csf -td ${_IP} 3600 -p 21 csf -td ${_IP} 3600 -p 22 csf -td ${_IP} 3600 -p 443 csf -td ${_IP} 3600 -p 80 fi done fi if [ -e "/var/xdrago/monitor/web.log" ]; then for _IP in `cat /var/xdrago/monitor/web.log | cut -d '#' -f1 | sort`; do _FW_TEST= _FF_TEST= _FW_TEST=$(csf -g ${_IP} 2>&1) _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1) if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then echo "${_IP} already denied or allowed on port 80" if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then csf -dr ${_IP} csf -tr ${_IP} fi else echo "Deny ${_IP} on ports 21,22,443,80 in the next 1h" csf -td ${_IP} 3600 -p 21 csf -td ${_IP} 3600 -p 22 csf -td ${_IP} 3600 -p 443 csf -td ${_IP} 3600 -p 80 fi done fi if [ -e "/var/xdrago/monitor/ftp.log" ]; then for _IP in `cat /var/xdrago/monitor/ftp.log | cut -d '#' -f1 | sort`; do _FW_TEST= _FF_TEST= _FW_TEST=$(csf -g ${_IP} 2>&1) _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1) if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then echo "${_IP} already denied or allowed on port 21" if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then csf -dr ${_IP} csf -tr ${_IP} fi else echo "Deny ${_IP} on ports 21,22,443,80 in the next 1h" csf -td ${_IP} 3600 -p 21 csf -td ${_IP} 3600 -p 
22 csf -td ${_IP} 3600 -p 443 csf -td ${_IP} 3600 -p 80 fi done fi rm -f /var/run/fire.pid fi } if [ -e "/etc/csf/csf.deny" ] \ && [ ! -e "/var/run/water.pid" ] \ && [ -x "/usr/sbin/csf" ]; then [ ! -e "/var/run/water.pid" ] && guest_guard sleep 10 [ ! -e "/var/run/water.pid" ] && guest_guard sleep 10 [ ! -e "/var/run/water.pid" ] && guest_guard sleep 10 [ ! -e "/var/run/water.pid" ] && guest_guard sleep 10 [ ! -e "/var/run/water.pid" ] && guest_guard rm -f /var/run/fire.pid fi exit 0 ###EOF2023###
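guest-fire.sh repeats the same check-then-temporary-deny block for the ssh.log, web.log and ftp.log sources. The sketch below shows that block factored into one helper; the function name and example address are illustrative, it assumes csf is installed (the flags are the ones already used above), and it omits the csf -dr/-tr cleanup the original performs for whitelisted addresses.

# Illustrative helper, not part of guest-fire.sh.
deny_temp_if_unknown() {
  local _IP="$1"
  local _FW_TEST
  local _FF_TEST
  _FW_TEST=$(csf -g "${_IP}" 2>&1)
  _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1)
  if [[ "${_FF_TEST}" =~ "${_IP}" ]] \
    || [[ "${_FW_TEST}" =~ "DENY" ]] \
    || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then
    echo "${_IP} already denied or allowed, skipping"
  else
    echo "Deny ${_IP} on ports 21,22,443,80 in the next 1h"
    for _PORT in 21 22 443 80; do
      csf -td "${_IP}" 3600 -p ${_PORT}
    done
  fi
}
# Example usage: deny_temp_if_unknown 203.0.113.7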
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/guest-water.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} whitelist_ip_pingdom() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing pingdom ips from csf.allow sed -i "s/.*pingdom.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS=$(curl -k -s https://my.pingdom.com/probes/feed \ | grep '<pingdom:ip>' \ | sed 's/.*::.*//g' \ | sed 's/[^0-9\.]//g' \ | sort \ | uniq 2>&1) echo _IPS pingdom list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow pingdom ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # pingdom ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done } whitelist_ip_cloudflare() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing cloudflare ips from csf.allow sed -i "s/.*cloudflare.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS=$(curl -k -s https://www.cloudflare.com/ips-v4 \ | sed 's/.*::.*//g' \ | sed 's/[^0-9\.\/]//g' \ | sort \ | uniq 2>&1) echo _IPS cloudflare list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow cloudflare ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # cloudflare ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done } whitelist_ip_imperva() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing imperva ips from csf.allow sed -i "s/.*imperva.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS=$(curl -k -s --data "resp_format=text" https://my.imperva.com/api/integration/v1/ips \ | sed 's/.*::.*//g' \ | sed 's/[^0-9\.\/]//g' \ | sort \ | uniq 2>&1) echo _IPS imperva list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow imperva ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # imperva ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done } whitelist_ip_googlebot() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing googlebot ips from csf.allow sed -i "s/.*googlebot.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS="66.249.64.0/19" echo _IPS googlebot list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow googlebot ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # googlebot ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done sed -i "s/66.249..*//g" /etc/csf/csf.deny wait } whitelist_ip_microsoft() { if [ ! 
-e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing microsoft ips from csf.allow sed -i "s/.*microsoft.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS="65.52.0.0/14 199.30.16.0/20" echo _IPS microsoft list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow microsoft ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # microsoft ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done sed -i "s/65.5.*//g" /etc/csf/csf.deny wait sed -i "s/199.30..*//g" /etc/csf/csf.deny wait } whitelist_ip_sucuri() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing sucuri ips from csf.allow sed -i "s/.*sucuri.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS="192.88.134.0/23 185.93.228.0/22 66.248.200.0/22 208.109.0.0/22" echo _IPS sucuri list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow sucuri ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # sucuri ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done } whitelist_ip_authzero() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing authzero ips from csf.allow sed -i "s/.*authzero.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait fi _IPS="35.167.77.121 35.166.202.113 35.160.3.103 54.183.64.135 54.67.77.38 54.67.15.170 54.183.204.205 35.171.156.124 18.233.90.226 3.211.189.167 52.28.56.226 52.28.45.240 52.16.224.164 52.16.193.66 34.253.4.94 52.50.106.250 52.211.56.181 52.213.38.246 52.213.74.69 52.213.216.142 35.156.51.163 35.157.221.52 52.28.184.187 52.28.212.16 52.29.176.99 52.57.230.214 54.76.184.103 52.210.122.50 52.208.95.174 52.210.122.50 52.208.95.174 54.76.184.103 52.64.84.177 52.64.111.197 54.153.131.0 13.210.52.131 13.55.232.24 13.54.254.182 52.62.91.160 52.63.36.78 52.64.120.184 54.66.205.24 54.79.46.4" echo _IPS authzero list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow authzero ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # authzero ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done } whitelist_ip_site24x7_extra() { _IPS="87.252.213.0/24 89.36.170.0/24 185.172.199.128/26 185.230.214.0/23 185.172.199.0/27" echo _IPS site24x7_extra list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow site24x7_extra ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # site24x7_extra ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done if [ -e "/root/.ignore.site24x7.firewall.cnf" ]; then for _IP in ${_IPS}; do echo checking csf.ignore site24x7_extra ${_IP} now... 
_IP_CHECK=$(cat /etc/csf/csf.ignore \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.ignore" echo "${_IP} # site24x7_extra ips" >> /etc/csf/csf.ignore else echo "${_IP} already listed in /etc/csf/csf.ignore" fi done fi } whitelist_ip_site24x7() { if [ ! -e "/root/.whitelist.dont.cleanup.cnf" ]; then echo removing site24x7 ips from csf.allow sed -i "s/.*site24x7.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow wait echo removing site24x7 ips from csf.ignore sed -i "s/.*site24x7.*//g" /etc/csf/csf.ignore wait sed -i "/^$/d" /etc/csf/csf.ignore wait fi _IPS=$(host site24x7.enduserexp.com 1.1.1.1 \ | grep 'has address' \ | cut -d ' ' -f4 \ | sed 's/[^0-9\.]//g' \ | sort \ | uniq 2>&1) if [ -z "${_IPS}" ] \ || [[ ! "${_IPS}" =~ "104.236.16.22" ]] \ || [[ "${_IPS}" =~ "HINFO" ]]; then _IPS=$(dig site24x7.enduserexp.com \ | grep 'IN.*A' \ | cut -d 'A' -f2 \ | sed 's/[^0-9\.]//g' \ | sort \ | uniq 2>&1) fi echo _IPS site24x7 list.. echo ${_IPS} for _IP in ${_IPS}; do echo checking csf.allow site24x7 ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.allow \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.allow" echo "tcp|in|d=80|s=${_IP} # site24x7 ips" >> /etc/csf/csf.allow else echo "${_IP} already listed in /etc/csf/csf.allow" fi done if [ -e "/root/.ignore.site24x7.firewall.cnf" ]; then for _IP in ${_IPS}; do echo checking csf.ignore site24x7 ${_IP} now... _IP_CHECK=$(cat /etc/csf/csf.ignore \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep "${_IP}" 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /etc/csf/csf.ignore" echo "${_IP} # site24x7 ips" >> /etc/csf/csf.ignore else echo "${_IP} already listed in /etc/csf/csf.ignore" fi done fi if [ ! -e "/root/.whitelist.site24x7.cnf" ]; then csf -tf wait csf -df wait touch /root/.whitelist.site24x7.cnf fi } local_ip_rg() { if [ -e "/root/.local.IP.list" ]; then echo "the file /root/.local.IP.list already exists" for _IP in `hostname -I`; do _IP_CHECK=$(cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep ${_IP} 2>&1) if [ -z "${_IP_CHECK}" ]; then echo "${_IP} not yet listed in /root/.local.IP.list" echo "${_IP} # local IP address" >> /root/.local.IP.list else echo "${_IP} already listed in /root/.local.IP.list" fi done for _IP in `cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s"`; do echo removing ${_IP} from d/t firewall rules csf -ar ${_IP} &> /dev/null csf -dr ${_IP} &> /dev/null csf -tr ${_IP} &> /dev/null if [ ! 
-e "/root/.local.IP.csf.listed" ]; then echo removing ${_IP} from csf.ignore sed -i "s/^${_IP} .*//g" /etc/csf/csf.ignore wait echo removing ${_IP} from csf.allow sed -i "s/^${_IP} .*//g" /etc/csf/csf.allow wait echo adding ${_IP} to csf.ignore echo "${_IP} # local.IP.list" >> /etc/csf/csf.ignore wait echo adding ${_IP} to csf.allow echo "${_IP} # local.IP.list" >> /etc/csf/csf.allow wait fi done touch /root/.local.IP.csf.listed else echo "the file /root/.local.IP.list does not exist" rm -f /root/.tmp.IP.list* rm -f /root/.local.IP.list* for _IP in `hostname -I`;do echo ${_IP} >> /root/.tmp.IP.list;done for _IP in `cat /root/.tmp.IP.list \ | sort \ | uniq`;do echo "${_IP} # local IP address" >> /root/.local.IP.list;done rm -f /root/.tmp.IP.list* fi sed -i "/^$/d" /etc/csf/csf.ignore &> /dev/null wait sed -i "/^$/d" /etc/csf/csf.allow &> /dev/null wait } guard_stats() { if [ ! -e "${_HX}" ] && [ -e "${_HA}" ]; then mv -f ${_HA} ${_HX} fi if [ ! -e "${_WX}" ] && [ -e "${_WA}" ]; then mv -f ${_WA} ${_WX} fi if [ ! -e "${_FX}" ] && [ -e "${_FA}" ]; then mv -f ${_FA} ${_FX} fi if [ -e "${_HA}" ]; then for _IP in `cat ${_HA} | cut -d '#' -f1 | sort | uniq`; do _IP_RV= _NR_TEST="0" _NR_TEST=$(tr -s ' ' '\n' < ${_HA} | grep -c ${_IP} 2>&1) if [ -e "/root/.local.IP.list" ]; then _IP_CHECK=$(cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep ${_IP} 2>&1) if [ ! -z "${_IP_CHECK}" ]; then _NR_TEST="0" echo "${_IP} is a local IP address, ignoring ${_HA}" fi fi if [ ! -z "${_NR_TEST}" ] && [ "${_NR_TEST}" -ge "24" ]; then echo ${_IP} ${_NR_TEST} _FW_TEST= _FF_TEST= _FW_TEST=$(csf -g ${_IP} 2>&1) _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1) if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then echo "${_IP} already denied or allowed on port 22" if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then csf -dr ${_IP} csf -tr ${_IP} fi else _IP_RV=$(host -s ${_IP} 2>&1) if [ "${_NR_TEST}" -ge "64" ]; then echo "Deny ${_IP} permanently ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} do not delete Brute force SSH Server ${_NR_TEST} attacks ${_IP_RV} else echo "Deny ${_IP} until limits rotation ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} Brute force SSH Server ${_NR_TEST} attacks ${_IP_RV} fi fi fi done fi if [ -e "${_WA}" ]; then for _IP in `cat ${_WA} | cut -d '#' -f1 | sort | uniq`; do _IP_RV= _NR_TEST="0" _NR_TEST=$(tr -s ' ' '\n' < ${_WA} | grep -c ${_IP} 2>&1) if [ -e "/root/.local.IP.list" ]; then _IP_CHECK=$(cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep ${_IP} 2>&1) if [ ! -z "${_IP_CHECK}" ]; then _NR_TEST="0" echo "${_IP} is a local IP address, ignoring ${_WA}" fi fi if [ ! 
-z "${_NR_TEST}" ] && [ "${_NR_TEST}" -ge "24" ]; then echo ${_IP} ${_NR_TEST} _FW_TEST= _FF_TEST= _FW_TEST=$(csf -g ${_IP} 2>&1) _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1) if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then echo "${_IP} already denied or allowed on port 80" if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then csf -dr ${_IP} csf -tr ${_IP} fi else _IP_RV=$(host -s ${_IP} 2>&1) if [ "${_NR_TEST}" -ge "64" ]; then echo "Deny ${_IP} permanently ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} do not delete Brute force Web Server ${_NR_TEST} attacks ${_IP_RV} else echo "Deny ${_IP} until limits rotation ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} Brute force Web Server ${_NR_TEST} attacks ${_IP_RV} fi fi fi done fi if [ -e "${_FA}" ]; then for _IP in `cat ${_FA} | cut -d '#' -f1 | sort | uniq`; do _IP_RV= _NR_TEST="0" _NR_TEST=$(tr -s ' ' '\n' < ${_FA} | grep -c ${_IP} 2>&1) if [ -e "/root/.local.IP.list" ]; then _IP_CHECK=$(cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s" \ | grep ${_IP} 2>&1) if [ ! -z "${_IP_CHECK}" ]; then _NR_TEST="0" echo "${_IP} is a local IP address, ignoring ${_FA}" fi fi if [ ! -z "${_NR_TEST}" ] && [ "${_NR_TEST}" -ge "24" ]; then echo ${_IP} ${_NR_TEST} _FW_TEST= _FF_TEST= _FW_TEST=$(csf -g ${_IP} 2>&1) _FF_TEST=$(grep "=${_IP} " /etc/csf/csf.allow 2>&1) if [[ "${_FF_TEST}" =~ "${_IP}" ]] || [[ "${_FW_TEST}" =~ "DENY" ]] || [[ "${_FW_TEST}" =~ "ALLOW" ]]; then echo "${_IP} already denied or allowed on port 21" if [[ "${_FF_TEST}" =~ "${_IP}" ]]; then csf -dr ${_IP} csf -tr ${_IP} fi else _IP_RV=$(host -s ${_IP} 2>&1) if [ "${_NR_TEST}" -ge "64" ]; then echo "Deny ${_IP} permanently ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} do not delete Brute force FTP Server ${_NR_TEST} attacks ${_IP_RV} else echo "Deny ${_IP} until limits rotation ${_NR_TEST} ${_IP_RV}" csf -d ${_IP} Brute force FTP Server ${_NR_TEST} attacks ${_IP_RV} fi fi fi done fi } whitelist_ip_dns() { csf -tr 1.1.1.1 csf -tr 1.0.0.1 csf -dr 1.1.1.1 csf -dr 1.0.0.1 sed -i "s/.*1.1.1.1.*//g" /etc/csf/csf.allow sed -i "s/.*1.1.1.1.*//g" /etc/csf/csf.ignore sed -i "s/.*1.0.0.1.*//g" /etc/csf/csf.allow sed -i "s/.*1.0.0.1.*//g" /etc/csf/csf.ignore echo "tcp|out|d=53|d=1.1.1.1 # Cloudflare DNS" >> /etc/csf/csf.allow echo "tcp|out|d=53|d=1.0.0.1 # Cloudflare DNS" >> /etc/csf/csf.allow sed -i "s/.*8.8.8.8.*//g" /etc/csf/csf.allow sed -i "s/.*8.8.8.8.*//g" /etc/csf/csf.ignore sed -i "s/.*8.8.4.4.*//g" /etc/csf/csf.allow sed -i "s/.*8.8.4.4.*//g" /etc/csf/csf.ignore echo "tcp|out|d=53|d=8.8.8.8 # Google DNS" >> /etc/csf/csf.allow echo "tcp|out|d=53|d=8.8.4.4 # Google DNS" >> /etc/csf/csf.allow sed -i "/^$/d" /etc/csf/csf.ignore sed -i "/^$/d" /etc/csf/csf.allow } if [ -x "/usr/sbin/csf" ] && [ -e "/etc/csf/csf.deny" ]; then if [ -e "/root/.local.IP.list" ]; then echo local dr/tr start `date` for _IP in `cat /root/.local.IP.list \ | cut -d '#' -f1 \ | sort \ | uniq \ | tr -d "\s"`; do csf -dr ${_IP} &> /dev/null csf -tr ${_IP} &> /dev/null done fi n=$((RANDOM%120+90)) touch /var/run/water.pid echo Waiting $n seconds... 
sleep $n whitelist_ip_dns whitelist_ip_pingdom whitelist_ip_cloudflare whitelist_ip_googlebot whitelist_ip_microsoft [ -e "/root/.extended.firewall.exceptions.cnf" ] && whitelist_ip_imperva [ -e "/root/.extended.firewall.exceptions.cnf" ] && whitelist_ip_sucuri [ -e "/root/.extended.firewall.exceptions.cnf" ] && whitelist_ip_authzero [ -e "/root/.extended.firewall.exceptions.cnf" ] && whitelist_ip_site24x7_extra [ -e "/root/.extended.firewall.exceptions.cnf" ] && whitelist_ip_site24x7 if [ -e "/root/.full.csf.cleanup.cnf" ]; then sed -i "s/.*do not delete.*//g" /etc/csf/csf.deny sed -i "/^$/d" /etc/csf/csf.deny fi kill -9 $(ps aux | grep '[C]onfigServer' | awk '{print $2}') &> /dev/null killall sleep &> /dev/null rm -f /etc/csf/csf.error service lfd restart wait csf -e wait csf -tf wait csf -q ### Linux kernel TCP SACK CVEs mitigation ### CVE-2019-11477 SACK Panic ### CVE-2019-11478 SACK Slowness ### CVE-2019-11479 Excess Resource Consumption Due to Low MSS Values if [ -x "/usr/sbin/csf" ] && [ -e "/etc/csf/csf.deny" ]; then _SACK_TEST=$(ip6tables --list | grep tcpmss 2>&1) if [[ ! "${_SACK_TEST}" =~ "tcpmss" ]]; then sysctl net.ipv4.tcp_mtu_probing=0 &> /dev/null iptables -A INPUT -p tcp -m tcpmss --mss 1:500 -j DROP &> /dev/null ip6tables -A INPUT -p tcp -m tcpmss --mss 1:500 -j DROP &> /dev/null fi fi echo local start `date` local_ip_rg _HA=/var/xdrago/monitor/hackcheck.archive.log _HX=/var/xdrago/monitor/hackcheck.archive.x3.log _WA=/var/xdrago/monitor/scan_nginx.archive.log _WX=/var/xdrago/monitor/scan_nginx.archive.x3.log _FA=/var/xdrago/monitor/hackftp.archive.log _FX=/var/xdrago/monitor/hackftp.archive.x3.log echo guard start `date` guard_stats rm -f /var/xdrago/monitor/ssh.log rm -f /var/xdrago/monitor/web.log rm -f /var/xdrago/monitor/ftp.log kill -9 $(ps aux | grep '[C]onfigServer' | awk '{print $2}') &> /dev/null killall sleep &> /dev/null rm -f /etc/csf/csf.error service lfd restart wait sed -i "s/.*DHCP.*//g" /etc/csf/csf.allow wait sed -i "/^$/d" /etc/csf/csf.allow _DHCP_TEST=$(grep DHCPREQUEST /var/log/syslog | cut -d ' ' -f13 | sort | uniq 2>&1) if [[ "${_DHCP_TEST}" =~ "port" ]]; then for _IP in `grep DHCPREQUEST /var/log/syslog | cut -d ' ' -f12 | sort | uniq`;do echo "udp|out|d=67|d=${_IP} # Local DHCP out" >> /etc/csf/csf.allow;done else for _IP in `grep DHCPREQUEST /var/log/syslog | cut -d ' ' -f13 | sort | uniq`;do echo "udp|out|d=67|d=${_IP} # Local DHCP out" >> /etc/csf/csf.allow;done fi csf -e wait csf -q ### Linux kernel TCP SACK CVEs mitigation ### CVE-2019-11477 SACK Panic ### CVE-2019-11478 SACK Slowness ### CVE-2019-11479 Excess Resource Consumption Due to Low MSS Values if [ -x "/usr/sbin/csf" ] && [ -e "/etc/csf/csf.deny" ]; then _SACK_TEST=$(ip6tables --list | grep tcpmss 2>&1) if [[ ! "${_SACK_TEST}" =~ "tcpmss" ]]; then sysctl net.ipv4.tcp_mtu_probing=0 &> /dev/null iptables -A INPUT -p tcp -m tcpmss --mss 1:500 -j DROP &> /dev/null ip6tables -A INPUT -p tcp -m tcpmss --mss 1:500 -j DROP &> /dev/null fi fi rm -f /var/run/water.pid echo guard fin `date` ntpdate pool.ntp.org else if [ -e "/root/.mstr.clstr.cnf" ] \ || [ -e "/root/.wbhd.clstr.cnf" ] \ || [ -e "/root/.dbhd.clstr.cnf" ]; then ntpdate pool.ntp.org fi fi exit 0 ###EOF2023###
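Every whitelist_ip_* function above repeats the same grep-then-append logic against /etc/csf/csf.allow. The sketch below condenses that pattern into one parameterized helper; the function name is illustrative and the check is simplified (the original also pipes through sort, uniq and tr), while the example address is the googlebot range already used in the script.

# Illustrative helper, not part of guest-water.sh.
append_allow_if_missing() {
  local _IP="$1"
  local _LABEL="$2"
  # Compare against the address part only, ignoring the trailing "# label".
  local _IP_CHECK
  _IP_CHECK=$(cut -d '#' -f1 /etc/csf/csf.allow 2> /dev/null | grep -F "${_IP}")
  if [ -z "${_IP_CHECK}" ]; then
    echo "${_IP} not yet listed in /etc/csf/csf.allow"
    echo "tcp|in|d=80|s=${_IP} # ${_LABEL} ips" >> /etc/csf/csf.allow
  else
    echo "${_IP} already listed in /etc/csf/csf.allow"
  fi
}
# Example usage: append_allow_if_missing 66.249.64.0/19 googlebot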
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/manage_ltd_users.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} check_root() { if [ `whoami` = "root" ]; then ionice -c2 -n7 -p $$ renice 19 -p $$ chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi _DF_TEST=$(df -kTh / -l \ | grep '/' \ | sed 's/\%//g' \ | awk '{print $6}' 2> /dev/null) _DF_TEST=${_DF_TEST//[^0-9]/} if [ ! -z "${_DF_TEST}" ] && [ "${_DF_TEST}" -gt "90" ]; then echo "ERROR: Your disk space is almost full !!! ${_DF_TEST}/100" echo "ERROR: We can not proceed until it is below 90/100" exit 1 fi } check_root if [ -e "/root/.proxy.cnf" ]; then exit 0 fi if [ -e "/root/.pause_tasks_maint.cnf" ]; then exit 0 fi os_detection_minimal() { _THIS_RV=$(lsb_release -sc 2>&1) if [ "${_THIS_RV}" = "chimaera" ] \ || [ "${_THIS_RV}" = "beowulf" ] \ || [ "${_THIS_RV}" = "bullseye" ] \ || [ "${_THIS_RV}" = "buster" ]; then _APT_UPDATE="apt-get update --allow-releaseinfo-change" else _APT_UPDATE="apt-get update" fi } os_detection_minimal apt_clean_update() { apt-get clean -qq 2> /dev/null rm -rf /var/lib/apt/lists/* &> /dev/null ${_APT_UPDATE} -qq 2> /dev/null } _X_SE="414prodT66" _CHECK_HOST=$(uname -n 2>&1) usrGroup=users _WEBG=www-data _THIS_RV=$(lsb_release -sc 2>&1) if [ "${_THIS_RV}" = "chimaera" ] \ || [ "${_THIS_RV}" = "beowulf" ] \ || [ "${_THIS_RV}" = "bullseye" ] \ || [ "${_THIS_RV}" = "buster" ] \ || [ "${_THIS_RV}" = "stretch" ] \ || [ "${_THIS_RV}" = "jessie" ] \ || [ "${_THIS_RV}" = "trusty" ] \ || [ "${_THIS_RV}" = "precise" ]; then _RUBY_VRN=3.1.2 else _RUBY_VRN=2.0.0 fi _VM_TEST=$(uname -a 2>&1) if [[ "${_VM_TEST}" =~ "-beng" ]]; then _VMFAMILY="VS" else _VMFAMILY="XEN" fi if [ -x "/usr/bin/gpg2" ]; then _GPG=gpg2 else _GPG=gpg fi crlGet="-L --max-redirs 10 -k -s --retry 10 --retry-delay 5 -A iCab" aptYesUnth="-y --allow-unauthenticated" ###-------------SYSTEM-----------------### count_cpu() { _CPU_INFO=$(grep -c processor /proc/cpuinfo 2>&1) _CPU_INFO=${_CPU_INFO//[^0-9]/} _NPROC_TEST=$(which nproc 2>&1) if [ -z "${_NPROC_TEST}" ]; then _CPU_NR="${_CPU_INFO}" else _CPU_NR=$(nproc 2>&1) fi _CPU_NR=${_CPU_NR//[^0-9]/} if [ ! -z "${_CPU_NR}" ] \ && [ ! -z "${_CPU_INFO}" ] \ && [ "${_CPU_NR}" -gt "${_CPU_INFO}" ] \ && [ "${_CPU_INFO}" -gt "0" ]; then _CPU_NR="${_CPU_INFO}" fi if [ -z "${_CPU_NR}" ] || [ "${_CPU_NR}" -lt "1" ]; then _CPU_NR=1 fi } find_fast_mirror() { isNetc=$(which netcat 2>&1) if [ ! -x "${isNetc}" ] || [ -z "${isNetc}" ]; then if [ ! -e "/etc/apt/apt.conf.d/00sandboxoff" ] \ && [ -e "/etc/apt/apt.conf.d" ]; then echo "APT::Sandbox::User \"root\";" > /etc/apt/apt.conf.d/00sandboxoff fi apt_clean_update apt-get install netcat ${aptYesUnth} &> /dev/null sleep 3 fi ffMirr=$(which ffmirror 2>&1) if [ -x "${ffMirr}" ]; then ffList="/var/backups/boa-mirrors-2023-01.txt" mkdir -p /var/backups if [ ! -e "${ffList}" ]; then echo "de.files.aegir.cc" > ${ffList} echo "ny.files.aegir.cc" >> ${ffList} echo "sg.files.aegir.cc" >> ${ffList} fi if [ -e "${ffList}" ]; then _CHECK_MIRROR=$(bash ${ffMirr} < ${ffList} 2>&1) _USE_MIR="${_CHECK_MIRROR}" [[ "${_USE_MIR}" =~ "printf" ]] && _USE_MIR="files.aegir.cc" else _USE_MIR="files.aegir.cc" fi else _USE_MIR="files.aegir.cc" fi urlDev="http://${_USE_MIR}/dev" urlHmr="http://${_USE_MIR}/versions/master/aegir" } extract_archive() { if [ ! 
-z "$1" ]; then case $1 in *.tar.bz2) tar xjf $1 ;; *.tar.gz) tar xzf $1 ;; *.tar.xz) tar xvf $1 ;; *.bz2) bunzip2 $1 ;; *.rar) unrar x $1 ;; *.gz) gunzip -q $1 ;; *.tar) tar xf $1 ;; *.tbz2) tar xjf $1 ;; *.tgz) tar xzf $1 ;; *.zip) unzip -qq $1 ;; *.Z) uncompress $1 ;; *.7z) 7z x $1 ;; *) echo "'$1' cannot be extracted via >extract<" ;; esac rm -f $1 fi } get_dev_ext() { if [ ! -z "$1" ]; then curl ${crlGet} "${urlDev}/HEAD/$1" -o "$1" if [ -e "$1" ]; then extract_archive "$1" else echo "OOPS: $1 failed download from ${urlDev}/HEAD/$1" fi fi } ###----------------------------### ## Manage ltd shell users ## ###----------------------------### # # Remove dangerous stuff from the string. sanitize_string() { echo "$1" | sed 's/[\\\/\^\?\>\`\#\"\{\(\$\@\&\|\*]//g; s/\(['"'"'\]\)//g' } # # Add ltd-shell group if not exists. add_ltd_group_if_not_exists() { _LTD_EXISTS=$(getent group ltd-shell 2>&1) if [[ "${_LTD_EXISTS}" =~ "ltd-shell" ]]; then _DO_NOTHING=YES else addgroup --system ltd-shell &> /dev/null fi } # # Add to rvm group if needed. if_add_to_rvm_group() { isTest="$1" isTest=${isTest//[^a-z0-9]/} if [ ! -z "${isTest}" ] && [ -d "/home/$1/" ]; then _ID_SHELLS=$(id -nG $1 2>&1) if [[ ! "${_ID_SHELLS}" =~ "rvm" ]]; then isRvm=$(which rvm 2>&1) if [ -x "${isRvm}" ]; then rvmPth="${isRvm}" elif [ -x "/usr/local/rvm/bin/rvm" ]; then rvmPth="/usr/local/rvm/bin/rvm" fi if [ -x "${rvmPth}" ]; then usermod -aG rvm $1 fi fi _ID_SHELLS="" fi } # # Enable chattr. enable_chattr() { isTest="$1" isTest=${isTest//[^a-z0-9]/} if [ ! -z "${isTest}" ] && [ -d "/home/$1/" ]; then if_add_to_rvm_group $1 _U_HD="/home/$1/.drush" _U_TP="/home/$1/.tmp" _U_II="${_U_HD}/php.ini" if [ ! -e "${_U_HD}/.ctrl.${_X_SE}.pid" ]; then if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]]; then rm -rf ${_U_HD}/ else rm -f ${_U_HD}/{drush_make,registry_rebuild,clean_missing_modules} rm -f ${_U_HD}/{drupalgeddon,drush_ecl,make_local,safe_cache_form*} rm -f ${_U_HD}/usr/{drush_make,registry_rebuild,clean_missing_modules} rm -f ${_U_HD}/usr/{drupalgeddon,drush_ecl,make_local,safe_cache_form*} rm -f ${_U_HD}/usr/{mydropwizard,utf8mb4_convert} rm -f ${_U_HD}/.ctrl* rm -rf ${_U_HD}/{cache,drush.ini,*drushrc*,*.inc} fi mkdir -p ${_U_HD}/usr mkdir -p ${_U_TP} touch ${_U_TP} find ${_U_TP}/ -mtime +0 -exec rm -rf {} \; &> /dev/null chown $1:${usrGroup} ${_U_TP} chown $1:${usrGroup} ${_U_HD} chmod 02755 ${_U_TP} chmod 02755 ${_U_HD} if [ ! -L "${_U_HD}/usr/registry_rebuild" ] \ && [ -e "${dscUsr}/.drush/usr/registry_rebuild" ]; then ln -sf ${dscUsr}/.drush/usr/registry_rebuild \ ${_U_HD}/usr/registry_rebuild fi if [ ! -L "${_U_HD}/usr/clean_missing_modules" ] \ && [ -e "${dscUsr}/.drush/usr/clean_missing_modules" ]; then ln -sf ${dscUsr}/.drush/usr/clean_missing_modules \ ${_U_HD}/usr/clean_missing_modules fi if [ ! -L "${_U_HD}/usr/drupalgeddon" ] \ && [ -e "${dscUsr}/.drush/usr/drupalgeddon" ]; then ln -sf ${dscUsr}/.drush/usr/drupalgeddon \ ${_U_HD}/usr/drupalgeddon fi if [ ! -L "${_U_HD}/usr/drush_ecl" ] \ && [ -e "${dscUsr}/.drush/usr/drush_ecl" ]; then ln -sf ${dscUsr}/.drush/usr/drush_ecl \ ${_U_HD}/usr/drush_ecl fi if [ ! -L "${_U_HD}/usr/safe_cache_form_clear" ] \ && [ -e "${dscUsr}/.drush/usr/safe_cache_form_clear" ]; then ln -sf ${dscUsr}/.drush/usr/safe_cache_form_clear \ ${_U_HD}/usr/safe_cache_form_clear fi if [ ! 
-L "${_U_HD}/usr/utf8mb4_convert" ] \ && [ -e "${dscUsr}/.drush/usr/utf8mb4_convert" ]; then ln -sf ${dscUsr}/.drush/usr/utf8mb4_convert \ ${_U_HD}/usr/utf8mb4_convert fi fi _CHECK_USE_PHP_CLI=$(grep "/opt/php" \ ${dscUsr}/tools/drush/drush.php 2>&1) _PHP_V="82 81 80 74 73 72 71 70 56" for e in ${_PHP_V}; do if [[ "${_CHECK_USE_PHP_CLI}" =~ "php${e}" ]] \ && [ ! -e "${_U_HD}/.ctrl.php${e}.${_X_SE}.pid" ]; then _PHP_CLI_UPDATE=YES fi done echo _PHP_CLI_UPDATE is ${_PHP_CLI_UPDATE} for $1 if [ "${_PHP_CLI_UPDATE}" = "YES" ] \ || [ ! -e "${_U_II}" ] \ || [ ! -e "${_U_HD}/.ctrl.${_X_SE}.pid" ]; then mkdir -p ${_U_HD} rm -f ${_U_HD}/.ctrl.php* rm -f ${_U_II} if [ ! -z "${_T_CLI_VRN}" ]; then _USE_PHP_CLI="${_T_CLI_VRN}" echo "_USE_PHP_CLI is ${_USE_PHP_CLI} for $1 at ${_USER} WTF" echo "_T_CLI_VRN is ${_T_CLI_VRN}" else _CHECK_USE_PHP_CLI=$(grep "/opt/php" \ ${dscUsr}/tools/drush/drush.php 2>&1) echo "_CHECK_USE_PHP_CLI is ${_CHECK_USE_PHP_CLI} for $1 at ${_USER}" if [[ "${_CHECK_USE_PHP_CLI}" =~ "php82" ]]; then _USE_PHP_CLI=8.2 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php81" ]]; then _USE_PHP_CLI=8.1 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php80" ]]; then _USE_PHP_CLI=8.0 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php74" ]]; then _USE_PHP_CLI=7.4 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php73" ]]; then _USE_PHP_CLI=7.3 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php72" ]]; then _USE_PHP_CLI=7.2 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php71" ]]; then _USE_PHP_CLI=7.1 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php70" ]]; then _USE_PHP_CLI=7.0 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php56" ]]; then _USE_PHP_CLI=5.6 fi fi echo _USE_PHP_CLI is ${_USE_PHP_CLI} for $1 if [ "${_USE_PHP_CLI}" = "8.2" ]; then cp -af /opt/php82/lib/php.ini ${_U_II} _U_INI=82 elif [ "${_USE_PHP_CLI}" = "8.1" ]; then cp -af /opt/php81/lib/php.ini ${_U_II} _U_INI=81 elif [ "${_USE_PHP_CLI}" = "8.0" ]; then cp -af /opt/php80/lib/php.ini ${_U_II} _U_INI=80 elif [ "${_USE_PHP_CLI}" = "7.4" ]; then cp -af /opt/php74/lib/php.ini ${_U_II} _U_INI=74 elif [ "${_USE_PHP_CLI}" = "7.3" ]; then cp -af /opt/php73/lib/php.ini ${_U_II} _U_INI=73 elif [ "${_USE_PHP_CLI}" = "7.2" ]; then cp -af /opt/php72/lib/php.ini ${_U_II} _U_INI=72 elif [ "${_USE_PHP_CLI}" = "7.1" ]; then cp -af /opt/php71/lib/php.ini ${_U_II} _U_INI=71 elif [ "${_USE_PHP_CLI}" = "7.0" ]; then cp -af /opt/php70/lib/php.ini ${_U_II} _U_INI=70 elif [ "${_USE_PHP_CLI}" = "5.6" ]; then cp -af /opt/php56/lib/php.ini ${_U_II} _U_INI=56 fi if [ -e "${_U_II}" ]; then _INI="open_basedir = \".: \ /data/all: \ /data/conf: \ /data/disk/all: \ /home/$1: \ /opt/php56: \ /opt/php70: \ /opt/php71: \ /opt/php72: \ /opt/php73: \ /opt/php74: \ /opt/php80: \ /opt/php81: \ /opt/php82: \ /opt/tika: \ /opt/tika7: \ /opt/tika8: \ /opt/tika9: \ /dev/urandom: \ /opt/tools/drush: \ /usr/bin: \ /usr/local/bin: \ ${dscUsr}/.drush/usr: \ ${dscUsr}/distro: \ ${dscUsr}/platforms: \ ${dscUsr}/static\"" _INI=$(echo "${_INI}" | sed "s/ //g" 2>&1) _INI=$(echo "${_INI}" | sed "s/open_basedir=/open_basedir = /g" 2>&1) _INI=${_INI//\//\\\/} _QTP=${_U_TP//\//\\\/} sed -i "s/.*open_basedir =.*/${_INI}/g" ${_U_II} wait sed -i "s/.*error_reporting =.*/error_reporting = 1/g" ${_U_II} wait sed -i "s/.*session.save_path =.*/session.save_path = ${_QTP}/g" ${_U_II} wait sed -i "s/.*soap.wsdl_cache_dir =.*/soap.wsdl_cache_dir = ${_QTP}/g" ${_U_II} wait sed -i "s/.*sys_temp_dir =.*/sys_temp_dir = ${_QTP}/g" ${_U_II} wait sed -i "s/.*upload_tmp_dir =.*/upload_tmp_dir = ${_QTP}/g" ${_U_II} wait echo > ${_U_HD}/.ctrl.php${_U_INI}.${_X_SE}.pid echo > 
${_U_HD}/.ctrl.${_X_SE}.pid fi fi UQ="$1" if [ -f "${dscUsr}/static/control/compass.info" ]; then if [ -d "/home/${UQ}/.rvm/src" ]; then rm -rf /home/${UQ}/.rvm/src/* fi if [ -d "/home/${UQ}/.rvm/archives" ]; then rm -rf /home/${UQ}/.rvm/archives/* fi if [ -d "/home/${UQ}/.rvm/log" ]; then rm -rf /home/${UQ}/.rvm/log/* fi if [ ! -x "/home/${UQ}/.rvm/bin/rvm" ]; then touch /var/run/manage_rvm_users.pid if [ -d "/usr/local/rvm" ]; then mv -f /usr/local/rvm /usr/local/.off_rvm fi if [ -x "/bin/websh" ] && [ -L "/bin/sh" ]; then _WEB_SH=$(readlink -n /bin/sh 2>&1) _WEB_SH=$(echo -n ${_WEB_SH} | tr -d "\n" 2>&1) if [ -x "/bin/dash" ]; then if [ "${_WEB_SH}" != "/bin/dash" ]; then rm -f /bin/sh ln -s /bin/dash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /bin/dash /usr/bin/sh fi fi elif [ -x "/usr/bin/dash" ]; then if [ "${_WEB_SH}" != "/usr/bin/dash" ]; then rm -f /bin/sh ln -s /usr/bin/dash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /usr/bin/dash /usr/bin/sh fi fi elif [ -x "/bin/bash" ]; then if [ "${_WEB_SH}" != "/bin/bash" ]; then rm -f /bin/sh ln -s /bin/bash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /bin/bash /usr/bin/sh fi fi elif [ -x "/usr/bin/bash" ]; then if [ "${_WEB_SH}" != "/usr/bin/bash" ]; then rm -f /bin/sh ln -s /usr/bin/bash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /usr/bin/bash /usr/bin/sh fi fi fi fi su -s /bin/bash - ${UQ} -c "\curl -k -sSL ${urlDev}/mpapis.asc | ${_GPG} --import -" su -s /bin/bash - ${UQ} -c "\curl -k -sSL ${urlDev}/pkuczynski.asc | ${_GPG} --import -" su -s /bin/bash ${UQ} -c "\curl -k -sSL ${urlHmr}/helpers/rvm-installer.sh | bash -s stable" su -s /bin/bash - ${UQ} -c "rvm get stable --auto-dotfiles" su -s /bin/bash - ${UQ} -c "echo rvm_autoupdate_flag=0 > ~/.rvmrc" wait su -s /bin/bash - ${UQ} -c "echo rvm_silence_path_mismatch_check_flag=1 >> ~/.rvmrc" rm -f /var/run/manage_rvm_users.pid if [ -d "/usr/local/.off_rvm" ]; then mv -f /usr/local/.off_rvm /usr/local/rvm fi rm -f /bin/sh ln -s /bin/websh /bin/sh rm -f /usr/bin/sh ln -s /bin/websh /usr/bin/sh fi su -s /bin/bash - ${UQ} -c "echo rvm_autoupdate_flag=0 > ~/.rvmrc" wait su -s /bin/bash - ${UQ} -c "echo rvm_silence_path_mismatch_check_flag=1 >> ~/.rvmrc" if [ ! 
-e "/home/${UQ}/.rvm/rubies/default" ]; then touch /var/run/manage_rvm_users.pid if [ -d "/usr/local/rvm" ]; then mv -f /usr/local/rvm /usr/local/.off_rvm fi if [ -x "/bin/websh" ] && [ -L "/bin/sh" ]; then _WEB_SH=$(readlink -n /bin/sh 2>&1) _WEB_SH=$(echo -n ${_WEB_SH} | tr -d "\n" 2>&1) if [ -x "/bin/dash" ]; then if [ "${_WEB_SH}" != "/bin/dash" ]; then rm -f /bin/sh ln -s /bin/dash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /bin/dash /usr/bin/sh fi fi elif [ -x "/usr/bin/dash" ]; then if [ "${_WEB_SH}" != "/usr/bin/dash" ]; then rm -f /bin/sh ln -s /usr/bin/dash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /usr/bin/dash /usr/bin/sh fi fi elif [ -x "/bin/bash" ]; then if [ "${_WEB_SH}" != "/bin/bash" ]; then rm -f /bin/sh ln -s /bin/bash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /bin/bash /usr/bin/sh fi fi elif [ -x "/usr/bin/bash" ]; then if [ "${_WEB_SH}" != "/usr/bin/bash" ]; then rm -f /bin/sh ln -s /usr/bin/bash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /usr/bin/bash /usr/bin/sh fi fi fi fi su -s /bin/bash - ${UQ} -c "rvm install ${_RUBY_VRN}" su -s /bin/bash - ${UQ} -c "rvm use ${_RUBY_VRN} --default" rm -f /var/run/manage_rvm_users.pid if [ -d "/usr/local/.off_rvm" ]; then mv -f /usr/local/.off_rvm /usr/local/rvm fi rm -f /bin/sh ln -s /bin/websh /bin/sh rm -f /usr/bin/sh ln -s /bin/websh /usr/bin/sh fi if [ ! -f "${dscUsr}/log/.gems.build.d.${UQ}.${_X_SE}.txt" ]; then rm -f ${dscUsr}/log/eventmachine* if [ -x "/bin/websh" ] && [ -L "/bin/sh" ]; then _WEB_SH=$(readlink -n /bin/sh 2>&1) _WEB_SH=$(echo -n ${_WEB_SH} | tr -d "\n" 2>&1) if [ -x "/bin/dash" ]; then if [ "${_WEB_SH}" != "/bin/dash" ]; then rm -f /bin/sh ln -s /bin/dash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /bin/dash /usr/bin/sh fi fi elif [ -x "/usr/bin/dash" ]; then if [ "${_WEB_SH}" != "/usr/bin/dash" ]; then rm -f /bin/sh ln -s /usr/bin/dash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /usr/bin/dash /usr/bin/sh fi fi elif [ -x "/bin/bash" ]; then if [ "${_WEB_SH}" != "/bin/bash" ]; then rm -f /bin/sh ln -s /bin/bash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /bin/bash /usr/bin/sh fi fi elif [ -x "/usr/bin/bash" ]; then if [ "${_WEB_SH}" != "/usr/bin/bash" ]; then rm -f /bin/sh ln -s /usr/bin/bash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /usr/bin/bash /usr/bin/sh fi fi fi fi touch /var/run/manage_rvm_users.pid if [ -d "/usr/local/rvm" ]; then mv -f /usr/local/rvm /usr/local/.off_rvm fi su -s /bin/bash - ${UQ} -c "rvm all do gem install --conservative bluecloth" &> /dev/null su -s /bin/bash - ${UQ} -c "rvm all do gem install --conservative eventmachine" &> /dev/null su -s /bin/bash - ${UQ} -c "rvm all do gem install --version 1.0.3 eventmachine" &> /dev/null su -s /bin/bash - ${UQ} -c "rvm all do gem install --conservative ffi" &> /dev/null su -s /bin/bash - ${UQ} -c "rvm all do gem install --version 1.9.3 ffi" &> /dev/null su -s /bin/bash - ${UQ} -c "rvm all do gem install --version 1.9.18 ffi" &> /dev/null su -s /bin/bash - ${UQ} -c "rvm all do gem install --conservative hitimes" &> /dev/null su -s /bin/bash - ${UQ} -c "rvm all do gem install --conservative http_parser.rb" &> /dev/null su -s /bin/bash - ${UQ} -c "rvm all do gem install --conservative oily_png" &> /dev/null su -s /bin/bash - ${UQ} -c "rvm all do gem install --version 1.1.1 oily_png" &> /dev/null su -s /bin/bash - ${UQ} -c "rvm all do gem install --conservative yajl-ruby" &> /dev/null touch 
${dscUsr}/log/.gems.build.d.${UQ}.${_X_SE}.txt rm -f /var/run/manage_rvm_users.pid if [ -d "/usr/local/.off_rvm" ]; then mv -f /usr/local/.off_rvm /usr/local/rvm fi rm -f /bin/sh ln -s /bin/websh /bin/sh rm -f /usr/bin/sh ln -s /bin/websh /usr/bin/sh fi if [ -d "/home/${UQ}/.rvm/src" ]; then rm -rf /home/${UQ}/.rvm/src/* fi if [ -d "/home/${UQ}/.rvm/archives" ]; then rm -rf /home/${UQ}/.rvm/archives/* fi if [ -d "/home/${UQ}/.rvm/log" ]; then rm -rf /home/${UQ}/.rvm/log/* fi if [ ! -d "/home/${UQ}/.npm-packages" ] || [ ! -d "/home/${UQ}/.npm" ]; then su -s /bin/bash - ${UQ} -c "mkdir ~/.bundle" su -s /bin/bash - ${UQ} -c "mkdir ~/.composer" su -s /bin/bash - ${UQ} -c "mkdir ~/.config" su -s /bin/bash - ${UQ} -c "mkdir ~/.npm-packages" su -s /bin/bash - ${UQ} -c "mkdir ~/.npm" su -s /bin/bash - ${UQ} -c "mkdir ~/.sass-cache" su -s /bin/bash - ${UQ} -c "echo prefix = /home/${UQ}/.npm-packages > ~/.npmrc" fi rm -f /home/${UQ}/{.profile,.bash_logout,.bash_profile,.bashrc,.zlogin,.zshrc} rm -f /home/${UQ}/.rvm/scripts/notes else if [ -d "/home/${UQ}/.rvm" ] || [ -d "/home/${UQ}/.gem" ]; then rm -f ${dscUsr}/log/.gems.build* rm -rf /home/${UQ}/.rvm &> /dev/null rm -rf /home/${UQ}/.gem &> /dev/null fi if [ -d "/home/${UQ}/.npm" ] || [ -d "/home/${UQ}/.npm-packages" ]; then rm -f /home/${UQ}/.npmrc rm -rf /home/${UQ}/.npm &> /dev/null rm -rf /home/${UQ}/.npm-packages &> /dev/null fi fi if [ "$1" != "${_USER}.ftp" ]; then if [ -d "/home/$1/" ]; then chattr +i /home/$1/ fi else if [ -d "/home/$1/platforms/" ]; then chattr +i /home/$1/platforms/ chattr +i /home/$1/platforms/* &> /dev/null fi fi if [ -d "/home/$1/.drush/" ]; then chattr +i /home/$1/.drush/ fi if [ -d "/home/$1/.drush/usr/" ]; then chattr +i /home/$1/.drush/usr/ fi if [ -f "/home/$1/.drush/php.ini" ]; then chattr +i /home/$1/.drush/*.ini fi if [ -d "/home/$1/.bazaar/" ]; then chattr +i /home/$1/.bazaar/ fi fi } # # Disable chattr. disable_chattr() { isTest="$1" isTest=${isTest//[^a-z0-9]/} if [ ! -z "${isTest}" ] && [ -d "/home/$1/" ]; then if [ "$1" != "${_USER}.ftp" ]; then if [ -d "/home/$1/" ]; then chattr -i /home/$1/ fi else if [ -d "/home/$1/platforms/" ]; then chattr -i /home/$1/platforms/ chattr -i /home/$1/platforms/* &> /dev/null fi fi if [ -d "/home/$1/.drush/" ]; then chattr -i /home/$1/.drush/ fi if [ -d "/home/$1/.drush/usr/" ]; then chattr -i /home/$1/.drush/usr/ fi if [ -f "/home/$1/.drush/php.ini" ]; then chattr -i /home/$1/.drush/*.ini fi if [ -d "/home/$1/.bazaar/" ]; then chattr -i /home/$1/.bazaar/ fi fi } # # Kill zombies. kill_zombies() { for Existing in `cat /etc/passwd | cut -d ':' -f1 | sort`; do _SEC_IDY=$(id -nG ${Existing} 2>&1) if [[ "${_SEC_IDY}" =~ "ltd-shell" ]] \ && [ ! -z "${Existing}" ] \ && [[ ! "${Existing}" =~ ".ftp"($) ]] \ && [[ ! "${Existing}" =~ ".web"($) ]]; then usrParent=$(echo ${Existing} | cut -d. -f1 | awk '{ print $1}' 2>&1) usrParentTest=${usrParent//[^a-z0-9]/} if [ ! -z "${usrParentTest}" ]; then _PAR_DIR="/data/disk/${usrParent}/clients" _SEC_SYM="/home/${Existing}/sites" _SEC_DIR=$(readlink -n ${_SEC_SYM} 2>&1) _SEC_DIR=$(echo -n ${_SEC_DIR} | tr -d "\n" 2>&1) if [ ! -L "${_SEC_SYM}" ] || [ ! -e "${_SEC_DIR}" ] \ || [ ! 
-e "/home/${usrParent}.ftp/users/${Existing}" ]; then mkdir -p /var/backups/zombie/deleted/${_NOW} kill -9 $(ps aux | grep '[g]pg-agent' | awk '{print $2}') &> /dev/null disable_chattr ${Existing} deluser \ --remove-home \ --backup-to /var/backups/zombie/deleted/${_NOW} ${Existing} &> /dev/null rm -f /home/${usrParent}.ftp/users/${Existing} echo Zombie from etc.passwd ${Existing} killed echo fi fi fi done for Existing in `ls /home | cut -d '/' -f1 | sort`; do isTest=${Existing//[^a-z0-9]/} if [ ! -z "${isTest}" ]; then _SEC_IDY=$(id -nG ${Existing} 2>&1) if [[ "${_SEC_IDY}" =~ "No such user" ]] \ && [ ! -z "${Existing}" ] \ && [[ ! "${Existing}" =~ ".ftp"($) ]] \ && [[ ! "${Existing}" =~ ".web"($) ]]; then disable_chattr ${Existing} mkdir -p /var/backups/zombie/deleted/${_NOW} mv /home/${Existing} /var/backups/zombie/deleted/${_NOW}/.leftover-${Existing} usrParent=$(echo ${Existing} | cut -d. -f1 | awk '{ print $1}' 2>&1) if [ -e "/home/${usrParent}.ftp/users/${Existing}" ]; then rm -f /home/${usrParent}.ftp/users/${Existing} fi echo Zombie from home.dir ${Existing} killed echo fi fi done } # # Fix dot dirs. fix_dot_dirs() { usrLtdTest=${usrLtd//[^a-z0-9]/} if [ ! -z "${usrLtdTest}" ]; then usrTmp="/home/${usrLtd}/.tmp" if [ ! -d "${usrTmp}" ]; then mkdir -p ${usrTmp} chown ${usrLtd}:${usrGroup} ${usrTmp} chmod 02755 ${usrTmp} fi usrLftp="/home/${usrLtd}/.lftp" if [ ! -d "${usrLftp}" ]; then mkdir -p ${usrLftp} chown ${usrLtd}:${usrGroup} ${usrLftp} chmod 02755 ${usrLftp} fi usrLhist="/home/${usrLtd}/.lhistory" if [ ! -e "${usrLhist}" ]; then touch ${usrLhist} chown ${usrLtd}:${usrGroup} ${usrLhist} chmod 644 ${usrLhist} fi usrDrush="/home/${usrLtd}/.drush" if [ ! -d "${usrDrush}" ]; then mkdir -p ${usrDrush} chown ${usrLtd}:${usrGroup} ${usrDrush} chmod 700 ${usrDrush} fi usrSsh="/home/${usrLtd}/.ssh" if [ ! -d "${usrSsh}" ]; then mkdir -p ${usrSsh} chown -R ${usrLtd}:${usrGroup} ${usrSsh} chmod 700 ${usrSsh} fi chmod 600 ${usrSsh}/id_{r,d}sa &> /dev/null chmod 600 ${usrSsh}/known_hosts &> /dev/null usrBzr="/home/${usrLtd}/.bazaar" if [ -x "/usr/local/bin/bzr" ]; then if [ ! -z "${usrLtd}" ] && [ ! -e "${usrBzr}/bazaar.conf" ]; then mkdir -p ${usrBzr} echo ignore_missing_extensions=True > ${usrBzr}/bazaar.conf chown -R ${usrLtd}:${usrGroup} ${usrBzr} chmod 700 ${usrBzr} fi else if [ ! -z "${usrLtd}" ] && [ -d "${usrBzr}" ]; then rm -rf ${usrBzr} fi fi fi } # # Manage Drush Aliases. manage_sec_user_drush_aliases() { if [ -e "${Client}" ]; then if [ -L "${usrLtdRoot}/sites" ]; then symTgt=$(readlink -n ${usrLtdRoot}/sites 2>&1) symTgt=$(echo -n ${symTgt} | tr -d "\n" 2>&1) else rm -f ${usrLtdRoot}/sites fi if [ "${symTgt}" != "${Client}" ] \ || [ ! -e "${usrLtdRoot}/sites" ]; then rm -f ${usrLtdRoot}/sites ln -sf ${Client} ${usrLtdRoot}/sites fi fi if [ ! -e "${usrLtdRoot}/.drush" ]; then mkdir -p ${usrLtdRoot}/.drush fi for Alias in `find ${usrLtdRoot}/.drush/*.alias.drushrc.php \ -maxdepth 1 -type f | sort`; do AliasName=$(echo "$Alias" | cut -d'/' -f5 | awk '{ print $1}' 2>&1) AliasName=$(echo "${AliasName}" \ | sed "s/.alias.drushrc.php//g" \ | awk '{ print $1}' 2>&1) if [ ! -z "${AliasName}" ] \ && [ ! 
-e "${usrLtdRoot}/sites/${AliasName}" ]; then rm -f ${usrLtdRoot}/.drush/${AliasName}.alias.drushrc.php fi done for Symlink in `find ${usrLtdRoot}/sites/ \ -maxdepth 1 -mindepth 1 | sort`; do SiteName=$(echo $Symlink \ | cut -d'/' -f5 \ | awk '{ print $1}' 2>&1) pthAliasMain="${pthParentUsr}/.drush/${SiteName}.alias.drushrc.php" pthAliasCopy="${usrLtdRoot}/.drush/${SiteName}.alias.drushrc.php" if [ ! -z "$SiteName" ] && [ ! -e "${pthAliasCopy}" ]; then cp -af ${pthAliasMain} ${pthAliasCopy} chmod 440 ${pthAliasCopy} elif [ ! -z "$SiteName" ] && [ -e "${pthAliasCopy}" ]; then _DIFF_T=$(diff -w -B ${pthAliasCopy} ${pthAliasMain} 2>&1) if [ ! -z "${_DIFF_T}" ]; then cp -af ${pthAliasMain} ${pthAliasCopy} chmod 440 ${pthAliasCopy} fi fi done } # # OK, create user. ok_create_user() { usrLtdTest=${usrLtd//[^a-z0-9]/} if [ ! -z "${usrLtdTest}" ]; then _ADMIN="${_USER}.ftp" echo "_ADMIN is == ${_ADMIN} == at ok_create_user" usrLtdRoot="/home/${usrLtd}" _SEC_SYM="${usrLtdRoot}/sites" _TMP="/var/tmp" if [ ! -L "${_SEC_SYM}" ]; then mkdir -p /var/backups/zombie/deleted/${_NOW} mv -f ${usrLtdRoot} /var/backups/zombie/deleted/${_NOW}/ &> /dev/null fi if [ ! -d "${usrLtdRoot}" ]; then if [ -e "/usr/bin/mysecureshell" ] && [ -e "/etc/ssh/sftp_config" ]; then useradd -d ${usrLtdRoot} -s /usr/bin/mysecureshell -m -N -r ${usrLtd} echo "usrLtdRoot is == ${usrLtdRoot} == at ok_create_user" else useradd -d ${usrLtdRoot} -s /usr/bin/lshell -m -N -r ${usrLtd} fi adduser ${usrLtd} ${_WEBG} _ESC_LUPASS="" _LEN_LUPASS=0 if [ "${_STRONG_PASSWORDS}" = "YES" ] ; then _PWD_CHARS=32 elif [ "${_STRONG_PASSWORDS}" = "NO" ]; then _PWD_CHARS=8 else _STRONG_PASSWORDS=${_STRONG_PASSWORDS//[^0-9]/} if [ ! -z "${_STRONG_PASSWORDS}" ] \ && [ "${_STRONG_PASSWORDS}" -gt "8" ]; then _PWD_CHARS="${_STRONG_PASSWORDS}" else _PWD_CHARS=8 fi if [ ! -z "${_PWD_CHARS}" ] && [ "${_PWD_CHARS}" -gt "128" ]; then _PWD_CHARS=128 fi fi if [ "${_STRONG_PASSWORDS}" = "YES" ] || [ "${_PWD_CHARS}" -gt "8" ]; then _ESC_LUPASS=$(randpass "${_PWD_CHARS}" alnum 2>&1) _ESC_LUPASS=$(echo -n "${_ESC_LUPASS}" | tr -d "\n" 2>&1) _LEN_LUPASS=$(echo ${#_ESC_LUPASS} 2>&1) fi if [ -z "${_ESC_LUPASS}" ] || [ "${_LEN_LUPASS}" -lt "9" ]; then _ESC_LUPASS=$(shuf -zer -n19 {A..Z} {a..z} {0..9} | tr -d '\0' 2>&1) _ESC_LUPASS=$(echo -n "${_ESC_LUPASS}" | tr -d "\n" 2>&1) _ESC_LUPASS=$(sanitize_string "${_ESC_LUPASS}" 2>&1) fi ph=$(mkpasswd -m sha-512 "${_ESC_LUPASS}" \ $(openssl rand -base64 16 | tr -d '+=' | head -c 16) 2>&1) usermod -p $ph ${usrLtd} passwd -w 7 -x 90 ${usrLtd} usermod -aG lshellg ${usrLtd} usermod -aG ltd-shell ${usrLtd} isRvm=$(which rvm 2>&1) if [ -x "${isRvm}" ]; then rvmPth="${isRvm}" elif [ -x "/usr/local/rvm/bin/rvm" ]; then rvmPth="/usr/local/rvm/bin/rvm" fi if [ -x "${rvmPth}" ]; then usermod -aG rvm ${usrLtd} fi fi if [ ! -e "/home/${_ADMIN}/users/${usrLtd}" ] \ && [ ! -z "${_ESC_LUPASS}" ]; then if [ -e "/usr/bin/mysecureshell" ] \ && [ -e "/etc/ssh/sftp_config" ]; then chsh -s /usr/bin/mysecureshell ${usrLtd} else chsh -s /usr/bin/lshell ${usrLtd} fi echo >> ${_THIS_LTD_CONF} echo "[${usrLtd}]" >> ${_THIS_LTD_CONF} echo "path : [${_ALLD_DIR}]" >> ${_THIS_LTD_CONF} chmod 700 ${usrLtdRoot} mkdir -p /home/${_ADMIN}/users echo "${_ESC_LUPASS}" > /home/${_ADMIN}/users/${usrLtd} fi fix_dot_dirs rm -f ${usrLtdRoot}/{.profile,.bash_logout,.bash_profile,.bashrc} fi } # # OK, update user. ok_update_user() { usrLtdTest=${usrLtd//[^a-z0-9]/} if [ ! 
-z "${usrLtdTest}" ]; then _ADMIN="${_USER}.ftp" usrLtdRoot="/home/${usrLtd}" if [ -e "/home/${_ADMIN}/users/${usrLtd}" ]; then echo >> ${_THIS_LTD_CONF} echo "[${usrLtd}]" >> ${_THIS_LTD_CONF} echo "path : [${_ALLD_DIR}]" >> ${_THIS_LTD_CONF} manage_sec_user_drush_aliases chmod 700 ${usrLtdRoot} fi fix_dot_dirs rm -f ${usrLtdRoot}/{.profile,.bash_logout,.bash_profile,.bashrc} fi } # # Add user if not exists. add_user_if_not_exists() { usrLtdTest=${usrLtd//[^a-z0-9]/} if [ ! -z "${usrLtdTest}" ]; then _ID_EXISTS=$(getent passwd ${usrLtd} 2>&1) _ID_SHELLS=$(id -nG ${usrLtd} 2>&1) echo "_ID_EXISTS is == ${_ID_EXISTS} == at add_user_if_not_exists" echo "_ID_SHELLS is == ${_ID_SHELLS} == at add_user_if_not_exists" if [ -z "${_ID_EXISTS}" ]; then echo "We will create user == ${usrLtd} ==" ok_create_user manage_sec_user_drush_aliases enable_chattr ${usrLtd} elif [[ "${_ID_EXISTS}" =~ "${usrLtd}" ]] \ && [[ "${_ID_SHELLS}" =~ "ltd-shell" ]]; then echo "We will update user == ${usrLtd} ==" disable_chattr ${usrLtd} rm -rf /home/${usrLtd}/drush-backups usrTmp="/home/${usrLtd}/.tmp" if [ ! -d "${usrTmp}" ]; then mkdir -p ${usrTmp} chown ${usrLtd}:${usrGroup} ${usrTmp} chmod 02755 ${usrTmp} fi find ${usrTmp} -mtime +0 -exec rm -rf {} \; &> /dev/null ok_update_user enable_chattr ${usrLtd} fi fi } # # Manage Access Paths. manage_sec_access_paths() { #for Domain in `find ${Client}/ -maxdepth 1 -mindepth 1 -type l -printf %P\\n | sort` for Domain in `find ${Client}/ -maxdepth 1 -mindepth 1 -type l | sort`; do _PATH_DOM=$(readlink -n ${Domain} 2>&1) _PATH_DOM=$(echo -n ${_PATH_DOM} | tr -d "\n" 2>&1) _ALLD_DIR="${_ALLD_DIR}, '${_PATH_DOM}'" if [ -e "${_PATH_DOM}" ]; then _ALLD_NUM=$(( _ALLD_NUM += 1 )) fi echo Done for ${Domain} at ${Client} done } # # Manage Secondary Users. manage_sec() { for Client in `find ${pthParentUsr}/clients/ -maxdepth 1 -mindepth 1 -type d | sort`; do usrLtd=$(echo ${Client} | cut -d'/' -f6 | awk '{ print $1}' 2>&1) usrLtd=${usrLtd//[^a-zA-Z0-9]/} usrLtd=$(echo -n ${usrLtd} | tr A-Z a-z 2>&1) if [ ! -z "${usrLtd}" ]; then usrLtd="${_USER}.${usrLtd}" echo "usrLtd is == ${usrLtd} == at manage_sec" _ALLD_NUM="0" _ALLD_CTL="1" _ALLD_DIR="'${Client}'" cd ${Client} manage_sec_access_paths #_ALLD_DIR="${_ALLD_DIR}, '/home/${usrLtd}'" if [ "${_ALLD_NUM}" -ge "${_ALLD_CTL}" ]; then add_user_if_not_exists echo Done for ${Client} at ${pthParentUsr} else echo Empty ${Client} at ${pthParentUsr} - deleting now if [ -e "${Client}" ]; then rmdir ${Client} fi fi fi done } # # Update local INI for PHP CLI on the Aegir Satellite Instance. php_cli_local_ini_update() { _U_HD="${dscUsr}/.drush" _U_TP="${dscUsr}/.tmp" _U_II="${_U_HD}/php.ini" _PHP_CLI_UPDATE=NO _CHECK_USE_PHP_CLI=$(grep "/opt/php" ${_DRUSH_FILE} 2>&1) _PHP_V="82 81 80 74 73 72 71 70 56" for e in ${_PHP_V}; do if [[ "${_CHECK_USE_PHP_CLI}" =~ "php${e}" ]] \ && [ ! -e "${_U_HD}/.ctrl.php${e}.${_X_SE}.pid" ]; then _PHP_CLI_UPDATE=YES fi done if [ "${_PHP_CLI_UPDATE}" = "YES" ] \ || [ ! -e "${_U_II}" ] \ || [ ! -d "${_U_TP}" ] \ || [ ! 
-e "${_U_HD}/.ctrl.${_X_SE}.pid" ]; then mkdir -p ${_U_TP} touch ${_U_TP} find ${_U_TP}/ -mtime +0 -exec rm -rf {} \; &> /dev/null mkdir -p ${_U_HD} chown ${_USER}:${usrGroup} ${_U_TP} chown ${_USER}:${usrGroup} ${_U_HD} chmod 755 ${_U_TP} chmod 755 ${_U_HD} chattr -i ${_U_II} rm -f ${_U_HD}/.ctrl.php* rm -f ${_U_II} if [[ "${_CHECK_USE_PHP_CLI}" =~ "php82" ]]; then cp -af /opt/php82/lib/php.ini ${_U_II} _U_INI=82 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php81" ]]; then cp -af /opt/php81/lib/php.ini ${_U_II} _U_INI=81 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php80" ]]; then cp -af /opt/php80/lib/php.ini ${_U_II} _U_INI=80 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php74" ]]; then cp -af /opt/php74/lib/php.ini ${_U_II} _U_INI=74 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php73" ]]; then cp -af /opt/php73/lib/php.ini ${_U_II} _U_INI=73 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php72" ]]; then cp -af /opt/php72/lib/php.ini ${_U_II} _U_INI=72 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php71" ]]; then cp -af /opt/php71/lib/php.ini ${_U_II} _U_INI=71 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php70" ]]; then cp -af /opt/php70/lib/php.ini ${_U_II} _U_INI=70 elif [[ "${_CHECK_USE_PHP_CLI}" =~ "php56" ]]; then cp -af /opt/php56/lib/php.ini ${_U_II} _U_INI=56 fi if [ -e "${_U_II}" ]; then _INI="open_basedir = \".: \ /data/all: \ /data/conf: \ /data/disk/all: \ /opt/php56: \ /opt/php70: \ /opt/php71: \ /opt/php72: \ /opt/php73: \ /opt/php74: \ /opt/php80: \ /opt/php81: \ /opt/php82: \ /opt/tika: \ /opt/tika7: \ /opt/tika8: \ /opt/tika9: \ /dev/urandom: \ /opt/tmp/make_local: \ /opt/tools/drush: \ ${dscUsr}: \ /usr/local/bin: \ /usr/bin\"" _INI=$(echo "${_INI}" | sed "s/ //g" 2>&1) _INI=$(echo "${_INI}" | sed "s/open_basedir=/open_basedir = /g" 2>&1) _INI=${_INI//\//\\\/} _QTP=${_U_TP//\//\\\/} sed -i "s/.*open_basedir =.*/${_INI}/g" ${_U_II} wait sed -i "s/.*error_reporting =.*/error_reporting = 1/g" ${_U_II} wait sed -i "s/.*session.save_path =.*/session.save_path = ${_QTP}/g" ${_U_II} wait sed -i "s/.*soap.wsdl_cache_dir =.*/soap.wsdl_cache_dir = ${_QTP}/g" ${_U_II} wait sed -i "s/.*sys_temp_dir =.*/sys_temp_dir = ${_QTP}/g" ${_U_II} wait sed -i "s/.*upload_tmp_dir =.*/upload_tmp_dir = ${_QTP}/g" ${_U_II} wait echo > ${_U_HD}/.ctrl.php${_U_INI}.${_X_SE}.pid echo > ${_U_HD}/.ctrl.${_X_SE}.pid fi chattr +i ${_U_II} fi } # # Update PHP-CLI for Drush. php_cli_drush_update() { if [ ! 
-z "${1}" ]; then _DRUSH_FILE="${dscUsr}/tools/drush/${1}" else _DRUSH_FILE="${dscUsr}/tools/drush/drush.php" fi if [ "${_T_CLI_VRN}" = "8.2" ] && [ -x "/opt/php82/bin/php" ]; then sed -i "s/^#\!\/.*/#\!\/opt\/php82\/bin\/php/g" ${_DRUSH_FILE} &> /dev/null _T_CLI=/opt/php82/bin elif [ "${_T_CLI_VRN}" = "8.1" ] && [ -x "/opt/php81/bin/php" ]; then sed -i "s/^#\!\/.*/#\!\/opt\/php81\/bin\/php/g" ${_DRUSH_FILE} &> /dev/null _T_CLI=/opt/php81/bin elif [ "${_T_CLI_VRN}" = "8.0" ] && [ -x "/opt/php80/bin/php" ]; then sed -i "s/^#\!\/.*/#\!\/opt\/php80\/bin\/php/g" ${_DRUSH_FILE} &> /dev/null _T_CLI=/opt/php80/bin elif [ "${_T_CLI_VRN}" = "7.4" ] && [ -x "/opt/php74/bin/php" ]; then sed -i "s/^#\!\/.*/#\!\/opt\/php74\/bin\/php/g" ${_DRUSH_FILE} &> /dev/null _T_CLI=/opt/php74/bin elif [ "${_T_CLI_VRN}" = "7.3" ] && [ -x "/opt/php73/bin/php" ]; then sed -i "s/^#\!\/.*/#\!\/opt\/php73\/bin\/php/g" ${_DRUSH_FILE} &> /dev/null _T_CLI=/opt/php73/bin elif [ "${_T_CLI_VRN}" = "7.2" ] && [ -x "/opt/php72/bin/php" ]; then sed -i "s/^#\!\/.*/#\!\/opt\/php72\/bin\/php/g" ${_DRUSH_FILE} &> /dev/null _T_CLI=/opt/php72/bin elif [ "${_T_CLI_VRN}" = "7.1" ] && [ -x "/opt/php71/bin/php" ]; then sed -i "s/^#\!\/.*/#\!\/opt\/php71\/bin\/php/g" ${_DRUSH_FILE} &> /dev/null _T_CLI=/opt/php71/bin elif [ "${_T_CLI_VRN}" = "7.0" ] && [ -x "/opt/php70/bin/php" ]; then sed -i "s/^#\!\/.*/#\!\/opt\/php70\/bin\/php/g" ${_DRUSH_FILE} &> /dev/null _T_CLI=/opt/php70/bin elif [ "${_T_CLI_VRN}" = "5.6" ] && [ -x "/opt/php56/bin/php" ]; then sed -i "s/^#\!\/.*/#\!\/opt\/php56\/bin\/php/g" ${_DRUSH_FILE} &> /dev/null _T_CLI=/opt/php56/bin else _T_CLI=/foo/bar fi if [ -x "${_T_CLI}/php" ]; then _DRUSHCMD="${_T_CLI}/php ${dscUsr}/tools/drush/drush.php" if [ -e "${dscUsr}/aegir.sh" ]; then rm -f ${dscUsr}/aegir.sh fi touch ${dscUsr}/aegir.sh echo -e "#!/bin/bash\n\nPATH=.:${_T_CLI}:/usr/sbin:/usr/bin:/sbin:/bin\n${_DRUSHCMD} \ '@hostmaster' hosting-dispatch\ntouch ${dscUsr}/${_USER}-task.done" \ | fmt -su -w 2500 | tee -a ${dscUsr}/aegir.sh >/dev/null 2>&1 chown ${_USER}:${usrGroup} ${dscUsr}/aegir.sh &> /dev/null chmod 0700 ${dscUsr}/aegir.sh &> /dev/null fi echo OK > ${dscUsr}/static/control/.ctrl.cli.${_X_SE}.pid } # # Tune FPM workers. 
satellite_tune_fpm_workers() {
  _VM_TEST=$(uname -a 2>&1)
  _AWS_TEST_A=$(dmidecode -s bios-version 2>&1)
  _AWS_TEST_B=$(head -c 3 /sys/hypervisor/uuid 2>&1)
  if [ -e "/proc/bean_counters" ]; then
    _VMFAMILY="VZ"
  else
    _VMFAMILY="XEN"
  fi
  if [[ "${_VM_TEST}" =~ "-beng" ]]; then
    _VMFAMILY="VS"
  fi
  if [[ "${_AWS_TEST_A}" =~ "amazon" ]] \
    || [[ "${_AWS_TEST_B}" =~ "ec2" ]]; then
    _VMFAMILY="AWS"
  fi
  _RAM=$(free -mt | grep Mem: | awk '{ print $2 }' 2>&1)
  if [ "${_RESERVED_RAM}" -gt "0" ]; then
    _RAM=$(( _RAM - _RESERVED_RAM ))
  fi
  _USE=$(( _RAM / 4 ))
  if [ "${_USE}" -ge "512" ] && [ "${_USE}" -lt "2048" ]; then
    if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then
      _L_PHP_FPM_WORKERS=$(( _CPU_NR * 4 ))
    else
      _L_PHP_FPM_WORKERS=${_PHP_FPM_WORKERS}
    fi
  elif [ "${_USE}" -ge "2048" ]; then
    if [ "${_VMFAMILY}" = "XEN" ] || [ "${_VMFAMILY}" = "AWS" ]; then
      if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then
        _L_PHP_FPM_WORKERS=$(( _CPU_NR * 4 ))
      else
        _L_PHP_FPM_WORKERS=${_PHP_FPM_WORKERS}
      fi
    elif [ "${_VMFAMILY}" = "VS" ] || [ "${_VMFAMILY}" = "TG" ]; then
      if [ -e "/boot/grub/grub.cfg" ] \
        || [ -e "/boot/grub/menu.lst" ] \
        || [ -e "/root/.tg.cnf" ]; then
        if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then
          _L_PHP_FPM_WORKERS=$(( _CPU_NR * 4 ))
        else
          _L_PHP_FPM_WORKERS=${_PHP_FPM_WORKERS}
        fi
      else
        if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then
          _L_PHP_FPM_WORKERS=$(( _CPU_NR * 4 ))
        else
          _L_PHP_FPM_WORKERS=${_PHP_FPM_WORKERS}
        fi
      fi
    else
      if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then
        _L_PHP_FPM_WORKERS=$(( _CPU_NR * 4 ))
      else
        _L_PHP_FPM_WORKERS=${_PHP_FPM_WORKERS}
      fi
    fi
  else
    if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then
      _L_PHP_FPM_WORKERS=$(( _CPU_NR * 4 ))
    else
      _L_PHP_FPM_WORKERS=${_PHP_FPM_WORKERS}
    fi
  fi
}

#
# Disable New Relic per Octopus instance.
disable_newrelic() {
  _THIS_POOL_TPL="/opt/php$1/etc/pool.d/$2.conf"
  if [ -e "${_THIS_POOL_TPL}" ]; then
    _CHECK_NEW_RELIC_KEY=$(grep "newrelic.enabled.*true" ${_THIS_POOL_TPL} 2>&1)
    if [[ "${_CHECK_NEW_RELIC_KEY}" =~ "newrelic.enabled" ]]; then
      echo "New Relic for $2 will be disabled because newrelic.info does not exist"
      sed -i "s/^php_admin_value\[newrelic.license\].*/php_admin_value\[newrelic.license\] = \"\"/g" ${_THIS_POOL_TPL}
      wait
      sed -i "s/^php_admin_value\[newrelic.enabled\].*/php_admin_value\[newrelic.enabled\] = \"false\"/g" ${_THIS_POOL_TPL}
      wait
      if [ "$3" = "1" ] && [ -e "/etc/init.d/php$1-fpm" ]; then
        service php$1-fpm reload &> /dev/null
      fi
    fi
  fi
}

#
# Enable New Relic per Octopus instance.
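#
# (Annotation added in this dump, not part of the upstream script.) When
# ${dscUsr}/static/control/newrelic.info holds a license key, the function
# below injects or updates two directives in the matching FPM pool file
# /opt/php$1/etc/pool.d/$2.conf; roughly the result looks like this (the
# key shown is a made-up placeholder):
#
#   php_admin_value[newrelic.license] = "0123456789abcdef0123456789abcdef01234567"
#   php_admin_value[newrelic.enabled] = "true"
#
# With an empty or missing control file, disable_newrelic() above rewrites
# the same two directives to an empty license and "false" instead.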
enable_newrelic() { _LOC_NEW_RELIC_KEY=$(cat ${dscUsr}/static/control/newrelic.info 2>&1) _LOC_NEW_RELIC_KEY=${_LOC_NEW_RELIC_KEY//[^0-9a-zA-Z]/} _LOC_NEW_RELIC_KEY=$(echo -n ${_LOC_NEW_RELIC_KEY} | tr -d "\n" 2>&1) if [ -z "${_LOC_NEW_RELIC_KEY}" ]; then disable_newrelic $1 $2 $3 else _THIS_POOL_TPL="/opt/php$1/etc/pool.d/$2.conf" if [ -e "${_THIS_POOL_TPL}" ]; then _CHECK_NEW_RELIC_TPL=$(grep "newrelic.license" ${_THIS_POOL_TPL} 2>&1) _CHECK_NEW_RELIC_KEY=$(grep "${_LOC_NEW_RELIC_KEY}" ${_THIS_POOL_TPL} 2>&1) if [[ "${_CHECK_NEW_RELIC_KEY}" =~ "${_LOC_NEW_RELIC_KEY}" ]]; then echo "New Relic integration is already active for $2" else if [[ "${_CHECK_NEW_RELIC_TPL}" =~ "newrelic.license" ]]; then echo "New Relic for $2 update with key ${_LOC_NEW_RELIC_KEY} in php$1" sed -i "s/^php_admin_value\[newrelic.license\].*/php_admin_value\[newrelic.license\] = \"${_LOC_NEW_RELIC_KEY}\"/g" ${_THIS_POOL_TPL} wait sed -i "s/^php_admin_value\[newrelic.enabled\].*/php_admin_value\[newrelic.enabled\] = \"true\"/g" ${_THIS_POOL_TPL} wait else echo "New Relic for $2 setup with key ${_LOC_NEW_RELIC_KEY} in php$1" echo "php_admin_value[newrelic.license] = \"${_LOC_NEW_RELIC_KEY}\"" >> ${_THIS_POOL_TPL} echo "php_admin_value[newrelic.enabled] = \"true\"" >> ${_THIS_POOL_TPL} fi if [ "$3" = "1" ] && [ -e "/etc/init.d/php$1-fpm" ]; then service php$1-fpm reload &> /dev/null fi fi fi fi } # # Switch New Relic on or off per Octopus instance. switch_newrelic() { isPhp="$1" isPhp=${isPhp//[^0-9]/} isUsr="$2" isUsr=${isUsr//[^a-z0-9]/} isRld="$3" isRld=${isRld//[^0-1]/} if [ ! -z "${isPhp}" ] && [ ! -z "${isUsr}" ] && [ ! -z "${isRld}" ]; then if [ -e "${dscUsr}/static/control/newrelic.info" ]; then enable_newrelic $1 $2 $3 else disable_newrelic $1 $2 $3 fi fi } # # Update web user. satellite_web_user_update() { isTest="${_WEB}" isTest=${isTest//[^a-z0-9]/} if [ ! -z "${isTest}" ] && [[ ! "${_WEB}" =~ ".ftp"($) ]]; then _T_HD="/home/${_WEB}/.drush" _T_TP="/home/${_WEB}/.tmp" _T_TS="/home/${_WEB}/.aws" _T_II="${_T_HD}/php.ini" if [ -d "/home/${_WEB}" ] && [ ! -e "/home/${_WEB}/.lock" ]; then chattr -i /home/${_WEB} if [ -d "/home/${_WEB}/.drush" ]; then chattr -i /home/${_WEB}/.drush fi if [ -e "${_T_II}" ]; then chattr -i ${_T_II} fi mkdir -p /home/${_WEB}/.{tmp,drush,aws} touch /home/${_WEB}/.lock isTest="$1" isTest=${isTest//[^a-z0-9]/} if [ ! -z "${isTest}" ]; then if [ "$1" = "hhvm" ]; then if [ -e "/opt/php56/etc/php56.ini" ] \ && [ -x "/opt/php56/bin/php" ]; then _T_PV=56 fi else _T_PV=$1 fi fi if [ ! 
-z "${_T_PV}" ] && [ -e "/opt/php${_T_PV}/etc/php${_T_PV}.ini" ]; then cp -af /opt/php${_T_PV}/etc/php${_T_PV}.ini ${_T_II} else if [ -e "/opt/php82/etc/php82.ini" ]; then cp -af /opt/php82/etc/php82.ini ${_T_II} _T_PV=82 elif [ -e "/opt/php81/etc/php81.ini" ]; then cp -af /opt/php81/etc/php81.ini ${_T_II} _T_PV=81 elif [ -e "/opt/php80/etc/php80.ini" ]; then cp -af /opt/php80/etc/php80.ini ${_T_II} _T_PV=80 elif [ -e "/opt/php74/etc/php74.ini" ]; then cp -af /opt/php74/etc/php74.ini ${_T_II} _T_PV=74 elif [ -e "/opt/php73/etc/php73.ini" ]; then cp -af /opt/php73/etc/php73.ini ${_T_II} _T_PV=73 elif [ -e "/opt/php72/etc/php72.ini" ]; then cp -af /opt/php72/etc/php72.ini ${_T_II} _T_PV=72 elif [ -e "/opt/php71/etc/php71.ini" ]; then cp -af /opt/php71/etc/php71.ini ${_T_II} _T_PV=71 elif [ -e "/opt/php70/etc/php70.ini" ]; then cp -af /opt/php70/etc/php70.ini ${_T_II} _T_PV=70 elif [ -e "/opt/php56/etc/php56.ini" ]; then cp -af /opt/php56/etc/php56.ini ${_T_II} _T_PV=56 fi fi if [ -e "${_T_II}" ]; then _INI="open_basedir = \".: \ /data/all: \ /data/conf: \ /data/disk/all: \ /mnt: \ /opt/php56: \ /opt/php70: \ /opt/php71: \ /opt/php72: \ /opt/php73: \ /opt/php74: \ /opt/php80: \ /opt/php81: \ /opt/php82: \ /opt/tika: \ /opt/tika7: \ /opt/tika8: \ /opt/tika9: \ /dev/urandom: \ /srv: \ /usr/bin: \ /usr/local/bin: \ /var/second/${_USER}: \ ${dscUsr}/aegir: \ ${dscUsr}/backup-exports: \ ${dscUsr}/distro: \ ${dscUsr}/platforms: \ ${dscUsr}/static: \ ${_T_HD}: \ ${_T_TP}: \ ${_T_TS}\"" _INI=$(echo "${_INI}" | sed "s/ //g" 2>&1) _INI=$(echo "${_INI}" | sed "s/open_basedir=/open_basedir = /g" 2>&1) _INI=${_INI//\//\\\/} _QTP=${_T_TP//\//\\\/} sed -i "s/.*open_basedir =.*/${_INI}/g" ${_T_II} wait sed -i "s/.*session.save_path =.*/session.save_path = ${_QTP}/g" ${_T_II} wait sed -i "s/.*soap.wsdl_cache_dir =.*/soap.wsdl_cache_dir = ${_QTP}/g" ${_T_II} wait sed -i "s/.*sys_temp_dir =.*/sys_temp_dir = ${_QTP}/g" ${_T_II} wait sed -i "s/.*upload_tmp_dir =.*/upload_tmp_dir = ${_QTP}/g" ${_T_II} wait if [ "$1" = "hhvm" ]; then sed -i "s/.*ioncube.*//g" ${_T_II} wait sed -i "s/.*opcache.*//g" ${_T_II} wait fi rm -f ${_T_HD}/.ctrl.php* echo > ${_T_HD}/.ctrl.php${_T_PV}.${_X_SE}.pid fi chmod 700 /home/${_WEB} chown -R ${_WEB}:${_WEBG} /home/${_WEB} chmod 550 /home/${_WEB}/.drush chmod 440 /home/${_WEB}/.drush/php.ini rm -f /home/${_WEB}/.lock if [ -d "/home/${_WEB}" ]; then chattr +i /home/${_WEB} fi if [ -d "/home/${_WEB}/.drush" ]; then chattr +i /home/${_WEB}/.drush fi if [ -e "${_T_II}" ]; then chattr +i ${_T_II} fi fi fi } # # Remove web user. satellite_remove_web_user() { isTest="${_WEB}" isTest=${isTest//[^a-z0-9]/} if [ ! -z "${isTest}" ] && [[ ! "${_WEB}" =~ ".ftp"($) ]]; then if [ -d "/home/${_WEB}/" ] || [ "$1" = "clean" ]; then chattr -i /home/${_WEB}/ if [ -d "/home/${_WEB}/.drush/" ]; then chattr -i /home/${_WEB}/.drush/ fi kill -9 $(ps aux | grep '[g]pg-agent' | awk '{print $2}') &> /dev/null deluser \ --remove-home \ --backup-to /var/backups/zombie/deleted ${_WEB} &> /dev/null if [ -d "/home/${_WEB}/" ]; then rm -rf /home/${_WEB}/ &> /dev/null fi fi fi } # # Add web user. satellite_create_web_user() { isTest="${_WEB}" isTest=${isTest//[^a-z0-9]/} if [ ! -z "${isTest}" ] && [[ ! "${_WEB}" =~ ".ftp"($) ]]; then _T_HD="/home/${_WEB}/.drush" _T_II="${_T_HD}/php.ini" _T_ID_EXISTS=$(getent passwd ${_WEB} 2>&1) if [ ! -z "${_T_ID_EXISTS}" ] && [ -e "${_T_II}" ]; then satellite_web_user_update "$1" elif [ -z "${_T_ID_EXISTS}" ] || [ ! 
-e "${_T_II}" ]; then satellite_remove_web_user "clean" adduser --force-badname --system --ingroup www-data ${_WEB} &> /dev/null satellite_web_user_update "$1" fi fi } # # Add site specific socket config include. site_socket_inc_gen() { unlAeg="${dscUsr}/static/control/unlock-aegir-php.info" mltFpm="${dscUsr}/static/control/multi-fpm.info" preFpm="${dscUsr}/static/control/.prev-multi-fpm.info" mltNgx="${dscUsr}/static/control/.multi-nginx-fpm.pid" fpmPth="${dscUsr}/config/server_master/nginx/post.d" hmFront=$(cat ${dscUsr}/log/domain.txt 2>&1) hmFront=$(echo -n ${hmFront} | tr -d "\n" 2>&1) hmstAls="${dscUsr}/.drush/${hmFront}.alias.drushrc.php" if [ -x "/opt/php74/bin/php" ] && [ -e "${dscUsr}/log/domain.txt" ]; then if [ ! -e "${unlAeg}" ]; then if [ ! -e "${dscUsr}/log/locked-aegir-php.txt" ]; then echo "${hmFront} 7.4" >> ${mltFpm} if [ ! -e "${hmstAls}" ]; then ln -s ${dscUsr}/.drush/hostmaster.alias.drushrc.php ${hmstAls} fi wait sed -i "s/^place.holder.dont.remove 5.6//g" ${mltFpm} wait sed -i "s/^place.holder.dont.remove 7.4//g" ${mltFpm} wait echo "place.holder.dont.remove 7.4" >> ${mltFpm} wait sed -i "s/ *$//g; /^$/d" ${mltFpm} wait echo "7.4" > ${dscUsr}/static/control/cli.info rm -f ${dscUsr}/log/unlocked-aegir-php.txt echo "7.4" > ${dscUsr}/log/locked-aegir-php.txt fi if [ ! -e "${fpmPth}/fpm_include_site_${hmFront}.inc" ]; then mltFpmUpdateForce=YES fi else if [ ! -e "${dscUsr}/log/unlocked-aegir-php.txt" ]; then if [ -e "${mltFpm}" ]; then sed -i "s/^${hmFront} 7.4//g" ${mltFpm} wait sed -i "s/^place.holder.dont.remove 5.6//g" ${mltFpm} wait sed -i "s/^place.holder.dont.remove 7.4//g" ${mltFpm} wait echo "place.holder.dont.remove 7.4" >> ${mltFpm} wait sed -i "s/ *$//g; /^$/d" ${mltFpm} wait fi mltFpmUpdateForce=YES rm -f ${dscUsr}/log/locked-aegir-php.txt touch ${dscUsr}/log/unlocked-aegir-php.txt fi fi fi if [ -x "/opt/php74/bin/php" ] && [ ! -e "/home/${_USER}.74.web" ]; then rm -f /data/disk/${_USER}/config/server_master/nginx/post.d/fpm_include_default.inc mltFpmUpdateForce=YES fi if [ -f "${mltFpm}" ]; then chown ${_USER}.ftp:${usrGroup} ${dscUsr}/static/control/*.info mltFpmUpdate=NO if [ ! -f "${preFpm}" ]; then rm -rf ${preFpm} cp -af ${mltFpm} ${preFpm} fi diffFpmTest=$(diff -w -B ${mltFpm} ${preFpm} 2>&1) if [ ! -z "${diffFpmTest}" ]; then mltFpmUpdate=YES fi if [ ! -f "${mltNgx}" ] \ || [ "${mltFpmUpdate}" = "YES" ] \ || [ "${mltFpmUpdateForce}" = "YES" ]; then rm -f ${fpmPth}/fpm_include_site_* IFS=$'\12' for p in `cat ${mltFpm}`;do _SITE_NAME=`echo $p | cut -d' ' -f1 | awk '{ print $1}'` _SITE_NAME=${_SITE_NAME//[^a-zA-Z0-9-.]/} _SITE_NAME=$(echo -n ${_SITE_NAME} | tr A-Z a-z 2>&1) _SITE_NAME=$(echo -n ${_SITE_NAME} | tr -d "\n" 2>&1) _SITE_SOCKET=`echo $p | cut -d' ' -f2 | awk '{ print $1}'` _SITE_SOCKET=${_SITE_SOCKET//[^0-9]/} _SITE_SOCKET=$(echo -n ${_SITE_SOCKET} | tr -d "\n" 2>&1) _SOCKET_L_NAME="${_USER}.${_SITE_SOCKET}" if [ ! -z "${_SITE_NAME}" ] \ && [ ! -z "${_SITE_SOCKET}" ] \ && [ -e "${dscUsr}/.drush/${_SITE_NAME}.alias.drushrc.php" ] \ && [ -e "/var/run/${_SOCKET_L_NAME}.fpm.socket" ]; then fpmInc="${fpmPth}/fpm_include_site_${_SITE_NAME}.inc" echo "if ( \$main_site_name = ${_SITE_NAME} ) {" > ${fpmInc} echo " set \$user_socket \"${_SOCKET_L_NAME}\";" >> ${fpmInc} echo "}" >> ${fpmInc} fi done touch ${mltNgx} rm -rf ${preFpm} cp -af ${mltFpm} ${preFpm} ### reload nginx service nginx reload &> /dev/null fi else if [ -f "${mltNgx}" ]; then rm -f ${mltNgx} fi if [ -f "${preFpm}" ]; then rm -f ${preFpm} fi fi } # # Switch PHP Version. 
switch_php() { _PHP_CLI_UPDATE=NO _FORCE_FPM_SETUP=NO _NEW_FPM_SETUP=NO _T_CLI_VRN="" if [ -e "${dscUsr}/static/control/fpm.info" ] \ || [ -e "${dscUsr}/static/control/cli.info" ] \ || [ -e "${dscUsr}/static/control/hhvm.info" ]; then echo "Custom FPM, HHVM or CLI settings for ${_USER} exist, running switch_php checks" if [ ! -e "${dscUsr}/log/un-chattr-ctrl.info" ]; then chattr -i ${dscUsr}/static/control/fpm.info &> /dev/null chattr -i ${dscUsr}/static/control/cli.info &> /dev/null chattr -i ${dscUsr}/log/fpm.txt &> /dev/null chattr -i ${dscUsr}/log/cli.txt &> /dev/null chattr -i ${dscUsr}/config/server_master/nginx/post.d/fpm_include_default.inc &> /dev/null touch ${dscUsr}/log/un-chattr-ctrl.info fi if [ ! -e "${dscUsr}/static/control/.single-fpm.${_X_SE}.pid" ]; then rm -f ${dscUsr}/static/control/.single-fpm*.pid echo OK > ${dscUsr}/static/control/.single-fpm.${_X_SE}.pid _FORCE_FPM_SETUP=YES fi if [ -e "${dscUsr}/static/control/cli.info" ]; then _T_CLI_VRN=$(cat ${dscUsr}/static/control/cli.info 2>&1) _T_CLI_VRN=${_T_CLI_VRN//[^0-9.]/} _T_CLI_VRN=$(echo -n ${_T_CLI_VRN} | tr -d "\n" 2>&1) if [ "${_T_CLI_VRN}" = "82" ]; then _T_CLI_VRN=8.2 elif [ "${_T_CLI_VRN}" = "81" ]; then _T_CLI_VRN=8.1 elif [ "${_T_CLI_VRN}" = "80" ]; then _T_CLI_VRN=8.0 elif [ "${_T_CLI_VRN}" = "74" ]; then _T_CLI_VRN=7.4 elif [ "${_T_CLI_VRN}" = "73" ]; then _T_CLI_VRN=7.3 elif [ "${_T_CLI_VRN}" = "72" ]; then _T_CLI_VRN=7.2 elif [ "${_T_CLI_VRN}" = "71" ]; then _T_CLI_VRN=7.1 elif [ "${_T_CLI_VRN}" = "70" ]; then _T_CLI_VRN=7.0 elif [ "${_T_CLI_VRN}" = "56" ]; then _T_CLI_VRN=5.6 fi if [ "${_T_CLI_VRN}" = "8.2" ] \ || [ "${_T_CLI_VRN}" = "8.1" ] \ || [ "${_T_CLI_VRN}" = "8.0" ] \ || [ "${_T_CLI_VRN}" = "7.4" ] \ || [ "${_T_CLI_VRN}" = "7.3" ] \ || [ "${_T_CLI_VRN}" = "7.2" ] \ || [ "${_T_CLI_VRN}" = "7.1" ] \ || [ "${_T_CLI_VRN}" = "7.0" ] \ || [ "${_T_CLI_VRN}" = "5.6" ]; then if [ "${_T_CLI_VRN}" = "8.2" ] \ && [ ! -x "/opt/php82/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_CLI_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_CLI_VRN=8.1 fi elif [ "${_T_CLI_VRN}" = "8.1" ] \ && [ ! -x "/opt/php81/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_CLI_VRN=7.4 fi elif [ "${_T_CLI_VRN}" = "8.0" ] \ && [ ! -x "/opt/php80/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_CLI_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_CLI_VRN=8.1 fi elif [ "${_T_CLI_VRN}" = "7.4" ] \ && [ ! -x "/opt/php74/bin/php" ]; then if [ -x "/opt/php81/bin/php" ]; then _T_CLI_VRN=8.1 fi elif [ "${_T_CLI_VRN}" = "7.3" ] \ && [ ! -x "/opt/php73/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_CLI_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_CLI_VRN=8.1 fi elif [ "${_T_CLI_VRN}" = "7.2" ] \ && [ ! -x "/opt/php72/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_CLI_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_CLI_VRN=8.1 fi elif [ "${_T_CLI_VRN}" = "7.1" ] \ && [ ! -x "/opt/php71/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_CLI_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_CLI_VRN=8.1 fi elif [ "${_T_CLI_VRN}" = "7.0" ] \ && [ ! -x "/opt/php70/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_CLI_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_CLI_VRN=8.1 fi elif [ "${_T_CLI_VRN}" = "5.6" ] \ && [ ! -x "/opt/php56/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_CLI_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_CLI_VRN=8.1 fi fi if [ "${_T_CLI_VRN}" != "${_PHP_CLI_VERSION}" ] \ || [ ! 
-e "${dscUsr}/static/control/.ctrl.cli.${_X_SE}.pid" ]; then _PHP_CLI_UPDATE=YES _DRUSH_FILES="drush.php drush" for df in ${_DRUSH_FILES}; do php_cli_drush_update "${df}" done if [ -x "${_T_CLI}/php" ]; then php_cli_local_ini_update sed -i "s/^_PHP_CLI_VERSION=.*/_PHP_CLI_VERSION=${_T_CLI_VRN}/g" \ /root/.${_USER}.octopus.cnf &> /dev/null wait echo ${_T_CLI_VRN} > ${dscUsr}/log/cli.txt echo ${_T_CLI_VRN} > ${dscUsr}/static/control/cli.info chown ${_USER}.ftp:${usrGroup} ${dscUsr}/static/control/cli.info fi fi fi fi if [ -e "${dscUsr}/static/control/hhvm.info" ]; then if [ -x "/usr/bin/hhvm" ] \ && [ -e "/var/xdrago/conf/hhvm/init.d/hhvm.foo" ] \ && [ -e "/var/xdrago/conf/hhvm/server.foo.ini" ]; then if [ ! -e "/opt/hhvm/server.${_USER}.ini" ] \ || [ ! -e "/etc/init.d/hhvm.${_USER}" ] \ || [ ! -e "/var/run/hhvm/${_USER}" ] ; then ### create or update special system user if needed satellite_create_web_user "hhvm" ### configure custom hhvm server init.d script cp -af /var/xdrago/conf/hhvm/init.d/hhvm.foo /etc/init.d/hhvm.${_USER} sed -i "s/foo/${_USER}/g" /etc/init.d/hhvm.${_USER} &> /dev/null wait sed -i "s/.ftp/.web/g" /etc/init.d/hhvm.${_USER} &> /dev/null wait chmod 755 /etc/init.d/hhvm.${_USER} chown root:root /etc/init.d/hhvm.${_USER} update-rc.d hhvm.${_USER} defaults &> /dev/null ### configure custom hhvm server ini file mkdir -p /opt/hhvm cp -af /var/xdrago/conf/hhvm/server.foo.ini /opt/hhvm/server.${_USER}.ini sed -i "s/foo/${_USER}/g" /opt/hhvm/server.${_USER}.ini &> /dev/null wait sed -i "s/.ftp/.web/g" /opt/hhvm/server.${_USER}.ini &> /dev/null wait chmod 755 /opt/hhvm/server.${_USER}.ini chown root:root /opt/hhvm/server.${_USER}.ini mkdir -p /var/log/hhvm/${_USER} chown ${_WEB}:${_WEBG} /var/log/hhvm/${_USER} ### start custom hhvm server service hhvm.${_USER} start &> /dev/null ### remove fpm control file to avoid confusion rm -f ${dscUsr}/static/control/fpm.info ### update nginx configuration sed -i "s/unix:.*fpm.socket;/unix:\/var\/run\/hhvm\/${_USER}\/hhvm.socket;/g" \ ${dscUsr}/config/includes/nginx_vhost_common.conf wait sed -i "s/unix:.*fpm.socket;/unix:\/var\/run\/hhvm\/${_USER}\/hhvm.socket;/g" \ ${dscUsr}/.drush/sys/provision/http/Provision/Config/Nginx/Inc/vhost_include.tpl.php wait ### reload nginx service nginx reload &> /dev/null fi fi else if [ -e "/opt/hhvm/server.${_USER}.ini" ] \ || [ -e "/etc/init.d/hhvm.${_USER}" ] \ || [ -e "/var/run/hhvm/${_USER}" ] ; then ### disable no longer used custom hhvm server instance if [ -e "/etc/init.d/hhvm.${_USER}" ]; then service hhvm.${_USER} stop &> /dev/null update-rc.d -f hhvm.${_USER} remove &> /dev/null rm -f /etc/init.d/hhvm.${_USER} fi ### delete special system user no longer needed satellite_remove_web_user "hhvm" ### delete leftovers rm -f /opt/hhvm/server.${_USER}.ini rm -rf /var/run/hhvm/${_USER} rm -rf /var/log/hhvm/${_USER} ### update nginx configuration sed -i "s/\/var\/run\/hhvm\/${_USER}\/hhvm.socket;/\/var\/run\/\$user_socket.fpm.socket;/g" \ ${dscUsr}/config/includes/nginx_vhost_common.conf wait sed -i "s/\/var\/run\/hhvm\/${_USER}\/hhvm.socket;/\/var\/run\/\$user_socket.fpm.socket;/g" \ ${dscUsr}/.drush/sys/provision/http/Provision/Config/Nginx/Inc/vhost_include.tpl.php wait ### reload nginx service nginx reload &> /dev/null ### create dummy control file to enable PHP-FPM again echo 7.4 > ${dscUsr}/static/control/fpm.info chown ${_USER}.ftp:${usrGroup} ${dscUsr}/static/control/fpm.info _FORCE_FPM_SETUP=YES fi fi sleep 5 if [ ! 
-e "${dscUsr}/static/control/hhvm.info" ] \ && [ -e "${dscUsr}/static/control/fpm.info" ] \ && [ -e "/var/xdrago/conf/fpm-pool-foo-multi.conf" ]; then _PHP_FPM_MULTI=NO if [ -f "${dscUsr}/static/control/multi-fpm.info" ] \ && [ -d "${dscUsr}/tools/le" ]; then _PHP_FPM_MULTI=YES if [ ! -e "${dscUsr}/static/control/.multi-fpm.${_X_SE}.pid" ]; then rm -f ${dscUsr}/static/control/.multi-fpm*.pid echo OK > ${dscUsr}/static/control/.multi-fpm.${_X_SE}.pid _FORCE_FPM_SETUP=YES fi else if [ -e "${dscUsr}/config/server_master/nginx/post.d/fpm_include_default.inc" ]; then rm -f ${dscUsr}/config/server_master/nginx/post.d/fpm_include_* rm -f ${dscUsr}/static/control/.multi-fpm*.pid service nginx reload &> /dev/null fi fi _T_FPM_VRN=$(cat ${dscUsr}/static/control/fpm.info 2>&1) _T_FPM_VRN=${_T_FPM_VRN//[^0-9.]/} _T_FPM_VRN=$(echo -n ${_T_FPM_VRN} | tr -d "\n" 2>&1) if [ "${_T_FPM_VRN}" = "82" ]; then _T_FPM_VRN=8.2 elif [ "${_T_FPM_VRN}" = "81" ]; then _T_FPM_VRN=8.1 elif [ "${_T_FPM_VRN}" = "80" ]; then _T_FPM_VRN=8.0 elif [ "${_T_FPM_VRN}" = "74" ]; then _T_FPM_VRN=7.4 elif [ "${_T_FPM_VRN}" = "73" ]; then _T_FPM_VRN=7.3 elif [ "${_T_FPM_VRN}" = "72" ]; then _T_FPM_VRN=7.2 elif [ "${_T_FPM_VRN}" = "71" ]; then _T_FPM_VRN=7.1 elif [ "${_T_FPM_VRN}" = "70" ]; then _T_FPM_VRN=7.0 elif [ "${_T_FPM_VRN}" = "56" ]; then _T_FPM_VRN=5.6 fi if [ "${_T_FPM_VRN}" = "8.2" ] \ || [ "${_T_FPM_VRN}" = "8.1" ] \ || [ "${_T_FPM_VRN}" = "8.0" ] \ || [ "${_T_FPM_VRN}" = "7.4" ] \ || [ "${_T_FPM_VRN}" = "7.3" ] \ || [ "${_T_FPM_VRN}" = "7.2" ] \ || [ "${_T_FPM_VRN}" = "7.1" ] \ || [ "${_T_FPM_VRN}" = "7.0" ] \ || [ "${_T_FPM_VRN}" = "5.6" ]; then if [ "${_T_FPM_VRN}" = "8.2" ] \ && [ ! -x "/opt/php82/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_FPM_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_FPM_VRN=8.1 fi elif [ "${_T_FPM_VRN}" = "8.1" ] \ && [ ! -x "/opt/php81/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_FPM_VRN=7.4 fi elif [ "${_T_FPM_VRN}" = "8.0" ] \ && [ ! -x "/opt/php80/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_FPM_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_FPM_VRN=8.1 fi elif [ "${_T_FPM_VRN}" = "7.4" ] \ && [ ! -x "/opt/php74/bin/php" ]; then if [ -x "/opt/php81/bin/php" ]; then _T_FPM_VRN=8.1 fi elif [ "${_T_FPM_VRN}" = "7.3" ] \ && [ ! -x "/opt/php73/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_FPM_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_FPM_VRN=8.1 fi elif [ "${_T_FPM_VRN}" = "7.2" ] \ && [ ! -x "/opt/php72/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_FPM_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_FPM_VRN=8.1 fi elif [ "${_T_FPM_VRN}" = "7.1" ] \ && [ ! -x "/opt/php71/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_FPM_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_FPM_VRN=8.1 fi elif [ "${_T_FPM_VRN}" = "7.0" ] \ && [ ! -x "/opt/php70/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_FPM_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_FPM_VRN=8.1 fi elif [ "${_T_FPM_VRN}" = "5.6" ] \ && [ ! 
-x "/opt/php56/bin/php" ]; then if [ -x "/opt/php74/bin/php" ]; then _T_FPM_VRN=7.4 elif [ -x "/opt/php81/bin/php" ]; then _T_FPM_VRN=8.1 fi fi if [ "${_T_FPM_VRN}" != "${_PHP_FPM_VERSION}" ] \ || [ "${_FORCE_FPM_SETUP}" = "YES" ]; then _NEW_FPM_SETUP=YES fi ### update fpm_include_default.inc if needed _PHP_SV=${_T_FPM_VRN//[^0-9]/} if [ -z "${_PHP_SV}" ]; then _PHP_SV=74 fi _FMP_D_INC="${dscUsr}/config/server_master/nginx/post.d/fpm_include_default.inc" if [ "${_PHP_FPM_MULTI}" = "YES" ] \ && [ -d "${dscUsr}/tools/le" ]; then _PHP_M_V="82 81 80 74 73 72 71 70 56" _D_POOL="${_USER}.${_PHP_SV}" if [ ! -e "${_FMP_D_INC}" ]; then echo "set \$user_socket \"${_D_POOL}\";" > ${_FMP_D_INC} touch ${dscUsr}/static/control/.multi-fpm.${_X_SE}.pid _NEW_FPM_SETUP=YES else _CHECK_FMP_D=$(grep "${_D_POOL}" ${_FMP_D_INC} 2>&1) if [[ "${_CHECK_FMP_D}" =~ "${_D_POOL}" ]]; then echo "${_D_POOL} already set in ${_FMP_D_INC}" else echo "${_D_POOL} must be updated in ${_FMP_D_INC}" echo "set \$user_socket \"${_D_POOL}\";" > ${_FMP_D_INC} touch ${dscUsr}/static/control/.multi-fpm.${_X_SE}.pid _NEW_FPM_SETUP=YES fi fi else _PHP_M_V="${_PHP_SV}" rm -f ${dscUsr}/static/control/.multi-fpm*.pid rm -f ${_FMP_D_INC} fi if [ ! -z "${_T_FPM_VRN}" ] \ && [ "${_NEW_FPM_SETUP}" = "YES" ]; then satellite_tune_fpm_workers _LIM_FPM="${_L_PHP_FPM_WORKERS}" if [[ "${_THISHOST}" =~ ".host8." ]] \ || [[ "${_THISHOST}" =~ ".boa.io"($) ]] \ || [[ "${_THISHOST}" =~ ".o8.io"($) ]] \ || [[ "${_THISHOST}" =~ ".aegir.cc"($) ]]; then if [ "${_CLIENT_OPTION}" = "CLUSTER" ]; then if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then _LIM_FPM=96 _PHP_FPM_WORKERS=192 fi elif [ "${_CLIENT_OPTION}" = "LITE" ]; then if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then _LIM_FPM=32 _PHP_FPM_WORKERS=64 fi elif [ "${_CLIENT_OPTION}" = "PHANTOM" ]; then if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then _LIM_FPM=16 _PHP_FPM_WORKERS=32 fi elif [ "${_CLIENT_OPTION}" = "POWER" ] \ || [ "${_CLIENT_OPTION}" = "BUS" ]; then if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then _LIM_FPM=8 _PHP_FPM_WORKERS=16 fi elif [ "${_CLIENT_OPTION}" = "EDGE" ] \ || [ "${_CLIENT_OPTION}" = "SSD" ] \ || [ "${_CLIENT_OPTION}" = "CLASSIC" ]; then if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then _LIM_FPM=2 _PHP_FPM_WORKERS=4 fi elif [ "${_CLIENT_OPTION}" = "MINI" ] \ || [ "${_CLIENT_OPTION}" = "MICRO" ]; then if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then _LIM_FPM=1 _PHP_FPM_WORKERS=2 fi else _LIM_FPM=2 _PHP_FPM_WORKERS=4 fi if [ -e "${dscUsr}/log/cores.txt" ]; then _CLIENT_CORES=$(cat ${dscUsr}/log/cores.txt 2>&1) _CLIENT_CORES=$(echo -n ${_CLIENT_CORES} | tr -d "\n" 2>&1) fi _CLIENT_CORES=${_CLIENT_CORES//[^0-9]/} if [ ! -z "${_CLIENT_CORES}" ] \ && [ "${_CLIENT_CORES}" -gt "0" ]; then _LIM_FPM=$(( _LIM_FPM *= _CLIENT_CORES )) _PHP_FPM_WORKERS=$(( _PHP_FPM_WORKERS *= _CLIENT_CORES )) fi if [ "${_LIM_FPM}" -gt "100" ]; then _LIM_FPM=100 fi if [ "${_PHP_FPM_WORKERS}" -gt "200" ]; then _PHP_FPM_WORKERS=200 fi fi _CHILD_MAX_FPM=$(( _LIM_FPM * 2 )) if [ "${_PHP_FPM_WORKERS}" = "AUTO" ]; then _DO_NOTHING=YES else _PHP_FPM_WORKERS=${_PHP_FPM_WORKERS//[^0-9]/} if [ ! 
-z "${_PHP_FPM_WORKERS}" ] \ && [ "${_PHP_FPM_WORKERS}" -gt "0" ]; then _CHILD_MAX_FPM="${_PHP_FPM_WORKERS}" fi fi sed -i "s/^_PHP_FPM_VERSION=.*/_PHP_FPM_VERSION=${_T_FPM_VRN}/g" \ /root/.${_USER}.octopus.cnf &> /dev/null wait echo ${_T_FPM_VRN} > ${dscUsr}/log/fpm.txt if [ "${_PHP_FPM_MULTI}" = "NO" ]; then echo ${_T_FPM_VRN} > ${dscUsr}/static/control/fpm.info fi chown ${_USER}.ftp:${usrGroup} ${dscUsr}/static/control/fpm.info _PHP_OLD_SV=${_PHP_FPM_VERSION//[^0-9]/} _PHP_SV=${_T_FPM_VRN//[^0-9]/} if [ -z "${_PHP_SV}" ]; then _PHP_SV=74 fi ### create or update special system user if needed _FMP_D_INC="${dscUsr}/config/server_master/nginx/post.d/fpm_include_default.inc" if [ "${_PHP_FPM_MULTI}" = "YES" ] \ && [ -d "${dscUsr}/tools/le" ]; then _PHP_M_V="82 81 80 74 73 72 71 70 56" _D_POOL="${_USER}.${_PHP_SV}" if [ ! -e "${_FMP_D_INC}" ]; then echo "set \$user_socket \"${_D_POOL}\";" > ${_FMP_D_INC} touch ${dscUsr}/static/control/.multi-fpm.${_X_SE}.pid else _CHECK_FMP_D=$(grep "${_D_POOL}" ${_FMP_D_INC} 2>&1) if [[ "${_CHECK_FMP_D}" =~ "${_D_POOL}" ]]; then echo "${_D_POOL} already set in ${_FMP_D_INC}" else echo "${_D_POOL} must be updated in ${_FMP_D_INC}" echo "set \$user_socket \"${_D_POOL}\";" > ${_FMP_D_INC} touch ${dscUsr}/static/control/.multi-fpm.${_X_SE}.pid fi fi else _PHP_M_V="${_PHP_SV}" rm -f ${dscUsr}/static/control/.multi-fpm*.pid rm -f ${_FMP_D_INC} fi for m in ${_PHP_M_V}; do if [ -x "/opt/php${m}/bin/php" ]; then if [ "${_PHP_FPM_MULTI}" = "YES" ] \ && [ -d "${dscUsr}/tools/le" ]; then _WEB="${_USER}.${m}.web" _POOL="${_USER}.${m}" else _WEB="${_USER}.web" _POOL="${_USER}" fi if [ -e "/home/${_WEB}/.drush/php.ini" ]; then _OLD_PHP_IN_USE=$(grep "/lib/php" /home/${_WEB}/.drush/php.ini 2>&1) _PHP_V="82 81 80 74 73 72 71 70 56" for e in ${_PHP_V}; do if [[ "${_OLD_PHP_IN_USE}" =~ "php${e}" ]]; then if [ "${e}" != "${m}" ] \ || [ ! -e "/home/${_WEB}/.drush/.ctrl.php${m}.${_X_SE}.pid" ]; then echo _OLD_PHP_IN_USE is ${_OLD_PHP_IN_USE} for ${_WEB} update echo _NEW_PHP_TO_USE is ${m} for ${_WEB} update satellite_web_user_update "${m}" fi fi done else echo _NEW_PHP_TO_USE is ${m} for ${_WEB} create satellite_create_web_user "${m}" fi fi done ### create or update special system user if needed if [ "${_PHP_FPM_MULTI}" = "YES" ] \ && [ -d "${dscUsr}/tools/le" ]; then _PHP_M_V="82 81 80 74 73 72 71 70 56" rm -f /opt/php*/etc/pool.d/${_USER}.conf else _PHP_M_V="${_PHP_SV}" rm -f /opt/php*/etc/pool.d/${_USER}.*.conf rm -f /opt/php*/etc/pool.d/${_USER}.conf fi for m in ${_PHP_M_V}; do if [ -x "/opt/php${m}/bin/php" ]; then if [ "${_PHP_FPM_MULTI}" = "YES" ] \ && [ -d "${dscUsr}/tools/le" ]; then _WEB="${_USER}.${m}.web" _POOL="${_USER}.${m}" else _WEB="${_USER}.web" _POOL="${_USER}" fi if [ "${_PHP_FPM_MULTI}" = "YES" ] \ && [ -d "${dscUsr}/tools/le" ]; then cp -af /var/xdrago/conf/fpm-pool-foo-multi.conf \ /opt/php${m}/etc/pool.d/${_POOL}.conf else cp -af /var/xdrago/conf/fpm-pool-foo.conf \ /opt/php${m}/etc/pool.d/${_POOL}.conf fi sed -i "s/.ftp/.web/g" \ /opt/php${m}/etc/pool.d/${_POOL}.conf &> /dev/null wait sed -i "s/\/data\/disk\/foo\/.tmp/\/home\/foo.web\/.tmp/g" \ /opt/php${m}/etc/pool.d/${_POOL}.conf &> /dev/null wait sed -i "s/foo.web/${_WEB}/g" \ /opt/php${m}/etc/pool.d/${_POOL}.conf &> /dev/null wait sed -i "s/THISPOOL/${_POOL}/g" \ /opt/php${m}/etc/pool.d/${_POOL}.conf &> /dev/null wait sed -i "s/foo/${_USER}/g" \ /opt/php${m}/etc/pool.d/${_POOL}.conf &> /dev/null wait if [ ! 
-z "${_PHP_FPM_DENY}" ]; then sed -i "s/passthru,/${_PHP_FPM_DENY},/g" \ /opt/php${m}/etc/pool.d/${_POOL}.conf &> /dev/null wait else if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]] \ || [ -e "/root/.host8.cnf" ]; then _DO_NOTHING=YES else sed -i "s/passthru,//g" \ /opt/php${m}/etc/pool.d/${_POOL}.conf &> /dev/null wait fi fi if [ "${_PHP_FPM_TIMEOUT}" = "AUTO" ] \ || [ -z "${_PHP_FPM_TIMEOUT}" ]; then _PHP_FPM_TIMEOUT=180 fi _PHP_FPM_TIMEOUT=${_PHP_FPM_TIMEOUT//[^0-9]/} if [ "${_PHP_FPM_TIMEOUT}" -lt "60" ]; then _PHP_FPM_TIMEOUT=60 fi if [ "${_PHP_FPM_TIMEOUT}" -gt "180" ]; then _PHP_FPM_TIMEOUT=180 fi if [ ! -z "${_PHP_FPM_TIMEOUT}" ]; then _PHP_TO="${_PHP_FPM_TIMEOUT}s" sed -i "s/180s/${_PHP_TO}/g" \ /opt/php${m}/etc/pool.d/${_POOL}.conf &> /dev/null wait fi if [ ! -z "${_CHILD_MAX_FPM}" ]; then sed -i "s/pm.max_children =.*/pm.max_children = ${_CHILD_MAX_FPM}/g" \ /opt/php${m}/etc/pool.d/${_POOL}.conf &> /dev/null wait fi switch_newrelic ${m} ${_POOL} 0 nrCheck=YES if [ -e "/etc/init.d/php${_PHP_OLD_SV}-fpm" ]; then service php${_PHP_OLD_SV}-fpm reload &> /dev/null fi if [ -e "/etc/init.d/php${m}-fpm" ]; then service php${m}-fpm reload &> /dev/null fi fi done fi fi fi fi } # # Manage mirroring of drush aliases. manage_site_drush_alias_mirror() { for Alias in `find /home/${_USER}.ftp/.drush/*.alias.drushrc.php \ -maxdepth 1 -type f | sort`; do AliasFile=$(echo "$Alias" | cut -d'/' -f5 | awk '{ print $1}' 2>&1) if [ ! -e "${pthParentUsr}/.drush/${AliasFile}" ] \ && [ ! -z "${AliasFile}" ]; then rm -f /home/${_USER}.ftp/.drush/${AliasFile} fi done if [ -e "/home/${_USER}.ftp/.drush/hm.alias.drushrc.php" ]; then rm -f /home/${_USER}.ftp/.drush/hm.alias.drushrc.php fi if [ -e "/home/${_USER}.ftp/.drush/self.alias.drushrc.php" ]; then rm -f /home/${_USER}.ftp/.drush/self.alias.drushrc.php fi if [ -e "${dscUsr}/.drush/.alias.drushrc.php" ]; then rm -f ${dscUsr}/.drush/.alias.drushrc.php fi isAliasUpdate=NO for Alias in `find ${pthParentUsr}/.drush/*.alias.drushrc.php \ -maxdepth 1 -type f | sort`; do AliasName=$(echo "$Alias" | cut -d'/' -f6 | awk '{ print $1}' 2>&1) AliasName=$(echo "${AliasName}" \ | sed "s/.alias.drushrc.php//g" \ | awk '{ print $1}' 2>&1) if [ "${AliasName}" = "hm" ] \ || [[ "${AliasName}" =~ (^)"platform_" ]] \ || [[ "${AliasName}" =~ (^)"server_" ]] \ || [[ "${AliasName}" =~ (^)"self" ]] \ || [[ "${AliasName}" =~ (^)"hostmaster" ]] \ || [ -z "${AliasName}" ]; then _IS_SITE=NO else SiteName="${AliasName}" echo SiteName is $SiteName if [[ "$SiteName" =~ ".restore"($) ]]; then _IS_SITE=NO rm -f ${pthParentUsr}/.drush/${SiteName}.alias.drushrc.php else SiteDir=$(cat $Alias \ | grep "site_path'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) if [ -d "${SiteDir}" ]; then echo SiteDir is ${SiteDir} pthAliasMain="${pthParentUsr}/.drush/${SiteName}.alias.drushrc.php" pthAliasCopy="/home/${_USER}.ftp/.drush/${SiteName}.alias.drushrc.php" if [ ! -e "${pthAliasCopy}" ]; then cp -af ${pthAliasMain} ${pthAliasCopy} chmod 440 ${pthAliasCopy} isAliasUpdate=YES else _DIFF_T=$(diff -w -B ${pthAliasCopy} ${pthAliasMain} 2>&1) if [ ! -z "${_DIFF_T}" ]; then cp -af ${pthAliasMain} ${pthAliasCopy} chmod 440 ${pthAliasCopy} isAliasUpdate=YES fi fi else rm -f ${pthAliasCopy} echo "ZOMBIE ${SiteDir} IN ${pthAliasMain}" fi fi fi done if [ -x "/usr/bin/drush10-bin" ]; then if [ "${isAliasUpdate}" = "YES" ] \ || [ ! 
-e "/home/${_USER}.ftp/.drush/sites/.checksums" ]; then su -s /bin/bash - ${_USER}.ftp -c "rm -f ~/.drush/sites/*.yml" su -s /bin/bash - ${_USER}.ftp -c "rm -f ~/.drush/sites/.checksums/*.md5" su -s /bin/bash - ${_USER}.ftp -c "drush10-bin core:init --yes" &> /dev/null su -s /bin/bash - ${_USER}.ftp -c "drush10-bin site:alias-convert ~/.drush/sites --yes" &> /dev/null fi fi } # # Manage Primary Users. manage_user() { for pthParentUsr in `find /data/disk/ -maxdepth 1 -mindepth 1 | sort`; do if [ -e "${pthParentUsr}/config/server_master/nginx/vhost.d" ] \ && [ -e "${pthParentUsr}/log/fpm.txt" ] \ && [ ! -e "${pthParentUsr}/log/proxied.pid" ] \ && [ ! -e "${pthParentUsr}/log/CANCELLED" ]; then _USER="" _USER=$(echo ${pthParentUsr} | cut -d'/' -f4 | awk '{ print $1}' 2>&1) echo "_USER is == ${_USER} == at manage_user" _WEB="${_USER}.web" dscUsr="/data/disk/${_USER}" octInc="${dscUsr}/config/includes" octTpl="${dscUsr}/.drush/sys/provision/http/Provision/Config/Nginx" usrDgn="${dscUsr}/.drush/usr/drupalgeddon" if [ ! -e "${dscUsr}/rector.php" ]; then rm -f ${dscUsr}/*.php* &> /dev/null rm -f ${dscUsr}/composer.lock &> /dev/null rm -f ${dscUsr}/composer.json &> /dev/null rm -f -r ${dscUsr}/vendor &> /dev/null rm -f -r ${dscUsr}/static/vendor &> /dev/null rm -f -r ${dscUsr}/.cache/composer &> /dev/null rm -f -r ${dscUsr}/.config/composer &> /dev/null rm -f -r ${dscUsr}/.composer &> /dev/null fi chmod 0440 ${dscUsr}/.drush/*.php &> /dev/null chmod 0400 ${dscUsr}/.drush/drushrc.php &> /dev/null chmod 0400 ${dscUsr}/.drush/hm.alias.drushrc.php &> /dev/null chmod 0400 ${dscUsr}/.drush/hostmaster*.php &> /dev/null chmod 0400 ${dscUsr}/.drush/platform_*.php &> /dev/null chmod 0400 ${dscUsr}/.drush/server_*.php &> /dev/null chmod 0710 ${dscUsr}/.drush &> /dev/null find ${dscUsr}/config/server_master \ -type d -exec chmod 0700 {} \; &> /dev/null find ${dscUsr}/config/server_master \ -type f -exec chmod 0600 {} \; &> /dev/null chmod +rx ${dscUsr}/config{,/server_master{,/nginx{,/passwords.d}}} &> /dev/null chmod +r ${dscUsr}/config/server_master/nginx/passwords.d/* &> /dev/null if [ ! -e "${dscUsr}/.tmp/.ctrl.${_X_SE}.pid" ]; then rm -rf ${dscUsr}/.drush/cache mkdir -p ${dscUsr}/.tmp touch ${dscUsr}/.tmp find ${dscUsr}/.tmp/ -mtime +0 -exec rm -rf {} \; &> /dev/null chown ${_USER}:${usrGroup} ${dscUsr}/.tmp &> /dev/null chmod 02755 ${dscUsr}/.tmp &> /dev/null echo OK > ${dscUsr}/.tmp/.ctrl.${_X_SE}.pid fi if [ ! 
-e "${dscUsr}/static/control/.ctrl.${_X_SE}.pid" ] \ && [ -e "/home/${_USER}.ftp/clients" ]; then mkdir -p ${dscUsr}/static/control chmod 755 ${dscUsr}/static/control if [ -e "/var/xdrago/conf/control-readme.txt" ]; then cp -af /var/xdrago/conf/control-readme.txt \ ${dscUsr}/static/control/README.txt &> /dev/null chmod 0644 ${dscUsr}/static/control/README.txt fi chown -R ${_USER}.ftp:${usrGroup} ${dscUsr}/static/control rm -f ${dscUsr}/static/control/.ctrl.* echo OK > ${dscUsr}/static/control/.ctrl.${_X_SE}.pid fi if [ -e "${dscUsr}/static/control/ssl-live-mode.info" ]; then if [ -e "${dscUsr}/tools/le/.ctrl/ssl-demo-mode.pid" ]; then rm -f ${dscUsr}/tools/le/.ctrl/ssl-demo-mode.pid fi fi if [ -e "/root/.${_USER}.octopus.cnf" ]; then source /root/.${_USER}.octopus.cnf fi _THIS_HM_PLR=$(cat ${dscUsr}/.drush/hostmaster.alias.drushrc.php \ | grep "root'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) if [ -e "${_THIS_HM_PLR}/modules/path_alias_cache" ] \ && [ -x "/opt/tools/drush/8/drush/drush.php" ]; then if [ -x "/opt/php56/bin/php" ]; then echo 5.6 > ${dscUsr}/static/control/cli.info elif [ -x "/opt/php55/bin/php" ]; then echo 5.5 > ${dscUsr}/static/control/cli.info fi fi nrCheck= switch_php ### reload nginx service nginx reload &> /dev/null if [ -z ${nrCheck} ]; then if [ -z ${_PHP_SV} ]; then _PHP_SV=${_PHP_FPM_VERSION//[^0-9]/} if [ -z "${_PHP_SV}" ]; then _PHP_SV=74 fi fi if [ -f "${dscUsr}/static/control/multi-fpm.info" ]; then _PHP_M_V="82 81 80 74 73 72 71 70 56" for m in ${_PHP_M_V}; do if [ -x "/opt/php${m}/bin/php" ] \ && [ -e "/opt/php${m}/etc/pool.d/${_USER}.${m}.conf" ]; then switch_newrelic ${m} ${_USER}.${m} 1 fi done else if [ -x "/opt/php${_PHP_SV}/bin/php" ] \ && [ -e "/opt/php${_PHP_SV}/etc/pool.d/${_USER}.conf" ]; then switch_newrelic ${_PHP_SV} ${_USER} 1 fi fi fi site_socket_inc_gen if [ -e "${pthParentUsr}/clients" ] && [ ! -z ${_USER} ]; then echo Managing Users for ${pthParentUsr} Instance rm -rf ${pthParentUsr}/clients/admin &> /dev/null rm -rf ${pthParentUsr}/clients/omega8ccgmailcom &> /dev/null rm -rf ${pthParentUsr}/clients/nocomega8cc &> /dev/null rm -rf ${pthParentUsr}/clients/*/backups &> /dev/null symlinks -dr ${pthParentUsr}/clients &> /dev/null if [ -d "/home/${_USER}.ftp" ]; then disable_chattr ${_USER}.ftp symlinks -dr /home/${_USER}.ftp &> /dev/null echo >> ${_THIS_LTD_CONF} echo "[${_USER}.ftp]" >> ${_THIS_LTD_CONF} echo "path : ['${dscUsr}/distro', \ '${dscUsr}/static', \ '${dscUsr}/backups', \ '${dscUsr}/clients']" \ | fmt -su -w 2500 >> ${_THIS_LTD_CONF} manage_site_drush_alias_mirror manage_sec if [ -d "/home/${_USER}.ftp/clients" ]; then chown -R ${_USER}.ftp:${usrGroup} /home/${_USER}.ftp/users chmod 700 /home/${_USER}.ftp/users chmod 600 /home/${_USER}.ftp/users/* fi if [ ! -L "/home/${_USER}.ftp/static" ]; then rm -f /home/${_USER}.ftp/{backups,clients,static} ln -sf ${dscUsr}/backups /home/${_USER}.ftp/backups ln -sf ${dscUsr}/clients /home/${_USER}.ftp/clients ln -sf ${dscUsr}/static /home/${_USER}.ftp/static fi if [ ! 
-e "/home/${_USER}.ftp/.tmp/.ctrl.${_X_SE}.pid" ]; then rm -rf /home/${_USER}.ftp/.drush/cache rm -rf /home/${_USER}.ftp/.tmp mkdir -p /home/${_USER}.ftp/.tmp chown ${_USER}.ftp:${usrGroup} /home/${_USER}.ftp/.tmp &> /dev/null chmod 700 /home/${_USER}.ftp/.tmp &> /dev/null echo OK > /home/${_USER}.ftp/.tmp/.ctrl.${_X_SE}.pid fi enable_chattr ${_USER}.ftp echo Done for ${pthParentUsr} else echo Directory /home/${_USER}.ftp not available fi echo else echo Directory ${pthParentUsr}/clients not available fi echo fi done } # # Find correct IP. find_correct_ip() { if [ -e "/root/.found_correct_ipv4.cnf" ]; then _LOC_IP=$(cat /root/.found_correct_ipv4.cnf 2>&1) _LOC_IP=$(echo -n ${_LOC_IP} | tr -d "\n" 2>&1) else _LOC_IP=$(curl ${crlGet} https://api.ipify.org \ | sed 's/[^0-9\.]//g' 2>&1) if [ -z "${_LOC_IP}" ]; then _LOC_IP=$(curl ${crlGet} http://ipv4.icanhazip.com \ | sed 's/[^0-9\.]//g' 2>&1) fi if [ ! -z "${_LOC_IP}" ]; then echo ${_LOC_IP} > /root/.found_correct_ipv4.cnf fi fi } # # Restrict node if needed. fix_node_in_lshell_access() { pthLog="/var/xdrago/log" if [ -e "/etc/lshell.conf" ]; then PrTestPower=$(grep "POWER" /root/.*.octopus.cnf 2>&1) PrTestPhantom=$(grep "PHANTOM" /root/.*.octopus.cnf 2>&1) PrTestCluster=$(grep "CLUSTER" /root/.*.octopus.cnf 2>&1) if [[ "${PrTestPower}" =~ "POWER" ]] \ || [[ "${PrTestPhantom}" =~ "PHANTOM" ]] \ || [[ "${PrTestCluster}" =~ "CLUSTER" ]] \ || [ -e "/root/.allow.node.lshell.cnf" ]; then _ALLOW_NODE=YES else _ALLOW_NODE=NO sed -i "s/, 'node',/,/g" /etc/lshell.conf sed -i "s/, 'node',/,/g" /var/xdrago/conf/lshell.conf fi fi } ###-------------SYSTEM-----------------### if [ ! -e "/home/.ctrl.${_X_SE}.pid" ]; then chattr -i /home chmod 0711 /home chown root:root /home rm -f /home/.ctrl.* while IFS=':' read -r login pass uid gid uname homedir shell; do if [[ "${homedir}" = **/home/** ]]; then if [ -d "${homedir}" ]; then chattr -i ${homedir} chown ${uid}:${gid} ${homedir} &> /dev/null if [ -d "${homedir}/.ssh" ]; then chattr -i ${homedir}/.ssh chown -R ${uid}:${gid} ${homedir}/.ssh &> /dev/null fi if [ -d "${homedir}/.tmp" ]; then chattr -i ${homedir}/.tmp chown -R ${uid}:${gid} ${homedir}/.tmp &> /dev/null fi if [ -d "${homedir}/.drush" ]; then chattr +i ${homedir}/.drush/usr chattr +i ${homedir}/.drush/*.ini chattr +i ${homedir}/.drush fi if [[ ! "${login}" =~ ".ftp"($) ]] \ && [[ ! "${login}" =~ ".web"($) ]]; then chattr +i ${homedir} fi fi fi done < /etc/passwd touch /home/.ctrl.${_X_SE}.pid fi if [ ! -L "/usr/bin/MySecureShell" ] && [ -x "/usr/bin/mysecureshell" ]; then mv -f /usr/bin/MySecureShell /var/backups/legacy-MySecureShell-bin ln -sf /usr/bin/mysecureshell /usr/bin/MySecureShell fi _NOW=$(date +%y%m%d-%H%M%S 2>&1) _NOW=${_NOW//[^0-9-]/} mkdir -p /var/backups/ltd/{conf,log,old} mkdir -p /var/backups/zombie/deleted _THIS_LTD_CONF="/var/backups/ltd/conf/lshell.conf.${_NOW}" if [ -e "/var/run/manage_rvm_users.pid" ] \ || [ -e "/var/run/manage_ltd_users.pid" ] \ || [ -e "/var/run/boa_run.pid" ] \ || [ -e "/var/run/boa_wait.pid" ]; then touch /var/xdrago/log/wait-manage-ltd-users.pid echo "Another BOA task is running, we have to wait" sleep 10 exit 0 elif [ ! -e "/var/xdrago/conf/lshell.conf" ]; then echo "Missing /var/xdrago/conf/lshell.conf template" exit 0 else touch /var/run/manage_ltd_users.pid count_cpu find_fast_mirror find /etc/[a-z]*\.lock -maxdepth 1 -type f -exec rm -rf {} \; &> /dev/null if [ ! 
-e "${pthLog}/node.manage.lshell.ctrl.${_X_SE}.pid" ]; then fix_node_in_lshell_access touch ${pthLog}/node.manage.lshell.ctrl.${_X_SE}.pid fi cat /var/xdrago/conf/lshell.conf > ${_THIS_LTD_CONF} find_correct_ip sed -i "s/1.1.1.1/${_LOC_IP}/g" ${_THIS_LTD_CONF} wait if [ ! -e "/root/.allow.mc.cnf" ]; then sed -i "s/'mc', //g" ${_THIS_LTD_CONF} wait sed -i "s/, 'mc':'mc -u'//g" ${_THIS_LTD_CONF} wait fi add_ltd_group_if_not_exists kill_zombies >/var/backups/ltd/log/zombies-${_NOW}.log 2>&1 manage_user >/var/backups/ltd/log/users-${_NOW}.log 2>&1 if [ -e "${_THIS_LTD_CONF}" ]; then _DIFF_T=$(diff -w -B ${_THIS_LTD_CONF} /etc/lshell.conf 2>&1) if [ ! -z "${_DIFF_T}" ]; then cp -af /etc/lshell.conf /var/backups/ltd/old/lshell.conf-before-${_NOW} cp -af ${_THIS_LTD_CONF} /etc/lshell.conf else rm -f ${_THIS_LTD_CONF} fi fi if [ -L "/bin/sh" ]; then _WEB_SH=$(readlink -n /bin/sh 2>&1) _WEB_SH=$(echo -n ${_WEB_SH} | tr -d "\n" 2>&1) if [ -x "/bin/websh" ]; then if [ "${_WEB_SH}" != "/bin/websh" ] \ && [ ! -e "/root/.dbhd.clstr.cnf" ]; then rm -f /bin/sh ln -s /bin/websh /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /bin/websh /usr/bin/sh fi fi else if [ -x "/bin/dash" ]; then if [ "${_WEB_SH}" != "/bin/dash" ]; then rm -f /bin/sh ln -s /bin/dash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /bin/dash /usr/bin/sh fi fi elif [ -x "/usr/bin/dash" ]; then if [ "${_WEB_SH}" != "/usr/bin/dash" ]; then rm -f /bin/sh ln -s /usr/bin/dash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /usr/bin/dash /usr/bin/sh fi fi elif [ -x "/bin/bash" ]; then if [ "${_WEB_SH}" != "/bin/bash" ]; then rm -f /bin/sh ln -s /bin/bash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /bin/bash /usr/bin/sh fi fi elif [ -x "/usr/bin/bash" ]; then if [ "${_WEB_SH}" != "/usr/bin/bash" ]; then rm -f /bin/sh ln -s /usr/bin/bash /bin/sh if [ -e "/usr/bin/sh" ]; then rm -f /usr/bin/sh ln -s /usr/bin/bash /usr/bin/sh fi fi fi curl -s -A iCab "${urlHmr}/helpers/websh.sh.txt" -o /bin/websh chmod 755 /bin/websh fi fi rm -f ${_TMP}/*.txt if [ ! -e "/root/.home.no.wildcard.chmod.cnf" ]; then chmod 700 /home/* &> /dev/null fi chmod 0600 /var/log/lsh/* chmod 0440 /var/aegir/.drush/*.php &> /dev/null chmod 0400 /var/aegir/.drush/drushrc.php &> /dev/null chmod 0400 /var/aegir/.drush/hm.alias.drushrc.php &> /dev/null chmod 0400 /var/aegir/.drush/hostmaster*.php &> /dev/null chmod 0400 /var/aegir/.drush/platform_*.php &> /dev/null chmod 0400 /var/aegir/.drush/server_*.php &> /dev/null chmod 0710 /var/aegir/.drush &> /dev/null find /var/aegir/config/server_master \ -type d -exec chmod 0700 {} \; &> /dev/null find /var/aegir/config/server_master \ -type f -exec chmod 0600 {} \; &> /dev/null if [ -e "/var/scout" ]; then _SCOUT_CRON_OFF=$(grep "OFFscoutOFF" /etc/crontab 2>&1) if [[ "${_SCOUT_CRON_OFF}" =~ "OFFscoutOFF" ]]; then sleep 5 sed -i "s/OFFscoutOFF/scout/g" /etc/crontab &> /dev/null wait fi fi if [ -e "/var/backups/reports/up/barracuda" ]; then if [ -e "/root/.mstr.clstr.cnf" ] \ || [ -e "/root/.wbhd.clstr.cnf" ] \ || [ -e "/root/.dbhd.clstr.cnf" ]; then if [ -e "/var/spool/cron/crontabs/aegir" ]; then sleep 180 rm -f /var/spool/cron/crontabs/aegir ionice -c2 -n0 -p $$ service cron reload &> /dev/null fi fi if [ -e "/root/.mstr.clstr.cnf" ] \ || [ -e "/root/.wbhd.clstr.cnf" ]; then if [ ! -e "/root/.remote.db.cnf" ] \ && [ ! -e "/root/.dbhd.clstr.cnf" ]; then touch /root/.remote.db.cnf fi if [ -e "/var/run/mysqld/mysqld.pid" ] \ && [ ! 
-e "/root/.dbhd.clstr.cnf" ]; then ionice -c2 -n0 -p $$ service cron stop &> /dev/null sleep 180 touch /root/.remote.db.cnf if [ "${_DB_SERIES}" = "10.4" ] \ || [ "${_DB_SERIES}" = "10.3" ] \ || [ "${_DB_SERIES}" = "10.2" ] \ || [ "${_DB_SERIES}" = "5.7" ]; then _SQL_PSWD=$(cat /root/.my.pass.txt 2>&1) _SQL_PSWD=$(echo -n ${_SQL_PSWD} | tr -d "\n" 2>&1) mysql -u root -e "SET GLOBAL innodb_max_dirty_pages_pct = 0;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_change_buffering = 'none';" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 1;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_io_capacity = 2000;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_io_capacity_max = 4000;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_pct = 100;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_now = ON;" &> /dev/null fi service mysql stop &> /dev/null sleep 5 service cron start &> /dev/null fi fi fi sleep 5 rm -f /var/run/manage_ltd_users.pid exit 0 fi ###EOF2023###
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/manage_solr_config.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} check_root() { if [ `whoami` = "root" ]; then ionice -c2 -n7 -p $$ renice 19 -p $$ chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi _DF_TEST=$(df -kTh / -l \ | grep '/' \ | sed 's/\%//g' \ | awk '{print $6}' 2> /dev/null) _DF_TEST=${_DF_TEST//[^0-9]/} if [ ! -z "${_DF_TEST}" ] && [ "${_DF_TEST}" -gt "90" ]; then echo "ERROR: Your disk space is almost full !!! ${_DF_TEST}/100" echo "ERROR: We can not proceed until it is below 90/100" exit 1 fi } check_root if [ -e "/root/.proxy.cnf" ]; then exit 0 fi _X_SE="414prodT66" _WEBG=www-data _OSR=$(lsb_release -sc 2>&1) _SSL_ITD=$(openssl version 2>&1 \ | tr -d "\n" \ | cut -d" " -f2 \ | awk '{ print $1}') if [[ "${_SSL_ITD}" =~ "3.1." ]] \ || [[ "${_SSL_ITD}" =~ "1.1.1" ]] \ || [[ "${_SSL_ITD}" =~ "1.1.0" ]] \ || [[ "${_SSL_ITD}" =~ "1.0.2" ]] \ || [[ "${_SSL_ITD}" =~ "1.0.1" ]]; then _NEW_SSL=YES fi crlGet="-L --max-redirs 10 -k -s --retry 10 --retry-delay 5 -A iCab" aptYesUnth="-y --allow-unauthenticated" vSet="variable-set --always-set" ###-------------SYSTEM-----------------### check_config_diff() { # $1 is template path # $2 is a path to core config preCnf="$1" slrCnf="$2" if [ -f "${preCnf}" ] && [ -f "${slrCnf}" ]; then slrCnfUpdate=NO diffMyTest=$(diff -w -B ${slrCnf} ${preCnf} 2>&1) if [ -z "${diffMyTest}" ]; then slrCnfUpdate="" echo "INFO: ${slrCnf} diff0 empty -- nothing to update" else slrCnfUpdate=YES # diffMyTest=$(echo -n ${diffMyTest} | fmt -su -w 2500 2>&1) echo "INFO: ${slrCnf} diff1 ${diffMyTest}" fi fi } write_solr_config() { # ${1} is module # ${2} is a path to solr.php # ${3} is Jetty/Solr version if [ ! -z "${1}" ] \ && [ ! -z "${2}" ] \ && [ ! -z "${3}" ] \ && [ ! -z "${SolrCoreID}" ] \ && [ -e "${Dir}" ]; then if [ "${3}" = "solr7" ]; then _PRT="9077" _VRS="7.7.3" else _PRT="8099" _VRS="4.9.1" fi echo "Your SOLR core access details for ${Dom} site are as follows:" > ${2} echo >> ${2} echo " Drupal 8 and newer" >> ${2} echo " Solr version .....: ${_VRS}" >> ${2} echo " Solr host ........: 127.0.0.1" >> ${2} echo " Solr port ........: ${_PRT}" >> ${2} echo " Solr path ........: leave empty" >> ${2} echo " Solr core ........: ${SolrCoreID}" >> ${2} echo >> ${2} echo " Don't forget to manually upload the configuration files" >> ${2} echo " (schema.xml, solrconfig.xml) under ${Dom}/files/solr" >> ${2} echo >> ${2} echo " Drupal 7:" >> ${2} echo " Solr version .....: ${_VRS}" >> ${2} echo " Solr host ........: 127.0.0.1" >> ${2} echo " Solr port ........: ${_PRT}" >> ${2} echo " Solr path ........: /solr/${SolrCoreID}" >> ${2} echo >> ${2} echo "It has been auto-configured to work with latest version" >> ${2} echo "of ${1} module, but you need to add the module to" >> ${2} echo "your site codebase before you will be able to use Solr." 
>> ${2} echo >> ${2} echo "To learn more please make sure to check the module docs at:" >> ${2} echo >> ${2} echo "https://drupal.org/project/${1}" >> ${2} chown ${_HM_U}:users ${2} &> /dev/null chmod 440 ${2} &> /dev/null fi } reload_core_cnf() { # ${1} is solr server port # ${2} is solr core name # Example: reload_core_cnf 9077 ${SolrCoreID} # Example: reload_core_cnf 8099 ${SolrCoreID} curl "http://127.0.0.1:${1}/solr/admin/cores?action=RELOAD&core=${2}" &> /dev/null echo "Reloaded Solr core ${2} cnf on port ${1}" sleep 3 } update_solr() { # ${1} is module # ${2} is solr core path (auto) == _SOLR_DIR _SERV="solr7" if [ ! -z "${1}" ] && [ -e "/data/conf/solr" ]; then if [ "${1}" = "apachesolr" ]; then _SERV="jetty9" if [ -e "${Plr}/modules/o_contrib_seven" ]; then if [ ! -e "${2}/conf/.protected.conf" ] && [ -e "${2}/conf" ]; then slrCnfUpdate="" check_config_diff "/data/conf/solr/apachesolr/7/schema.xml" "${2}/conf/schema.xml" if [ ! -z "${slrCnfUpdate}" ]; then rm -f ${2}/conf/* cp -af /data/conf/solr/apachesolr/7/* ${2}/conf/ chmod 644 ${2}/conf/* chown jetty9:jetty9 ${2}/conf/* touch ${2}/conf/.just-updated.pid else rm -f ${2}/conf/.just-updated.pid rm -f ${2}/conf/.yes-update.txt fi fi elif [ -e "${Plr}/modules/o_contrib" ]; then if [ ! -e "${2}/conf/.protected.conf" ] && [ -e "${2}/conf" ]; then slrCnfUpdate="" check_config_diff "/data/conf/solr/apachesolr/6/schema.xml" "${2}/conf/schema.xml" if [ ! -z "${slrCnfUpdate}" ]; then rm -f ${2}/conf/* cp -af /data/conf/solr/apachesolr/6/* ${2}/conf/ chmod 644 ${2}/conf/* chown jetty9:jetty9 ${2}/conf/* touch ${2}/conf/.just-updated.pid else rm -f ${2}/conf/.just-updated.pid rm -f ${2}/conf/.yes-update.txt fi fi fi elif [ "${1}" = "search_api_solr" ] \ && [ -e "${Plr}/modules/o_contrib_seven" ]; then if [ ! -e "${2}/conf/.protected.conf" ] && [ -e "${2}/conf" ]; then check_config_diff "/data/conf/solr/search_api_solr/7/schema.xml" "${2}/conf/schema.xml" if [ ! -z "${slrCnfUpdate}" ]; then rm -f ${2}/conf/* cp -af /data/conf/solr/search_api_solr/7/* ${2}/conf/ chmod 644 ${2}/conf/* chown solr7:solr7 ${2}/conf/* touch ${2}/conf/.just-updated.pid else rm -f ${2}/conf/.just-updated.pid rm -f ${2}/conf/.yes-update.txt fi check_config_diff "/data/conf/solr/search_api_solr/7/solrcore.properties" "${2}/conf/solrcore.properties" if [ ! -z "${slrCnfUpdate}" ]; then rm -f ${2}/conf/* cp -af /data/conf/solr/search_api_solr/7/* ${2}/conf/ chmod 644 ${2}/conf/* chown solr7:solr7 ${2}/conf/* touch ${2}/conf/.just-updated.pid else rm -f ${2}/conf/.just-updated.pid rm -f ${2}/conf/.yes-update.txt fi fi elif [ "${1}" = "search_api_solr" ] \ && [ -e "${Plr}/sites/${Dom}/files/solr/schema.xml" ] \ && [ -e "${Plr}/sites/${Dom}/files/solr/solrconfig.xml" ] \ && [ -e "${Plr}/sites/${Dom}/files/solr/solrcore.properties" ]; then if [ ! -e "${2}/conf/.protected.conf" ] && [ -e "${2}/conf" ]; then check_config_diff "${Plr}/sites/${Dom}/files/solr/schema.xml" "${2}/conf/schema.xml" if [ ! -z "${slrCnfUpdate}" ]; then rm -f ${2}/conf/* cp -af ${Plr}/sites/${Dom}/files/solr/* ${2}/conf/ chmod 644 ${2}/conf/* chown solr7:solr7 ${2}/conf/* rm -f ${Plr}/sites/${Dom}/files/solr/* touch ${2}/conf/.yes-custom.txt touch ${2}/conf/.just-updated.pid else rm -f ${2}/conf/.just-updated.pid rm -f ${2}/conf/.yes-update.txt fi fi elif [ "${1}" = "search_api_solr" ] \ && [ ! -e "${Plr}/sites/${Dom}/files/solr/schema.xml" ]; then if [ ! -e "${2}/conf/.protected.conf" ] \ && [ ! 
-e "${2}/conf/.yes-custom.txt" ] \ && [ -e "${2}/conf" ]; then check_config_diff "/data/conf/solr/search_api_solr/8/schema.xml" "${2}/conf/schema.xml" if [ ! -z "${slrCnfUpdate}" ]; then rm -f ${2}/conf/* cp -af /data/conf/solr/search_api_solr/8/* ${2}/conf/ chmod 644 ${2}/conf/* chown solr7:solr7 ${2}/conf/* touch ${2}/conf/.just-updated.pid else rm -f ${2}/conf/.just-updated.pid rm -f ${2}/conf/.yes-update.txt fi check_config_diff "/data/conf/solr/search_api_solr/8/solrcore.properties" "${2}/conf/solrcore.properties" if [ ! -z "${slrCnfUpdate}" ]; then rm -f ${2}/conf/* cp -af /data/conf/solr/search_api_solr/8/* ${2}/conf/ chmod 644 ${2}/conf/* chown solr7:solr7 ${2}/conf/* touch ${2}/conf/.just-updated.pid else rm -f ${2}/conf/.just-updated.pid rm -f ${2}/conf/.yes-update.txt fi fi fi fiLe="${Dir}/solr.php" echo "Info file for ${Dom} is ${fiLe}" echo "Info _SERV is ${_SERV}" _SOLR_CONFIG_INFO_UPDATE=NO if [ -e "${fiLe}" ]; then _SOLR_CONFIG_INFO_TEST=$(grep "${SolrCoreID}" ${fiLe} 2>&1) if [[ ! "${_SOLR_CONFIG_INFO_TEST}" =~ "${SolrCoreID}" ]]; then _SOLR_CONFIG_INFO_UPDATE=YES fi fi if [ ! -e "${fiLe}" ] \ || [ "${_SOLR_CONFIG_INFO_UPDATE}" = "YES" ] \ || [ -e "${2}/conf/.just-updated.pid" ]; then if [[ "${2}" =~ "/opt/solr4" ]] && [ ! -z "${_SERV}" ]; then write_solr_config ${1} ${fiLe} ${_SERV} echo "Updated ${fiLe} with ${2} details" touch ${2}/conf/${_X_SE}.conf reload_core_cnf 8099 ${SolrCoreID} elif [[ "${2}" =~ "/var/solr7/data" ]] && [ ! -z "${_SERV}" ]; then write_solr_config ${1} ${fiLe} ${_SERV} echo "Updated ${fiLe} with ${2} details" touch ${2}/conf/${_X_SE}.conf reload_core_cnf 9077 ${SolrCoreID} fi fi fi } add_solr() { # ${1} is module # ${2} is solr core path if [ "${1}" = "apachesolr" ]; then _SOLR_BASE="/opt/solr4" elif [ "${1}" = "search_api_solr" ]; then _SOLR_BASE="/var/solr7/data" fi if [ ! -z "${1}" ] && [ ! -z "${2}" ] && [ -e "/data/conf/solr" ]; then if [ ! -e "${2}" ]; then if [ "${_SOLR_BASE}" = "/var/solr7/data" ] \ && [ -x "/opt/solr7/bin/solr" ] \ && [ -e "/var/solr7/data/solr.xml" ]; then if [ -e "${Plr}/modules/o_contrib_eight" ] \ || [ -e "${Plr}/modules/o_contrib_nine" ] \ || [ -e "${Plr}/modules/o_contrib_ten" ]; then su -s /bin/bash - solr7 -c "/opt/solr7/bin/solr create_core -p 9077 -c ${SolrCoreID} -d /data/conf/solr/search_api_solr/8" elif [ -e "${Plr}/modules/o_contrib_seven" ]; then su -s /bin/bash - solr7 -c "/opt/solr7/bin/solr create_core -p 9077 -c ${SolrCoreID} -d /data/conf/solr/search_api_solr/7" else echo "The search_api_solr is supported only for Drupal 7 and newer!" fi else rm -rf ${_SOLR_BASE}/core0/data/* cp -a ${_SOLR_BASE}/core0 ${2} sed -i "s/.*name=\"${LegacySolrCoreID}\".*//g" ${_SOLR_BASE}/solr.xml wait sed -i "s/.*name=\"${OldSolrCoreID}\".*//g" ${_SOLR_BASE}/solr.xml wait sed -i "s/.*<core name=\"core0\" instanceDir=\"core0\" \/>.*/<core name=\"core0\" instanceDir=\"core0\" \/>\n<core name=\"${SolrCoreID}\" instanceDir=\"${SolrCoreID}\" \/>\n/g" ${_SOLR_BASE}/solr.xml wait sed -i "/^$/d" ${_SOLR_BASE}/solr.xml &> /dev/null wait if [[ "${_SOLR_BASE}" =~ "/opt/solr4" ]]; then kill -9 $(ps aux | grep '[j]etty9' | awk '{print $2}') &> /dev/null service jetty9 start &> /dev/null fi fi echo "New Solr with ${1} for ${2} added" fi update_solr "${1}" "${2}" fi } delete_solr() { # ${1} is solr core path if [[ "${1}" =~ "solr4" ]]; then _SOLR_BASE="/opt/solr4" elif [[ "${1}" =~ "solr7" ]]; then _SOLR_BASE="/var/solr7/data" fi if [ ! 
-z "${1}" ] && [ -e "/data/conf/solr" ] && [ -e "${1}/conf" ]; then if [ "${_SOLR_BASE}" = "/var/solr7/data" ] \ && [ -x "/opt/solr7/bin/solr" ] \ && [ -e "/var/solr7/data/solr.xml" ]; then if [ -e "${_SOLR_BASE}/${SolrCoreID}" ]; then su -s /bin/bash - solr7 -c "/opt/solr7/bin/solr delete -p 9077 -c ${SolrCoreID}" sleep 3 fi if [ -e "${_SOLR_BASE}/${OldSolrCoreID}" ]; then su -s /bin/bash - solr7 -c "/opt/solr7/bin/solr delete -p 9077 -c ${OldSolrCoreID}" sleep 3 fi if [ -e "${_SOLR_BASE}/${LegacySolrCoreID}" ]; then su -s /bin/bash - solr7 -c "/opt/solr7/bin/solr delete -p 9077 -c ${LegacySolrCoreID}" sleep 3 fi rm -f ${Dir}/solr.php else sed -i "s/.*instanceDir=\"${SolrCoreID}\".*//g" ${_SOLR_BASE}/solr.xml wait sed -i "s/.*name=\"${LegacySolrCoreID}\".*//g" ${_SOLR_BASE}/solr.xml wait sed -i "s/.*name=\"${OldSolrCoreID}\".*//g" ${_SOLR_BASE}/solr.xml wait sed -i "/^$/d" ${_SOLR_BASE}/solr.xml &> /dev/null wait rm -rf ${1} rm -f ${Dir}/solr.php if [[ "${_SOLR_BASE}" =~ "/opt/solr4" ]]; then kill -9 $(ps aux | grep '[j]etty9' | awk '{print $2}') &> /dev/null service jetty9 start &> /dev/null fi fi echo "Deleted Solr core in ${1}" fi } check_solr() { # ${1} is module # ${2} is solr core path if [ ! -z "${1}" ] && [ ! -z "${2}" ] && [ -e "/data/conf/solr" ]; then echo "Checking Solr with ${1} for ${2}" if [ ! -e "${2}" ]; then add_solr "${1}" "${2}" else update_solr "${1}" "${2}" fi fi } setup_solr() { if [ -e "/data/conf/default.boa_site_control.ini" ] \ && [ ! -e "${_DIR_CTRL_F}" ]; then cp -af /data/conf/default.boa_site_control.ini ${_DIR_CTRL_F} &> /dev/null chown ${_HM_U}:users ${_DIR_CTRL_F} &> /dev/null chmod 0664 ${_DIR_CTRL_F} &> /dev/null fi ### ### Support for solr_integration_module directive ### if [ -e "${_DIR_CTRL_F}" ]; then _SOLR_MODULE="your_module_name_here" _SOLR_IM_PT=$(grep "solr_integration_module" ${_DIR_CTRL_F} 2>&1) if [[ "${_SOLR_IM_PT}" =~ "solr_integration_module" ]]; then _DO_NOTHING=YES else echo ";solr_integration_module = your_module_name_here" >> ${_DIR_CTRL_F} fi _ASOLR_T=$(grep "^solr_integration_module = apachesolr" \ ${_DIR_CTRL_F} 2>&1) if [[ "${_ASOLR_T}" =~ "apachesolr" ]]; then _SOLR_MODULE="apachesolr" fi _SAPI_SOLR_T=$(grep "^solr_integration_module = search_api_solr" \ ${_DIR_CTRL_F} 2>&1) if [[ "${_SAPI_SOLR_T}" =~ "search_api_solr" ]]; then _SOLR_MODULE="search_api_solr" fi if [ "${_SOLR_MODULE}" = "apachesolr" ]; then _SOLR_BASE="/opt/solr4" elif [ "${_SOLR_MODULE}" = "search_api_solr" ]; then _SOLR_BASE="/var/solr7/data" fi _SOLR_DIR="${_SOLR_BASE}/${SolrCoreID}" if [ "${_SOLR_MODULE}" = "search_api_solr" ] \ || [ "${_SOLR_MODULE}" = "apachesolr" ]; then check_solr "${_SOLR_MODULE}" "${_SOLR_DIR}" else _SOLR_DIR_DEL="/opt/solr4/${SolrCoreID}" delete_solr "${_SOLR_DIR_DEL}" _SOLR_DIR_DEL="/var/solr7/data/${SolrCoreID}" delete_solr "${_SOLR_DIR_DEL}" _SOLR_DIR_DEL="/opt/solr4/${LegacySolrCoreID}" delete_solr "${_SOLR_DIR_DEL}" _SOLR_DIR_DEL="/var/solr7/data/${LegacySolrCoreID}" delete_solr "${_SOLR_DIR_DEL}" _SOLR_DIR_DEL="/opt/solr4/${OldSolrCoreID}" delete_solr "${_SOLR_DIR_DEL}" _SOLR_DIR_DEL="/var/solr7/data/${OldSolrCoreID}" delete_solr "${_SOLR_DIR_DEL}" fi fi ### ### Support for solr_custom_config directive ### if [ -e "${_DIR_CTRL_F}" ]; then _SLR_CM_CFG_P=$(grep "solr_custom_config" ${_DIR_CTRL_F} 2>&1) if [[ "${_SLR_CM_CFG_P}" =~ "solr_custom_config" ]]; then _DO_NOTHING=YES else echo ";solr_custom_config = NO" >> ${_DIR_CTRL_F} fi _SLR_CM_CFG_RT=NO _SOLR_PROTECT_CTRL="${_SOLR_DIR}/conf/.protected.conf" _SLR_CM_CFG_T=$(grep 
"^solr_custom_config = YES" ${_DIR_CTRL_F} 2>&1) if [[ "${_SLR_CM_CFG_T}" =~ "solr_custom_config = YES" ]]; then _SLR_CM_CFG_RT=YES if [ ! -e "${_SOLR_PROTECT_CTRL}" ]; then touch ${_SOLR_PROTECT_CTRL} fi echo "Solr config for ${_SOLR_DIR} is protected" else if [ -e "${_SOLR_PROTECT_CTRL}" ]; then rm -f ${_SOLR_PROTECT_CTRL} fi fi fi ### ### Support for solr_update_config directive ### if [ -e "${_DIR_CTRL_F}" ]; then _SOLR_UP_CFG_PT=$(grep "solr_update_config" ${_DIR_CTRL_F} 2>&1) if [[ "${_SOLR_UP_CFG_PT}" =~ "solr_update_config" ]]; then _DO_NOTHING=YES else echo ";solr_update_config = NO" >> ${_DIR_CTRL_F} fi _SOLR_UP_CFG_TT=$(grep "^solr_update_config = YES" ${_DIR_CTRL_F} 2>&1) if [[ "${_SOLR_UP_CFG_TT}" =~ "solr_update_config = YES" ]]; then if [ "${_SLR_CM_CFG_RT}" = "NO" ] \ && [ ! -e "${_SOLR_PROTECT_CTRL}" ]; then update_solr "${_SOLR_MODULE}" "${_SOLR_DIR}" fi fi fi } proceed_solr() { if [ ! -z "${Dan}" ] \ && [ "${Dan}" != "hostmaster" ]; then CoreID="${Dan}.${_HM_U}" CoreHS=$(echo ${CoreID} \ | openssl md5 \ | awk '{ print $2}' \ | tr -d "\n" 2>&1) #SolrCoreID="${_HM_U}-${Dan}-${CoreHS}" LegacySolrCoreID="${_HM_U}.${Dan}" OldSolrCoreID="solr.${_HM_U}.${Dan}" SolrCoreID="oct.${_HM_U}.${Dan}" setup_solr fi } check_sites_list() { for Site in `find ${User}/config/server_master/nginx/vhost.d \ -maxdepth 1 -mindepth 1 -type f | sort`; do _MOMENT=$(date +%y%m%d-%H%M%S 2>&1) echo ${_MOMENT} Start Checking Site $Site Dom=$(echo $Site | cut -d'/' -f9 | awk '{ print $1}' 2>&1) if [ -e "${User}/config/server_master/nginx/vhost.d/${Dom}" ]; then Plx=$(cat ${User}/config/server_master/nginx/vhost.d/${Dom} \ | grep "root " \ | cut -d: -f2 \ | awk '{ print $2}' \ | sed "s/[\;]//g" 2>&1) if [[ "$Plx" =~ "aegir/distro" ]]; then Dan="hostmaster" else Dan="${Dom}" fi fi _STATUS_DISABLED=NO _STATUS_TEST=$(grep "Do not reveal Aegir front-end URL here" \ ${User}/config/server_master/nginx/vhost.d/${Dom} 2>&1) if [[ "${_STATUS_TEST}" =~ "Do not reveal Aegir front-end URL here" ]]; then _STATUS_DISABLED=YES echo "${Dom} site is DISABLED" fi if [ -e "${User}/.drush/${Dan}.alias.drushrc.php" ] \ && [ "${_STATUS_DISABLED}" = "NO" ]; then Dir=$(cat ${User}/.drush/${Dan}.alias.drushrc.php \ | grep "site_path'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) _DIR_CTRL_F="${Dir}/modules/boa_site_control.ini" Plr=$(cat ${User}/.drush/${Dan}.alias.drushrc.php \ | grep "root'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) _PLR_CTRL_F="${Plr}/sites/all/modules/boa_platform_control.ini" proceed_solr fi done } count_cpu() { _CPU_INFO=$(grep -c processor /proc/cpuinfo 2>&1) _CPU_INFO=${_CPU_INFO//[^0-9]/} _NPROC_TEST=$(which nproc 2>&1) if [ -z "${_NPROC_TEST}" ]; then _CPU_NR="${_CPU_INFO}" else _CPU_NR=$(nproc 2>&1) fi _CPU_NR=${_CPU_NR//[^0-9]/} if [ ! -z "${_CPU_NR}" ] \ && [ ! 
-z "${_CPU_INFO}" ] \ && [ "${_CPU_NR}" -gt "${_CPU_INFO}" ] \ && [ "${_CPU_INFO}" -gt "0" ]; then _CPU_NR="${_CPU_INFO}" fi if [ -z "${_CPU_NR}" ] || [ "${_CPU_NR}" -lt "1" ]; then _CPU_NR=1 fi echo ${_CPU_NR} > /data/all/cpuinfo chmod 644 /data/all/cpuinfo &> /dev/null } load_control() { if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _CPU_MAX_RATIO=${_CPU_MAX_RATIO//[^0-9]/} fi if [ -z "${_CPU_MAX_RATIO}" ]; then _CPU_MAX_RATIO=6 fi _O_LOAD=$(awk '{print $1*100}' /proc/loadavg 2>&1) _O_LOAD=$(( _O_LOAD / _CPU_NR )) _O_LOAD_MAX=$(( 100 * _CPU_MAX_RATIO )) } fix_solr7_cnf() { if [ -x "/etc/init.d/solr7" ] && [ -e "/var/solr7/logs" ]; then _IF_RESTART_SOLR=NO for pRp in `find /var/solr7/data/oct.*/conf/solrcore.properties -maxdepth 1 | sort`; do if [ -e "${pRp}" ]; then _PRP_TEST_ID=$(grep "solr7" ${pRp} 2>&1) if [[ ! "${_PRP_TEST_ID}" =~ "solr7" ]]; then sed -i "s/^solr\.install\.dir.*//g" ${pRp} sed -i "s/^solr\.contrib\.dir.*//g" ${pRp} echo "solr.install.dir=/opt/solr7" >> ${pRp} sed -i "/^$/d" ${pRp} echo "Fixed ${pRp}" _IF_RESTART_SOLR=YES fi fi done pRp="/var/xdrago/conf/solr/search_api_solr/7/solrcore.properties" if [ -e "${pRp}" ]; then _PRP_TEST_ID=$(grep "solr7" ${pRp} 2>&1) if [[ ! "${_PRP_TEST_ID}" =~ "solr7" ]]; then sed -i "s/^solr\.install\.dir.*//g" ${pRp} sed -i "s/^solr\.contrib\.dir.*//g" ${pRp} echo "solr.install.dir=/opt/solr7" >> ${pRp} sed -i "/^$/d" ${pRp} echo "Fixed ${pRp}" _IF_RESTART_SOLR=YES fi fi pRp="/var/xdrago/conf/solr/search_api_solr/8/solrcore.properties" if [ -e "${pRp}" ]; then _PRP_TEST_ID=$(grep "solr7" ${pRp} 2>&1) if [[ ! "${_PRP_TEST_ID}" =~ "solr7" ]]; then sed -i "s/^solr\.install\.dir.*//g" ${pRp} sed -i "s/^solr\.contrib\.dir.*//g" ${pRp} echo "solr.install.dir=/opt/solr7" >> ${pRp} sed -i "/^$/d" ${pRp} echo "Fixed ${pRp}" _IF_RESTART_SOLR=YES fi fi pRp="/data/conf/solr/search_api_solr/7/solrcore.properties" if [ -e "${pRp}" ]; then _PRP_TEST_ID=$(grep "solr7" ${pRp} 2>&1) if [[ ! "${_PRP_TEST_ID}" =~ "solr7" ]]; then sed -i "s/^solr\.install\.dir.*//g" ${pRp} sed -i "s/^solr\.contrib\.dir.*//g" ${pRp} echo "solr.install.dir=/opt/solr7" >> ${pRp} sed -i "/^$/d" ${pRp} echo "Fixed ${pRp}" _IF_RESTART_SOLR=YES fi fi pRp="/data/conf/solr/search_api_solr/8/solrcore.properties" if [ -e "${pRp}" ]; then _PRP_TEST_ID=$(grep "solr7" ${pRp} 2>&1) if [[ ! "${_PRP_TEST_ID}" =~ "solr7" ]]; then sed -i "s/^solr\.install\.dir.*//g" ${pRp} sed -i "s/^solr\.contrib\.dir.*//g" ${pRp} echo "solr.install.dir=/opt/solr7" >> ${pRp} sed -i "/^$/d" ${pRp} echo "Fixed ${pRp}" _IF_RESTART_SOLR=YES fi fi rStart="/var/solr7/logs/.restarted_fix_solr7_cnf.txt" if [ "${_IF_RESTART_SOLR}" = "YES" ] \ || [ ! -e "${rStart}" ]; then echo "Restarting Solr 7..." service solr7 restart touch ${rStart} fi fi } start_up() { fix_solr7_cnf if [ -d "/var/xdrago/conf/solr/search_api_solr/8" ]; then baseCpy="/var/xdrago/conf/solr/search_api_solr/8/schema.xml" liveCpy="/data/conf/solr/search_api_solr/8/schema.xml" check_config_diff "${baseCpy}" "${liveCpy}" if [ ! -e "/data/conf/solr/search_api_solr/8/solrconfig_extra.xml" ] \ || [ ! -e "/data/conf/solr/.ctrl.${_X_SE}.pid" ] \ || [ ! 
-z "${slrCnfUpdate}" ]; then rm -rf /data/conf/solr cp -af /var/xdrago/conf/solr /data/conf/ rm -f /data/conf/solr/.ctrl* touch /data/conf/solr/.ctrl.${_X_SE}.pid fi fi if [ -d "/var/xdrago/conf/solr/search_api_solr/7" ]; then baseCpy="/var/xdrago/conf/solr/search_api_solr/7/schema.xml" liveCpy="/data/conf/solr/search_api_solr/7/schema.xml" check_config_diff "${baseCpy}" "${liveCpy}" if [ ! -e "/data/conf/solr/search_api_solr/7/solrconfig_extra.xml" ] \ || [ ! -e "/data/conf/solr/.ctrl.${_X_SE}.pid" ] \ || [ ! -z "${slrCnfUpdate}" ]; then rm -rf /data/conf/solr cp -af /var/xdrago/conf/solr /data/conf/ rm -f /data/conf/solr/.ctrl* touch /data/conf/solr/.ctrl.${_X_SE}.pid fi fi if [ -d "/var/xdrago/conf/solr/apachesolr/7" ]; then baseCpy="/var/xdrago/conf/solr/apachesolr/7/schema.xml" liveCpy="/data/conf/solr/apachesolr/7/schema.xml" check_config_diff "${baseCpy}" "${liveCpy}" if [ ! -e "/data/conf/solr/apachesolr/7/solrconfig_extra.xml" ] \ || [ ! -e "/data/conf/solr/.ctrl.${_X_SE}.pid" ] \ || [ ! -z "${slrCnfUpdate}" ]; then rm -rf /data/conf/solr cp -af /var/xdrago/conf/solr /data/conf/ rm -f /data/conf/solr/.ctrl* touch /data/conf/solr/.ctrl.${_X_SE}.pid fi fi for User in `find /data/disk/ -maxdepth 1 -mindepth 1 | sort`; do count_cpu load_control if [ -e "${User}/config/server_master/nginx/vhost.d" ] \ && [ ! -e "${User}/log/proxied.pid" ] \ && [ ! -e "${User}/log/CANCELLED" ]; then if [ "${_O_LOAD}" -lt "${_O_LOAD_MAX}" ]; then _HM_U=$(echo ${User} | cut -d'/' -f4 | awk '{ print $1}' 2>&1) _THIS_HM_SITE=$(cat ${User}/.drush/hostmaster.alias.drushrc.php \ | grep "site_path'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) echo "load is ${_O_LOAD} while maxload is ${_O_LOAD_MAX}" echo "User ${User}" mkdir -p ${User}/log/ctrl if [ -e "/root/.${_HM_U}.octopus.cnf" ]; then source /root/.${_HM_U}.octopus.cnf _MY_EMAIL=${_MY_EMAIL//\\\@/\@} fi check_sites_list fi fi done } _NOW=$(date +%y%m%d-%H%M%S 2>&1) _NOW=${_NOW//[^0-9-]/} mkdir -p /var/backups/solr/log find /var/backups/solr/*/* -mtime +0 -type f -exec rm -rf {} \; &> /dev/null start_up >/var/backups/solr/log/solr-${_NOW}.log 2>&1 exit 0
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/minute.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} pthOml="/var/xdrago/log/oom.incident.log" check_root() { if [ `whoami` = "root" ]; then if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _B_NICE=${_B_NICE//[^0-9]/} fi if [ -z "${_B_NICE}" ]; then _B_NICE=10 fi chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi } check_root if [ -e "/root/.step.init.systemd.two.cnf" ]; then kill -9 $(ps aux | grep '[s]ystemd-udevd' | awk '{print $2}') &> /dev/null fi sql_restart() { touch /var/run/boa_run.pid echo "$(date 2>&1) $1 incident detected" >> ${pthOml} sleep 5 echo "$(date 2>&1) $1 incident response started" >> ${pthOml} kill -9 $(ps aux | grep '[w]khtmltopdf' | awk '{print $2}') killall sleep &> /dev/null killall php bash /var/xdrago/move_sql.sh wait echo "$(date 2>&1) $1 incident mysql restarted" >> ${pthOml} echo "$(date 2>&1) $1 incident response completed" >> ${pthOml} echo >> ${pthOml} sleep 5 rm -f /var/run/boa_run.pid exit 0 } if [ -e "/var/log/daemon.log" ]; then if [ `tail --lines=10 /var/log/daemon.log \ | grep --count "Too many connections"` -gt "0" ]; then sql_restart "BUSY" fi fi _SQL_PSWD=$(cat /root/.my.pass.txt 2>&1) _SQL_PSWD=$(echo -n ${_SQL_PSWD} | tr -d "\n" 2>&1) _IS_MYSQLD_RUNNING=$(ps aux | grep '[m]ysqld' | awk '{print $2}' 2>&1) if [ ! -z "${_IS_MYSQLD_RUNNING}" ] && [ ! -z "${_SQL_PSWD}" ]; then _MYSQL_CONN_TEST=$(mysql -u root -e "status" 2>&1) echo _MYSQL_CONN_TEST ${_MYSQL_CONN_TEST} if [[ "${_MYSQL_CONN_TEST}" =~ "Too many connections" ]]; then sql_restart "BUSY" fi fi if [ -e "/var/lib/mysql/ibtmp1" ] && [ ! -e "/var/run/boa_run.pid" ]; then _SQL_TEMP_SIZE_TEST=$(du -s -h /var/lib/mysql/ibtmp1) if [[ "${_SQL_TEMP_SIZE_TEST}" =~ "G" ]]; then echo ${_SQL_TEMP_SIZE_TEST} too big echo SQL restart forced echo "$(date 2>&1) ${_SQL_TEMP_SIZE_TEST} too big, SQL restart forced" >> \ /var/xdrago/log/giant.ibtmp1.incident.log sql_restart "BIGTMP" fi fi if [ -e "/etc/cron.daily/logrotate" ]; then _SYSLOG_SIZE_TEST=$(du -s -h /var/log/syslog) if [[ "${_SYSLOG_SIZE_TEST}" =~ "G" ]]; then echo ${_SYSLOG_SIZE_TEST} too big bash /etc/cron.daily/logrotate &> /dev/null echo system logs rotated echo "$(date 2>&1) ${_SYSLOG_SIZE_TEST} too big, logrotate forced" >> \ /var/xdrago/log/giant.syslog.incident.log fi fi check_pdnsd() { if [ -x "/usr/sbin/pdnsd" ] \ && [ ! 
-e "/etc/resolvconf/run/interface/lo.pdnsd" ]; then mkdir -p /etc/resolvconf/run/interface echo "nameserver 127.0.0.1" > /etc/resolvconf/run/interface/lo.pdnsd resolvconf -u &> /dev/null service pdnsd restart &> /dev/null pdnsd-ctl empty-cache &> /dev/null fi if [ -e "/etc/resolv.conf" ]; then _RESOLV_TEST=$(grep "nameserver 127.0.0.1" /etc/resolv.conf 2>&1) if [[ "$_RESOLV_TEST" =~ "nameserver 127.0.0.1" ]]; then _THIS_DNS_TEST=$(host files.aegir.cc 127.0.0.1 -w 3 2>&1) if [[ "${_THIS_DNS_TEST}" =~ "no servers could be reached" ]]; then service pdnsd stop &> /dev/null sleep 1 renice ${_B_NICE} -p $$ &> /dev/null perl /var/xdrago/proc_num_ctrl.cgi fi fi fi } check_pdnsd if [ -e "/var/log/php" ]; then if [ `tail --lines=500 /var/log/php/php*-fpm-error.log \ | grep --count "already listen on"` -gt "0" ]; then touch /var/run/fmp_wait.pid sleep 8 kill -9 $(ps aux | grep '[p]hp-fpm' | awk '{print $2}') _NOW=$(date +%y%m%d-%H%M%S 2>&1) _NOW=${_NOW//[^0-9-]/} mkdir -p /var/backups/php-logs/${_NOW}/ mv -f /var/log/php/* /var/backups/php-logs/${_NOW}/ rm -f /var/run/*.fpm.socket renice ${_B_NICE} -p $$ &> /dev/null _PHP_V="82 81 80 74 73 72 71 70 56" for e in ${_PHP_V}; do if [ -e "/etc/init.d/php${e}-fpm" ]; then service php${e}-fpm start fi done sleep 8 rm -f /var/run/fmp_wait.pid echo "$(date 2>&1) FPM instances conflict detected" >> \ /var/xdrago/log/fpm.conflict.incident.log fi if [ `tail --lines=500 /var/log/php/php*-fpm-error.log \ | grep --count "process.max"` -gt "0" ]; then touch /var/run/fmp_wait.pid sleep 8 kill -9 $(ps aux | grep '[p]hp-fpm' | awk '{print $2}') _NOW=$(date +%y%m%d-%H%M%S 2>&1) _NOW=${_NOW//[^0-9-]/} mkdir -p /var/backups/php-logs/${_NOW}/ mv -f /var/log/php/* /var/backups/php-logs/${_NOW}/ rm -f /var/run/*.fpm.socket renice ${_B_NICE} -p $$ &> /dev/null _PHP_V="82 81 80 74 73 72 71 70 56" for e in ${_PHP_V}; do if [ -e "/etc/init.d/php${e}-fpm" ]; then service php${e}-fpm start fi done sleep 8 rm -f /var/run/fmp_wait.pid echo "$(date 2>&1) Too many running FPM childs detected" >> \ /var/xdrago/log/fpm.childs.incident.log fi fi _PHPLOG_SIZE_TEST=$(du -s -h /var/log/php 2>&1) if [[ "$_PHPLOG_SIZE_TEST" =~ "G" ]]; then echo $_PHPLOG_SIZE_TEST too big touch /var/run/fmp_wait.pid rm -f /var/log/php/* renice ${_B_NICE} -p $$ &> /dev/null _PHP_V="82 81 80 74 73 72 71 70 56" for e in ${_PHP_V}; do if [ -e "/etc/init.d/php${e}-fpm" ]; then service php${e}-fpm reload fi done if [ -e "/etc/init.d/php55-fpm" ]; then service php55-fpm stop fi if [ -e "/etc/init.d/php54-fpm" ]; then service php54-fpm stop fi if [ -e "/etc/init.d/php53-fpm" ]; then service php53-fpm stop fi sleep 8 rm -f /var/run/fmp_wait.pid echo "$(date 2>&1) Too big PHP error logs deleted: $_PHPLOG_SIZE_TEST" >> \ /var/xdrago/log/php.giant.logs.incident.log fi almost_oom_kill() { touch /var/run/boa_run.pid echo "$(date 2>&1) Almost OOM $1 detected" >> ${pthOml} sleep 5 echo "$(date 2>&1) Almost OOM incident response started" >> ${pthOml} kill -9 $(ps aux | grep '[w]khtmltopdf' | awk '{print $2}') echo "$(date 2>&1) Almost OOM wkhtmltopdf killed" >> ${pthOml} killall sleep &> /dev/null killall php echo "$(date 2>&1) Almost OOM php-cli killed" >> ${pthOml} echo "$(date 2>&1) Almost OOM incident response completed" >> ${pthOml} echo >> ${pthOml} sleep 5 rm -f /var/run/boa_run.pid exit 0 } oom_restart() { touch /var/run/boa_run.pid echo "$(date 2>&1) OOM $1 detected" >> ${pthOml} sleep 5 echo "$(date 2>&1) OOM incident response started" >> ${pthOml} kill -9 $(ps aux | grep '[w]khtmltopdf' | awk '{print $2}') echo 
"$(date 2>&1) OOM wkhtmltopdf killed" >> ${pthOml} killall sleep &> /dev/null killall php echo "$(date 2>&1) OOM php-cli killed" >> ${pthOml} mv -f /var/log/nginx/error.log /var/log/nginx/`date +%y%m%d-%H%M`-error.log kill -9 $(ps aux | grep '[n]ginx' | awk '{print $2}') echo "$(date 2>&1) OOM nginx killed" >> ${pthOml} kill -9 $(ps aux | grep '[p]hp-fpm' | awk '{print $2}') echo "$(date 2>&1) OOM php-fpm killed" >> ${pthOml} kill -9 $(ps aux | grep '[j]ava' | awk '{print $2}') &> /dev/null echo "$(date 2>&1) OOM solr/jetty killed" >> ${pthOml} kill -9 $(ps aux | grep '[n]ewrelic-daemon' | awk '{print $2}') echo "$(date 2>&1) OOM newrelic-daemon killed" >> ${pthOml} kill -9 $(ps aux | grep '[r]edis-server' | awk '{print $2}') echo "$(date 2>&1) OOM redis-server killed" >> ${pthOml} bash /var/xdrago/move_sql.sh wait echo "$(date 2>&1) OOM mysql restarted" >> ${pthOml} echo "$(date 2>&1) OOM incident response completed" >> ${pthOml} echo >> ${pthOml} sleep 5 rm -f /var/run/boa_run.pid exit 0 } if [ -e "/var/log/nginx/error.log" ]; then if [ `tail --lines=500 /var/log/nginx/error.log \ | grep --count "Cannot allocate memory"` -gt "0" ]; then oom_restart "nginx" fi fi _RAM_TOTAL=$(free -mt | grep Mem: | cut -d: -f2 | awk '{ print $1}' 2>&1) _RAM_FREE_TEST=$(free -mt 2>&1) if [[ "${_RAM_FREE_TEST}" =~ "buffers/cache:" ]]; then _RAM_FREE=$(free -mt | grep /+ | cut -d: -f2 | awk '{ print $2}' 2>&1) else _RAM_FREE=$(free -mt | grep Mem: | cut -d: -f2 | awk '{ print $6}' 2>&1) fi _RAM_PCT_FREE=$(echo "scale=0; $(bc -l <<< "${_RAM_FREE} / ${_RAM_TOTAL} * 100")/1" | bc 2>&1) _RAM_PCT_FREE=${_RAM_PCT_FREE//[^0-9]/} echo _RAM_TOTAL is ${_RAM_TOTAL} echo _RAM_PCT_FREE is ${_RAM_PCT_FREE} if [ ! -z "${_RAM_PCT_FREE}" ] && [ "${_RAM_PCT_FREE}" -le "15" ]; then oom_restart "RAM" elif [ "${_RAM_PCT_FREE}" -le "20" ]; then if [ `ps aux | grep -v "grep" | grep --count "wkhtmltopdf"` -gt "2" ]; then almost_oom_kill "RAM" fi fi redis_oom_check() { if [ `tail --lines=500 /var/log/php/error_log_* \ | grep --count "RedisException"` -gt "0" ]; then service redis-server stop &> /dev/null killall -9 redis-server &> /dev/null rm -f /var/lib/redis/* service redis-server start &> /dev/null echo "$(date 2>&1) RedisException OOM detected" echo "$(date 2>&1) RedisException OOM detected" >> /var/xdrago/log/redis.watch.log touch /var/run/fmp_wait.pid sleep 8 _NOW=$(date +%y%m%d-%H%M%S 2>&1) _NOW=${_NOW//[^0-9-]/} mkdir -p /var/backups/php-logs/${_NOW}/ mv -f /var/log/php/* /var/backups/php-logs/${_NOW}/ renice ${_B_NICE} -p $$ &> /dev/null _PHP_V="82 81 80 74 73 72 71 70 56" for e in ${_PHP_V}; do if [ -e "/etc/init.d/php${e}-fpm" ]; then service php${e}-fpm reload fi done sleep 8 rm -f /var/run/fmp_wait.pid fi } redis_oom_check redis_slow_check() { if [ `tail --lines=500 /var/log/php/fpm-*-slow.log \ | grep --count "PhpRedis.php"` -gt "5" ]; then touch /var/run/fmp_wait.pid sleep 8 service redis-server stop &> /dev/null killall -9 redis-server &> /dev/null rm -f /var/lib/redis/* service redis-server start &> /dev/null _NOW=$(date +%y%m%d-%H%M%S 2>&1) _NOW=${_NOW//[^0-9-]/} mkdir -p /var/backups/php-logs/${_NOW}/ mv -f /var/log/php/* /var/backups/php-logs/${_NOW}/ renice ${_B_NICE} -p $$ &> /dev/null _PHP_V="82 81 80 74 73 72 71 70 56" for e in ${_PHP_V}; do if [ -e "/etc/init.d/php${e}-fpm" ]; then service php${e}-fpm reload fi done sleep 8 rm -f /var/run/fmp_wait.pid echo "$(date 2>&1) Slow PhpRedis detected" >> \ /var/xdrago/log/redis.slow.incident.log fi } redis_slow_check fpm_sockets_healing() { if [ `tail --lines=500 
/var/log/php/php*-fpm-error.log \ | grep --count "Address already in use"` -gt "0" ]; then touch /var/run/fmp_wait.pid sleep 8 _NOW=$(date +%y%m%d-%H%M%S 2>&1) _NOW=${_NOW//[^0-9-]/} mkdir -p /var/backups/php-logs/${_NOW}/ mv -f /var/log/php/* /var/backups/php-logs/${_NOW}/ kill -9 $(ps aux | grep '[p]hp-fpm' | awk '{print $2}') &> /dev/null renice ${_B_NICE} -p $$ &> /dev/null _PHP_V="82 81 80 74 73 72 71 70 56" for e in ${_PHP_V}; do if [ -e "/etc/init.d/php${e}-fpm" ]; then service php${e}-fpm start fi done sleep 8 rm -f /var/run/fmp_wait.pid echo "$(date 2>&1) FPM Sockets conflict detected" >> \ /var/xdrago/log/fpm.sockets.incident.log fi } fpm_sockets_healing jetty_restart() { touch /var/run/boa_wait.pid sleep 5 kill -9 $(ps aux | grep '[j]etty' | awk '{print $2}') &> /dev/null rm -f /var/log/jetty{7,8,9}/* renice ${_B_NICE} -p $$ &> /dev/null if [ -e "/etc/default/jetty9" ] && [ -e "/etc/init.d/jetty9" ]; then service jetty9 start fi if [ -e "/etc/default/jetty8" ] && [ -e "/etc/init.d/jetty8" ]; then service jetty8 start fi if [ -e "/etc/default/jetty7" ] && [ -e "/etc/init.d/jetty7" ]; then service jetty7 start fi sleep 5 rm -f /var/run/boa_wait.pid } if [ -e "/var/log/jetty9" ]; then if [ `tail --lines=500 /var/log/jetty9/*stderrout.log \ | grep --count "Address already in use"` -gt "0" ]; then jetty_restart "zombie" echo "$(date 2>&1) Address already in use for jetty9" >> \ /var/xdrago/log/jetty.zombie.incident.log fi fi if [ -e "/var/log/jetty8" ]; then if [ `tail --lines=500 /var/log/jetty8/*stderrout.log \ | grep --count "Address already in use"` -gt "0" ]; then jetty_restart "zombie" echo "$(date 2>&1) Address already in use for jetty8" >> \ /var/xdrago/log/jetty.zombie.incident.log fi fi if [ -e "/var/log/jetty7" ]; then if [ `tail --lines=500 /var/log/jetty7/*stderrout.log \ | grep --count "Address already in use"` -gt "0" ]; then jetty_restart "zombie" echo "$(date 2>&1) Address already in use for jetty7" >> \ /var/xdrago/log/jetty.zombie.incident.log fi fi if [ `ps aux | grep -v "grep" | grep --count "php-fpm: master process"` -gt "9" ]; then kill -9 $(ps aux | grep '[p]hp-fpm' | awk '{print $2}') &> /dev/null echo "$(date 2>&1) Too many PHP-FPM master processes killed" >> \ /var/xdrago/log/php-fpm-master-count.kill.log fi if [ `ps aux | grep -v "grep" | grep --count "dirmngr"` -gt "5" ]; then kill -9 $(ps aux | grep '[d]irmngr' | awk '{print $2}') &> /dev/null echo "$(date 2>&1) Too many dirmngr processes killed" >> \ /var/xdrago/log/dirmngr-count.kill.log fi if [ `ps aux | grep -v "grep" | grep --count "gpg-agent"` -gt "5" ]; then kill -9 $(ps aux | grep '[g]pg-agent' | awk '{print $2}') &> /dev/null echo "$(date 2>&1) Too many gpg-agent processes killed" >> \ /var/xdrago/log/gpg-agent-count.kill.log fi if [ ! -e "/root/.high_traffic.cnf" ] \ && [ ! -e "/root/.giant_traffic.cnf" ]; then perl /var/xdrago/monitor/check/segfault_alert fi mysql_proc_kill() { xtime=${xtime//[^0-9]/} echo "proc nr to monitor is $each by $xuser runnning for $xtime seconds" if [ ! -z "$xtime" ]; then if [ $xtime -gt $limit ]; then echo "proc to kill is $each by $xuser after $xtime" xkill=$(mysqladmin -u root kill $each 2>&1) times=$(date 2>&1) load=$(cat /proc/loadavg 2>&1) echo "$load" echo "$load" >> /var/xdrago/log/sql_watch.log echo $times $each $xuser $xtime $xkill echo "$times $each $xuser $xtime $xkill" >> /var/xdrago/log/sql_watch.log fi fi } mysql_proc_control() { if [ ! 
-z "${_SQLMONITOR}" ] && [ "${_SQLMONITOR}" = "YES" ]; then echo "$(date 2>&1)" >> /var/xdrago/log/mysqladmin.monitor.log echo "$(mysqladmin -u root proc -v 2>&1)" >> /var/xdrago/log/mysqladmin.monitor.log if [ ! -z "${_RAM_PCT_FREE}" ] && [ "${_RAM_PCT_FREE}" -lt "20" ]; then sql_restart "RAM" fi fi limit=300 xkill=null for each in `mysqladmin -u root proc \ | awk '{print $2, $4, $8, $12}' \ | awk '{print $1}'`; do each=${each//[^0-9]/} [ ! -z "$each" ] && echo "each is $each" if [ ! -z "$each" ]; then if [ "$each" -gt "5" ] \ && [ ! -z "$each" ]; then xtime=$(mysqladmin -u root proc \ | awk '{print $2, $4, $8, $12}' \ | grep $each \ | awk '{print $4}' 2>&1) xtime=${xtime//[^0-9]/} [ ! -z "$xtime" ] && echo "xtime is $xtime [ea:$each]" xuser=$(mysqladmin -u root proc \ | awk '{print $2, $4, $8, $12}' \ | grep $each \ | awk '{print $2}' 2>&1) xuser=${xuser//[^0-9a-z_]/} [ ! -z "$xuser" ] && echo "xuser is $xuser [xt:$xtime] [ea:$each]" if [ ! -z "$xtime" ]; then if [ -e "/root/.sql.blacklist.cnf" ]; then # cat /root/.sql.blacklist.cnf # xsqlfoo # db name/user causing problems, w/o # in front # xsqlbar # db name/user causing problems, w/o # in front # xsqlnew # db name/user causing problems, w/o # in front for _XQ in `cat /root/.sql.blacklist.cnf \ | cut -d '#' -f1 \ | sort \ | uniq`; do echo "ABUSE is ${_XQ}" echo "xuser is ${xuser}" if [ "$xuser" = "${_XQ}" ]; then echo "checking via mysql_proc_kill ${_XQ} [xt:$xtime] [ea:$each] to avoid issues" limit=10 mysql_proc_kill fi done else limit=3600 mysql_proc_kill fi fi fi fi; done } lsyncd_proc_control() { if [ -e "/var/log/lsyncd.log" ]; then if [ `tail --lines=100 /var/log/lsyncd.log \ | grep --count "Error: Terminating"` -gt "0" ]; then echo "$(date 2>&1) TRM lsyncd" >> /var/xdrago/log/lsyncd.monitor.log fi if [ `tail --lines=100 /var/log/lsyncd.log \ | grep --count "ERROR: Auto-resolving failed"` -gt "5" ]; then echo "$(date 2>&1) ERR lsyncd" >> /var/xdrago/log/lsyncd.monitor.log fi if [ `tail --lines=5000 /var/log/lsyncd.log \ | grep --count "Normal: Finished events list = 0"` -lt "1" ]; then echo "$(date 2>&1) NRM lsyncd" >> /var/xdrago/log/lsyncd.monitor.log fi fi if [ -e "/var/xdrago/log/lsyncd.monitor.log" ]; then if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf fi if [ `tail --lines=10 /var/xdrago/log/lsyncd.monitor.log \ | grep --count "TRM lsyncd"` -gt "3" ] && [ -n "${_MY_EMAIL}" ]; then mail -s "ALERT! lsyncd TRM failure on $(uname -n 2>&1)" ${_MY_EMAIL} < \ /var/xdrago/log/lsyncd.monitor.log _ARCHIVE_LOG=YES fi if [ `tail --lines=10 /var/xdrago/log/lsyncd.monitor.log \ | grep --count "ERR lsyncd"` -gt "3" ] && [ -n "${_MY_EMAIL}" ]; then mail -s "ALERT! 
lsyncd ERR failure on $(uname -n 2>&1)" ${_MY_EMAIL} < \ /var/xdrago/log/lsyncd.monitor.log _ARCHIVE_LOG=YES fi if [ `tail --lines=10 /var/xdrago/log/lsyncd.monitor.log \ | grep --count "NRM lsyncd"` -gt "3" ] && [ -n "${_MY_EMAIL}" ]; then mail -s "NOTICE: lsyncd NRM problem on $(uname -n 2>&1)" ${_MY_EMAIL} < \ /var/xdrago/log/lsyncd.monitor.log _ARCHIVE_LOG=YES fi if [ "$_ARCHIVE_LOG" = "YES" ]; then cat /var/xdrago/log/lsyncd.monitor.log >> \ /var/xdrago/log/lsyncd.warn.archive.log rm -f /var/xdrago/log/lsyncd.monitor.log fi fi } if [ -e "/var/run/boa_sql_backup.pid" ] \ || [ -e "/var/run/boa_sql_cluster_backup.pid" ] \ || [ -e "/var/run/boa_run.pid" ] \ || [ -e "/var/run/mysql_restart_running.pid" ]; then _SQL_CTRL=NO else _SQL_CTRL=YES fi if_redis_restart() { PrTestPower=$(grep "POWER" /root/.*.octopus.cnf 2>&1) PrTestPhantom=$(grep "PHANTOM" /root/.*.octopus.cnf 2>&1) PrTestCluster=$(grep "CLUSTER" /root/.*.octopus.cnf 2>&1) ReTest=$(ls /data/disk/*/static/control/run-redis-restart.pid | wc -l 2>&1) if [[ "${PrTestPower}" =~ "POWER" ]] \ || [[ "${PrTestPhantom}" =~ "PHANTOM" ]] \ || [[ "${PrTestCluster}" =~ "CLUSTER" ]] \ || [ -e "/root/.allow.redis.restart.cnf" ]; then if [ "${ReTest}" -ge "1" ]; then service redis-server restart wait rm -f /data/disk/*/static/control/run-redis-restart.pid echo "$(date 2>&1) Redis Server restart forced" >> \ /var/xdrago/log/redis-server-restart.event.log fi fi } if_redis_restart if [ -e "/root/.mysqladmin.monitor.cnf" ]; then _SQLMONITOR=YES fi lsyncd_proc_control [ "${_SQL_CTRL}" = "YES" ] && mysql_proc_control sleep 5 [ "${_SQL_CTRL}" = "YES" ] && mysql_proc_control perl /var/xdrago/monitor/check/scan_nginx &> /dev/null sleep 5 [ "${_SQL_CTRL}" = "YES" ] && mysql_proc_control sleep 5 [ "${_SQL_CTRL}" = "YES" ] && mysql_proc_control perl /var/xdrago/monitor/check/scan_nginx &> /dev/null sleep 5 [ "${_SQL_CTRL}" = "YES" ] && mysql_proc_control sleep 5 [ "${_SQL_CTRL}" = "YES" ] && mysql_proc_control perl /var/xdrago/monitor/check/scan_nginx &> /dev/null sleep 5 [ "${_SQL_CTRL}" = "YES" ] && mysql_proc_control sleep 5 [ "${_SQL_CTRL}" = "YES" ] && mysql_proc_control perl /var/xdrago/monitor/check/scan_nginx &> /dev/null sleep 5 [ "${_SQL_CTRL}" = "YES" ] && mysql_proc_control sleep 5 [ "${_SQL_CTRL}" = "YES" ] && mysql_proc_control perl /var/xdrago/monitor/check/scan_nginx &> /dev/null sleep 5 [ "${_SQL_CTRL}" = "YES" ] && mysql_proc_control sleep 5 perl /var/xdrago/monitor/check/escapecheck &> /dev/null perl /var/xdrago/monitor/check/hackcheck &> /dev/null perl /var/xdrago/monitor/check/hackftp &> /dev/null perl /var/xdrago/monitor/check/scan_nginx &> /dev/null if [ ! -e "/root/.high_traffic.cnf" ] \ && [ ! -e "/root/.giant_traffic.cnf" ]; then perl /var/xdrago/monitor/check/locked &> /dev/null fi perl /var/xdrago/monitor/check/sqlcheck &> /dev/null echo DONE! exit 0 ###EOF2023###
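#
# Minimal sketch of the RAM pressure thresholds used above: free memory is
# expressed as a percentage of total, and the response escalates in two
# steps -- at or below 20% free only runaway wkhtmltopdf/php-cli jobs are
# killed (almost_oom_kill, and only when more than two wkhtmltopdf processes
# are running), at or below 15% the full stack is restarted (oom_restart).
# The helper name is an illustrative assumption; plain integer arithmetic
# stands in for the bc pipeline the script uses.
ram_pressure_sketch() {
  local mem_total mem_free pct
  mem_total=$(free -mt | grep Mem: | cut -d: -f2 | awk '{ print $1 }')
  mem_free=$(free -mt | grep Mem: | cut -d: -f2 | awk '{ print $6 }')  # "available" column
  pct=$(( mem_free * 100 / mem_total ))
  if [ "${pct}" -le "15" ]; then
    echo "would call: oom_restart RAM"
  elif [ "${pct}" -le "20" ]; then
    echo "would call: almost_oom_kill RAM"
  fi
}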
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/move_sql.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} if [ -e "/root/.proxy.cnf" ]; then exit 0 fi if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _B_NICE=${_B_NICE//[^0-9]/} fi if [ -z "${_B_NICE}" ]; then _B_NICE=10 fi create_locks() { echo "Creating locks..." touch /var/run/boa_wait.pid touch /var/run/fmp_wait.pid touch /var/run/mysql_restart_running.pid } remove_locks() { echo "Removing locks..." rm -f /var/run/boa_wait.pid rm -f /var/run/fmp_wait.pid rm -f /var/run/mysql_restart_running.pid } check_running() { if [ -e "/var/run/mysql_restart_running.pid" ]; then echo "MySQLD restart procedure in progress?" echo "Nothing to do, let's quit now. Bye!" exit 1 fi } start_sql() { check_running create_locks _IS_MYSQLD_RUNNING=$(ps aux | grep '[m]ysqld' | awk '{print $2}' 2>&1) if [ ! -z "${_IS_MYSQLD_RUNNING}" ]; then echo "MySQLD already running?" echo "Nothing to do. Bye!" remove_locks [ "$1" != "chain" ] && exit 1 fi echo "Starting MySQLD again..." renice ${_B_NICE} -p $$ &> /dev/null service mysql start &> /dev/null until [ ! -z "${_IS_MYSQLD_RUNNING}" ] \ && [ -e "/var/run/mysqld/mysqld.sock" ]; do _IS_MYSQLD_RUNNING=$(ps aux | grep '[m]ysqld' | awk '{print $2}' 2>&1) echo "Waiting for MySQLD graceful start..." sleep 3 done echo "MySQLD started" remove_locks echo "MySQLD start procedure completed" [ "$1" != "chain" ] && exit 0 } stop_sql() { check_running create_locks echo "Stopping Nginx now..." service nginx stop &> /dev/null until [ -z "${_IS_NGINX_RUNNING}" ]; do _IS_NGINX_RUNNING=$(ps aux | grep '[n]ginx' | awk '{print $2}' 2>&1) echo "Waiting for Nginx graceful shutdown..." sleep 3 done echo "Nginx stopped" echo "Stopping all PHP-FPM instances now..." _PHP_V="82 81 80 74 73 72 71 70 56 55 54 53" for e in ${_PHP_V}; do if [ -e "/etc/init.d/php${e}-fpm" ]; then service php${e}-fpm force-quit &> /dev/null fi done # kill -9 $(ps aux | grep '[p]hp-fpm' | awk '{print $2}') until [ -z "${_IS_FPM_RUNNING}" ]; do _IS_FPM_RUNNING=$(ps aux | grep '[p]hp-fpm' | awk '{print $2}' 2>&1) echo "Waiting for PHP-FPM graceful shutdown..." sleep 3 done echo "PHP-FPM stopped" _IS_MYSQLD_RUNNING=$(ps aux | grep '[m]ysqld' | awk '{print $2}' 2>&1) if [ ! -z "${_IS_MYSQLD_RUNNING}" ]; then if [ "${_DB_SERIES}" = "10.4" ] \ || [ "${_DB_SERIES}" = "10.3" ] \ || [ "${_DB_SERIES}" = "10.2" ] \ || [ "${_DB_SERIES}" = "5.7" ]; then echo "Preparing MySQLD for quick shutdown..." _SQL_PSWD=$(cat /root/.my.pass.txt 2>&1) _SQL_PSWD=$(echo -n ${_SQL_PSWD} | tr -d "\n" 2>&1) mysql -u root -e "SET GLOBAL innodb_max_dirty_pages_pct = 0;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_change_buffering = 'none';" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 1;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_io_capacity = 2000;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_io_capacity_max = 4000;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_pct = 100;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_now = ON;" &> /dev/null fi echo "Stopping MySQLD now..." service mysql stop &> /dev/null else echo "MySQLD already stopped?" echo "Nothing to do. Bye!" remove_locks [ "$1" != "chain" ] && exit 1 fi until [ -z "${_IS_MYSQLD_RUNNING}" ]; do _IS_MYSQLD_RUNNING=$(ps aux | grep '[m]ysqld' | awk '{print $2}' 2>&1) echo "Waiting for MySQLD graceful shutdown..." 
sleep 3 done echo "MySQLD stopped" remove_locks echo "MySQLD stop procedure completed" [ "$1" != "chain" ] && exit 0 } restart_sql() { stop_sql "chain" start_sql "chain" remove_locks exit 0 } case "$1" in restart) restart_sql ;; start) start_sql "only" ;; stop) stop_sql "only" ;; *) restart_sql ;; esac ###EOF2023###
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/mysql_backup.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} check_root() { if [ `whoami` = "root" ]; then ionice -c2 -n7 -p $$ renice 19 -p $$ chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi _DF_TEST=$(df -kTh / -l \ | grep '/' \ | sed 's/\%//g' \ | awk '{print $6}' 2> /dev/null) _DF_TEST=${_DF_TEST//[^0-9]/} if [ ! -z "${_DF_TEST}" ] && [ "${_DF_TEST}" -gt "90" ]; then echo "ERROR: Your disk space is almost full !!! ${_DF_TEST}/100" echo "ERROR: We can not proceed until it is below 90/100" exit 1 fi } check_root if [ -e "/root/.proxy.cnf" ]; then exit 0 fi if [ -e "/root/.pause_heavy_tasks_maint.cnf" ]; then exit 0 fi echo "INFO: Starting silent usage report on `date`" bash /var/xdrago/usage.sh silent wait echo "INFO: Completing silent usage report on `date`" _VM_TEST=$(uname -a 2>&1) if [[ "${_VM_TEST}" =~ "-beng" ]]; then _VMFAMILY="VS" else _VMFAMILY="XEN" fi if [ "${_VMFAMILY}" = "VS" ]; then n=$((RANDOM%600+8)) echo "INFO: Waiting $n seconds 1/2 on `date` before running backup..." sleep $n n=$((RANDOM%300+8)) echo "INFO: Waiting $n seconds 2/2 on `date` before running backup..." sleep $n fi echo "INFO: Starting dbs backup on `date`" if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _B_NICE=${_B_NICE//[^0-9]/} fi if [ -z "${_B_NICE}" ]; then _B_NICE=10 fi _BACKUPDIR=/data/disk/arch/sql _CHECK_HOST=$(uname -n 2>&1) _DATE=$(date +%y%m%d-%H%M%S 2>&1) _DOW=$(date +%u 2>&1) _DOW=${_DOW//[^1-7]/} _DOM=$(date +%e 2>&1) _DOM=${_DOM//[^0-9]/} _SAVELOCATION=${_BACKUPDIR}/${_CHECK_HOST}-${_DATE} if [ -e "/root/.my.optimize.cnf" ]; then _OPTIM=YES else _OPTIM=NO fi touch /var/run/boa_sql_backup.pid _SQL_PSWD=$(cat /root/.my.pass.txt 2>&1) _SQL_PSWD=$(echo -n ${_SQL_PSWD} | tr -d "\n" 2>&1) create_locks() { echo "INFO: Creating locks for $1" touch /var/run/mysql_backup_running.pid } remove_locks() { echo "INFO: Removing locks for $1" rm -f /var/run/mysql_backup_running.pid } check_running() { until [ ! -z "${_IS_MYSQLD_RUNNING}" ] \ && [ -e "/var/run/mysqld/mysqld.sock" ]; do _IS_MYSQLD_RUNNING=$(ps aux | grep '[m]ysqld' | awk '{print $2}' 2>&1) if [ "${_DEBUG_MODE}" = "YES" ]; then echo "INFO: Waiting for MySQLD availability..." 
fi sleep 3 done } truncate_cache_tables() { check_running _TABLES=$(mysql ${_DB} -u root -e "show tables" -s | grep ^cache | uniq | sort 2>&1) for C in ${_TABLES}; do mysql ${_DB}<<EOFMYSQL TRUNCATE ${C}; EOFMYSQL done } truncate_watchdog_tables() { check_running _TABLES=$(mysql ${_DB} -u root -e "show tables" -s | grep ^watchdog$ 2>&1) for W in ${_TABLES}; do mysql ${_DB}<<EOFMYSQL TRUNCATE ${W}; EOFMYSQL done } truncate_accesslog_tables() { check_running _TABLES=$(mysql ${_DB} -u root -e "show tables" -s | grep ^accesslog$ 2>&1) for A in ${_TABLES}; do mysql ${_DB}<<EOFMYSQL TRUNCATE ${A}; EOFMYSQL done } truncate_batch_tables() { check_running _TABLES=$(mysql ${_DB} -u root -e "show tables" -s | grep ^batch$ 2>&1) for B in ${_TABLES}; do mysql ${_DB}<<EOFMYSQL TRUNCATE ${B}; EOFMYSQL done } truncate_queue_tables() { check_running _TABLES=$(mysql ${_DB} -u root -e "show tables" -s | grep ^queue$ 2>&1) for Q in ${_TABLES}; do mysql ${_DB}<<EOFMYSQL TRUNCATE ${Q}; EOFMYSQL done } truncate_views_data_export() { check_running _TABLES=$(mysql ${_DB} -u root -e "show tables" -s | grep ^views_data_export_index_ 2>&1) for V in ${_TABLES}; do mysql ${_DB}<<EOFMYSQL DROP TABLE ${V}; EOFMYSQL done mysql ${_DB}<<EOFMYSQL TRUNCATE views_data_export_object_cache; EOFMYSQL } repair_this_database() { check_running mysqlcheck -u root --auto-repair --silent ${_DB} } optimize_this_database() { check_running _TABLES=$(mysql ${_DB} -u root -e "show tables" -s | uniq | sort 2>&1) for T in ${_TABLES}; do mysql ${_DB}<<EOFMYSQL OPTIMIZE TABLE ${T}; EOFMYSQL done } convert_to_innodb() { check_running _TABLES=$(mysql ${_DB} -u root -e "show tables" -s | uniq | sort 2>&1) for T in ${_TABLES}; do mysql ${_DB}<<EOFMYSQL ALTER TABLE ${T} ENGINE=INNODB; EOFMYSQL done } backup_this_database_with_mydumper() { n=$((RANDOM%15+5)) echo waiting ${n} sec sleep ${n} check_running if [ ! -d "${_SAVELOCATION}/${_DB}" ]; then mkdir -p ${_SAVELOCATION}/${_DB} fi mydumper \ --database=${_DB} \ --host=localhost \ --user=root \ --password=${_SQL_PSWD} \ --port=3306 \ --outputdir=${_SAVELOCATION}/${_DB}/ \ --rows=50000 \ --build-empty-files \ --threads=4 \ --less-locking \ --long-query-guard=900 \ --verbose=1 } backup_this_database_with_mysqldump() { n=$((RANDOM%15+5)) echo waiting ${n} sec sleep ${n} check_running mysqldump \ --single-transaction \ --quick \ --no-autocommit \ --skip-add-locks \ --no-tablespaces \ --hex-blob ${_DB} \ > ${_SAVELOCATION}/${_DB}.sql } compress_backup() { if [ "${_MYQUICK_STATUS}" = "OK" ]; then for DbPath in `find ${_SAVELOCATION}/ -maxdepth 1 -mindepth 1 | sort`; do if [ -e "${DbPath}/metadata" ]; then DbName=$(echo ${DbPath} | cut -d'/' -f7 | awk '{ print $1}' 2>&1) cd ${_SAVELOCATION} tar cvfj ${DbName}-${_DATE}.tar.bz2 ${DbName} &> /dev/null rm -f -r ${DbName} fi done chmod 600 ${_SAVELOCATION}/* chmod 700 ${_SAVELOCATION} chmod 700 /data/disk/arch echo "INFO: Permissions fixed" else bzip2 ${_SAVELOCATION}/*.sql chmod 600 ${_BACKUPDIR}/*/* chmod 700 ${_BACKUPDIR}/* chmod 700 ${_BACKUPDIR} chmod 700 /data/disk/arch echo "INFO: Permissions fixed" fi } [ ! 
-a ${_SAVELOCATION} ] && mkdir -p ${_SAVELOCATION}; if [ "${_DB_SERIES}" = "10.4" ] \ || [ "${_DB_SERIES}" = "10.3" ] \ || [ "${_DB_SERIES}" = "10.2" ] \ || [ "${_DB_SERIES}" = "5.7" ]; then check_running mysql -u root -e "SET GLOBAL innodb_max_dirty_pages_pct = 0;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_change_buffering = 'none';" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 1;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_io_capacity = 2000;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_io_capacity_max = 4000;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_pct = 100;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_now = ON;" &> /dev/null fi _MYQUICK_STATUS= if [ -x "/usr/local/bin/mydumper" ]; then _DB_V=$(mysql -V 2>&1 \ | tr -d "\n" \ | cut -d" " -f6 \ | awk '{ print $1}' \ | cut -d"-" -f1 \ | awk '{ print $1}' \ | sed "s/[\,']//g" 2>&1) _MD_V=$(mydumper --version 2>&1 \ | tr -d "\n" \ | cut -d" " -f6 \ | awk '{ print $1}' \ | cut -d"-" -f1 \ | awk '{ print $1}' \ | sed "s/[\,']//g" 2>&1) if [ "${_DB_V}" = "${_MD_V}" ] \ && [ ! -e "/root/.mysql.force.legacy.backup.cnf" ]; then _MYQUICK_STATUS=OK echo "INFO: Installed MyQuick for ${_MD_V} (${_DB_V}) looks fine" fi fi for _DB in `mysql -e "show databases" -s | uniq | sort`; do if [ "${_DB}" != "Database" ] \ && [ "${_DB}" != "information_schema" ] \ && [ "${_DB}" != "performance_schema" ]; then check_running create_locks ${_DB} if [ "${_DB}" != "mysql" ]; then if [ -e "/var/lib/mysql/${_DB}/queue.ibd" ]; then _IS_GB=$(du -s -h /var/lib/mysql/${_DB}/queue.ibd | grep "G" 2>&1) if [[ "${_IS_GB}" =~ "queue" ]]; then truncate_queue_tables &> /dev/null echo "INFO: Truncated giant queue in ${_DB}" fi fi if [ -e "/var/lib/mysql/${_DB}/batch.ibd" ]; then _IS_GB=$(du -s -h /var/lib/mysql/${_DB}/batch.ibd | grep "G" 2>&1) if [[ "${_IS_GB}" =~ "batch" ]]; then truncate_batch_tables &> /dev/null echo "INFO: Truncated giant batch in ${_DB}" fi fi if [ -e "/var/lib/mysql/${_DB}/watchdog.ibd" ]; then _IS_GB=$(du -s -h /var/lib/mysql/${_DB}/watchdog.ibd | grep "G" 2>&1) if [[ "${_IS_GB}" =~ "watchdog" ]]; then truncate_watchdog_tables &> /dev/null echo "INFO: Truncated giant watchdog in ${_DB}" fi fi if [ -e "/var/lib/mysql/${_DB}/accesslog.ibd" ]; then _IS_GB=$(du -s -h /var/lib/mysql/${_DB}/accesslog.ibd | grep "G" 2>&1) if [[ "${_IS_GB}" =~ "accesslog" ]]; then truncate_accesslog_tables &> /dev/null echo "INFO: Truncated giant accesslog in ${_DB}" fi fi truncate_views_data_export &> /dev/null echo "INFO: Truncated not used views_data_export in ${_DB}" _CACHE_CLEANUP=NONE if [ "${_DOW}" = "6" ] && [ -e "/root/.my.batch_innodb.cnf" ]; then repair_this_database &> /dev/null echo "INFO: Repair task for ${_DB} completed" truncate_cache_tables &> /dev/null echo "INFO: All cache tables in ${_DB} truncated" convert_to_innodb &> /dev/null echo "INFO: InnoDB conversion task for ${_DB} completed" _CACHE_CLEANUP=DONE fi if [ "${_OPTIM}" = "YES" ] \ && [ "${_DOW}" = "7" ] \ && [ "${_DOM}" -ge "24" ] \ && [ "${_DOM}" -lt "31" ]; then repair_this_database &> /dev/null echo "INFO: Repair task for ${_DB} completed" truncate_cache_tables &> /dev/null echo "INFO: All cache tables in ${_DB} truncated" optimize_this_database &> /dev/null echo "INFO: Optimize task for ${_DB} completed" _CACHE_CLEANUP=DONE fi if [ "${_CACHE_CLEANUP}" != "DONE" ]; then truncate_cache_tables &> /dev/null echo "INFO: All cache tables in ${_DB} truncated" fi fi if [ "${_MYQUICK_STATUS}" = "OK" ]; then 
backup_this_database_with_mydumper &> /dev/null else backup_this_database_with_mysqldump &> /dev/null fi remove_locks ${_DB} echo "INFO: Backup completed for ${_DB}" echo fi done echo "INFO: Running all dbs usage report on `date`" du -s /var/lib/mysql/* > /root/.du.local.sql echo "INFO: Completing all dbs usage report on `date`" if [ "${_OPTIM}" = "YES" ] \ && [ "${_DOW}" = "7" ] \ && [ "${_DOM}" -ge "24" ] \ && [ "${_DOM}" -lt "31" ] \ && [ -e "/root/.my.restart_after_optimize.cnf" ] \ && [ ! -e "/var/run/boa_run.pid" ]; then if [ "${_DB_SERIES}" = "10.4" ] \ || [ "${_DB_SERIES}" = "10.3" ] \ || [ "${_DB_SERIES}" = "10.2" ] \ || [ "${_DB_SERIES}" = "5.7" ]; then check_running mysql -u root -e "SET GLOBAL innodb_max_dirty_pages_pct = 0;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_change_buffering = 'none';" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 1;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_io_capacity = 2000;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_io_capacity_max = 4000;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_pct = 100;" &> /dev/null mysql -u root -e "SET GLOBAL innodb_buffer_pool_dump_now = ON;" &> /dev/null fi echo "INFO: Running db server restart on `date`" bash /var/xdrago/move_sql.sh wait echo "INFO: Completing db server restart on `date`" fi echo "INFO: Completing all dbs backups on `date`" rm -f /var/run/boa_sql_backup.pid touch /var/xdrago/log/last-run-backup if [ "${_VMFAMILY}" = "VS" ]; then n=$((RANDOM%300+8)) echo "INFO: Waiting $n seconds on `date` before running compress..." sleep $n fi echo "INFO: Starting dbs backup compress on `date`" compress_backup &> /dev/null echo "INFO: Completing dbs backup compress on `date`" echo "INFO: Starting dbs backup cleanup on `date`" _DB_BACKUPS_TTL=${_DB_BACKUPS_TTL//[^0-9]/} if [ -z "${_DB_BACKUPS_TTL}" ]; then _DB_BACKUPS_TTL="7" fi find ${_BACKUPDIR} -mtime +${_DB_BACKUPS_TTL} -type d -exec rm -rf {} \; echo "INFO: Backups older than ${_DB_BACKUPS_TTL} days deleted" echo "INFO: Starting verbose usage report on `date`" bash /var/xdrago/usage.sh verbose wait echo "INFO: Completing verbose usage report on `date`" echo "INFO: ALL TASKS COMPLETED, BYE!" exit 0 ###EOF2023###
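# A minimal sketch of the backup-tool selection used above: prefer mydumper only
# when the MySQL/MariaDB version it was built against matches the installed
# client version, otherwise fall back to mysqldump. Like the script above, it
# assumes the relevant version string is the sixth word of "mysql -V" and of
# "mydumper --version"; /root/.mysql.force.legacy.backup.cnf is the same
# opt-out flag the script checks.
pick_backup_tool() {
  if [ -x "/usr/local/bin/mydumper" ] \
    && [ ! -e "/root/.mysql.force.legacy.backup.cnf" ]; then
    _DB_V=$(mysql -V 2>&1 | cut -d" " -f6 | cut -d"-" -f1 | sed "s/[,']//g")
    _MD_V=$(mydumper --version 2>&1 | cut -d" " -f6 | cut -d"-" -f1 | sed "s/[,']//g")
    if [ -n "${_DB_V}" ] && [ "${_DB_V}" = "${_MD_V}" ]; then
      echo mydumper
      return
    fi
  fi
  echo mysqldump
}
_TOOL=$(pick_backup_tool)
echo "INFO: Selected backup tool: ${_TOOL}"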
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/mysql_cluster_backup.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} check_root() { if [ `whoami` = "root" ]; then ionice -c2 -n7 -p $$ renice 19 -p $$ chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi _DF_TEST=$(df -kTh / -l \ | grep '/' \ | sed 's/\%//g' \ | awk '{print $6}' 2> /dev/null) _DF_TEST=${_DF_TEST//[^0-9]/} if [ ! -z "${_DF_TEST}" ] && [ "${_DF_TEST}" -gt "90" ]; then echo "ERROR: Your disk space is almost full !!! ${_DF_TEST}/100" echo "ERROR: We can not proceed until it is below 90/100" exit 1 fi } check_root [ -e "/root/.proxy.cnf" ] && exit 0 [ ! -e "/root/.my.cluster_write_node.txt" ] && exit 0 [ ! -e "/root/.my.cluster_root_pwd.txt" ] && exit 0 if [ -e "/root/.my.cluster_root_pwd.txt" ]; then _SQL_PSWD=$(cat /root/.my.cluster_root_pwd.txt 2>&1) _SQL_PSWD=$(echo -n ${_SQL_PSWD} | tr -d "\n" 2>&1) fi if [ -e "/root/.my.cluster_backup_proxysql.txt" ]; then _SQL_PORT="6033" _SQL_HOST="127.0.0.1" else _SQL_PORT="3306" if [ -e "/root/.my.cluster_write_node.txt" ]; then _SQL_HOST=$(cat /root/.my.cluster_write_node.txt 2>&1) _SQL_HOST=$(echo -n ${_SQL_HOST} | tr -d "\n" 2>&1) fi [ -z ${_SQL_HOST} ] && _SQL_HOST="127.0.0.1" && _SQL_PORT="3306" fi _C_SQL="mysql --user=root --password=${_SQL_PSWD} --host=${_SQL_HOST} --port=${_SQL_PORT} --protocol=tcp" echo "SQL --host=${_SQL_HOST} --port=${_SQL_PORT}" n=$((RANDOM%600+8)) echo "Waiting $n seconds on `date` before running backup..." sleep $n echo "Starting backup on `date`" if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _B_NICE=${_B_NICE//[^0-9]/} fi if [ -z "${_B_NICE}" ]; then _B_NICE=10 fi _BACKUPDIR=/data/disk/arch/cluster _CHECK_HOST=$(uname -n 2>&1) _DATE=$(date +%y%m%d-%H%M%S 2>&1) _DOW=$(date +%u 2>&1) _DOW=${_DOW//[^1-7]/} _DOM=$(date +%e 2>&1) _DOM=${_DOM//[^0-9]/} _SAVELOCATION=${_BACKUPDIR}/${_CHECK_HOST}-${_DATE} if [ -e "/root/.my.optimize.cnf" ]; then _OPTIM=YES else _OPTIM=NO fi _VM_TEST=$(uname -a 2>&1) if [[ "${_VM_TEST}" =~ "-beng" ]]; then _VMFAMILY="VS" else _VMFAMILY="XEN" fi touch /var/run/boa_sql_cluster_backup.pid create_locks() { echo "Creating locks for $1" #touch /var/run/boa_wait.pid touch /var/run/mysql_cluster_backup_running.pid } remove_locks() { echo "Removing locks for $1" #rm -f /var/run/boa_wait.pid rm -f /var/run/mysql_cluster_backup_running.pid } check_running() { until [ ! -z "${_IS_PROXYSQL_RUNNING}" ] \ && [ -e "/var/lib/proxysql/proxysql.pid" ]; do _IS_PROXYSQL_RUNNING=$(ps aux | grep '[p]roxysql' | awk '{print $2}' 2>&1) echo "Waiting for ProxySQL availability..." 
sleep 3 done } truncate_cache_tables() { check_running _TABLES=$(${_C_SQL} ${_DB} -e "show tables" -s | grep ^cache | uniq | sort 2>&1) for C in ${_TABLES}; do ${_C_SQL} ${_DB}<<EOFMYSQL TRUNCATE ${C}; EOFMYSQL sleep 1 done } truncate_watchdog_tables() { check_running _TABLES=$(${_C_SQL} ${_DB} -e "show tables" -s | grep ^watchdog$ 2>&1) for A in ${_TABLES}; do ${_C_SQL} ${_DB}<<EOFMYSQL TRUNCATE ${A}; EOFMYSQL sleep 1 done } truncate_accesslog_tables() { check_running _TABLES=$(${_C_SQL} ${_DB} -e "show tables" -s | grep ^accesslog$ 2>&1) for A in ${_TABLES}; do ${_C_SQL} ${_DB}<<EOFMYSQL TRUNCATE ${A}; EOFMYSQL sleep 1 done } truncate_queue_tables() { check_running _TABLES=$(${_C_SQL} ${_DB} -e "show tables" -s | grep ^queue$ 2>&1) for Q in ${_TABLES}; do ${_C_SQL} ${_DB}<<EOFMYSQL TRUNCATE ${Q}; EOFMYSQL sleep 1 done } repair_this_database() { check_running mysqlcheck --host=${_SQL_HOST} --port=${_SQL_PORT} --protocol=tcp -u root --auto-repair --silent ${_DB} } optimize_this_database() { check_running _TABLES=$(${_C_SQL} ${_DB} -e "show tables" -s | uniq | sort 2>&1) for T in ${_TABLES}; do ${_C_SQL} ${_DB}<<EOFMYSQL OPTIMIZE TABLE ${T}; EOFMYSQL done } convert_to_innodb() { check_running _TABLES=$(${_C_SQL} ${_DB} -e "show tables" -s | uniq | sort 2>&1) for T in ${_TABLES}; do ${_C_SQL} ${_DB}<<EOFMYSQL ALTER TABLE ${T} ENGINE=INNODB; EOFMYSQL done } backup_this_database_with_mydumper() { check_running if [ ! -d "${_SAVELOCATION}/${_DB}" ]; then mkdir -p ${_SAVELOCATION}/${_DB} fi mydumper \ --database=${_DB} \ --host=${_SQL_HOST} \ --user=root \ --password=${_SQL_PSWD} \ --port=${_SQL_PORT} \ --outputdir=${_SAVELOCATION}/${_DB}/ \ --rows=50000 \ --build-empty-files \ --threads=4 \ --less-locking \ --long-query-guard=900 \ --verbose=1 } backup_this_database_with_mysqldump() { check_running mysqldump \ --user=root \ --password=${_SQL_PSWD} \ --host=${_SQL_HOST} \ --port=${_SQL_PORT} \ --protocol=tcp \ --single-transaction \ --quick \ --no-autocommit \ --skip-add-locks \ --no-tablespaces \ --hex-blob ${_DB} \ > ${_SAVELOCATION}/${_DB}.sql } compress_backup() { if [ "${_MYQUICK_STATUS}" = "OK" ]; then for DbPath in `find ${_SAVELOCATION}/ -maxdepth 1 -mindepth 1 | sort`; do if [ -e "${DbPath}/metadata" ]; then DbName=$(echo ${DbPath} | cut -d'/' -f7 | awk '{ print $1}' 2>&1) cd ${_SAVELOCATION} tar cvfj ${DbName}-${_DATE}.tar.bz2 ${DbName} &> /dev/null rm -f -r ${DbName} fi done chmod 600 ${_SAVELOCATION}/* chmod 700 ${_SAVELOCATION} chmod 700 /data/disk/arch echo "Permissions fixed" else bzip2 ${_SAVELOCATION}/*.sql chmod 600 ${_BACKUPDIR}/*/* chmod 700 ${_BACKUPDIR}/* chmod 700 ${_BACKUPDIR} chmod 700 /data/disk/arch echo "Permissions fixed" fi } [ ! 
-a ${_SAVELOCATION} ] && mkdir -p ${_SAVELOCATION}; if [ "${_DB_SERIES}" = "10.4" ] \ || [ "${_DB_SERIES}" = "10.3" ] \ || [ "${_DB_SERIES}" = "10.2" ] \ || [ "${_DB_SERIES}" = "5.7" ]; then check_running ${_C_SQL} -e "SET GLOBAL innodb_max_dirty_pages_pct = 0;" &> /dev/null ${_C_SQL} -e "SET GLOBAL innodb_change_buffering = 'none';" &> /dev/null ${_C_SQL} -e "SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 1;" &> /dev/null ${_C_SQL} -e "SET GLOBAL innodb_io_capacity = 2000;" &> /dev/null ${_C_SQL} -e "SET GLOBAL innodb_io_capacity_max = 4000;" &> /dev/null ${_C_SQL} -e "SET GLOBAL innodb_buffer_pool_dump_pct = 100;" &> /dev/null ${_C_SQL} -e "SET GLOBAL innodb_buffer_pool_dump_now = ON;" &> /dev/null fi _MYQUICK_STATUS= if [ -x "/usr/local/bin/mydumper" ]; then _DB_V=$(mysql -V 2>&1 \ | tr -d "\n" \ | cut -d" " -f6 \ | awk '{ print $1}' \ | cut -d"-" -f1 \ | awk '{ print $1}' \ | sed "s/[\,']//g" 2>&1) _MD_V=$(mydumper --version 2>&1 \ | tr -d "\n" \ | cut -d" " -f6 \ | awk '{ print $1}' \ | cut -d"-" -f1 \ | awk '{ print $1}' \ | sed "s/[\,']//g" 2>&1) if [ "${_DB_V}" = "${_MD_V}" ] \ && [ ! -e "/root/.mysql.force.legacy.backup.cnf" ]; then _MYQUICK_STATUS=OK echo "INFO: Installed MyQuick for ${_MD_V} (${_DB_V}) looks fine" fi fi for _DB in `${_C_SQL} -e "show databases" -s | uniq | sort`; do if [ "${_DB}" != "Database" ] \ && [ "${_DB}" != "information_schema" ] \ && [ "${_DB}" != "performance_schema" ]; then check_running create_locks ${_DB} if [ "${_DB}" != "mysql" ]; then _IS_GB=$(${_C_SQL} --skip-column-names --silent -e "SELECT table_name 'Table Name', round(((data_length + index_length)/1024/1024),0) 'Table Size (MB)' FROM information_schema.TABLES WHERE table_schema = '${_DB}' AND table_name ='watchdog';" | cut -d'/' -f1 | awk '{ print $2}' | sed "s/[\/\s+]//g" | bc 2>&1) _IS_GB=${_IS_GB//[^0-9]/} _SQL_MAX_LIMIT="1024" if [ ! 
-z "${_IS_GB}" ]; then if [ "${_IS_GB}" -gt "${_SQL_MAX_LIMIT}" ]; then truncate_watchdog_tables &> /dev/null echo "Truncated giant ${_IS_GB} watchdog in ${_DB}" fi fi # truncate_accesslog_tables &> /dev/null # echo "Truncated not used accesslog in ${_DB}" # truncate_queue_tables &> /dev/null # echo "Truncated queue table in ${_DB}" _CACHE_CLEANUP=NONE # if [ "${_DOW}" = "6" ] && [ -e "/root/.my.batch_innodb.cnf" ]; then # repair_this_database &> /dev/null # echo "Repair task for ${_DB} completed" # truncate_cache_tables &> /dev/null # echo "All cache tables in ${_DB} truncated" # convert_to_innodb &> /dev/null # echo "InnoDB conversion task for ${_DB} completed" # _CACHE_CLEANUP=DONE # fi # if [ "${_OPTIM}" = "YES" ] \ # && [ "${_DOW}" = "7" ] \ # && [ "${_DOM}" -ge "24" ] \ # && [ "${_DOM}" -lt "31" ]; then # repair_this_database &> /dev/null # echo "Repair task for ${_DB} completed" # truncate_cache_tables &> /dev/null # echo "All cache tables in ${_DB} truncated" # optimize_this_database &> /dev/null # echo "Optimize task for ${_DB} completed" # _CACHE_CLEANUP=DONE # fi if [ "${_CACHE_CLEANUP}" != "DONE" ]; then truncate_cache_tables &> /dev/null echo "All cache tables in ${_DB} truncated" fi fi if [ "${_MYQUICK_STATUS}" = "OK" ]; then backup_this_database_with_mydumper &> /dev/null else backup_this_database_with_mysqldump &> /dev/null fi remove_locks ${_DB} echo "Backup completed for ${_DB}" echo " " fi done echo "MAIN TASKS COMPLETED" rm -f /var/run/boa_sql_cluster_backup.pid echo "CLEANUP" _DB_BACKUPS_TTL=${_DB_BACKUPS_TTL//[^0-9]/} if [ -z "${_DB_BACKUPS_TTL}" ]; then _DB_BACKUPS_TTL="30" fi find ${_BACKUPDIR} -mtime +${_DB_BACKUPS_TTL} -type d -exec rm -rf {} \; echo "Backups older than ${_DB_BACKUPS_TTL} days deleted" echo "Backups older than ${_DB_BACKUPS_TTL} days deleted" n=$((RANDOM%300+8)) echo "Waiting $n seconds on `date` before running compress..." sleep $n echo "Starting compress on `date`" echo "COMPRESS" compress_backup &> /dev/null touch /var/xdrago/log/last-run-cluster-backup echo "ALL TASKS COMPLETED" exit 0 ###EOF2023###
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/mysql_hourly.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} check_root() { if [ `whoami` = "root" ]; then ionice -c2 -n7 -p $$ renice 19 -p $$ chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi _DF_TEST=$(df -kTh / -l \ | grep '/' \ | sed 's/\%//g' \ | awk '{print $6}' 2> /dev/null) _DF_TEST=${_DF_TEST//[^0-9]/} if [ ! -z "${_DF_TEST}" ] && [ "${_DF_TEST}" -gt "90" ]; then echo "ERROR: Your disk space is almost full !!! ${_DF_TEST}/100" echo "ERROR: We can not proceed until it is below 90/100" exit 1 fi } check_root rm -f /root/.pause_tasks_maint.cnf rm -f /root/.restrict_this_vm.cnf if [ -e "/root/.proxy.cnf" ]; then exit 0 fi if [ -e "/root/.pause_heavy_tasks_maint.cnf" ]; then exit 0 fi _SQL_PSWD=$(cat /root/.my.pass.txt 2>&1) _SQL_PSWD=$(echo -n ${_SQL_PSWD} | tr -d "\n" 2>&1) os_detection_minimal() { _THIS_RV=$(lsb_release -sc 2>&1) if [ "${_THIS_RV}" = "chimaera" ] \ || [ "${_THIS_RV}" = "beowulf" ] \ || [ "${_THIS_RV}" = "bullseye" ] \ || [ "${_THIS_RV}" = "buster" ]; then _APT_UPDATE="apt-get update --allow-releaseinfo-change" else _APT_UPDATE="apt-get update" fi } os_detection_minimal apt_clean_update() { apt-get clean -qq 2> /dev/null rm -rf /var/lib/apt/lists/* &> /dev/null ${_APT_UPDATE} -qq 2> /dev/null } if [ -x "/usr/bin/gpg2" ]; then _GPG=gpg2 else _GPG=gpg fi find_fast_mirror() { isNetc=$(which netcat 2>&1) if [ ! -x "${isNetc}" ] || [ -z "${isNetc}" ]; then if [ ! -e "/etc/apt/apt.conf.d/00sandboxoff" ] \ && [ -e "/etc/apt/apt.conf.d" ]; then echo "APT::Sandbox::User \"root\";" > /etc/apt/apt.conf.d/00sandboxoff fi apt_clean_update apt-get install netcat ${aptYesUnth} &> /dev/null sleep 3 fi ffMirr=$(which ffmirror 2>&1) if [ -x "${ffMirr}" ]; then ffList="/var/backups/boa-mirrors-2023-01.txt" mkdir -p /var/backups if [ ! -e "${ffList}" ]; then echo "de.files.aegir.cc" > ${ffList} echo "ny.files.aegir.cc" >> ${ffList} echo "sg.files.aegir.cc" >> ${ffList} fi if [ -e "${ffList}" ]; then _CHECK_MIRROR=$(bash ${ffMirr} < ${ffList} 2>&1) _USE_MIR="${_CHECK_MIRROR}" [[ "${_USE_MIR}" =~ "printf" ]] && _USE_MIR="files.aegir.cc" else _USE_MIR="files.aegir.cc" fi else _USE_MIR="files.aegir.cc" fi urlDev="http://${_USE_MIR}/dev" urlHmr="http://${_USE_MIR}/versions/master/aegir" } find_fast_mirror truncate_watchdog_tables() { _TABLES=$(mysql ${_DB} -u root -e "show tables" -s | grep ^watchdog$ 2>&1) for A in ${_TABLES}; do mysql ${_DB}<<EOFMYSQL TRUNCATE ${A}; EOFMYSQL sleep 1 done } if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _B_NICE=${_B_NICE//[^0-9]/} fi if [ -z "${_B_NICE}" ]; then _B_NICE=10 fi _BACKUPDIR=/data/disk/arch/hourly _CHECK_HOST=$(uname -n 2>&1) _DATE=$(date +%y%m%d-%H%M%S 2>&1) _DOW=$(date +%u 2>&1) _DOW=${_DOW//[^1-7]/} _DOM=$(date +%e 2>&1) _DOM=${_DOM//[^0-9]/} _SAVELOCATION=${_BACKUPDIR}/${_CHECK_HOST}-${_DATE} _VM_TEST=$(uname -a 2>&1) _LOGDIR="/var/xdrago/log/hourly" _THIS_OS=$(lsb_release -si 2>&1) _OSR=$(lsb_release -sc 2>&1) if [[ "${_VM_TEST}" =~ "-beng" ]]; then _VMFAMILY="VS" else _VMFAMILY="XEN" fi if [[ "${_CHECK_HOST}" =~ ".host8." 
]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]]; then PrTestPhantom=$(grep "PHANTOM" /root/.*.octopus.cnf 2>&1) PrTestCluster=$(grep "CLUSTER" /root/.*.octopus.cnf 2>&1) InTest=$(ls /data/disk/*/static/control/cli.info | wc -l 2>&1) if [ "${InTest}" -le "5" ]; then if [[ "${PrTestPhantom}" =~ "PHANTOM" ]] \ || [[ "${PrTestCluster}" =~ "PHANTOM" ]]; then _HOURLY_DB_BACKUPS="YES" fi fi fi if [ -z "${_HOURLY_DB_BACKUPS}" ] \ || [ "${_HOURLY_DB_BACKUPS}" != "YES" ]; then rm -rf /data/disk/arch/hourly/* exit 1 fi if [ ! -e "/usr/bin/innobackupex" ]; then touch /usr/bin/innobackupex fi percList="/etc/apt/sources.list.d/percona-release.list" if [ ! -e "${percList}" ] \ || [ ! -e "/usr/bin/innobackupex" ]; then if [ ! -e "/etc/apt/apt.conf.d/00sandboxoff" ] \ && [ -e "/etc/apt/apt.conf.d" ]; then echo "APT::Sandbox::User \"root\";" > /etc/apt/apt.conf.d/00sandboxoff fi rm -f /etc/apt/sources.list.d/mariadb.* rm -f /etc/apt/sources.list.d/percona-.* rm -f /etc/apt/sources.list.d/xtrabackup.* percList="/etc/apt/sources.list.d/percona-release.list" _DB_SRC="repo.percona.com" percRepo="${_DB_SRC}/percona/apt" if [ -e "/root/.beowulf_to_chimaera_major_os_upgrade.info" ] \ || [ -e "/root/.bullseye_to_chimaera_major_os_upgrade.info" ]; then _REAL_OSR="chimaera" _REAL_OS="Devuan" else _REAL_OSR="${_OSR}" _REAL_OS="${_THIS_OS}" fi if [ "${_REAL_OSR}" = "chimaera" ]; then _SQL_OSR=bullseye elif [ "${_REAL_OSR}" = "beowulf" ]; then _SQL_OSR=buster else _SQL_OSR="${_REAL_OSR}" fi echo "## Percona APT Repository" > ${percList} echo "deb http://${percRepo} ${_SQL_OSR} main" >> ${percList} echo "deb-src http://${percRepo} ${_SQL_OSR} main" >> ${percList} echo -e 'Package: *\nPin: release o=Percona Development Team\nPin-Priority: 1001' > /etc/apt/preferences.d/00percona.pref apt_clean_update if [ -x "/usr/sbin/csf" ] \ && [ -e "/etc/csf/csf.deny" ]; then service lfd stop &> /dev/null wait kill -9 $(ps aux | grep '[C]onfigServer' | awk '{print $2}') &> /dev/null killall sleep &> /dev/null rm -f /etc/csf/csf.error csf -x &> /dev/null wait fi _KEYS_SIG="8507EFA5" _KEYS_SERVER_TEST=FALSE until [[ "${_KEYS_SERVER_TEST}" =~ "Percona" ]]; do if [ "${_DEBUG_MODE}" = "YES" ]; then echo "Retrieving ${_KEYS_SIG} key..." 
fi cd /var/opt _KEYS_FILE_TEST=FALSE until [[ "${_KEYS_FILE_TEST}" =~ "GnuPG" ]]; do rm -f percona-key.gpg* wget -q -U iCab ${urlDev}/percona-key.gpg _KEYS_FILE_TEST=$(grep GnuPG percona-key.gpg 2>&1) sleep 5 done cat percona-key.gpg | ${_GPG} --import &> /dev/null rm -f percona-key.gpg* ${_GPG} --keyserver pgpkeys.mit.edu --recv-key ${_KEYS_SIG} &> /dev/null ${_GPG} -a --export ${_KEYS_SIG} | apt-key add - &> /dev/null _KEYS_SERVER_TEST=$(${_GPG} --list-keys ${_KEYS_SIG} 2>&1) sleep 2 if [ `ps aux | grep -v "grep" | grep --count "dirmngr"` -gt "5" ]; then kill -9 $(ps aux | grep '[d]irmngr' | awk '{print $2}') &> /dev/null echo "$(date 2>&1) Too many dirmngr processes killed" >> \ /var/xdrago/log/dirmngr-count.kill.log fi if [ `ps aux | grep -v "grep" | grep --count "gpg-agent"` -gt "5" ]; then kill -9 $(ps aux | grep '[g]pg-agent' | awk '{print $2}') &> /dev/null echo "$(date 2>&1) Too many gpg-agent processes killed" >> \ /var/xdrago/log/gpg-agent-count.kill.log fi done apt_clean_update if [ -x "/usr/sbin/csf" ] \ && [ -e "/etc/csf/csf.deny" ]; then csf -e &> /dev/null wait service lfd start &> /dev/null wait ### Linux kernel TCP SACK CVEs mitigation ### CVE-2019-11477 SACK Panic ### CVE-2019-11478 SACK Slowness ### CVE-2019-11479 Excess Resource Consumption Due to Low MSS Values if [ -x "/usr/sbin/csf" ] && [ -e "/etc/csf/csf.deny" ]; then _SACK_TEST=$(ip6tables --list | grep tcpmss 2>&1) if [[ ! "${_SACK_TEST}" =~ "tcpmss" ]]; then sysctl net.ipv4.tcp_mtu_probing=0 &> /dev/null iptables -A INPUT -p tcp -m tcpmss --mss 1:500 -j DROP &> /dev/null ip6tables -A INPUT -p tcp -m tcpmss --mss 1:500 -j DROP &> /dev/null fi fi fi apt_clean_update apt-get install percona-xtrabackup-24 -y fi if [ "${_VMFAMILY}" = "VS" ]; then n=$((RANDOM%300+8)) echo "Waiting $n seconds 1/2 on `date` before running backup..." sleep $n n=$((RANDOM%180+8)) echo "Waiting $n seconds 2/2 on `date` before running backup..." sleep $n fi echo "Starting backup on `date`" touch /var/run/boa_live_sql_backup.pid [ ! -a ${_SAVELOCATION} ] && mkdir -p ${_SAVELOCATION}; [ !
-a ${_LOGDIR} ] && mkdir -p ${_LOGDIR}; for _DB in `mysql -e "show databases" -s | uniq | sort`; do if [ "${_DB}" != "Database" ] \ && [ "${_DB}" != "information_schema" ] \ && [ "${_DB}" != "performance_schema" ]; then if [ "${_DB}" != "mysql" ]; then if [ -e "/var/lib/mysql/${_DB}/watchdog.ibd" ]; then _IS_GB=$(du -s -h /var/lib/mysql/${_DB}/watchdog.ibd | grep "G" 2>&1) if [[ "${_IS_GB}" =~ "watchdog" ]]; then truncate_watchdog_tables &> /dev/null echo "Truncated giant watchdog for ${_DB}" fi fi fi fi done innobackupex --user=root --no-timestamp ${_SAVELOCATION} >${_LOGDIR}/XtraBackupA-${_DATE}.log 2>&1 _BACKUP_RESULT=$(tail --lines=3 ${_LOGDIR}/XtraBackupA-${_DATE}.log | tr -d "\n" 2>&1) if [[ "${_BACKUP_RESULT}" =~ "completed OK" ]]; then echo "XtraBackup 1/2 completed OK on `date`" else echo "XtraBackup 1/2 FAILED on `date`" fi sleep 5 innobackupex --apply-log ${_SAVELOCATION} >${_LOGDIR}/XtraBackupB-${_DATE}.log 2>&1 _BACKUP_RESULT=$(tail --lines=3 ${_LOGDIR}/XtraBackupB-${_DATE}.log | tr -d "\n" 2>&1) if [[ "${_BACKUP_RESULT}" =~ "completed OK" ]]; then echo "XtraBackup 2/2 completed OK on `date`" else echo "XtraBackup 2/2 FAILED on `date`" fi sleep 5 find ${_BACKUPDIR} -mtime +1 -type d -exec rm -rf {} \; find ${_BACKUPDIR} -mtime +1 -type f -exec rm -rf {} \; find ${_LOGDIR} -mtime +1 -type f -exec rm -rf {} \; echo "Backups older than 2 days deleted" cd ${_BACKUPDIR}/ tar cvfj ${_CHECK_HOST}-${_DATE}.tar.bz2 ${_CHECK_HOST}-${_DATE} &> /dev/null _BACKUP_LATEST=$(tar -czf - ${_CHECK_HOST}-${_DATE}.tar.bz2 | wc -c 2>&1) echo "XtraBackup compressed size: ${_BACKUP_LATEST}" _BACKUP_ALL=$(du -s -h ${_BACKUPDIR} 2>&1) echo "XtraBackup total size: ${_BACKUP_ALL}" du -s -h ${_BACKUPDIR}/* rm -f ${_BACKUPDIR}/latest ln -s ${_BACKUPDIR}/${_CHECK_HOST}-${_DATE} ${_BACKUPDIR}/latest rm -f ${_BACKUPDIR}/latest.tar.bz2 ln -s ${_BACKUPDIR}/${_CHECK_HOST}-${_DATE}.tar.bz2 ${_BACKUPDIR}/latest.tar.bz2 chmod 700 ${_BACKUPDIR} chmod 700 /data/disk/arch echo "Permissions fixed" rm -rf ${_CHECK_HOST}-${_DATE} rm -f /var/run/boa_live_sql_backup.pid touch /var/xdrago/log/last-run-live-mysql-backup echo "ALL TASKS COMPLETED" exit 0 ###EOF2023###
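# A condensed sketch of the two-phase XtraBackup flow used above: phase one
# copies the live datadir with innobackupex, phase two applies the redo log so
# the copy becomes consistent, and each phase is verified by looking for
# "completed OK" near the end of its log, as the script does. The target and
# log paths are placeholders; the real script derives them from the hostname
# and a timestamp.
_TARGET="/data/disk/arch/hourly/example-backup"
_LOG="/var/xdrago/log/hourly/example-backup.log"
mkdir -p "${_TARGET}" "$(dirname ${_LOG})"
innobackupex --user=root --no-timestamp "${_TARGET}" > "${_LOG}" 2>&1
if tail --lines=3 "${_LOG}" | grep -q "completed OK"; then
  echo "XtraBackup 1/2 (copy) completed OK"
  innobackupex --apply-log "${_TARGET}" >> "${_LOG}" 2>&1
  if tail --lines=3 "${_LOG}" | grep -q "completed OK"; then
    echo "XtraBackup 2/2 (apply-log) completed OK"
  else
    echo "XtraBackup 2/2 (apply-log) FAILED"
  fi
else
  echo "XtraBackup 1/2 (copy) FAILED"
fi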
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/mysql_repair.sh
#!/bin/bash

HOME=/root
SHELL=/bin/bash
PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin
export PATH=${PATH}
export SHELL=${SHELL}
export HOME=${HOME}

check_root() {
  if [ `whoami` = "root" ]; then
    ionice -c2 -n7 -p $$
    renice 19 -p $$
    chmod a+w /dev/null
    if [ ! -e "/dev/fd" ]; then
      if [ -e "/proc/self/fd" ]; then
        rm -rf /dev/fd
        ln -s /proc/self/fd /dev/fd
      fi
    fi
  else
    echo "ERROR: This script should be run as the root user"
    exit 1
  fi
  _DF_TEST=$(df -kTh / -l \
    | grep '/' \
    | sed 's/\%//g' \
    | awk '{print $6}' 2> /dev/null)
  _DF_TEST=${_DF_TEST//[^0-9]/}
  if [ ! -z "${_DF_TEST}" ] && [ "${_DF_TEST}" -gt "90" ]; then
    echo "ERROR: Your disk space is almost full !!! ${_DF_TEST}/100"
    echo "ERROR: We can not proceed until it is below 90/100"
    exit 1
  fi
}
check_root

if [ -e "/root/.proxy.cnf" ]; then
  exit 0
fi

touch /var/run/boa_wait.pid
sleep 8
dir=/var/xdrago/log/mysql_optimize
mkdir -p $dir
_SQL_PSWD=$(cat /root/.my.pass.txt 2>&1)
_SQL_PSWD=$(echo -n ${_SQL_PSWD} | tr -d "\n" 2>&1)
/usr/bin/mysqlcheck -u root -Aa >> $dir/all.a.`date +%y%m%d-%H%M%S`
/usr/bin/mysqlcheck -u root -A --auto-repair >> $dir/all.r.`date +%y%m%d-%H%M%S`
/usr/bin/mysqlcheck -u root -Ao >> $dir/all.o.`date +%y%m%d-%H%M%S`
rm -f /var/run/boa_wait.pid
exit 0
###EOF2023###
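# A small sketch of the same mysqlcheck sequence wrapped in a helper that
# timestamps each log file the way the script above does. The log directory
# matches the one used above; only the helper function and the explicit 2>&1
# redirection are additions for illustration.
run_mysqlcheck() {
  local mode_flags="$1"
  local tag="$2"
  local logdir=/var/xdrago/log/mysql_optimize
  mkdir -p "${logdir}"
  /usr/bin/mysqlcheck -u root -A ${mode_flags} \
    >> "${logdir}/all.${tag}.$(date +%y%m%d-%H%M%S)" 2>&1
}
run_mysqlcheck "-a" "a"             # analyze all tables
run_mysqlcheck "--auto-repair" "r"  # check and auto-repair what can be repaired
run_mysqlcheck "-o" "o"             # optimize all tables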
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/purge_binlogs.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} check_root() { if [ `whoami` = "root" ]; then ionice -c2 -n7 -p $$ renice 19 -p $$ chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi _DF_TEST=$(df -kTh / -l \ | grep '/' \ | sed 's/\%//g' \ | awk '{print $6}' 2> /dev/null) _DF_TEST=${_DF_TEST//[^0-9]/} if [ ! -z "${_DF_TEST}" ] && [ "${_DF_TEST}" -gt "90" ]; then echo "ERROR: Your disk space is almost full !!! ${_DF_TEST}/100" echo "ERROR: We can not proceed until it is below 90/100" exit 1 fi } check_root if [ -e "/root/.proxy.cnf" ]; then exit 0 fi if [ -e "/root/.pause_tasks_maint.cnf" ]; then exit 0 fi count_cpu() { _CPU_INFO=$(grep -c processor /proc/cpuinfo 2>&1) _CPU_INFO=${_CPU_INFO//[^0-9]/} _NPROC_TEST=$(which nproc 2>&1) if [ -z "${_NPROC_TEST}" ]; then _CPU_NR="${_CPU_INFO}" else _CPU_NR=$(nproc 2>&1) fi _CPU_NR=${_CPU_NR//[^0-9]/} if [ ! -z "${_CPU_NR}" ] \ && [ ! -z "${_CPU_INFO}" ] \ && [ "${_CPU_NR}" -gt "${_CPU_INFO}" ] \ && [ "${_CPU_INFO}" -gt "0" ]; then _CPU_NR="${_CPU_INFO}" fi if [ -z "${_CPU_NR}" ] \ || [ "${_CPU_NR}" -lt "1" ]; then _CPU_NR=1 fi } load_control() { if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _CPU_SPIDER_RATIO=${_CPU_SPIDER_RATIO//[^0-9]/} fi if [ -z "${_CPU_SPIDER_RATIO}" ]; then _CPU_SPIDER_RATIO=6 fi _O_LOAD=$(awk '{print $1*100}' /proc/loadavg 2>&1) _O_LOAD=$(( _O_LOAD / _CPU_NR )) _O_LOAD_MAX=$(( 99 * _CPU_SPIDER_RATIO )) } action() { count_cpu load_control if [ "${_O_LOAD}" -lt "${_O_LOAD_MAX}" ]; then echo load is ${_O_LOAD} while maxload is ${_O_LOAD_MAX} /usr/bin/mysql mysql<<EOFMYSQL PURGE MASTER LOGS BEFORE DATE_SUB( NOW( ), INTERVAL 1 HOUR); EOFMYSQL touch /var/xdrago/log/purge_binlogs.done fi } if [ -e "/var/run/boa_wait.pid" ]; then touch /var/xdrago/log/wait-purge.pid exit 0 else action touch /var/xdrago/log/last-run-purge exit 0 fi ###EOF2023###
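# A minimal sketch of the load gate used above: read the 1-minute load average
# from /proc/loadavg, scale it by 100, divide by the CPU count, and only run
# the heavy task while the result stays under the threshold. The threshold of
# 99 * 6 mirrors the defaults in the script above; the heavy task itself is
# left as a placeholder comment.
_CPU_NR=$(nproc 2>/dev/null || grep -c processor /proc/cpuinfo)
_CPU_NR=${_CPU_NR//[^0-9]/}
[ -z "${_CPU_NR}" ] && _CPU_NR=1
_O_LOAD=$(awk '{print int($1 * 100)}' /proc/loadavg)
_O_LOAD=$(( _O_LOAD / _CPU_NR ))
_O_LOAD_MAX=$(( 99 * 6 ))
if [ "${_O_LOAD}" -lt "${_O_LOAD_MAX}" ]; then
  echo "load is ${_O_LOAD} while maxload is ${_O_LOAD_MAX}, safe to run"
  # the heavy task would go here, e.g. the PURGE MASTER LOGS statement above
else
  echo "load too high (${_O_LOAD} >= ${_O_LOAD_MAX}), skipping this run"
fi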
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/runner.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} ###-------------SYSTEM-----------------### check_root() { if [ `whoami` = "root" ]; then chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi _DF_TEST=$(df -kTh / -l \ | grep '/' \ | sed 's/\%//g' \ | awk '{print $6}' 2> /dev/null) _DF_TEST=${_DF_TEST//[^0-9]/} if [ ! -z "${_DF_TEST}" ] && [ "${_DF_TEST}" -gt "90" ]; then echo "ERROR: Your disk space is almost full !!! ${_DF_TEST}/100" echo "ERROR: We can not proceed until it is below 90/100" exit 1 fi } check_root if [ -e "/root/.proxy.cnf" ]; then exit 0 fi if [ -e "/root/.pause_tasks_maint.cnf" ]; then exit 0 fi count_cpu() { _CPU_INFO=$(grep -c processor /proc/cpuinfo 2>&1) _CPU_INFO=${_CPU_INFO//[^0-9]/} _NPROC_TEST=$(which nproc 2>&1) if [ -z "${_NPROC_TEST}" ]; then _CPU_NR="${_CPU_INFO}" else _CPU_NR=$(nproc 2>&1) fi _CPU_NR=${_CPU_NR//[^0-9]/} if [ ! -z "${_CPU_NR}" ] \ && [ ! -z "${_CPU_INFO}" ] \ && [ "${_CPU_NR}" -gt "${_CPU_INFO}" ] \ && [ "${_CPU_INFO}" -gt "0" ]; then _CPU_NR="${_CPU_INFO}" fi if [ -z "${_CPU_NR}" ] || [ "${_CPU_NR}" -lt "1" ]; then _CPU_NR=1 fi } load_control() { if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _CPU_SPIDER_RATIO=${_CPU_SPIDER_RATIO//[^0-9]/} fi if [ -z "${_CPU_SPIDER_RATIO}" ]; then _CPU_SPIDER_RATIO=6 fi _O_LOAD=$(awk '{print $1*100}' /proc/loadavg 2>&1) _O_LOAD=$(( _O_LOAD / _CPU_NR )) _O_LOAD_MAX=$(( 99 * _CPU_SPIDER_RATIO )) } action() { for Runner in `find /var/xdrago -maxdepth 1 -mindepth 1 -type f \ | grep run- \ | uniq \ | sort`; do count_cpu load_control if [ "${_O_LOAD}" -lt "${_O_LOAD_MAX}" ]; then echo load is ${_O_LOAD} while maxload is ${_O_LOAD_MAX} if [ ! -e "/var/run/boa_wait.pid" ] \ && [ ! -e "/var/run/manage_rvm_users.pid" ]; then echo running ${Runner} bash ${Runner} n=$((RANDOM%9+2)) echo waiting $n sec sleep $n else echo "Another BOA task is running, we have to wait..." fi else echo load is ${_O_LOAD} while maxload is ${_O_LOAD_MAX} fi done } ###-------------SYSTEM-----------------### if [ -e "/var/run/boa_wait.pid" ] \ || [ -e "/var/run/manage_rvm_users.pid" ] \ || [ -e "/var/run/boa_cron_wait.pid" ]; then touch /var/xdrago/log/wait-runner.pid echo "Another BOA task is running, we will try again later..." exit 0 elif [ `ps aux | grep -v "grep" \ | grep --count "n7 bash.*runner"` -gt "8" ]; then touch /var/xdrago/log/wait-runner.pid echo "Too many Aegir tasks running now, we will try again later..." exit 0 else if [ -e "/root/.wbhd.clstr.cnf" ] \ || [ -e "/root/.dbhd.clstr.cnf" ]; then echo "Aegir tasks ignored on this cluster node" exit 0 fi if [ -e "/root/.slow.cron.cnf" ]; then touch /var/run/boa_cron_wait.pid sleep 15 action sleep 15 rm -f /var/run/boa_cron_wait.pid elif [ -e "/root/.fast.cron.cnf" ]; then rm -f /var/run/boa_cron_wait.pid action sleep 5 action sleep 5 action sleep 5 action sleep 5 action sleep 5 action sleep 5 action sleep 5 action sleep 5 action sleep 5 action sleep 5 action else action fi exit 0 fi ###EOF2023###
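# A minimal sketch of the pid-file convention the runner relies on: refuse to
# start while any of the known BOA lock files exist, hold a lock for the
# duration of the work, and always remove it afterwards. The trap-based cleanup
# is an addition for illustration; the script above creates and removes its
# /var/run/boa_cron_wait.pid lock explicitly instead.
for lock in /var/run/boa_wait.pid /var/run/manage_rvm_users.pid /var/run/boa_cron_wait.pid; do
  if [ -e "${lock}" ]; then
    echo "Another BOA task is running (${lock}), we will try again later..."
    exit 0
  fi
done
touch /var/run/boa_cron_wait.pid
trap 'rm -f /var/run/boa_cron_wait.pid' EXIT
echo "running queued tasks..."
# bash /var/xdrago/run-example   # placeholder for one of the run-* tasks above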
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/second.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} pthVhstd="/var/aegir/config/server_master/nginx/vhost.d" if [ -e "/root/.proxy.cnf" ]; then exit 0 fi hold() { service nginx stop &> /dev/null killall -9 nginx &> /dev/null sleep 1 killall -9 nginx &> /dev/null _PHP_V="82 81 80 74 73 72 71 70 56 55 54 53" for e in ${_PHP_V}; do if [ -e "/etc/init.d/php${e}-fpm" ]; then service php${e}-fpm force-quit &> /dev/null fi done killall -9 php-fpm php-cgi &> /dev/null echo "$(date 2>&1)" >> /var/xdrago/log/second.hold.log ### echo "load is ${_O_LOAD}:${_F_LOAD} while ### maxload is ${_O_LOAD_MAX}:${_F_LOAD_MAX}" } terminate() { if [ ! -e "/var/run/boa_run.pid" ]; then killall -9 php drush.php wget curl &> /dev/null echo "$(date 2>&1)" >> /var/xdrago/log/second.terminate.log fi } nginx_high_load_on() { mv -f /data/conf/nginx_high_load_off.conf /data/conf/nginx_high_load.conf service nginx reload &> /dev/null } nginx_high_load_off() { mv -f /data/conf/nginx_high_load.conf /data/conf/nginx_high_load_off.conf service nginx reload &> /dev/null } check_vhost_health() { if [ -e "$1"* ]; then echo vhost $1 exists vhostPlaceTest=$(grep "### access" $1* 2>&1) vhostAllowTest=$(grep "allow .*;" $1* 2>&1) vhostDenyTest=$(grep "deny .*;" $1* 2>&1) if [[ "${vhostPlaceTest}" =~ "access" ]] \ && [[ "${vhostDenyTest}" =~ "deny" ]]; then if [[ "${vhostAllowTest}" =~ "allow" ]]; then vhostHealthTest=YES else vhostHealthTest=YES fi else vhostHealthTest=NO sed -i "s/### access .*//g; \ s/allow .*;//g; \ s/deny .*;//g; \ s/ *$//g; /^$/d" $1* &> /dev/null wait sed -i "s/limit_conn .*/limit_conn limreq 555;\n \ ### access none\n deny all;/g" $1* &> /dev/null wait fi else echo vhost $1 does not exist fi } ip_auth_access_update() { touch /var/run/.auth.IP.list.pid if [ -e "/var/backups/.auth.IP.list.tmp" ]; then if [ -e "${pthVhstd}/adminer."* ]; then sed -i "s/### access .*//g; \ s/allow .*;//g; \ s/deny .*;//g; \ s/ *$//g; /^$/d" \ ${pthVhstd}/adminer.* &> /dev/null wait sed -i "s/limit_conn .*/limit_conn limreq 555;\n \ ### access update/g" \ ${pthVhstd}/adminer.* &> /dev/null wait fi if [ -e "${pthVhstd}/chive."* ]; then sed -i "s/### access .*//g; \ s/allow .*;//g; \ s/deny .*;//g; \ s/ *$//g; /^$/d" \ ${pthVhstd}/chive.* &> /dev/null wait sed -i "s/limit_conn .*/limit_conn limreq 555;\n \ ### access update/g" \ ${pthVhstd}/chive.* &> /dev/null wait fi if [ -e "${pthVhstd}/cgp."* ]; then sed -i "s/### access .*//g; \ s/allow .*;//g; \ s/deny .*;//g; \ s/ *$//g; /^$/d" \ ${pthVhstd}/cgp.* &> /dev/null wait sed -i "s/limit_conn .*/limit_conn limreq 555;\n \ ### access update/g" \ ${pthVhstd}/cgp.* &> /dev/null wait fi if [ -e "${pthVhstd}/sqlbuddy."* ]; then sed -i "s/### access .*//g; \ s/allow .*;//g; \ s/deny .*;//g; \ s/ *$//g; /^$/d" \ ${pthVhstd}/sqlbuddy.* &> /dev/null wait sed -i "s/limit_conn .*/limit_conn limreq 555;\n \ ### access update/g" \ ${pthVhstd}/sqlbuddy.* &> /dev/null wait fi sleep 1 sed -i '/ ### access .*/ {r /var/backups/.auth.IP.list.tmp d;};' ${pthVhstd}/adminer.* &> /dev/null wait sed -i '/ ### access .*/ {r /var/backups/.auth.IP.list.tmp d;};' ${pthVhstd}/chive.* &> /dev/null wait sed -i '/ ### access .*/ {r /var/backups/.auth.IP.list.tmp d;};' ${pthVhstd}/cgp.* &> /dev/null wait sed -i '/ ### access .*/ {r /var/backups/.auth.IP.list.tmp d;};' ${pthVhstd}/sqlbuddy.* &> /dev/null wait mv -f ${pthVhstd}/sed* /var/backups/ check_vhost_health "${pthVhstd}/adminer." 
check_vhost_health "${pthVhstd}/chive." check_vhost_health "${pthVhstd}/cgp." check_vhost_health "${pthVhstd}/sqlbuddy." ngxTest=$(service nginx configtest 2>&1) if [[ "${ngxTest}" =~ "successful" ]]; then service nginx reload &> /dev/null else service nginx reload &> /var/backups/.auth.IP.list.ops sed -i "s/allow .*;//g; s/ *$//g; /^$/d" \ ${pthVhstd}/adminer.* &> /dev/null wait sed -i "s/allow .*;//g; s/ *$//g; /^$/d" \ ${pthVhstd}/chive.* &> /dev/null wait sed -i "s/allow .*;//g; s/ *$//g; /^$/d" \ ${pthVhstd}/cgp.* &> /dev/null wait sed -i "s/allow .*;//g; s/ *$//g; /^$/d" \ ${pthVhstd}/sqlbuddy.* &> /dev/null wait check_vhost_health "${pthVhstd}/adminer." check_vhost_health "${pthVhstd}/chive." check_vhost_health "${pthVhstd}/cgp." check_vhost_health "${pthVhstd}/sqlbuddy." service nginx reload &> /dev/null fi fi rm -f /var/backups/.auth.IP.list for _IP in `who --ips \ | sed 's/.*tty.*//g; s/.*root.*hvc.*//g; s/^[0-9]+$//g' \ | cut -d: -f2 \ | cut -d' ' -f2 \ | sed 's/.*\/.*:S.*//g; s/:S.*//g; s/(//g' \ | tr -d "\s" \ | sort \ | uniq`;do _IP=${_IP//[^0-9.]/}; if [[ "${_IP}" =~ "." ]]; then echo " allow ${_IP};" \ >> /var/backups/.auth.IP.list;fi;done if [ -e "/root/.ip.protected.vhost.whitelist.cnf" ]; then for _IP in `cat /root/.ip.protected.vhost.whitelist.cnf \ | sort \ | uniq`;do _IP=${_IP//[^0-9.]/}; if [[ "${_IP}" =~ "." ]]; then echo " allow ${_IP};" \ >> /var/backups/.auth.IP.list;fi;done fi sed -i "s/\.;/;/g; s/allow ;//g; s/ *$//g; /^$/d" \ /var/backups/.auth.IP.list &> /dev/null wait if [ -e "/var/backups/.auth.IP.list" ]; then allowTestList=$(grep allow /var/backups/.auth.IP.list 2>&1) fi if [[ "${allowTestList}" =~ "allow" ]]; then echo " deny all;" >> /var/backups/.auth.IP.list echo " ### access live" >> /var/backups/.auth.IP.list else echo " deny all;" > /var/backups/.auth.IP.list echo " ### access none" >> /var/backups/.auth.IP.list fi sleep 1 rm -f /var/run/.auth.IP.list.pid } manage_ip_auth_access() { for _IP in `who --ips \ | sed 's/.*tty.*//g; s/.*root.*hvc.*//g; s/^[0-9]+$//g' \ | cut -d: -f2 \ | cut -d' ' -f2 \ | sed 's/.*\/.*:S.*//g; s/:S.*//g; s/(//g' \ | tr -d "\s" \ | sort \ | uniq`;do _IP=${_IP//[^0-9.]/}; if [[ "${_IP}" =~ "." ]]; then echo " allow ${_IP};" \ >> /var/backups/.auth.IP.list.tmp;fi;done if [ -e "/root/.ip.protected.vhost.whitelist.cnf" ]; then for _IP in `cat /root/.ip.protected.vhost.whitelist.cnf \ | sort \ | uniq`;do _IP=${_IP//[^0-9.]/}; if [[ "${_IP}" =~ "." ]]; then echo " allow ${_IP};" \ >> /var/backups/.auth.IP.list.tmp;fi;done fi sed -i "s/\.;/;/g; s/allow ;//g; s/ *$//g; /^$/d" \ /var/backups/.auth.IP.list.tmp &> /dev/null wait if [ -e "/var/backups/.auth.IP.list.tmp" ]; then allowTestTmp=$(grep allow /var/backups/.auth.IP.list.tmp 2>&1) fi if [[ "${allowTestTmp}" =~ "allow" ]]; then echo " deny all;" >> /var/backups/.auth.IP.list.tmp echo " ### access live" >> /var/backups/.auth.IP.list.tmp else echo " deny all;" > /var/backups/.auth.IP.list.tmp echo " ### access none" >> /var/backups/.auth.IP.list.tmp fi if [ ! -e "/var/run/.auth.IP.list.pid" ]; then if [ ! -e "/var/backups/.auth.IP.list" ]; then ip_auth_access_update else if [ -e "/var/backups/.auth.IP.list.tmp" ]; then diffTestIf=$(diff -w -B /var/backups/.auth.IP.list.tmp \ /var/backups/.auth.IP.list 2>&1) if [ ! -z "${diffTestIf}" ]; then ip_auth_access_update fi fi fi fi if [ -L "/var/backups/.vhost.d.mstr" ]; then if [ ! 
-d "/var/backups/.vhost.d.wbhd" ]; then mkdir -p /var/backups/.vhost.d.wbhd chmod 700 /var/backups/.vhost.d.wbhd cp -af /var/backups/.vhost.d.mstr/* /var/backups/.vhost.d.wbhd/ fi diffClstrTest=$(diff -w -B /var/backups/.vhost.d.wbhd \ /var/backups/.vhost.d.mstr 2>&1) if [ ! -z "${diffClstrTest}" ]; then service nginx reload &> /dev/null rm -rf /var/backups/.vhost.d.wbhd mkdir -p /var/backups/.vhost.d.wbhd chmod 700 /var/backups/.vhost.d.wbhd cp -af /var/backups/.vhost.d.mstr/* /var/backups/.vhost.d.wbhd/ fi fi if [[ "${allowTestTmp}" =~ "allow" ]]; then vhostStatusAdminer=TRUE vhostStatusChive=TRUE vhostStatusCgp=TRUE vhostStatusBuddy=TRUE if [ -e "${pthVhstd}/adminer."* ]; then vhostStatusAdminer=FALSE vhostTestAdminer=$(grep allow ${pthVhstd}/adminer.* 2>&1) if [[ "${vhostTestAdminer}" =~ "allow" ]]; then vhostStatusAdminer=TRUE fi fi if [ -e "${pthVhstd}/chive."* ]; then vhostStatusChive=FALSE vhostTestChive=$(grep allow ${pthVhstd}/chive.* 2>&1) if [[ "${vhostTestChive}" =~ "allow" ]]; then vhostStatusChive=TRUE fi fi if [ -e "${pthVhstd}/cgp."* ]; then vhostStatusCgp=FALSE vhostTestCgp=$(grep allow ${pthVhstd}/cgp.* 2>&1) if [[ "${vhostTestCgp}" =~ "allow" ]]; then vhostStatusCgp=TRUE fi fi if [ -e "${pthVhstd}/sqlbuddy."* ]; then vhostStatusBuddy=FALSE vhostTestBuddy=$(grep allow ${pthVhstd}/sqlbuddy.* 2>&1) if [[ "${vhostTestBuddy}" =~ "allow" ]]; then vhostStatusBuddy=TRUE fi fi if [ "${vhostStatusAdminer}" = "FALSE" ] \ || [ "${vhostStatusChive}" = "FALSE" ] \ || [ "${vhostStatusCgp}" = "FALSE" ] \ || [ "${vhostStatusBuddy}" = "FALSE" ]; then ip_auth_access_update fi fi rm -f /var/backups/.auth.IP.list.tmp } proc_control() { if [ "${_O_LOAD}" -ge "${_O_LOAD_MAX}" ]; then hold elif [ "${_F_LOAD}" -ge "${_F_LOAD_MAX}" ]; then hold else echo "load is ${_O_LOAD}:${_F_LOAD} while \ maxload is ${_O_LOAD_MAX}:${_F_LOAD_MAX}" echo ...OK now running proc_num_ctrl... 
renice ${_B_NICE} -p $$ &> /dev/null perl /var/xdrago/proc_num_ctrl.cgi touch /var/xdrago/log/proc_num_ctrl.done.pid echo CTL done fi } load_control() { _O_LOAD=$(awk '{print $1*100}' /proc/loadavg 2>&1) echo _O_LOAD is ${_O_LOAD} _O_LOAD=$(( _O_LOAD / _CPU_NR )) echo _O_LOAD per CPU is ${_O_LOAD} _F_LOAD=$(awk '{print $2*100}' /proc/loadavg 2>&1) echo _F_LOAD is ${_F_LOAD} _F_LOAD=$(( _F_LOAD / _CPU_NR )) echo _F_LOAD per CPU is ${_F_LOAD} _O_LOAD_SPR=$(( 100 * _CPU_SPIDER_RATIO )) echo _O_LOAD_SPR is ${_O_LOAD_SPR} _F_LOAD_SPR=$(( _O_LOAD_SPR / 9 )) _F_LOAD_SPR=$(( _F_LOAD_SPR * 7 )) echo _F_LOAD_SPR is ${_F_LOAD_SPR} _O_LOAD_MAX=$(( 100 * _CPU_MAX_RATIO )) echo _O_LOAD_MAX is ${_O_LOAD_MAX} _F_LOAD_MAX=$(( _O_LOAD_MAX / 9 )) _F_LOAD_MAX=$(( _F_LOAD_MAX * 7 )) echo _F_LOAD_MAX is ${_F_LOAD_MAX} _O_LOAD_CRT=$(( _CPU_CRIT_RATIO * 100 )) echo _O_LOAD_CRT is ${_O_LOAD_CRT} _F_LOAD_CRT=$(( _O_LOAD_CRT / 9 )) _F_LOAD_CRT=$(( _F_LOAD_CRT * 7 )) echo _F_LOAD_CRT is ${_F_LOAD_CRT} if [ "${_O_LOAD}" -ge "${_O_LOAD_SPR}" ] \ && [ "${_O_LOAD}" -lt "${_O_LOAD_MAX}" ] \ && [ -e "/data/conf/nginx_high_load_off.conf" ]; then nginx_high_load_on elif [ "${_F_LOAD}" -ge "${_F_LOAD_SPR}" ] \ && [ "${_F_LOAD}" -lt "${_F_LOAD_MAX}" ] \ && [ -e "/data/conf/nginx_high_load_off.conf" ]; then nginx_high_load_on elif [ "${_O_LOAD}" -lt "${_O_LOAD_SPR}" ] \ && [ "${_F_LOAD}" -lt "${_F_LOAD_SPR}" ] \ && [ -e "/data/conf/nginx_high_load.conf" ]; then nginx_high_load_off fi if [ "${_O_LOAD}" -ge "${_O_LOAD_CRT}" ]; then terminate elif [ "${_F_LOAD}" -ge "${_F_LOAD_CRT}" ]; then terminate fi proc_control } check_fastcgi_temp() { _FASTCGI_SIZE_TEST=$(du -s -h /usr/fastcgi_temp/*/*/* | grep G 2> /dev/null) if [[ "${_FASTCGI_SIZE_TEST}" =~ "G" ]]; then echo "fastcgi_temp too big" echo "$(date 2>&1) fastcgi_temp too big, cleanup forced START" >> \ /var/xdrago/log/giant.fastcgi.incident.log echo "$(date 2>&1) ${_FASTCGI_SIZE_TEST}" >> \ /var/xdrago/log/giant.fastcgi.incident.log rm -f /usr/fastcgi_temp/*/*/* hold echo "$(date 2>&1) fastcgi_temp too big, cleanup forced END" >> \ /var/xdrago/log/giant.fastcgi.incident.log fi } count_cpu() { _CPU_INFO=$(grep -c processor /proc/cpuinfo 2>&1) _CPU_INFO=${_CPU_INFO//[^0-9]/} _NPROC_TEST=$(which nproc 2>&1) if [ -z "${_NPROC_TEST}" ]; then _CPU_NR="${_CPU_INFO}" else _CPU_NR=$(nproc 2>&1) fi _CPU_NR=${_CPU_NR//[^0-9]/} if [ ! -z "${_CPU_NR}" ] \ && [ ! -z "${_CPU_INFO}" ] \ && [ "${_CPU_NR}" -gt "${_CPU_INFO}" ] \ && [ "${_CPU_INFO}" -gt "0" ]; then _CPU_NR="${_CPU_INFO}" fi if [ -z "${_CPU_NR}" ] || [ "${_CPU_NR}" -lt "1" ]; then _CPU_NR=1 fi } if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _CPU_SPIDER_RATIO=${_CPU_SPIDER_RATIO//[^0-9]/} _CPU_MAX_RATIO=${_CPU_MAX_RATIO//[^0-9]/} _CPU_CRIT_RATIO=${_CPU_CRIT_RATIO//[^0-9]/} _B_NICE=${_B_NICE//[^0-9]/} fi if [ -z "${_CPU_SPIDER_RATIO}" ]; then _CPU_SPIDER_RATIO=3 fi if [ -z "${_CPU_MAX_RATIO}" ]; then _CPU_MAX_RATIO=6 fi if [ -z "${_CPU_CRIT_RATIO}" ]; then _CPU_CRIT_RATIO=9 fi if [ -z "${_B_NICE}" ]; then _B_NICE=10 fi if [ ! 
-e "/var/tmp/fpm" ]; then mkdir -p /var/tmp/fpm chmod 777 /var/tmp/fpm fi check_fastcgi_temp count_cpu load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control sleep 3 load_control manage_ip_auth_access echo Done ! exit 0 ###EOF2023###
0
ALLM/M3/boa/aegir/tools
ALLM/M3/boa/aegir/tools/system/usage.sh
#!/bin/bash HOME=/root SHELL=/bin/bash PATH=/usr/local/bin:/usr/local/sbin:/opt/local/bin:/usr/bin:/usr/sbin:/bin:/sbin export PATH=${PATH} export SHELL=${SHELL} export HOME=${HOME} check_root() { if [ `whoami` = "root" ]; then ionice -c2 -n7 -p $$ renice 19 -p $$ chmod a+w /dev/null if [ ! -e "/dev/fd" ]; then if [ -e "/proc/self/fd" ]; then rm -rf /dev/fd ln -s /proc/self/fd /dev/fd fi fi else echo "ERROR: This script should be ran as a root user" exit 1 fi _DF_TEST=$(df -kTh / -l \ | grep '/' \ | sed 's/\%//g' \ | awk '{print $6}' 2> /dev/null) _DF_TEST=${_DF_TEST//[^0-9]/} if [ ! -z "${_DF_TEST}" ] && [ "${_DF_TEST}" -gt "90" ]; then echo "ERROR: Your disk space is almost full !!! ${_DF_TEST}/100" echo "ERROR: We can not proceed until it is below 90/100" exit 1 fi } check_root if [ -e "/root/.proxy.cnf" ]; then exit 0 fi if [ -e "/root/.pause_heavy_tasks_maint.cnf" ]; then exit 0 fi ###-------------SYSTEM-----------------### fix_clear_cache() { if [ -e "${Plr}/profiles/hostmaster" ]; then su -s /bin/bash - ${_THIS_U} -c "drush8 @hostmaster cache-clear all" &> /dev/null fi } check_account_exceptions() { _DEV_EXC=NO chckStringA="omega8.cc" chckStringB="omega8cc" chckStringC="mixomax" chckStringE="emaylx" case ${_CLIENT_EMAIL} in *"$chckStringA"*) _DEV_EXC=YES ;; *"$chckStringB"*) _DEV_EXC=YES ;; *"$chckStringC"*) _DEV_EXC=YES ;; *"$chckStringE"*) _DEV_EXC=YES ;; *) ;; esac } read_account_data() { _CLIENT_CORES= _EXTRA_ENGINE= _ENGINE_NR= _CLIENT_EMAIL= _CLIENT_OPTION= _DSK_CLU_LIMIT=1 if [ -e "/data/disk/${_THIS_U}/log/email.txt" ]; then _CLIENT_EMAIL=$(cat /data/disk/${_THIS_U}/log/email.txt 2>&1) _CLIENT_EMAIL=$(echo -n ${_CLIENT_EMAIL} | tr -d "\n" 2>&1) check_account_exceptions fi if [ -e "/root/.debug.email.txt" ]; then _CLIENT_EMAIL="omega8cc@gmail.com" fi if [ -e "/data/disk/${_THIS_U}/log/cores.txt" ]; then _CLIENT_CORES=$(cat /data/disk/${_THIS_U}/log/cores.txt 2>&1) _CLIENT_CORES=$(echo -n ${_CLIENT_CORES} | tr -d "\n" 2>&1) fi if [ -e "/data/disk/${_THIS_U}/log/diskspace.txt" ]; then _DSK_CLU_LIMIT=$(cat /data/disk/${_THIS_U}/log/diskspace.txt 2>&1) _DSK_CLU_LIMIT=$(echo -n ${_DSK_CLU_LIMIT} | tr -d "\n" 2>&1) fi if [ "${_CLIENT_CORES}" -gt "1" ]; then _ENGINE_NR="Engines" else _ENGINE_NR="Engine" fi if [ -e "/data/disk/${_THIS_U}/log/option.txt" ]; then _CLIENT_OPTION=$(cat /data/disk/${_THIS_U}/log/option.txt 2>&1) _CLIENT_OPTION=$(echo -n ${_CLIENT_OPTION} | tr -d "\n" 2>&1) fi if [ -e "/data/disk/${_THIS_U}/log/extra.txt" ]; then mv -f /data/disk/${_THIS_U}/log/extra.txt /data/disk/${_THIS_U}/log/extra_edge.txt fi if [ -e "/data/disk/${_THIS_U}/log/extra_edge.txt" ]; then _EXTRA_ENGINE=$(cat /data/disk/${_THIS_U}/log/extra_edge.txt 2>&1) _EXTRA_ENGINE=$(echo -n ${_EXTRA_ENGINE} | tr -d "\n" 2>&1) _ENGINE_NR="${_ENGINE_NR} + ${_EXTRA_ENGINE} x EDGE" fi if [ -e "/data/disk/${_THIS_U}/log/extra_power.txt" ]; then _EXTRA_ENGINE=$(cat /data/disk/${_THIS_U}/log/extra_power.txt 2>&1) _EXTRA_ENGINE=$(echo -n ${_EXTRA_ENGINE} | tr -d "\n" 2>&1) _ENGINE_NR="${_ENGINE_NR} + ${_EXTRA_ENGINE} x POWER" fi if [ -e "/data/disk/${_THIS_U}/static/control/cli.info" ]; then _CLIENT_CLI=$(cat /data/disk/${_THIS_U}/static/control/cli.info 2>&1) _CLIENT_CLI=$(echo -n ${_CLIENT_CLI} | tr -d "\n" 2>&1) fi if [ -e "/data/disk/${_THIS_U}/static/control/fpm.info" ]; then _CLIENT_FPM=$(cat /data/disk/${_THIS_U}/static/control/fpm.info 2>&1) _CLIENT_FPM=$(echo -n ${_CLIENT_FPM} | tr -d "\n" 2>&1) fi } send_notice_php() { _MY_EMAIL="support@omega8.cc" _BCC_EMAIL="omega8cc@gmail.com" 
  _CLIENT_EMAIL=${_CLIENT_EMAIL//\\\@/\@}
  _MAILX_TEST=$(mail -V 2>&1)
  if [[ "${_MAILX_TEST}" =~ "GNU Mailutils" ]]; then
    cat <<EOF | mail -e -a "From: ${_MY_EMAIL}" -a "Bcc: ${_BCC_EMAIL}" \
      -s "URGENT: Please switch your Aegir instance to PHP 7.4 [${_THIS_U}]" ${_CLIENT_EMAIL}
Hello,

Our monitoring detected that you are still using deprecated and no longer supported PHP version: $1

We have provided a few years of extended support for this PHP version, but now we can't extend it any further, because your system has to be upgraded to the newest Debian version, which doesn't support many deprecated PHP versions.

The upgrade will happen in the first week of May, 2023, and there are no exceptions possible to avoid it.

This means that all Aegir instances still running PHP $1 will stop working if not switched to one of currently supported versions: 8.1, 8.0, 7.4

To switch PHP-FPM version on command line, please type:

echo 7.4 > ~/static/control/fpm.info

You can find more details at:

https://learn.omega8.cc/node/330

We are working hard to provide secure and fast hosting for your Drupal sites, and we appreciate your efforts to meet the requirements, which are an integral part of the quality you can expect from Omega8.cc

--
This email has been sent by your Aegir system monitor
EOF
  elif [[ "${_MAILX_TEST}" =~ "invalid" ]]; then
    cat <<EOF | mail -a "From: ${_MY_EMAIL}" -e -b ${_BCC_EMAIL} \
      -s "URGENT: Please switch your Aegir instance to PHP 7.4 [${_THIS_U}]" ${_CLIENT_EMAIL}
Hello,

Our monitoring detected that you are still using deprecated and no longer supported PHP version: $1

We have provided over a year of extended support for this PHP version, but now we can't extend it any further, because your system has to be upgraded to the newest Debian version, which doesn't support deprecated PHP versions.

The upgrade will happen in the first week of May, 2023, and there are no exceptions possible to avoid it.

This means that all Aegir instances still running PHP $1 will stop working if not switched to one of currently supported versions: 8.1, 8.0, 7.4

To switch PHP-FPM version on command line, please type:

echo 7.4 > ~/static/control/fpm.info

You can find more details at:

https://learn.omega8.cc/node/330

We are working hard to provide secure and fast hosting for your Drupal sites, and we appreciate your efforts to meet the requirements, which are an integral part of the quality you can expect from Omega8.cc

--
This email has been sent by your Aegir system monitor
EOF
  else
    cat <<EOF | mail -r ${_MY_EMAIL} -e -b ${_BCC_EMAIL} \
      -s "URGENT: Please switch your Aegir instance to PHP 7.4 [${_THIS_U}]" ${_CLIENT_EMAIL}
Hello,

Our monitoring detected that you are still using deprecated and no longer supported PHP version: $1

We have provided over a year of extended support for this PHP version, but now we can't extend it any further, because your system has to be upgraded to the newest Debian version, which doesn't support deprecated PHP versions.

The upgrade will happen in the first week of May, 2023, and there are no exceptions possible to avoid it.
This means that all Aegir instances still running PHP $1 will stop working if not switched to one of currently supported versions: 8.1, 8.0, 7.4

To switch PHP-FPM version on command line, please type:

echo 7.4 > ~/static/control/fpm.info

You can find more details at:

https://learn.omega8.cc/node/330

We are working hard to provide secure and fast hosting for your Drupal sites, and we appreciate your efforts to meet the requirements, which are an integral part of the quality you can expect from Omega8.cc

--
This email has been sent by your Aegir system monitor
EOF
  fi
  echo "INFO: PHP notice sent to ${_CLIENT_EMAIL} [${_THIS_U}]: OK"
}

detect_deprecated_php() {
  _PHP_FPM_VERSION=
  if [ -e "${User}/static/control/fpm.info" ] \
    && [ ! -e "${User}/log/proxied.pid" ] \
    && [ ! -e "${User}/log/CANCELLED" ]; then
    _PHP_FPM_VERSION=$(cat ${User}/static/control/fpm.info 2>&1)
    _PHP_FPM_VERSION=$(echo -n ${_PHP_FPM_VERSION} | tr -d "\n" 2>&1)
    if [ "${_PHP_FPM_VERSION}" = "5.5" ] \
      || [ "${_PHP_FPM_VERSION}" = "5.4" ] \
      || [ "${_PHP_FPM_VERSION}" = "5.3" ] \
      || [ "${_PHP_FPM_VERSION}" = "5.2" ]; then
      echo Deprecated PHP-FPM ${_PHP_FPM_VERSION} detected in ${_THIS_U}
      read_account_data
      if [ "${_THIS_MODE}" = "verbose" ]; then
        send_notice_php ${_PHP_FPM_VERSION}
      fi
    fi
  fi
}

send_notice_core() {
  _MY_EMAIL="support@omega8.cc"
  _BCC_EMAIL="omega8cc@gmail.com"
  _CLIENT_EMAIL=${_CLIENT_EMAIL//\\\@/\@}
  _MAILX_TEST=$(mail -V 2>&1)
  if [[ "${_MAILX_TEST}" =~ "GNU Mailutils" ]]; then
    cat <<EOF | mail -e -a "From: ${_MY_EMAIL}" -a "Bcc: ${_BCC_EMAIL}" \
      -s "URGENT: Please migrate ${Dom} site to Pressflow (LTS)" ${_CLIENT_EMAIL}
Hello,

Our system detected that you are using vanilla Drupal core for site ${Dom}.

The platform root directory for this site is: ${Plr}

Using non-Pressflow 5.x or 6.x core is not allowed on our servers, unless it is a temporary result of your site import, but every imported site should be migrated to a Pressflow based platform as soon as possible.

If the site is not migrated to a Pressflow based platform in seven (7) days, it may cause service interruption.

We are working hard to deliver top performance hosting for your Drupal sites and we appreciate your efforts to meet the requirements, which are an integral part of the quality you can expect from Omega8.cc.

--
This email has been sent by your Aegir platform core monitor.
EOF
  elif [[ "${_MAILX_TEST}" =~ "invalid" ]]; then
    cat <<EOF | mail -a "From: ${_MY_EMAIL}" -e -b ${_BCC_EMAIL} \
      -s "URGENT: Please migrate ${Dom} site to Pressflow (LTS)" ${_CLIENT_EMAIL}
Hello,

Our system detected that you are using vanilla Drupal core for site ${Dom}.

The platform root directory for this site is: ${Plr}

Using non-Pressflow 5.x or 6.x core is not allowed on our servers, unless it is a temporary result of your site import, but every imported site should be migrated to a Pressflow based platform as soon as possible.

If the site is not migrated to a Pressflow based platform in seven (7) days, it may cause service interruption.

We are working hard to deliver top performance hosting for your Drupal sites and we appreciate your efforts to meet the requirements, which are an integral part of the quality you can expect from Omega8.cc.

--
This email has been sent by your Aegir platform core monitor.
EOF
  else
    cat <<EOF | mail -r ${_MY_EMAIL} -e -b ${_BCC_EMAIL} \
      -s "URGENT: Please migrate ${Dom} site to Pressflow (LTS)" ${_CLIENT_EMAIL}
Hello,

Our system detected that you are using vanilla Drupal core for site ${Dom}.
The platform root directory for this site is: ${Plr}

Using non-Pressflow 5.x or 6.x core is not allowed on our servers, unless it is a temporary result of your site import, but every imported site should be migrated to a Pressflow based platform as soon as possible.

If the site is not migrated to a Pressflow based platform in seven (7) days, it may cause service interruption.

We are working hard to deliver top performance hosting for your Drupal sites and we appreciate your efforts to meet the requirements, which are an integral part of the quality you can expect from Omega8.cc.

--
This email has been sent by your Aegir platform core monitor.
EOF
  fi
  echo "INFO: Pressflow notice sent to ${_CLIENT_EMAIL} [${_THIS_U}]: OK"
}

detect_vanilla_core() {
  if [ ! -e "${Plr}/core" ]; then
    if [ -e "${Plr}/web.config" ]; then
      _DO_NOTHING=YES
    else
      if [ -e "${Plr}/modules/watchdog" ]; then
        if [ ! -e "/boot/grub/grub.cfg" ] \
          && [ ! -e "/boot/grub/menu.lst" ] \
          && [[ "${Plr}" =~ "static" ]] \
          && [ ! -e "${Plr}/modules/cookie_cache_bypass" ]; then
          if [[ "${_CHECK_HOST}" =~ ".host8." ]] \
            || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \
            || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \
            || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]]; then
            echo Vanilla Drupal 5.x Platform detected in ${Plr}
            read_account_data
            if [ "${_THIS_MODE}" = "verbose" ]; then
              send_notice_core
            fi
          fi
        fi
      else
        if [ ! -e "${Plr}/modules/path_alias_cache" ] \
          && [ -e "${Plr}/modules/user" ] \
          && [[ "${Plr}" =~ "static" ]]; then
          echo Vanilla Drupal 6.x Platform detected in ${Plr}
          if [ ! -e "/boot/grub/grub.cfg" ] \
            && [ ! -e "/boot/grub/menu.lst" ]; then
            if [[ "${_CHECK_HOST}" =~ ".host8." ]] \
              || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \
              || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \
              || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]]; then
              read_account_data
              if [ "${_THIS_MODE}" = "verbose" ]; then
                send_notice_core
              fi
            fi
          fi
        fi
      fi
    fi
  fi
}

count() {
  for Site in `find ${User}/config/server_master/nginx/vhost.d \
    -maxdepth 1 -mindepth 1 -type f | sort`; do
    #echo Counting Site $Site
    Dom=$(echo $Site | cut -d'/' -f9 | awk '{ print $1}' 2>&1)
    #echo "${_THIS_U},${Dom},vhost-exists"
    _DEV_URL=NO
    searchStringB=".dev."
    searchStringC=".devel."
    searchStringD=".temp."
    searchStringE=".tmp."
    searchStringF=".temporary."
    searchStringG=".test."
    searchStringH=".testing."
    searchStringI=".stage."
    searchStringJ=".staging."
    case ${Dom} in
      *"$searchStringB"*) _DEV_URL=YES ;;
      *"$searchStringC"*) _DEV_URL=YES ;;
      *"$searchStringD"*) _DEV_URL=YES ;;
      *"$searchStringE"*) _DEV_URL=YES ;;
      *"$searchStringF"*) _DEV_URL=YES ;;
      *"$searchStringG"*) _DEV_URL=YES ;;
      *"$searchStringH"*) _DEV_URL=YES ;;
      *"$searchStringI"*) _DEV_URL=YES ;;
      *"$searchStringJ"*) _DEV_URL=YES ;;
      *) ;;
    esac
    if [ -e "${User}/.drush/${Dom}.alias.drushrc.php" ]; then
      #echo "${_THIS_U},${Dom},drushrc-exists"
      Dir=$(cat ${User}/.drush/${Dom}.alias.drushrc.php \
        | grep "site_path'" \
        | cut -d: -f2 \
        | awk '{ print $3}' \
        | sed "s/[\,']//g" 2>&1)
      Plr=$(cat ${User}/.drush/${Dom}.alias.drushrc.php \
        | grep "root'" \
        | cut -d: -f2 \
        | awk '{ print $3}' \
        | sed "s/[\,']//g" 2>&1)
      detect_vanilla_core
      fix_clear_cache
      #echo Dir is ${Dir}
      if [ -e "${Dir}/drushrc.php" ] \
        && [ -e "${Dir}/files" ] \
        && [ -e "${Dir}/private" ] \
        && [ ! -e "${Plr}/profiles/hostmaster" ]; then
        if [ ! -e "${Dir}/modules" ]; then
          mkdir ${Dir}/modules
        fi
        #echo "${_THIS_U},${Dom},sitedir-exists"
        Dat=$(cat ${Dir}/drushrc.php \
          | grep "options\['db_name'\] = " \
          | cut -d: -f2 \
          | awk '{ print $3}' \
          | sed "s/[\,';]//g" 2>&1)
        #echo Dat is ${Dat}
        if [ ! -z "${Dat}" ] && [ -e "${Dir}" ]; then
          if [ -L "${Dir}/files" ] || [ -L "${Dir}/private" ]; then
            DirSize=$(du -L -s ${Dir} 2>&1)
          else
            DirSize=$(du -s ${Dir} 2>&1)
          fi
          DirSize=$(echo "${DirSize}" \
            | cut -d'/' -f1 \
            | awk '{ print $1}' \
            | sed "s/[\/\s+]//g" 2>&1)
          SumDir=$(( SumDir + DirSize ))
          echo "${_THIS_U},${Dom},DirSize:${DirSize}"
        fi
        if [ ! -z "${Dat}" ]; then
          if [ -e "/root/.du.sql" ]; then
            DatSize=$(grep "/var/lib/mysql/${Dat}$" /root/.du.sql 2>&1)
          elif [ -e "/root/.du.local.sql" ]; then
            DatSize=$(grep "/var/lib/mysql/${Dat}$" /root/.du.local.sql 2>&1)
          elif [ -e "/var/lib/mysql/${Dat}" ]; then
            DatSize=$(du -s /var/lib/mysql/${Dat} 2>&1)
          fi
          DatSize=$(echo "${DatSize}" \
            | cut -d'/' -f1 \
            | awk '{ print $1}' \
            | sed "s/[\/\s+]//g" 2>&1)
          if [ "${_DEV_URL}" = "YES" ]; then
            SkipDt=$(( SkipDt + DatSize ))
            echo "${_THIS_U},${Dom},DatSize:${DatSize}:${Dat},skip"
          else
            SumDat=$(( SumDat + DatSize ))
            echo "${_THIS_U},${Dom},DatSize:${DatSize}:${Dat}"
          fi
        else
          echo "Database ${Dat} for ${Dom} does not exist"
        fi
      fi
    fi
  done
}

send_notice_sql() {
  _MODE=$1
  if [ "${_MODE}" = "DEV" ]; then
    _SQL_LIM=${_SQL_DEV_LIMIT}
    _SQL_NOW=${SkipDtH}
  else
    _SQL_LIM=${_SQL_MIN_LIMIT}
    _SQL_NOW=${SumDatH}
  fi
  _MY_EMAIL="billing@omega8.cc"
  _BCC_EMAIL="omega8cc@gmail.com"
  _CLIENT_EMAIL=${_CLIENT_EMAIL//\\\@/\@}
  _MAILX_TEST=$(mail -V 2>&1)
  if [[ "${_MAILX_TEST}" =~ "GNU Mailutils" ]]; then
    cat <<EOF | mail -e -a "From: ${_MY_EMAIL}" -a "Bcc: ${_BCC_EMAIL}" \
      -s "NOTICE: Your ${_MODE} DB Usage on [${_THIS_U}] is too high: ${_SQL_NOW} MB" ${_CLIENT_EMAIL}
Hello,

You are using more resources than allocated in your subscription. You have currently ${_CLIENT_CORES} Aegir ${_CLIENT_OPTION} ${_ENGINE_NR}.

Your allowed databases space for ${_MODE} sites is ${_SQL_LIM} MB, but you are currently using ${_SQL_NOW} MB of databases space.

Please reduce your usage by deleting no longer used sites, or purchase enough Aegir Engines to cover your current usage.

You can purchase more Aegir Engines easily online:

https://omega8.cc/pricing

Note that we do not count(*) any site identified as temporary dev/test, by having in its main name a special keyword with two dots on both sides:

.dev. .devel. .temp. .tmp. .temporary. .test. .testing. .stage. .staging.

For example, a site with main name: abc.test.foo.com is by default excluded from your allocated resources limits (not counted for billing purposes), as long as the total databases space used by such sites is no greater than three times (3x) your limit for LIVE sites listed on our order pages.

However, if we discover that anyone is using this method to hide real usage via listed keywords in the main site name and adding live domain(s) as aliases, such account will be suspended without any warning.

--
This email has been sent by your Aegir resources usage daily monitor.
EOF
  elif [[ "${_MAILX_TEST}" =~ "invalid" ]]; then
    cat <<EOF | mail -a "From: ${_MY_EMAIL}" -e -b ${_BCC_EMAIL} \
      -s "NOTICE: Your ${_MODE} DB Usage on [${_THIS_U}] is too high: ${_SQL_NOW} MB" ${_CLIENT_EMAIL}
Hello,

You are using more resources than allocated in your subscription. You have currently ${_CLIENT_CORES} Aegir ${_CLIENT_OPTION} ${_ENGINE_NR}.

Your allowed databases space for ${_MODE} sites is ${_SQL_LIM} MB, but you are currently using ${_SQL_NOW} MB of databases space.

Please reduce your usage by deleting no longer used sites, or purchase enough Aegir Engines to cover your current usage.
You can purchase more Aegir Engines easily online:

https://omega8.cc/pricing

Note that we do not count(*) any site identified as temporary dev/test, by having in its main name a special keyword with two dots on both sides:

.dev. .devel. .temp. .tmp. .temporary. .test. .testing. .stage. .staging.

For example, a site with main name: abc.test.foo.com is by default excluded from your allocated resources limits (not counted for billing purposes), as long as the total databases space used by such sites is no greater than three times (3x) your limit for LIVE sites listed on our order pages.

However, if we discover that anyone is using this method to hide real usage via listed keywords in the main site name and adding live domain(s) as aliases, such account will be suspended without any warning.

--
This email has been sent by your Aegir resources usage daily monitor.
EOF
  else
    cat <<EOF | mail -r ${_MY_EMAIL} -e -b ${_BCC_EMAIL} \
      -s "NOTICE: Your ${_MODE} DB Usage on [${_THIS_U}] is too high: ${_SQL_NOW} MB" ${_CLIENT_EMAIL}
Hello,

You are using more resources than allocated in your subscription. You have currently ${_CLIENT_CORES} Aegir ${_CLIENT_OPTION} ${_ENGINE_NR}.

Your allowed databases space for ${_MODE} sites is ${_SQL_LIM} MB, but you are currently using ${_SQL_NOW} MB of databases space.

Please reduce your usage by deleting no longer used sites, or purchase enough Aegir Engines to cover your current usage.

You can purchase more Aegir Engines easily online:

https://omega8.cc/pricing

Note that we do not count(*) any site identified as temporary dev/test, by having in its main name a special keyword with two dots on both sides:

.dev. .devel. .temp. .tmp. .temporary. .test. .testing. .stage. .staging.

For example, a site with main name: abc.test.foo.com is by default excluded from your allocated resources limits (not counted for billing purposes), as long as the total databases space used by such sites is no greater than three times (3x) your limit for LIVE sites listed on our order pages.

However, if we discover that anyone is using this method to hide real usage via listed keywords in the main site name and adding live domain(s) as aliases, such account will be suspended without any warning.

--
This email has been sent by your Aegir resources usage daily monitor.
EOF
  fi
  echo "INFO: Notice sent to ${_CLIENT_EMAIL} [${_THIS_U}]: OK"
}

send_notice_disk() {
  _MY_EMAIL="billing@omega8.cc"
  _BCC_EMAIL="omega8cc@gmail.com"
  _CLIENT_EMAIL=${_CLIENT_EMAIL//\\\@/\@}
  _MAILX_TEST=$(mail -V 2>&1)
  if [[ "${_MAILX_TEST}" =~ "GNU Mailutils" ]]; then
    cat <<EOF | mail -e -a "From: ${_MY_EMAIL}" -a "Bcc: ${_BCC_EMAIL}" \
      -s "NOTICE: Your Disk Usage on [${_THIS_U}] is too high" ${_CLIENT_EMAIL}
Hello,

You are using more resources than allocated in your subscription. You have currently ${_CLIENT_CORES} Aegir ${_CLIENT_OPTION} ${_ENGINE_NR}.

Your allowed disk space is ${_DSK_MIN_LIMIT} MB. You are currently using ${HomSizH} MB of disk space.

Please reduce your usage by deleting old backups, files, and no longer used sites, or purchase enough Aegir Engines to cover your current usage.

You can purchase more Aegir Engines easily online:

https://omega8.cc/buy

Note that unlike with database space limits, for files related disk space we count all your sites, including also all dev/tmp sites, if they exist, even if they are marked as disabled in your Aegir control panel.

--
This email has been sent by your Aegir resources usage daily monitor.
EOF elif [[ "${_MAILX_TEST}" =~ "invalid" ]]; then cat <<EOF | mail -a "From: ${_MY_EMAIL}" -e -b ${_BCC_EMAIL} \ -s "NOTICE: Your Disk Usage on [${_THIS_U}] is too high" ${_CLIENT_EMAIL} Hello, You are using more resources than allocated in your subscription. You have currently ${_CLIENT_CORES} Aegir ${_CLIENT_OPTION} ${_ENGINE_NR}. Your allowed disk space is ${_DSK_MIN_LIMIT} MB. You are currently using ${HomSizH} MB of disk space. Please reduce your usage by deleting old backups, files, and no longer used sites, or purchase enough Aegir Engines to cover your current usage. You can purchase more Aegir Engines easily online: https://omega8.cc/buy Note that unlike with database space limits, for files related disk space we count all your sites, including also all dev/tmp sites, if they exist, even if they are marked as disabled in your Aegir control panel. -- This email has been sent by your Aegir resources usage daily monitor. EOF else cat <<EOF | mail -r ${_MY_EMAIL} -e -b ${_BCC_EMAIL} \ -s "NOTICE: Your Disk Usage on [${_THIS_U}] is too high" ${_CLIENT_EMAIL} Hello, You are using more resources than allocated in your subscription. You have currently ${_CLIENT_CORES} Aegir ${_CLIENT_OPTION} ${_ENGINE_NR}. Your allowed disk space is ${_DSK_MIN_LIMIT} MB. You are currently using ${HomSizH} MB of disk space. Please reduce your usage by deleting old backups, files, and no longer used sites, or purchase enough Aegir Engines to cover your current usage. You can purchase more Aegir Engines easily online: https://omega8.cc/buy Note that unlike with database space limits, for files related disk space we count all your sites, including also all dev/tmp sites, if they exist, even if they are marked as disabled in your Aegir control panel. -- This email has been sent by your Aegir resources usage daily monitor. EOF fi echo "INFO: Notice sent to ${_CLIENT_EMAIL} [${_THIS_U}]: OK" } send_notice_gprd() { _MY_EMAIL="support@omega8.cc" _BCC_EMAIL="omega8cc@gmail.com" _CLIENT_EMAIL=${_CLIENT_EMAIL//\\\@/\@} _MAILX_TEST=$(mail -V 2>&1) if [[ "${_MAILX_TEST}" =~ "GNU Mailutils" ]]; then cat <<EOF | mail -e -a "From: ${_MY_EMAIL}" -a "Bcc: ${_BCC_EMAIL}" \ -s "GDPR compliance for your Aegir account" ${_CLIENT_EMAIL} Hello, Yes, yet another GDPR email, but it's important that you read and understand how this new law affects your hosting with us. The General Data Protection Regulation (GDPR) is a new European privacy law that goes into effect on May 25, 2018. The GDPR will replace the EU Data Protection Directive, also known as Directive 95/46/EC, and will apply a single data protection law throughout the EU. Data protection laws govern the way that businesses collect, use, and share personal data about individuals. Among other things, they require businesses to process an individual’s personal data fairly and lawfully, allow individuals to exercise legal rights in respect of their personal data (for example, to access, correct or delete their personal data), and ensure appropriate security protections are put in place to protect the personal data they process. We have taken steps to ensure that we will be compliant with the GDPR by May 25, 2018. Please read all details on our website at: https://omega8.cc/gdpr https://omega8.cc/gdpr-faq https://omega8.cc/gdpr-dpa https://omega8.cc/gdpr-portability Please contact us if you have any questions: https://omega8.cc/contact Thank you for your attention. 
---
Omega8.cc
EOF
  elif [[ "${_MAILX_TEST}" =~ "invalid" ]]; then
    cat <<EOF | mail -a "From: ${_MY_EMAIL}" -e -b ${_BCC_EMAIL} \
      -s "GDPR compliance for your Aegir account" ${_CLIENT_EMAIL}
Hello,

Yes, yet another GDPR email, but it's important that you read and understand how this new law affects your hosting with us.

The General Data Protection Regulation (GDPR) is a new European privacy law that goes into effect on May 25, 2018. The GDPR will replace the EU Data Protection Directive, also known as Directive 95/46/EC, and will apply a single data protection law throughout the EU.

Data protection laws govern the way that businesses collect, use, and share personal data about individuals. Among other things, they require businesses to process an individual’s personal data fairly and lawfully, allow individuals to exercise legal rights in respect of their personal data (for example, to access, correct or delete their personal data), and ensure appropriate security protections are put in place to protect the personal data they process.

We have taken steps to ensure that we will be compliant with the GDPR by May 25, 2018.

Please read all details on our website at:

https://omega8.cc/gdpr
https://omega8.cc/gdpr-faq
https://omega8.cc/gdpr-dpa
https://omega8.cc/gdpr-portability

Please contact us if you have any questions:

https://omega8.cc/contact

Thank you for your attention.

---
Omega8.cc
EOF
  else
    cat <<EOF | mail -r ${_MY_EMAIL} -e -b ${_BCC_EMAIL} \
      -s "GDPR compliance for your Aegir account" ${_CLIENT_EMAIL}
Hello,

Yes, yet another GDPR email, but it's important that you read and understand how this new law affects your hosting with us.

The General Data Protection Regulation (GDPR) is a new European privacy law that goes into effect on May 25, 2018. The GDPR will replace the EU Data Protection Directive, also known as Directive 95/46/EC, and will apply a single data protection law throughout the EU.

Data protection laws govern the way that businesses collect, use, and share personal data about individuals. Among other things, they require businesses to process an individual’s personal data fairly and lawfully, allow individuals to exercise legal rights in respect of their personal data (for example, to access, correct or delete their personal data), and ensure appropriate security protections are put in place to protect the personal data they process.

We have taken steps to ensure that we will be compliant with the GDPR by May 25, 2018.

Please read all details on our website at:

https://omega8.cc/gdpr
https://omega8.cc/gdpr-faq
https://omega8.cc/gdpr-dpa
https://omega8.cc/gdpr-portability

Please contact us if you have any questions:

https://omega8.cc/contact

Thank you for your attention.
--- Omega8.cc EOF fi echo "INFO: GDPR notice sent to ${_CLIENT_EMAIL} [${_THIS_U}]: OK" } check_limits() { _SQL_MIN_LIMIT=0 _SQL_MAX_LIMIT=0 _SQL_DEV_LIMIT=0 _DSK_MIN_LIMIT=0 _DSK_MAX_LIMIT=0 _DSK_CLU_LIMIT=1 read_account_data if [ "${_CLIENT_OPTION}" = "CLUSTER" ]; then if [ -z "${_DSK_CLU_LIMIT}" ]; then _DSK_CLU_LIMIT=1 fi _SQL_MIN_LIMIT=51200 _DSK_MIN_LIMIT=102400 _DSK_MAX_LIMIT=107520 _SQL_DEV_EXTRA=2 _SQL_MAX_LIMIT=$(( _SQL_MIN_LIMIT + 2048 )) _DSK_MIN_LIMIT=$(( _DSK_MIN_LIMIT *= _DSK_CLU_LIMIT )) _DSK_MAX_LIMIT=$(( _DSK_MAX_LIMIT *= _DSK_CLU_LIMIT )) elif [ "${_CLIENT_OPTION}" = "LITE" ]; then if [ -z "${_DSK_CLU_LIMIT}" ]; then _DSK_CLU_LIMIT=1 fi _SQL_MIN_LIMIT=5120 _DSK_MIN_LIMIT=51200 _DSK_MAX_LIMIT=53760 _SQL_DEV_EXTRA=3 _SQL_MAX_LIMIT=$(( _SQL_MIN_LIMIT + 1024 )) _DSK_MIN_LIMIT=$(( _DSK_MIN_LIMIT *= _DSK_CLU_LIMIT )) _DSK_MAX_LIMIT=$(( _DSK_MAX_LIMIT *= _DSK_CLU_LIMIT )) elif [ "${_CLIENT_OPTION}" = "PHANTOM" ]; then if [ -z "${_DSK_CLU_LIMIT}" ]; then _DSK_CLU_LIMIT=1 fi _SQL_MIN_LIMIT=10240 _DSK_MIN_LIMIT=102400 _DSK_MAX_LIMIT=107520 _SQL_DEV_EXTRA=2 _SQL_MAX_LIMIT=$(( _SQL_MIN_LIMIT + 2048 )) _DSK_MIN_LIMIT=$(( _DSK_MIN_LIMIT *= _DSK_CLU_LIMIT )) _DSK_MAX_LIMIT=$(( _DSK_MAX_LIMIT *= _DSK_CLU_LIMIT )) elif [ "${_CLIENT_OPTION}" = "POWER" ]; then _SQL_MIN_LIMIT=5120 _DSK_MIN_LIMIT=51200 _SQL_DEV_EXTRA=3 _SQL_MAX_LIMIT=$(( _SQL_MIN_LIMIT + 1024 )) _DSK_MAX_LIMIT=$(( _DSK_MIN_LIMIT + 2560 )) elif [ "${_CLIENT_OPTION}" = "EDGE" ] \ || [ "${_CLIENT_OPTION}" = "SSD" ] \ || [ "${_CLIENT_OPTION}" = "CLASSIC" ]; then _CLIENT_OPTION=EDGE _SQL_MIN_LIMIT=1024 _DSK_MIN_LIMIT=15360 _SQL_DEV_EXTRA=2 _SQL_MAX_LIMIT=$(( _SQL_MIN_LIMIT + 512 )) _DSK_MAX_LIMIT=$(( _DSK_MIN_LIMIT + 1280 )) elif [ "${_CLIENT_OPTION}" = "MINI" ]; then _SQL_MIN_LIMIT=1024 _DSK_MIN_LIMIT=15360 _SQL_DEV_EXTRA=1 _SQL_MAX_LIMIT=$(( _SQL_MIN_LIMIT + 512 )) _DSK_MAX_LIMIT=$(( _DSK_MIN_LIMIT + 1280 )) elif [ "${_CLIENT_OPTION}" = "MICRO" ]; then _SQL_MIN_LIMIT=512 _DSK_MIN_LIMIT=5120 _SQL_DEV_EXTRA=1 _SQL_MAX_LIMIT=$(( _SQL_MIN_LIMIT + 256 )) _DSK_MAX_LIMIT=$(( _DSK_MIN_LIMIT + 640 )) else _SQL_MIN_LIMIT=512 _DSK_MIN_LIMIT=7680 _SQL_DEV_EXTRA=1 _SQL_MAX_LIMIT=$(( _SQL_MIN_LIMIT + 256 )) _DSK_MAX_LIMIT=$(( _DSK_MIN_LIMIT + 640 )) fi _SQL_MIN_LIMIT=$(( _SQL_MIN_LIMIT *= _CLIENT_CORES )) _DSK_MIN_LIMIT=$(( _DSK_MIN_LIMIT *= _CLIENT_CORES )) _SQL_MAX_LIMIT=$(( _SQL_MAX_LIMIT *= _CLIENT_CORES )) _DSK_MAX_LIMIT=$(( _DSK_MAX_LIMIT *= _CLIENT_CORES )) _SQL_DEV_LIMIT=${_SQL_MIN_LIMIT} _SQL_DEV_LIMIT=$(( _SQL_DEV_LIMIT *= _CLIENT_CORES )) _SQL_DEV_LIMIT=$(( _SQL_DEV_LIMIT *= _SQL_DEV_EXTRA )) if [ ! -z "${_EXTRA_ENGINE}" ]; then if [ -e "/data/disk/${_THIS_U}/log/extra_edge.txt" ]; then _SQL_ADD_LIMIT=1024 _DSK_ADD_LIMIT=15360 elif [ -e "/data/disk/${_THIS_U}/log/extra_power.txt" ]; then _SQL_ADD_LIMIT=5120 _DSK_ADD_LIMIT=51200 fi _SQL_ADD_LIMIT=$(( _SQL_ADD_LIMIT *= _EXTRA_ENGINE )) _DSK_ADD_LIMIT=$(( _DSK_ADD_LIMIT *= _EXTRA_ENGINE )) _SQL_MIN_LIMIT=$(( _SQL_MIN_LIMIT + _SQL_ADD_LIMIT )) _DSK_MIN_LIMIT=$(( _DSK_MIN_LIMIT + _DSK_ADD_LIMIT )) _SQL_MAX_LIMIT=$(( _SQL_MAX_LIMIT + _SQL_ADD_LIMIT )) _DSK_MAX_LIMIT=$(( _DSK_MAX_LIMIT + _DSK_ADD_LIMIT )) echo _EXTRA_ENGINE is ${_EXTRA_ENGINE} fi echo _CLIENT_CORES is ${_CLIENT_CORES} echo _SQL_MIN_LIMIT is ${_SQL_MIN_LIMIT} echo _SQL_MAX_LIMIT is ${_SQL_MAX_LIMIT} echo _SQL_DEV_LIMIT is ${_SQL_DEV_LIMIT} echo _DSK_MIN_LIMIT is ${_DSK_MIN_LIMIT} echo _DSK_MAX_LIMIT is ${_DSK_MAX_LIMIT} if [ "${SumDatH}" -gt "${_SQL_MAX_LIMIT}" ]; then if [ ! -e "${User}/log/CANCELLED" ] \ && [ ! 
-e "${User}/log/proxied.pid" ]; then if [ "${_THIS_MODE}" = "verbose" ]; then send_notice_sql "LIVE" fi fi echo SQL Usage for ${_THIS_U} above limits elif [ "${SkipDtH}" -gt "${_SQL_DEV_LIMIT}" ]; then if [ ! -e "${User}/log/CANCELLED" ] \ && [ ! -e "${User}/log/proxied.pid" ]; then if [ "${_THIS_MODE}" = "verbose" ]; then send_notice_sql "DEV" fi fi echo SQL Usage for ${_THIS_U} above limits else echo SQL Usage for ${_THIS_U} below limits fi if [ "${HomSizH}" -gt "${_DSK_MAX_LIMIT}" ]; then if [ ! -e "${User}/log/CANCELLED" ] \ && [ ! -e "${User}/log/proxied.pid" ]; then if [ "${_THIS_MODE}" = "verbose" ]; then send_notice_disk fi fi echo Disk Usage for ${_THIS_U} above limits else echo Disk Usage for ${_THIS_U} below limits fi if [ ! -e "${User}/log/GDPRsent.log" ]; then if [ ! -e "${User}/log/CANCELLED" ] \ && [ ! -e "${User}/log/proxied.pid" ]; then if [ "${_THIS_MODE}" = "verbose" ]; then send_notice_gprd touch ${User}/log/GDPRsent.log echo GDPR info for ${_THIS_U} sent fi fi fi } count_cpu() { _CPU_INFO=$(grep -c processor /proc/cpuinfo 2>&1) _CPU_INFO=${_CPU_INFO//[^0-9]/} _NPROC_TEST=$(which nproc 2>&1) if [ -z "${_NPROC_TEST}" ]; then _CPU_NR="${_CPU_INFO}" else _CPU_NR=$(nproc 2>&1) fi _CPU_NR=${_CPU_NR//[^0-9]/} if [ ! -z "${_CPU_NR}" ] \ && [ ! -z "${_CPU_INFO}" ] \ && [ "${_CPU_NR}" -gt "${_CPU_INFO}" ] \ && [ "${_CPU_INFO}" -gt "0" ]; then _CPU_NR="${_CPU_INFO}" fi if [ -z "${_CPU_NR}" ] || [ "${_CPU_NR}" -lt "1" ]; then _CPU_NR=1 fi } load_control() { if [ -e "/root/.barracuda.cnf" ]; then source /root/.barracuda.cnf _CPU_MAX_RATIO=${_CPU_MAX_RATIO//[^0-9]/} fi if [ -z "${_CPU_MAX_RATIO}" ]; then _CPU_MAX_RATIO=6 fi _O_LOAD=$(awk '{print $1*100}' /proc/loadavg 2>&1) _O_LOAD=$(( _O_LOAD / _CPU_NR )) _O_LOAD_MAX=$(( 100 * _CPU_MAX_RATIO )) } sub_count_usr_home() { if [ -e "$1" ]; then HqmSiz=$(du -s $1 2>&1) HqmSiz=$(echo "${HqmSiz}" \ | cut -d'/' -f1 \ | awk '{ print $1}' \ | sed "s/[\/\s+]//g" 2>&1) HxmSiz=$(( HxmSiz + HqmSiz )) ### echo $1 disk usage is $HqmSiz ### echo HxmSiz total is $HxmSiz fi } action() { for User in `find /data/disk/ -maxdepth 1 -mindepth 1 | sort`; do count_cpu load_control if [ -e "${User}/config/server_master/nginx/vhost.d" ]; then if [ "${_O_LOAD}" -lt "${_O_LOAD_MAX}" ]; then SumDir=0 SumDat=0 SkipDt=0 HomSiz=0 HxmSiz=0 HqmSiz=0 _THIS_U=$(echo ${User} | cut -d'/' -f4 | awk '{ print $1}' 2>&1) _THIS_HM_SITE=$(cat ${User}/.drush/hostmaster.alias.drushrc.php \ | grep "site_path'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) _THIS_HM_PLR=$(cat ${User}/.drush/hostmaster.alias.drushrc.php \ | grep "root'" \ | cut -d: -f2 \ | awk '{ print $3}' \ | sed "s/[\,']//g" 2>&1) echo load is ${_O_LOAD} while maxload is ${_O_LOAD_MAX} if [ ! -e "${User}/log/skip-force-cleanup.txt" ]; then cd ${User} echo "Remove various tmp/dot files breaking du command" find . -name ".DS_Store" -type f | xargs rm -rf &> /dev/null find . -name "*~" -type f | xargs rm -rf &> /dev/null find . -name "*#" -type f | xargs rm -rf &> /dev/null find . -name ".#*" -type f | xargs rm -rf &> /dev/null find . -name "*--" -type f | xargs rm -rf &> /dev/null find . -name "._*" -type f | xargs rm -rf &> /dev/null find . -name "*~" -type l | xargs rm -rf &> /dev/null find . -name "*#" -type l | xargs rm -rf &> /dev/null find . -name ".#*" -type l | xargs rm -rf &> /dev/null find . -name "*--" -type l | xargs rm -rf &> /dev/null find . 
-name "._*" -type l | xargs rm -rf &> /dev/null fi echo Counting User ${User} _DOW=$(date +%u 2>&1) _DOW=${_DOW//[^1-7]/} if [ "${_DOW}" = "2" ]; then detect_deprecated_php fi count if [ -d "/home/${_THIS_U}.ftp" ]; then for uH in `find /home/${_THIS_U}.* -maxdepth 0 -mindepth 0 | sort`; do if [ -d "${uH}" ]; then sub_count_usr_home ${uH} fi done for uR in `find /var/solr7/data/oct.${_THIS_U}.* -maxdepth 0 -mindepth 0 | sort`; do if [ -d "${uR}" ]; then sub_count_usr_home ${uR} fi done for uO in `find /opt/solr4/${_THIS_U}.* -maxdepth 0 -mindepth 0 | sort`; do if [ -d "${uO}" ]; then sub_count_usr_home ${uO} fi done fi if [ -L "${User}" ]; then HomSiz=$(du -D -s ${User} 2>&1) else HomSiz=$(du -s ${User} 2>&1) fi HomSiz=$(echo "${HomSiz}" \ | cut -d'/' -f1 \ | awk '{ print $1}' \ | sed "s/[\/\s+]//g" 2>&1) HomSiz=$(( HomSiz + HxmSiz )) HomSizH=$(echo "scale=0; ${HomSiz}/1024" | bc 2>&1) SumDatH=$(echo "scale=0; ${SumDat}/1024" | bc 2>&1) SkipDtH=$(echo "scale=0; ${SkipDt}/1024" | bc 2>&1) SumDirH=$(echo "scale=0; ${SumDir}/1024" | bc 2>&1) echo HomSiz is ${HomSiz} or ${HomSizH} MB echo SumDir is ${SumDir} or ${SumDirH} MB echo SumDat is ${SumDat} or ${SumDatH} MB echo SkipDt is ${SkipDt} or ${SkipDtH} MB if [[ "${_CHECK_HOST}" =~ ".host8." ]] \ || [[ "${_CHECK_HOST}" =~ ".boa.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".o8.io"($) ]] \ || [[ "${_CHECK_HOST}" =~ ".aegir.cc"($) ]]; then check_limits if [ -e "${_THIS_HM_SITE}" ]; then su -s /bin/bash - ${_THIS_U} -c "drush8 @hostmaster \ variable-set --always-set site_footer 'Usage on ${_DATE} \ | ALL Files <strong>${HomSizH}</strong> MB \ | LIVE Dbs <strong>${SumDatH}</strong> MB \ | DEV Dbs <strong>${SkipDtH}</strong> MB \ | <strong>${_CLIENT_CORES}</strong> \ Aegir ${_CLIENT_OPTION} ${_ENGINE_NR} \ | CLI <strong>${_CLIENT_CLI}</strong> \ | FPM <strong>${_CLIENT_FPM}</strong>'" &> /dev/null if [ ! -e "${User}/log/CANCELLED" ] \ && [ "${_DEV_EXC}" = "NO" ] \ && [ ! -e "${User}/log/proxied.pid" ]; then eMail=${_CLIENT_EMAIL//\\\@/\@} AegirUrl=$(cat ${User}/log/domain.txt 2>&1) if [ "${HomSizH}" -gt "${_DSK_MAX_LIMIT}" ]; then Files="!x!FilesAll" else Files="FilesAll" fi if [ "${SumDatH}" -gt "${_SQL_MAX_LIMIT}" ]; then DbsL="!x!DbsLive" else DbsL="DbsLive" fi if [ "${SkipDtH}" -gt "${_SQL_DEV_LIMIT}" ]; then DbsD="!x!DbsDev" else DbsD="DbsDev" fi if [ "${_THIS_MODE}" = "verbose" ] || [ -z "${_THIS_MODE}" ]; then _LOG_FILE="usage-latest-verbose.log" elif [ "${_THIS_MODE}" = "silent" ]; then _LOG_FILE="usage-latest-silent.log" fi echo "${AegirUrl},${Files}:${HomSizH},${DbsL}:${SumDatH},${DbsD}:${SkipDtH},${eMail},Subs:${_CLIENT_OPTION}:${_CLIENT_CORES},${_THIS_U}" >> /var/xdrago/log/usage/${_LOG_FILE} fi TmDir="${_THIS_HM_PLR}/profiles/hostmaster/themes/aegir/eldir" PgTpl="${TmDir}/page.tpl.php" EldirF="0001-Print-site_footer-if-defined.patch" TplPatch="/var/xdrago/conf/${EldirF}" if [ -e "${PgTpl}" ] && [ -e "${TplPatch}" ]; then _IS_SF=$(grep "site_footer" ${PgTpl} 2>&1) if [[ ! "${_IS_SF}" =~ "site_footer" ]]; then cd ${TmDir} patch -p1 < ${TplPatch} &> /dev/null cd fi fi su -s /bin/bash - ${_THIS_U} \ -c "drush8 @hostmaster cache-clear all" &> /dev/null fi else if [ -e "${_THIS_HM_SITE}" ]; then su -s /bin/bash - ${_THIS_U} \ -c "drush8 @hostmaster variable-set \ --always-set site_footer ''" &> /dev/null su -s /bin/bash - ${_THIS_U} \ -c "drush8 @hostmaster cache-clear all" &> /dev/null fi fi echo "Done for ${User}" else echo "load is ${_O_LOAD} while maxload is ${_O_LOAD_MAX}" echo "...we have to wait..." 
      fi
      echo
      echo
    fi
  done
}

###--------------------###

echo "INFO: Starting usage monitoring on `date`"
_NOW=$(date +%y%m%d-%H%M%S 2>&1)
_NOW=${_NOW//[^0-9-]/}
_DATE=$(date 2>&1)
_CHECK_HOST=$(uname -n 2>&1)
mkdir -p /var/xdrago/log/usage
if [ "${1}" = "verbose" ] || [ -z "${1}" ]; then
  _THIS_MODE="verbose"
  rm -f /var/xdrago/log/usage/usage-latest-verbose.log
elif [ "${1}" = "silent" ]; then
  _THIS_MODE="silent"
  rm -f /var/xdrago/log/usage/usage-latest-silent.log
fi
action >/var/xdrago/log/usage/usage-${_NOW}.log 2>&1
echo "INFO: Completing usage monitoring on `date`"
exit 0
###EOF2023###