pvincent
10 months ago
82 changed files with 7172 additions and 6 deletions
-
8.vscode/extensions.json
-
35.vscode/settings.json
-
661LICENSE
-
10README.md
-
652lib/functions.sh
-
294lib/harden.sh
-
18lib/images/bullseye-miaou.sh
-
19lib/images/buster-miaou.sh
-
5lib/init.sh
-
409lib/install.sh
-
218recipes/cagettepei/crud.sh
-
187recipes/cagettepei/install.sh
-
124recipes/dmz/install.sh
-
21recipes/dokuwiki/install.sh
-
180recipes/dolibarr/crud.sh
-
56recipes/dolibarr/install.sh
-
65recipes/mariadb/install.sh
-
180recipes/odoo12/crud.sh
-
158recipes/odoo12/install.sh
-
196recipes/odoo15/crud.sh
-
129recipes/odoo15/install.sh
-
68recipes/postgresql/install.sh
-
202recipes/wordpress/crud.sh
-
83recipes/wordpress/install.sh
-
221scripts/db-maria
-
200scripts/db-psql
-
180scripts/lxc-miaou-create
-
88scripts/lxc-miaou-enable-ssh
-
3scripts/lxc-sort-by-disk
-
3scripts/lxc-sort-by-mem
-
12scripts/lxd-restart-dnsmasq
-
479scripts/miaou
-
80scripts/ssl_check
-
18templates/apps/cagettepei/cagettepei-batch
-
8templates/apps/cagettepei/cagettepei-host.j2
-
10templates/apps/cagettepei/systemd/cagettepei-batch-day.service
-
10templates/apps/cagettepei/systemd/cagettepei-batch-day.timer
-
10templates/apps/cagettepei/systemd/cagettepei-batch-minute.service
-
10templates/apps/cagettepei/systemd/cagettepei-batch-minute.timer
-
23templates/apps/dolibarr/host.j2
-
BINtemplates/apps/odoo12/favicon/favicon-beta.ico
-
BINtemplates/apps/odoo12/favicon/favicon-dev.ico
-
BINtemplates/apps/odoo12/favicon/favicon-prod.ico
-
17templates/apps/odoo12/odoo.conf.j2
-
14templates/apps/odoo12/odoo.service.j2
-
31templates/apps/odoo12/odoo12-addon-install
-
33templates/apps/odoo15/odoo-addon-install
-
17templates/apps/odoo15/odoo.conf.j2
-
14templates/apps/odoo15/odoo.service.j2
-
44templates/apps/wordpress/wp-backup
-
37templates/apps/wordpress/wp-host.j2
-
176templates/apps/wordpress/wp-tool
-
5templates/autopostgresqlbackup/cron.daily
-
122templates/autopostgresqlbackup/default.conf
-
666templates/autopostgresqlbackup/script
-
162templates/bottom/bottom.toml
-
14templates/dev-container-ssh/sshd_config.j2
-
10templates/etc/defaults.yaml.j2
-
3templates/etc/miaou.yaml.j2
-
26templates/etc/miaou.yaml.sample
-
23templates/hardened/firewall.table
-
14templates/hardened/hardened.yaml.sample
-
2templates/hardened/mailer/aliases.j2
-
6templates/hardened/mailer/mail.rc.j2
-
26templates/hardened/mailer/msmtprc.j2
-
31templates/hardened/motd/10-header
-
15templates/hardened/motd/40-machineinfo
-
15templates/hardened/motd/80-users
-
5templates/hardened/nftables.conf
-
17templates/hardened/pam/alert_ssh_password.sh
-
16templates/hardened/sshd_config.j2
-
6templates/hardened/sudoers.j2
-
12templates/hardened/systemd/on_startup.service
-
7templates/monit/containers.j2
-
6templates/monit/hosts.j2
-
27templates/network-manager/50-miaou-resolver
-
35templates/nftables/lxd.table.j2
-
6templates/nftables/nat.table.j2
-
16templates/nginx/_default.j2
-
37templates/nginx/hosts.j2
-
67templates/nginx/snippets/banner_beta.conf
-
65templates/nginx/snippets/banner_dev.conf
@ -0,0 +1,8 @@ |
|||
{ |
|||
"recommendations": [ |
|||
"mads-hartmann.bash-ide-vscode", |
|||
"mkhl.shfmt", |
|||
"samuelcolvin.jinjahtml", |
|||
"jgclark.vscode-todo-highlight" |
|||
], |
|||
} |
@ -0,0 +1,35 @@ |
|||
{ |
|||
"cSpell.words": [ |
|||
"chattr", |
|||
"dnsmasq", |
|||
"dpkg", |
|||
"echoerr", |
|||
"mkdir", |
|||
"resolv", |
|||
"rfkill", |
|||
"tera" |
|||
], |
|||
"cSpell.enableFiletypes": [ |
|||
"!plaintext" |
|||
], |
|||
"ltex.language": "en", |
|||
"outline.showVariables": false, |
|||
"editor.formatOnSave": true, |
|||
"todohighlight.enableDiagnostics": true, |
|||
"todohighlight.include": [ |
|||
"**/*.js", |
|||
"**/*.jsx", |
|||
"**/*.ts", |
|||
"**/*.tsx", |
|||
"**/*.html", |
|||
"**/*.css", |
|||
"**/*.scss", |
|||
"**/*.php", |
|||
"**/*.rb", |
|||
"**/*.txt", |
|||
"**/*.mdown", |
|||
"**/*.md", |
|||
"**/*.sh", |
|||
"**/scripts/*" |
|||
] |
|||
} |
@ -0,0 +1,661 @@ |
|||
GNU AFFERO GENERAL PUBLIC LICENSE |
|||
Version 3, 19 November 2007 |
|||
|
|||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> |
|||
Everyone is permitted to copy and distribute verbatim copies |
|||
of this license document, but changing it is not allowed. |
|||
|
|||
Preamble |
|||
|
|||
The GNU Affero General Public License is a free, copyleft license for |
|||
software and other kinds of works, specifically designed to ensure |
|||
cooperation with the community in the case of network server software. |
|||
|
|||
The licenses for most software and other practical works are designed |
|||
to take away your freedom to share and change the works. By contrast, |
|||
our General Public Licenses are intended to guarantee your freedom to |
|||
share and change all versions of a program--to make sure it remains free |
|||
software for all its users. |
|||
|
|||
When we speak of free software, we are referring to freedom, not |
|||
price. Our General Public Licenses are designed to make sure that you |
|||
have the freedom to distribute copies of free software (and charge for |
|||
them if you wish), that you receive source code or can get it if you |
|||
want it, that you can change the software or use pieces of it in new |
|||
free programs, and that you know you can do these things. |
|||
|
|||
Developers that use our General Public Licenses protect your rights |
|||
with two steps: (1) assert copyright on the software, and (2) offer |
|||
you this License which gives you legal permission to copy, distribute |
|||
and/or modify the software. |
|||
|
|||
A secondary benefit of defending all users' freedom is that |
|||
improvements made in alternate versions of the program, if they |
|||
receive widespread use, become available for other developers to |
|||
incorporate. Many developers of free software are heartened and |
|||
encouraged by the resulting cooperation. However, in the case of |
|||
software used on network servers, this result may fail to come about. |
|||
The GNU General Public License permits making a modified version and |
|||
letting the public access it on a server without ever releasing its |
|||
source code to the public. |
|||
|
|||
The GNU Affero General Public License is designed specifically to |
|||
ensure that, in such cases, the modified source code becomes available |
|||
to the community. It requires the operator of a network server to |
|||
provide the source code of the modified version running there to the |
|||
users of that server. Therefore, public use of a modified version, on |
|||
a publicly accessible server, gives the public access to the source |
|||
code of the modified version. |
|||
|
|||
An older license, called the Affero General Public License and |
|||
published by Affero, was designed to accomplish similar goals. This is |
|||
a different license, not a version of the Affero GPL, but Affero has |
|||
released a new version of the Affero GPL which permits relicensing under |
|||
this license. |
|||
|
|||
The precise terms and conditions for copying, distribution and |
|||
modification follow. |
|||
|
|||
TERMS AND CONDITIONS |
|||
|
|||
0. Definitions. |
|||
|
|||
"This License" refers to version 3 of the GNU Affero General Public License. |
|||
|
|||
"Copyright" also means copyright-like laws that apply to other kinds of |
|||
works, such as semiconductor masks. |
|||
|
|||
"The Program" refers to any copyrightable work licensed under this |
|||
License. Each licensee is addressed as "you". "Licensees" and |
|||
"recipients" may be individuals or organizations. |
|||
|
|||
To "modify" a work means to copy from or adapt all or part of the work |
|||
in a fashion requiring copyright permission, other than the making of an |
|||
exact copy. The resulting work is called a "modified version" of the |
|||
earlier work or a work "based on" the earlier work. |
|||
|
|||
A "covered work" means either the unmodified Program or a work based |
|||
on the Program. |
|||
|
|||
To "propagate" a work means to do anything with it that, without |
|||
permission, would make you directly or secondarily liable for |
|||
infringement under applicable copyright law, except executing it on a |
|||
computer or modifying a private copy. Propagation includes copying, |
|||
distribution (with or without modification), making available to the |
|||
public, and in some countries other activities as well. |
|||
|
|||
To "convey" a work means any kind of propagation that enables other |
|||
parties to make or receive copies. Mere interaction with a user through |
|||
a computer network, with no transfer of a copy, is not conveying. |
|||
|
|||
An interactive user interface displays "Appropriate Legal Notices" |
|||
to the extent that it includes a convenient and prominently visible |
|||
feature that (1) displays an appropriate copyright notice, and (2) |
|||
tells the user that there is no warranty for the work (except to the |
|||
extent that warranties are provided), that licensees may convey the |
|||
work under this License, and how to view a copy of this License. If |
|||
the interface presents a list of user commands or options, such as a |
|||
menu, a prominent item in the list meets this criterion. |
|||
|
|||
1. Source Code. |
|||
|
|||
The "source code" for a work means the preferred form of the work |
|||
for making modifications to it. "Object code" means any non-source |
|||
form of a work. |
|||
|
|||
A "Standard Interface" means an interface that either is an official |
|||
standard defined by a recognized standards body, or, in the case of |
|||
interfaces specified for a particular programming language, one that |
|||
is widely used among developers working in that language. |
|||
|
|||
The "System Libraries" of an executable work include anything, other |
|||
than the work as a whole, that (a) is included in the normal form of |
|||
packaging a Major Component, but which is not part of that Major |
|||
Component, and (b) serves only to enable use of the work with that |
|||
Major Component, or to implement a Standard Interface for which an |
|||
implementation is available to the public in source code form. A |
|||
"Major Component", in this context, means a major essential component |
|||
(kernel, window system, and so on) of the specific operating system |
|||
(if any) on which the executable work runs, or a compiler used to |
|||
produce the work, or an object code interpreter used to run it. |
|||
|
|||
The "Corresponding Source" for a work in object code form means all |
|||
the source code needed to generate, install, and (for an executable |
|||
work) run the object code and to modify the work, including scripts to |
|||
control those activities. However, it does not include the work's |
|||
System Libraries, or general-purpose tools or generally available free |
|||
programs which are used unmodified in performing those activities but |
|||
which are not part of the work. For example, Corresponding Source |
|||
includes interface definition files associated with source files for |
|||
the work, and the source code for shared libraries and dynamically |
|||
linked subprograms that the work is specifically designed to require, |
|||
such as by intimate data communication or control flow between those |
|||
subprograms and other parts of the work. |
|||
|
|||
The Corresponding Source need not include anything that users |
|||
can regenerate automatically from other parts of the Corresponding |
|||
Source. |
|||
|
|||
The Corresponding Source for a work in source code form is that |
|||
same work. |
|||
|
|||
2. Basic Permissions. |
|||
|
|||
All rights granted under this License are granted for the term of |
|||
copyright on the Program, and are irrevocable provided the stated |
|||
conditions are met. This License explicitly affirms your unlimited |
|||
permission to run the unmodified Program. The output from running a |
|||
covered work is covered by this License only if the output, given its |
|||
content, constitutes a covered work. This License acknowledges your |
|||
rights of fair use or other equivalent, as provided by copyright law. |
|||
|
|||
You may make, run and propagate covered works that you do not |
|||
convey, without conditions so long as your license otherwise remains |
|||
in force. You may convey covered works to others for the sole purpose |
|||
of having them make modifications exclusively for you, or provide you |
|||
with facilities for running those works, provided that you comply with |
|||
the terms of this License in conveying all material for which you do |
|||
not control copyright. Those thus making or running the covered works |
|||
for you must do so exclusively on your behalf, under your direction |
|||
and control, on terms that prohibit them from making any copies of |
|||
your copyrighted material outside their relationship with you. |
|||
|
|||
Conveying under any other circumstances is permitted solely under |
|||
the conditions stated below. Sublicensing is not allowed; section 10 |
|||
makes it unnecessary. |
|||
|
|||
3. Protecting Users' Legal Rights From Anti-Circumvention Law. |
|||
|
|||
No covered work shall be deemed part of an effective technological |
|||
measure under any applicable law fulfilling obligations under article |
|||
11 of the WIPO copyright treaty adopted on 20 December 1996, or |
|||
similar laws prohibiting or restricting circumvention of such |
|||
measures. |
|||
|
|||
When you convey a covered work, you waive any legal power to forbid |
|||
circumvention of technological measures to the extent such circumvention |
|||
is effected by exercising rights under this License with respect to |
|||
the covered work, and you disclaim any intention to limit operation or |
|||
modification of the work as a means of enforcing, against the work's |
|||
users, your or third parties' legal rights to forbid circumvention of |
|||
technological measures. |
|||
|
|||
4. Conveying Verbatim Copies. |
|||
|
|||
You may convey verbatim copies of the Program's source code as you |
|||
receive it, in any medium, provided that you conspicuously and |
|||
appropriately publish on each copy an appropriate copyright notice; |
|||
keep intact all notices stating that this License and any |
|||
non-permissive terms added in accord with section 7 apply to the code; |
|||
keep intact all notices of the absence of any warranty; and give all |
|||
recipients a copy of this License along with the Program. |
|||
|
|||
You may charge any price or no price for each copy that you convey, |
|||
and you may offer support or warranty protection for a fee. |
|||
|
|||
5. Conveying Modified Source Versions. |
|||
|
|||
You may convey a work based on the Program, or the modifications to |
|||
produce it from the Program, in the form of source code under the |
|||
terms of section 4, provided that you also meet all of these conditions: |
|||
|
|||
a) The work must carry prominent notices stating that you modified |
|||
it, and giving a relevant date. |
|||
|
|||
b) The work must carry prominent notices stating that it is |
|||
released under this License and any conditions added under section |
|||
7. This requirement modifies the requirement in section 4 to |
|||
"keep intact all notices". |
|||
|
|||
c) You must license the entire work, as a whole, under this |
|||
License to anyone who comes into possession of a copy. This |
|||
License will therefore apply, along with any applicable section 7 |
|||
additional terms, to the whole of the work, and all its parts, |
|||
regardless of how they are packaged. This License gives no |
|||
permission to license the work in any other way, but it does not |
|||
invalidate such permission if you have separately received it. |
|||
|
|||
d) If the work has interactive user interfaces, each must display |
|||
Appropriate Legal Notices; however, if the Program has interactive |
|||
interfaces that do not display Appropriate Legal Notices, your |
|||
work need not make them do so. |
|||
|
|||
A compilation of a covered work with other separate and independent |
|||
works, which are not by their nature extensions of the covered work, |
|||
and which are not combined with it such as to form a larger program, |
|||
in or on a volume of a storage or distribution medium, is called an |
|||
"aggregate" if the compilation and its resulting copyright are not |
|||
used to limit the access or legal rights of the compilation's users |
|||
beyond what the individual works permit. Inclusion of a covered work |
|||
in an aggregate does not cause this License to apply to the other |
|||
parts of the aggregate. |
|||
|
|||
6. Conveying Non-Source Forms. |
|||
|
|||
You may convey a covered work in object code form under the terms |
|||
of sections 4 and 5, provided that you also convey the |
|||
machine-readable Corresponding Source under the terms of this License, |
|||
in one of these ways: |
|||
|
|||
a) Convey the object code in, or embodied in, a physical product |
|||
(including a physical distribution medium), accompanied by the |
|||
Corresponding Source fixed on a durable physical medium |
|||
customarily used for software interchange. |
|||
|
|||
b) Convey the object code in, or embodied in, a physical product |
|||
(including a physical distribution medium), accompanied by a |
|||
written offer, valid for at least three years and valid for as |
|||
long as you offer spare parts or customer support for that product |
|||
model, to give anyone who possesses the object code either (1) a |
|||
copy of the Corresponding Source for all the software in the |
|||
product that is covered by this License, on a durable physical |
|||
medium customarily used for software interchange, for a price no |
|||
more than your reasonable cost of physically performing this |
|||
conveying of source, or (2) access to copy the |
|||
Corresponding Source from a network server at no charge. |
|||
|
|||
c) Convey individual copies of the object code with a copy of the |
|||
written offer to provide the Corresponding Source. This |
|||
alternative is allowed only occasionally and noncommercially, and |
|||
only if you received the object code with such an offer, in accord |
|||
with subsection 6b. |
|||
|
|||
d) Convey the object code by offering access from a designated |
|||
place (gratis or for a charge), and offer equivalent access to the |
|||
Corresponding Source in the same way through the same place at no |
|||
further charge. You need not require recipients to copy the |
|||
Corresponding Source along with the object code. If the place to |
|||
copy the object code is a network server, the Corresponding Source |
|||
may be on a different server (operated by you or a third party) |
|||
that supports equivalent copying facilities, provided you maintain |
|||
clear directions next to the object code saying where to find the |
|||
Corresponding Source. Regardless of what server hosts the |
|||
Corresponding Source, you remain obligated to ensure that it is |
|||
available for as long as needed to satisfy these requirements. |
|||
|
|||
e) Convey the object code using peer-to-peer transmission, provided |
|||
you inform other peers where the object code and Corresponding |
|||
Source of the work are being offered to the general public at no |
|||
charge under subsection 6d. |
|||
|
|||
A separable portion of the object code, whose source code is excluded |
|||
from the Corresponding Source as a System Library, need not be |
|||
included in conveying the object code work. |
|||
|
|||
A "User Product" is either (1) a "consumer product", which means any |
|||
tangible personal property which is normally used for personal, family, |
|||
or household purposes, or (2) anything designed or sold for incorporation |
|||
into a dwelling. In determining whether a product is a consumer product, |
|||
doubtful cases shall be resolved in favor of coverage. For a particular |
|||
product received by a particular user, "normally used" refers to a |
|||
typical or common use of that class of product, regardless of the status |
|||
of the particular user or of the way in which the particular user |
|||
actually uses, or expects or is expected to use, the product. A product |
|||
is a consumer product regardless of whether the product has substantial |
|||
commercial, industrial or non-consumer uses, unless such uses represent |
|||
the only significant mode of use of the product. |
|||
|
|||
"Installation Information" for a User Product means any methods, |
|||
procedures, authorization keys, or other information required to install |
|||
and execute modified versions of a covered work in that User Product from |
|||
a modified version of its Corresponding Source. The information must |
|||
suffice to ensure that the continued functioning of the modified object |
|||
code is in no case prevented or interfered with solely because |
|||
modification has been made. |
|||
|
|||
If you convey an object code work under this section in, or with, or |
|||
specifically for use in, a User Product, and the conveying occurs as |
|||
part of a transaction in which the right of possession and use of the |
|||
User Product is transferred to the recipient in perpetuity or for a |
|||
fixed term (regardless of how the transaction is characterized), the |
|||
Corresponding Source conveyed under this section must be accompanied |
|||
by the Installation Information. But this requirement does not apply |
|||
if neither you nor any third party retains the ability to install |
|||
modified object code on the User Product (for example, the work has |
|||
been installed in ROM). |
|||
|
|||
The requirement to provide Installation Information does not include a |
|||
requirement to continue to provide support service, warranty, or updates |
|||
for a work that has been modified or installed by the recipient, or for |
|||
the User Product in which it has been modified or installed. Access to a |
|||
network may be denied when the modification itself materially and |
|||
adversely affects the operation of the network or violates the rules and |
|||
protocols for communication across the network. |
|||
|
|||
Corresponding Source conveyed, and Installation Information provided, |
|||
in accord with this section must be in a format that is publicly |
|||
documented (and with an implementation available to the public in |
|||
source code form), and must require no special password or key for |
|||
unpacking, reading or copying. |
|||
|
|||
7. Additional Terms. |
|||
|
|||
"Additional permissions" are terms that supplement the terms of this |
|||
License by making exceptions from one or more of its conditions. |
|||
Additional permissions that are applicable to the entire Program shall |
|||
be treated as though they were included in this License, to the extent |
|||
that they are valid under applicable law. If additional permissions |
|||
apply only to part of the Program, that part may be used separately |
|||
under those permissions, but the entire Program remains governed by |
|||
this License without regard to the additional permissions. |
|||
|
|||
When you convey a copy of a covered work, you may at your option |
|||
remove any additional permissions from that copy, or from any part of |
|||
it. (Additional permissions may be written to require their own |
|||
removal in certain cases when you modify the work.) You may place |
|||
additional permissions on material, added by you to a covered work, |
|||
for which you have or can give appropriate copyright permission. |
|||
|
|||
Notwithstanding any other provision of this License, for material you |
|||
add to a covered work, you may (if authorized by the copyright holders of |
|||
that material) supplement the terms of this License with terms: |
|||
|
|||
a) Disclaiming warranty or limiting liability differently from the |
|||
terms of sections 15 and 16 of this License; or |
|||
|
|||
b) Requiring preservation of specified reasonable legal notices or |
|||
author attributions in that material or in the Appropriate Legal |
|||
Notices displayed by works containing it; or |
|||
|
|||
c) Prohibiting misrepresentation of the origin of that material, or |
|||
requiring that modified versions of such material be marked in |
|||
reasonable ways as different from the original version; or |
|||
|
|||
d) Limiting the use for publicity purposes of names of licensors or |
|||
authors of the material; or |
|||
|
|||
e) Declining to grant rights under trademark law for use of some |
|||
trade names, trademarks, or service marks; or |
|||
|
|||
f) Requiring indemnification of licensors and authors of that |
|||
material by anyone who conveys the material (or modified versions of |
|||
it) with contractual assumptions of liability to the recipient, for |
|||
any liability that these contractual assumptions directly impose on |
|||
those licensors and authors. |
|||
|
|||
All other non-permissive additional terms are considered "further |
|||
restrictions" within the meaning of section 10. If the Program as you |
|||
received it, or any part of it, contains a notice stating that it is |
|||
governed by this License along with a term that is a further |
|||
restriction, you may remove that term. If a license document contains |
|||
a further restriction but permits relicensing or conveying under this |
|||
License, you may add to a covered work material governed by the terms |
|||
of that license document, provided that the further restriction does |
|||
not survive such relicensing or conveying. |
|||
|
|||
If you add terms to a covered work in accord with this section, you |
|||
must place, in the relevant source files, a statement of the |
|||
additional terms that apply to those files, or a notice indicating |
|||
where to find the applicable terms. |
|||
|
|||
Additional terms, permissive or non-permissive, may be stated in the |
|||
form of a separately written license, or stated as exceptions; |
|||
the above requirements apply either way. |
|||
|
|||
8. Termination. |
|||
|
|||
You may not propagate or modify a covered work except as expressly |
|||
provided under this License. Any attempt otherwise to propagate or |
|||
modify it is void, and will automatically terminate your rights under |
|||
this License (including any patent licenses granted under the third |
|||
paragraph of section 11). |
|||
|
|||
However, if you cease all violation of this License, then your |
|||
license from a particular copyright holder is reinstated (a) |
|||
provisionally, unless and until the copyright holder explicitly and |
|||
finally terminates your license, and (b) permanently, if the copyright |
|||
holder fails to notify you of the violation by some reasonable means |
|||
prior to 60 days after the cessation. |
|||
|
|||
Moreover, your license from a particular copyright holder is |
|||
reinstated permanently if the copyright holder notifies you of the |
|||
violation by some reasonable means, this is the first time you have |
|||
received notice of violation of this License (for any work) from that |
|||
copyright holder, and you cure the violation prior to 30 days after |
|||
your receipt of the notice. |
|||
|
|||
Termination of your rights under this section does not terminate the |
|||
licenses of parties who have received copies or rights from you under |
|||
this License. If your rights have been terminated and not permanently |
|||
reinstated, you do not qualify to receive new licenses for the same |
|||
material under section 10. |
|||
|
|||
9. Acceptance Not Required for Having Copies. |
|||
|
|||
You are not required to accept this License in order to receive or |
|||
run a copy of the Program. Ancillary propagation of a covered work |
|||
occurring solely as a consequence of using peer-to-peer transmission |
|||
to receive a copy likewise does not require acceptance. However, |
|||
nothing other than this License grants you permission to propagate or |
|||
modify any covered work. These actions infringe copyright if you do |
|||
not accept this License. Therefore, by modifying or propagating a |
|||
covered work, you indicate your acceptance of this License to do so. |
|||
|
|||
10. Automatic Licensing of Downstream Recipients. |
|||
|
|||
Each time you convey a covered work, the recipient automatically |
|||
receives a license from the original licensors, to run, modify and |
|||
propagate that work, subject to this License. You are not responsible |
|||
for enforcing compliance by third parties with this License. |
|||
|
|||
An "entity transaction" is a transaction transferring control of an |
|||
organization, or substantially all assets of one, or subdividing an |
|||
organization, or merging organizations. If propagation of a covered |
|||
work results from an entity transaction, each party to that |
|||
transaction who receives a copy of the work also receives whatever |
|||
licenses to the work the party's predecessor in interest had or could |
|||
give under the previous paragraph, plus a right to possession of the |
|||
Corresponding Source of the work from the predecessor in interest, if |
|||
the predecessor has it or can get it with reasonable efforts. |
|||
|
|||
You may not impose any further restrictions on the exercise of the |
|||
rights granted or affirmed under this License. For example, you may |
|||
not impose a license fee, royalty, or other charge for exercise of |
|||
rights granted under this License, and you may not initiate litigation |
|||
(including a cross-claim or counterclaim in a lawsuit) alleging that |
|||
any patent claim is infringed by making, using, selling, offering for |
|||
sale, or importing the Program or any portion of it. |
|||
|
|||
11. Patents. |
|||
|
|||
A "contributor" is a copyright holder who authorizes use under this |
|||
License of the Program or a work on which the Program is based. The |
|||
work thus licensed is called the contributor's "contributor version". |
|||
|
|||
A contributor's "essential patent claims" are all patent claims |
|||
owned or controlled by the contributor, whether already acquired or |
|||
hereafter acquired, that would be infringed by some manner, permitted |
|||
by this License, of making, using, or selling its contributor version, |
|||
but do not include claims that would be infringed only as a |
|||
consequence of further modification of the contributor version. For |
|||
purposes of this definition, "control" includes the right to grant |
|||
patent sublicenses in a manner consistent with the requirements of |
|||
this License. |
|||
|
|||
Each contributor grants you a non-exclusive, worldwide, royalty-free |
|||
patent license under the contributor's essential patent claims, to |
|||
make, use, sell, offer for sale, import and otherwise run, modify and |
|||
propagate the contents of its contributor version. |
|||
|
|||
In the following three paragraphs, a "patent license" is any express |
|||
agreement or commitment, however denominated, not to enforce a patent |
|||
(such as an express permission to practice a patent or covenant not to |
|||
sue for patent infringement). To "grant" such a patent license to a |
|||
party means to make such an agreement or commitment not to enforce a |
|||
patent against the party. |
|||
|
|||
If you convey a covered work, knowingly relying on a patent license, |
|||
and the Corresponding Source of the work is not available for anyone |
|||
to copy, free of charge and under the terms of this License, through a |
|||
publicly available network server or other readily accessible means, |
|||
then you must either (1) cause the Corresponding Source to be so |
|||
available, or (2) arrange to deprive yourself of the benefit of the |
|||
patent license for this particular work, or (3) arrange, in a manner |
|||
consistent with the requirements of this License, to extend the patent |
|||
license to downstream recipients. "Knowingly relying" means you have |
|||
actual knowledge that, but for the patent license, your conveying the |
|||
covered work in a country, or your recipient's use of the covered work |
|||
in a country, would infringe one or more identifiable patents in that |
|||
country that you have reason to believe are valid. |
|||
|
|||
If, pursuant to or in connection with a single transaction or |
|||
arrangement, you convey, or propagate by procuring conveyance of, a |
|||
covered work, and grant a patent license to some of the parties |
|||
receiving the covered work authorizing them to use, propagate, modify |
|||
or convey a specific copy of the covered work, then the patent license |
|||
you grant is automatically extended to all recipients of the covered |
|||
work and works based on it. |
|||
|
|||
A patent license is "discriminatory" if it does not include within |
|||
the scope of its coverage, prohibits the exercise of, or is |
|||
conditioned on the non-exercise of one or more of the rights that are |
|||
specifically granted under this License. You may not convey a covered |
|||
work if you are a party to an arrangement with a third party that is |
|||
in the business of distributing software, under which you make payment |
|||
to the third party based on the extent of your activity of conveying |
|||
the work, and under which the third party grants, to any of the |
|||
parties who would receive the covered work from you, a discriminatory |
|||
patent license (a) in connection with copies of the covered work |
|||
conveyed by you (or copies made from those copies), or (b) primarily |
|||
for and in connection with specific products or compilations that |
|||
contain the covered work, unless you entered into that arrangement, |
|||
or that patent license was granted, prior to 28 March 2007. |
|||
|
|||
Nothing in this License shall be construed as excluding or limiting |
|||
any implied license or other defenses to infringement that may |
|||
otherwise be available to you under applicable patent law. |
|||
|
|||
12. No Surrender of Others' Freedom. |
|||
|
|||
If conditions are imposed on you (whether by court order, agreement or |
|||
otherwise) that contradict the conditions of this License, they do not |
|||
excuse you from the conditions of this License. If you cannot convey a |
|||
covered work so as to satisfy simultaneously your obligations under this |
|||
License and any other pertinent obligations, then as a consequence you may |
|||
not convey it at all. For example, if you agree to terms that obligate you |
|||
to collect a royalty for further conveying from those to whom you convey |
|||
the Program, the only way you could satisfy both those terms and this |
|||
License would be to refrain entirely from conveying the Program. |
|||
|
|||
13. Remote Network Interaction; Use with the GNU General Public License. |
|||
|
|||
Notwithstanding any other provision of this License, if you modify the |
|||
Program, your modified version must prominently offer all users |
|||
interacting with it remotely through a computer network (if your version |
|||
supports such interaction) an opportunity to receive the Corresponding |
|||
Source of your version by providing access to the Corresponding Source |
|||
from a network server at no charge, through some standard or customary |
|||
means of facilitating copying of software. This Corresponding Source |
|||
shall include the Corresponding Source for any work covered by version 3 |
|||
of the GNU General Public License that is incorporated pursuant to the |
|||
following paragraph. |
|||
|
|||
Notwithstanding any other provision of this License, you have |
|||
permission to link or combine any covered work with a work licensed |
|||
under version 3 of the GNU General Public License into a single |
|||
combined work, and to convey the resulting work. The terms of this |
|||
License will continue to apply to the part which is the covered work, |
|||
but the work with which it is combined will remain governed by version |
|||
3 of the GNU General Public License. |
|||
|
|||
14. Revised Versions of this License. |
|||
|
|||
The Free Software Foundation may publish revised and/or new versions of |
|||
the GNU Affero General Public License from time to time. Such new versions |
|||
will be similar in spirit to the present version, but may differ in detail to |
|||
address new problems or concerns. |
|||
|
|||
Each version is given a distinguishing version number. If the |
|||
Program specifies that a certain numbered version of the GNU Affero General |
|||
Public License "or any later version" applies to it, you have the |
|||
option of following the terms and conditions either of that numbered |
|||
version or of any later version published by the Free Software |
|||
Foundation. If the Program does not specify a version number of the |
|||
GNU Affero General Public License, you may choose any version ever published |
|||
by the Free Software Foundation. |
|||
|
|||
If the Program specifies that a proxy can decide which future |
|||
versions of the GNU Affero General Public License can be used, that proxy's |
|||
public statement of acceptance of a version permanently authorizes you |
|||
to choose that version for the Program. |
|||
|
|||
Later license versions may give you additional or different |
|||
permissions. However, no additional obligations are imposed on any |
|||
author or copyright holder as a result of your choosing to follow a |
|||
later version. |
|||
|
|||
15. Disclaimer of Warranty. |
|||
|
|||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY |
|||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT |
|||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY |
|||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, |
|||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM |
|||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF |
|||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION. |
|||
|
|||
16. Limitation of Liability. |
|||
|
|||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING |
|||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS |
|||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY |
|||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE |
|||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF |
|||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD |
|||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), |
|||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF |
|||
SUCH DAMAGES. |
|||
|
|||
17. Interpretation of Sections 15 and 16. |
|||
|
|||
If the disclaimer of warranty and limitation of liability provided |
|||
above cannot be given local legal effect according to their terms, |
|||
reviewing courts shall apply local law that most closely approximates |
|||
an absolute waiver of all civil liability in connection with the |
|||
Program, unless a warranty or assumption of liability accompanies a |
|||
copy of the Program in return for a fee. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
How to Apply These Terms to Your New Programs |
|||
|
|||
If you develop a new program, and you want it to be of the greatest |
|||
possible use to the public, the best way to achieve this is to make it |
|||
free software which everyone can redistribute and change under these terms. |
|||
|
|||
To do so, attach the following notices to the program. It is safest |
|||
to attach them to the start of each source file to most effectively |
|||
state the exclusion of warranty; and each file should have at least |
|||
the "copyright" line and a pointer to where the full notice is found. |
|||
|
|||
zourit-admin |
|||
Copyright (C) 2021 zourit |
|||
|
|||
This program is free software: you can redistribute it and/or modify |
|||
it under the terms of the GNU Affero General Public License as published |
|||
by the Free Software Foundation, either version 3 of the License, or |
|||
(at your option) any later version. |
|||
|
|||
This program is distributed in the hope that it will be useful, |
|||
but WITHOUT ANY WARRANTY; without even the implied warranty of |
|||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|||
GNU Affero General Public License for more details. |
|||
|
|||
You should have received a copy of the GNU Affero General Public License |
|||
along with this program. If not, see <https://www.gnu.org/licenses/>. |
|||
|
|||
Also add information on how to contact you by electronic and paper mail. |
|||
|
|||
If your software can interact with users remotely through a computer |
|||
network, you should also make sure that it provides a way for users to |
|||
get its source. For example, if your program is a web application, its |
|||
interface could display a "Source" link that leads users to an archive |
|||
of the code. There are many ways you could offer source, and different |
|||
solutions will be better for different programs; see section 13 for the |
|||
specific requirements. |
|||
|
|||
You should also get your employer (if you work as a programmer) or school, |
|||
if any, to sign a "copyright disclaimer" for the program, if necessary. |
|||
For more information on this, and how to apply and follow the GNU AGPL, see |
|||
<https://www.gnu.org/licenses/>. |
@ -0,0 +1,652 @@ |
|||
#!/bin/bash |
|||
|
|||
RED='\e[0;41m\e[1;37m' |
|||
GREEN='\033[0;32m' |
|||
YELLOW='\033[0;33m' |
|||
PURPLE='\033[0;35m' |
|||
DARK='\e[100m' |
|||
NC='\033[0m' # No Color |
|||
TO_BE_DEFINED="TO BE DEFINED" |
|||
|
|||
# BOLD='\033[1m' |
|||
# DIM='\e[2m\e[0;90m' |
|||
|
|||
# Override of echo: when $PREFIX is set, print it first as a right-aligned,
# 25-column dark badge, then delegate to the builtin echo.
function echo() {
    if [[ -n ${PREFIX:-} ]]; then
        printf "${DARK}%25.25s${NC} " "${PREFIX}"
    fi
    builtin echo "$@"
}
|||
|
|||
# Abort (exit 100) unless running as a regular user account (uid >= 1000).
function check_normal_user() {
    local uid
    uid=$(id -u)
    if ((uid < 1000)); then
        echoerr "normal user (>1000) expected, please connect as a normal user then call again!"
        exit 100
    fi
    return 0
}
|||
|
|||
# Ensure we run as a normal user able to escalate via sudo, then prime the
# sudo timestamp (prompting at most once).
# arg#1 (optional): label describing why sudo access is requested.
#
# Fix: the original chained
#   command -v sudo && id -G | grep -q sudo && echoerr "not found" && exit 1
# which aborted precisely when <sudo> EXISTS (and the user is in the sudo
# group) — the inverse of the intended guard.
function sudo_required() {
    check_normal_user
    if ! command -v sudo &>/dev/null; then
        echoerr "command <sudo> not found, please install as so: \`apt install -y sudo\`"
        exit 1
    fi
    # warn only when no cached sudo credentials are available yet
    if ! sudo -n true &>/dev/null; then
        if [[ -n "${1:-}" ]]; then
            echowarnn "[sudo] requiring authorized access for: [ $1 ]"
        else
            echowarnn "[sudo] requiring authorized access for further processing"
        fi
    fi
    sudo -vp ' : '
}
|||
|
|||
# Idempotent `cargo install`: install each named crate, skipping any whose
# binary already exists under ~/.cargo/bin.
function idem_cargo_install() {
    local crate
    for crate in "$@"; do
        [[ -f ~/.cargo/bin/"$crate" ]] || cargo install "$crate"
    done
}
|||
|
|||
# Print all arguments in red (error style) on stderr, with trailing newline.
function echoerr() {
    >&2 echo -e "${RED}$*${NC}"
}
|||
|
|||
# Print all arguments in red (error style) on stderr, without newline.
function echoerrn() {
    >&2 echo -en "${RED}$*${NC}"
}
|||
|
|||
# Print all arguments in yellow (warning style) on stderr, with newline.
function echowarn() {
    >&2 echo -e "${YELLOW}$*${NC}"
}
|||
|
|||
# Print all arguments in yellow (warning style) on stderr, without newline.
function echowarnn() {
    >&2 echo -en "${YELLOW}$*${NC}"
}
|||
|
|||
# Print all arguments in green (info style) on stderr, with newline.
# (original comment wrongly said "display error in green")
function echoinfo() {
    >&2 echo -e "${GREEN}$*${NC}"
}
|||
|
|||
# Print all arguments in green (info style) on stderr, without newline.
function echoinfon() {
    >&2 echo -en "${GREEN}$*${NC}"
}
|||
|
|||
# Return 0 when arg#1 is a syntactically valid dotted-quad IPv4 address
# (four groups of 1-3 digits, each <= 255), non-zero otherwise.
function valid_ipv4() {
    local candidate="$1" octet
    local -a octets
    [[ $candidate =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]] || return 1
    IFS='.' read -ra octets <<<"$candidate"
    for octet in "${octets[@]}"; do
        [[ $octet -le 255 ]] || return 1
    done
    return 0
}
|||
|
|||
# Arm the ERR trap so any failing command is reported through trap_error()
# with its exit status, line numbers, offending command and function stack.
function enable_trace() {
    trap 'trap_error $? ${LINENO:-0} ${BASH_LINENO:-0} ${BASH_COMMAND:-empty} $(printf "::%s" ${FUNCNAME[@]})' ERR
}
|||
|
|||
# Disarm the ERR trap installed by enable_trace().
function disable_trace() {
    trap - ERR
}
|||
|
|||
# Install and enable nftables with the miaou firewall rules; idempotent,
# keyed on the presence of /etc/nftables.rules.d/firewall.table.
# NOTE(review): this function is redefined later in this same file with
# PREFIX "miaou:firewall"; when both are sourced, the later definition wins
# — confirm the duplication is intentional.
function prepare_nftables() {
    local PREFIX="miaou:nftables"

    if [[ ! -f /etc/nftables.rules.d/firewall.table ]]; then
        echo "installing nftables ..."
        sudo apt install -y nftables
        sudo cp -f "$MIAOU_BASEDIR/templates/hardened/nftables.conf" /etc/
        sudo mkdir -p /etc/nftables.rules.d
        sudo cp -f "$MIAOU_BASEDIR/templates/hardened/firewall.table" /etc/nftables.rules.d/
        sudo systemctl restart nftables
        sudo systemctl enable nftables
        echo "OK"
    else
        echo "nftables already installed!"
    fi

}
|||
|
|||
# Initialize the miaou runtime for a calling script: source the shared
# function libraries, export MIAOU_CONFIGDIR, switch on strict mode and
# install the ERR and INT traps.
function miaou_init() {
    # shellcheck source=/dev/null
    [[ -f /opt/debian-bash/lib/functions.sh ]] && source /opt/debian-bash/lib/functions.sh

    # shellcheck source=/dev/null
    . "$MIAOU_BASEDIR/lib/functions.sh"

    export MIAOU_CONFIGDIR="$HOME/.config/miaou"

    # strict mode: errexit (E = inherited by functions/subshells), nounset, pipefail
    set -Eeuo pipefail
    enable_trace
    trap 'ctrl_c $? ${LINENO:-0} ${BASH_LINENO:-0} ${BASH_COMMAND:-empty} $(printf "::%s" ${FUNCNAME[@]})' INT
}
|||
|
|||
# INT (Ctrl+C) trap handler: report the captured context then exit 125.
function ctrl_c() {
    PREFIX="miaou:trap" echoerr "Ctrl + C happened, exiting!!! $*"
    exit 125
}
|||
|
|||
# ERR trap handler (see enable_trace): maintain a per-run error counter in
# $MIAOU_CONFIGDIR/error_count and print the failing command with its caller
# location. The first error gets a banner; subsequent ones are one-liners.
# arg#1: exit status ($?)        arg#2: LINENO of the failure
# arg#3: caller line (BASH_LINENO, unused)
# arg#4: failing command (BASH_COMMAND)
# arg#5: function stack joined with '::'
#
# Cleanup: removed ~40 lines of dead commented-out debug code and the unused
# local <linecallfunc>; quoted $ERRORS_COUNT.
function trap_error() {
    ERRORS_COUNT=0
    if [[ -f "$MIAOU_CONFIGDIR"/error_count ]]; then
        ERRORS_COUNT=$(cat "$MIAOU_CONFIGDIR"/error_count)
    else
        mkdir -p "$MIAOU_CONFIGDIR"
        printf 0 >"$MIAOU_CONFIGDIR"/error_count
    fi
    ERRORS_COUNT=$((ERRORS_COUNT + 1))
    printf '%s' "$ERRORS_COUNT" >"$MIAOU_CONFIGDIR"/error_count

    # suppress the badge for trap output
    local PREFIX=""

    local err=$1   # exit status
    local line=$2  # LINENO
    local command="${4:-}"
    local funcstack="${5:-}"
    local caller

    caller=$(caller | cut -d' ' -f2)

    if [[ $ERRORS_COUNT == 1 ]]; then
        echo
        echo -e "${RED}ERROR <$err>, due to command <$command $funcstack>${NC}" >&2
    fi
    echo -e "${PURPLE}$ERRORS_COUNT: $caller:$line ${RED}$command $funcstack${NC}" >&2
}
|||
|
|||
# exist_command(cmd1, ...)
# Succeed only when every given command resolves; return 50 on the first
# missing one.
function exist_command() {
    local cmd
    for cmd in "$@"; do
        if ! command -v "$cmd" &>/dev/null; then
            return 50
        fi
    done
}
|||
|
|||
# test whether container <ct> is up and running?
# NOTE(review): the status of the `container_exists && lxc list | grep`
# check is discarded; the function's return value comes from the `lxc exec`
# below (which waits for cloud-init on first boot and fails when the
# container is not running) — confirm this is intentional.
function container_running() {
    arg1_required "$@"
    container_exists "$1" && lxc list "$1" -c ns -f csv | head -n1 | grep -q "$1,RUNNING"
    lxc exec "$1" -- bash <<EOF
set -Eeuo pipefail
if [[ ! -f /root/cloud-status.json ]]; then
    cloud-init status --wait >/dev/null
fi
EOF
}
|||
|
|||
# Fail with status 125 (after an error message) unless arg#1 was supplied.
function arg1_required() {
    if [[ -z "${1:-}" ]]; then
        echoerr "ERROR: arg#1 expected!"
        return 125
    fi
    return 0
}
|||
|
|||
# Fail with status 125 (after an error message) unless arg#2 was supplied.
function arg2_required() {
    if [[ -z "${2:-}" ]]; then
        echoerr "ERROR: arg#2 expected!"
        return 125
    fi
    return 0
}
|||
|
|||
# test whether container <ct> exists yet?
# (exact whole-name match against the csv name column of `lxc list`)
function container_exists() {
    arg1_required "$@"
    lxc list "$1" -c n -f csv | grep -q "^$1\$"
}
|||
|
|||
# build debian image with prebuild debian-bash and various useful settings
# ARG1=release [bullseye, buster]
# Idempotent: skipped when an image aliased "<release>-miaou" already exists.
# Pipeline: debootstrap a rootfs under /tmp, configure apt sources (reusing
# the host's mirror), install debian-bash inside a chroot, then tar rootfs +
# metadata and import them as an LXC image.
function build_miaou_image() {
    local RELEASE="$1"
    local IMAGE_LABEL="$RELEASE-miaou"
    local PREFIX="miaou:image"

    # reuse the host's first apt mirror so the image build hits the same repository
    local DEB_REPOSITORY
    DEB_REPOSITORY=$(grep ^deb /etc/apt/sources.list | head -n1 | cut -d ' ' -f2 | cut -d '/' -f3)

    if ! lxc image -cl list -f csv | grep -q "$IMAGE_LABEL"; then

        echo "building lxc image <$IMAGE_LABEL> ... "
        echo "image will reuse same local repository <$DEB_REPOSITORY>"
        # NOTE(review): creation_date is not declared local and leaks into
        # the caller's scope — confirm nothing depends on that.
        creation_date=$(date +%s)
        sudo /opt/debian-bash/tools/idem_apt_install debootstrap

        # outer heredoc runs as root on the host; inner EOF2 runs inside the
        # chroot; EOF3 heredocs write files (apt sources, network config)
        cat <<EOF1 | sudo bash
set -euo pipefail
rm -rf /tmp/$IMAGE_LABEL{,-image}
mkdir -p /tmp/$IMAGE_LABEL{,-image}
debootstrap $RELEASE /tmp/$IMAGE_LABEL http://$DEB_REPOSITORY/debian

echo
echo "DEBOOTSTRAP ... OK"
echo

cat <<EOF2 | chroot /tmp/$IMAGE_LABEL
set -euo pipefail
echo "image prepare source.list from $DEB_REPOSITORY"
if [[ "$RELEASE" == "buster" ]]; then
cat <<EOF3 >/etc/apt/sources.list
deb http://$DEB_REPOSITORY/debian $RELEASE main contrib
deb http://$DEB_REPOSITORY/debian $RELEASE-updates main contrib
deb http://$DEB_REPOSITORY/debian-security/ $RELEASE/updates main contrib
EOF3
else
cat <<EOF3 >/etc/apt/sources.list
deb http://$DEB_REPOSITORY/debian $RELEASE main contrib
deb http://$DEB_REPOSITORY/debian $RELEASE-updates main contrib
deb http://$DEB_REPOSITORY/debian-security/ $RELEASE-security main contrib
EOF3
fi

echo APT UPDATE

apt update && apt dist-upgrade -y
apt install -y curl wget file git sudo bash-completion
curl https://git.artcode.re/pvincent/debian-bash/raw/branch/master/install.sh | sudo bash -s -- --host
ln -sf /usr/share/zoneinfo/Indian/Reunion /etc/localtime
cat <<EOF3 >/etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

auto eth0
iface eth0 inet dhcp

source /etc/network/interfaces.d/*
EOF3
echo "deboostrap ready!"
EOF2
cd /tmp/$IMAGE_LABEL-image
tar -czf rootfs.tar.gz -C /tmp/$IMAGE_LABEL .
cat <<EOF2 >metadata.yaml
architecture: "x86_64"
creation_date: $creation_date
properties:
  architecture: "x86_64"
  description: "Debian $RELEASE for miaou instances"
  os: "debian"
  release: "$RELEASE"
EOF2
tar -czf metadata.tar.gz metadata.yaml
EOF1
        lxc image import "/tmp/$IMAGE_LABEL-image/metadata.tar.gz" "/tmp/$IMAGE_LABEL-image/rootfs.tar.gz" --alias "$IMAGE_LABEL"
        echo "image <$IMAGE_LABEL> successfully built!"
        echo DONE
    else
        echo "image <$IMAGE_LABEL> already built!"
    fi
}
|||
|
|||
# execute remote scripting onto one LXC container <CONTAINER> [COMMANDS, ...]
# may use one command like: `lxc_exec ct1 uname -a`
# or pipe like so: `
# cat <<EOF | lxc_exec ct1
# ls -l
# uname -a
# echo [\$0] [\$1] [\$2] # toto titi tata
# EOF
# `
# With no command arguments, stdin is read line by line: comment and blank
# lines are dropped, and the rest are joined into a single bash -lc string
# ('; ' vs ';' depending on whether the line ends a shell clause).
function lxc_exec() {
    arg1_required "$@"
    container="$1"
    shift

    # positional parameters exposed to the remote script as $1 $2 $3
    declare -a ARGUMENTS
    ARGUMENTS=(toto titi tata) # might be overriden with interesting stuff!

    if ((${#} == 0)); then
        # stdin mode: collapse the piped script into one command string
        multiline=""
        while read -r line; do
            if [[ ! "$line" =~ ^\# ]] && [[ ! "$line" =~ ^[[:space:]]*$ ]]; then
                if [[ "$line" =~ .*\;$ ]] || [[ "$line" =~ do$ ]] || [[ "$line" =~ then$ ]] || [[ "$line" =~ else$ ]]; then
                    multiline+="${line} " # append space in case of ending with either '; do then else'
                else
                    multiline+="${line};" # append ; for multiple commands
                fi
            fi
        done
        lxc exec "$container" -- bash -lc "$multiline" "${ARGUMENTS[@]}"
    else
        # direct mode: remaining arguments form the command
        lxc exec "$container" -- bash -lc "$*" "${ARGUMENTS[@]}"
    fi
}
|||
|
|||
# Verify that container <arg#1> exists and is running; fails otherwise.
function check_container() {
    arg1_required "$@"
    local name="$1"
    container_exists "$name"
    container_running "$name"
}
|||
|
|||
# Ensure container <arg#1> exists (creating it, optionally from image
# "<arg#2>-miaou") and is started.
function launch_container() {
    arg1_required "$@"
    local ct="$1"
    if ! container_exists "$ct"; then
        echo "container <$ct> about to be created ..."
        local extra_release="${2:-}"
        # a requested extra release must already exist as a "<release>-miaou" image
        if [[ -n "$extra_release" ]] && ! lxc image info "${extra_release}-miaou" >/dev/null; then
            echoerrn "unknown extra_release <${extra_release}-miaou>!\nHINT : please add it into /etc/miaou/defaults.yaml, then re-install miaou!"
            exit 128
        fi

        if [[ -n "$extra_release" ]]; then
            # NOTE(review): warning is printed but creation proceeds anyway
            echoerrn "FIXME: lxc-miaou-create -o release=bookworm should be implemented ...."
            lxc-miaou-create "$ct" "$extra_release"
        else
            lxc-miaou-create "$ct"
        fi
        echo "DONE"
    fi

    if ! container_running "$ct"; then
        echowarn "container <$ct> seems to be asleep, starting ..."
        lxc start "$ct"
        echowarn DONE
    fi
}
|||
|
|||
# load_yaml_from_expanded <yaml_key>
# Print the value of <yaml_key> (a yq path without the leading dot) from the
# expanded miaou configuration on stdout; fail with 98 when the key is
# missing, null or still "TO BE DEFINED".
#
# Fix: yaml_key/yaml_file/yaml_value were not declared local and leaked into
# the caller's scope.
function load_yaml_from_expanded {
    arg1_required "$@"
    local yaml_key="$1"
    local yaml_file="$MIAOU_CONFIGDIR/miaou.expanded.yaml"
    local yaml_value
    yaml_value=$(yq ".$yaml_key" "$yaml_file")
    if [[ -n "$yaml_value" ]] && [[ "$yaml_value" != "null" ]] && [[ "$yaml_value" != "$TO_BE_DEFINED" ]]; then
        # PREFIX suppressed so the bare value is machine-readable
        PREFIX="" echo "$yaml_value"
    else
        echoerr "undefined value for key: <$yaml_key> from file: <$yaml_file>"
        return 98
    fi
}
|||
|
|||
# check_yaml_defined_value <yaml_file> <yaml_key>
# Succeed when <yaml_key> holds a real value in <yaml_file>; fail with 99
# (after an error message) when it is empty, null or still "TO BE DEFINED".
#
# Fix: yaml_file/yaml_key/yaml_value were not declared local and leaked into
# the caller's scope; added the file's usual argument guard.
function check_yaml_defined_value {
    arg2_required "$@"
    local yaml_file="$1"
    local yaml_key="$2"
    local yaml_value
    yaml_value=$(yq ".$yaml_key" "$yaml_file")
    if [[ -n "$yaml_value" ]] && [[ "$yaml_value" != "null" ]] && [[ "$yaml_value" != "$TO_BE_DEFINED" ]]; then
        return 0
    else
        echoerr "undefined value for key: <$yaml_key> from file: <$yaml_file>"
        return 99
    fi
}
|||
|
|||
# halt unless current user is root
# Succeed when the effective uid is 0; otherwise print an error and return 1.
#
# Fix: the original `(echoerr "root required" && return 1)` only returned
# from the subshell — it worked by accident because the subshell's exit
# status happened to propagate; now the intent is explicit.
function root_required() {
    if [[ $(id -u) != 0 ]]; then
        echoerr "root required"
        return 1
    fi
}
|||
|
|||
# arg#1: environment variable name
# Use the existing environment value when set; otherwise prompt the user
# and export the entered value under that name.
#
# Fix: the original `export "$1=\"$REPLY\""` stored the value WITH literal
# surrounding double quotes (e.g. MY_VAR='"secret"').
function env_or_ask {
    if [[ -n ${1+x} ]]; then
        if printenv "$1" >/dev/null; then
            echo "value defined as $(printenv "$1")"
        else
            printf "Please define %20s: " "$1"
            read -r
            # export the raw reply; embedding escaped quotes would store them
            export "$1=$REPLY"
        fi
    else
        echoerr "env_or_ask requires one argument: <VARIABLE_NAME>" && exit 5
    fi
}
|||
|
|||
# install_debian_bash()
# grab and install related project <debian-bash> under /opt (idempotent),
# then reload the system bashrc and ensure bash-completion is present.
# NOTE(review): `apt install -y curl wget` runs without sudo while later
# steps use sudo — confirm whether this path assumes root.
function install_debian_bash() {
    local PREFIX="debian-bash:install"
    if [[ ! -d /opt/debian-bash ]]; then
        echo "installing curl wget commands ..."
        apt install -y curl wget

        echo "installing debian-bash..."
        curl https://git.artcode.re/pvincent/debian-bash/raw/branch/master/install.sh | sudo bash -s -- --host
        export PATH=$PATH:/opt/debian-bash/tools/
        echo "OK"
    else
        # /opt/debian-bash/tools/debian_bash_upgrade
        echo "addon <debian-bash> already installed!"
    fi
    # shellcheck source=/dev/null
    source /etc/bash.bashrc

    sudo /opt/debian-bash/tools/idem_apt_install bash-completion
}
|||
|
|||
# Extend sudo's secure_path with /TOOLBOX via a sudoers drop-in file;
# idempotent, keyed on the presence of the drop-in.
function add_toolbox_sudoers {
    local PREFIX="toolbox:sudoers"
    echo -n "creating sudoers file to allow sudo as command from /TOOLBOX... "
    sudo mkdir -p /etc/sudoers.d
    if [[ ! -f /etc/sudoers.d/add_TOOLBOX_to_PATH ]]; then
        sudo tee /etc/sudoers.d/add_TOOLBOX_to_PATH &>/dev/null <<EOF
Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/TOOLBOX"
EOF
        PREFIX="" echo "updated!"
    else
        PREFIX="" echo "already done!"
    fi
}
|||
|
|||
# Populate /TOOLBOX with a curated set of standalone CLI tools (fd, viu, rg,
# ag, bandwhich, btm, micro, ncdu, unzip, tree, duf, curl, wget). Each step
# is idempotent: skipped when the target binary already exists in /TOOLBOX.
function prepare_toolbox() {
    local PREFIX="toolbox:prepare"
    sudo mkdir -p /TOOLBOX

    # rust toolchain, required by the cargo-built tools below
    if ! command -v cargo &>/dev/null; then
        echo -n "installing <cargo> ... "
        curl -sSf https://sh.rustup.rs | sh -s -- -y
        # shellcheck source=/dev/null
        source "$HOME/.cargo/env"
        /opt/debian-bash/tools/append_or_replace "^PATH=\$PATH:\$HOME/\\.cargo/bin" "PATH=\$PATH:\$HOME/.cargo/bin" ~/.bashrc
        PREFIX="" echo "OK"
    else
        echo "command <cargo> already installed!"
    fi

    # fd: find alternative (built via cargo)
    echo -n "installing <fd> ... "
    if [ ! -f "/TOOLBOX/fd" ]; then
        idem_cargo_install fd-find
        sudo cp "$HOME"/.cargo/bin/fd /TOOLBOX/fd
        PREFIX="" echo "successfully installed!"
    else
        PREFIX="" echo "already done!"
    fi

    # viu: terminal image viewer (built via cargo)
    echo -n "installing <viu> ... "
    if [ ! -f "/TOOLBOX/viu" ]; then
        idem_cargo_install viu
        sudo cp "$HOME"/.cargo/bin/viu /TOOLBOX/
        PREFIX="" echo "successfully installed!"
    else
        PREFIX="" echo "already done!"
    fi

    # rg: ripgrep (apt package, hard-linked into /TOOLBOX)
    echo -n "installing <rg> alias <ripgrep> ... "
    if [ ! -f "/TOOLBOX/rg" ]; then

        sudo /opt/debian-bash/tools/idem_apt_install ripgrep
        sudo ln /usr/bin/rg /TOOLBOX/
        PREFIX="" echo "successfully installed"
    else
        PREFIX="" echo "already done!"
    fi

    # ag: the silver searcher (apt package, hard-linked into /TOOLBOX)
    echo -n "installing <ag> alias <silversearcher-ag> ... "
    if [ ! -f "/TOOLBOX/ag" ]; then
        sudo /opt/debian-bash/tools/idem_apt_install silversearcher-ag
        sudo ln /usr/bin/ag /TOOLBOX/
        PREFIX="" echo "successfully installed"
    else
        PREFIX="" echo "already done!"
    fi

    # bandwhich: per-process bandwidth monitor (built via cargo)
    echo -n "installing <bandwhich> ... "
    if [ ! -f "/TOOLBOX/bandwhich" ]; then
        idem_cargo_install bandwhich
        sudo cp "$HOME"/.cargo/bin/bandwhich /TOOLBOX/bandwhich
        PREFIX="" echo "successfully installed"
    else
        PREFIX="" echo "already done!"
    fi

    # btm (bottom): system monitor, fetched as a musl tarball from GitHub
    echo -n "installing <btm> alias <bottom> ... "
    if [ ! -f "/TOOLBOX/btm" ]; then
        VERSION=$(wget_semver github ClementTsang/bottom)
        cd /tmp
        wget "https://github.com/ClementTsang/bottom/releases/download/$VERSION/bottom_x86_64-unknown-linux-musl.tar.gz"
        tar -xzvf bottom_x86_64-unknown-linux-musl.tar.gz
        sudo cp btm /usr/local/bin/
        sudo ln /usr/local/bin/btm /TOOLBOX/
        PREFIX="" echo "successfully installed"
    else
        PREFIX="" echo "already done!"
    fi

    # micro: terminal editor, installed through the official getmic.ro script
    echo -n "installing <micro> ... "
    if [ ! -f "/TOOLBOX/micro" ]; then
        cd /tmp || (echoerr "/tmp wrong permission" && exit 101)
        curl -q https://getmic.ro | GETMICRO_REGISTER=n sh
        sudo mv micro /TOOLBOX/micro
        sudo chown root:root /TOOLBOX/micro
        PREFIX="" echo "successfully installed"
    else
        PREFIX="" echo "already done!"
    fi

    # ncdu: disk usage browser (apt package)
    echo -n "installing <ncdu> ... "
    if [ ! -f "/TOOLBOX/ncdu" ]; then
        sudo /opt/debian-bash/tools/idem_apt_install ncdu
        sudo cp /usr/bin/ncdu /TOOLBOX/ncdu
        PREFIX="" echo "successfully installed"
    else
        PREFIX="" echo "already done!"
    fi

    # unzip (apt package)
    echo -n "installing <unzip> ... "
    if [ ! -f "/TOOLBOX/unzip" ]; then
        sudo /opt/debian-bash/tools/idem_apt_install unzip
        sudo cp /usr/bin/unzip /TOOLBOX/unzip
        PREFIX="" echo "successfully installed"
    else
        PREFIX="" echo "already done!"
    fi

    # tree (apt package)
    echo -n "installing <tree> ... "
    if [ ! -f "/TOOLBOX/tree" ]; then
        sudo /opt/debian-bash/tools/idem_apt_install tree
        sudo cp /bin/tree /TOOLBOX/tree
        PREFIX="" echo "successfully installed"
    else
        PREFIX="" echo "already done!"
    fi

    # duf: disk usage overview, fetched as a .deb from GitHub releases
    echo -n "installing <duf> ... "
    if [ ! -f "/TOOLBOX/duf" ]; then
        VERSION=$(/opt/debian-bash/tools/wget_semver github muesli/duf)
        VERSION_WITHOUT_V=${VERSION#v}
        wget -O /tmp/duf.deb "https://github.com/muesli/duf/releases/download/${VERSION}/duf_${VERSION_WITHOUT_V}_linux_amd64.deb"
        sudo dpkg -i /tmp/duf.deb
        sudo cp /bin/duf /TOOLBOX/duf
        PREFIX="" echo "successfully installed"
    else
        PREFIX="" echo "already done!"
    fi

    # curl: static build fetched from GitHub
    echo -n "installing <curl> ... "
    if [ ! -f "/TOOLBOX/curl" ]; then
        sudo wget -O /TOOLBOX/curl "https://github.com/moparisthebest/static-curl/releases/latest/download/curl-amd64"
        sudo chmod +x /TOOLBOX/curl
        PREFIX="" echo "successfully installed"
    else
        PREFIX="" echo "already done!"
    fi

    # wget: hard-link the system binary
    echo -n "installing <wget> ... "
    if [ ! -f "/TOOLBOX/wget" ]; then
        sudo ln -f /usr/bin/wget /TOOLBOX/wget
        PREFIX="" echo "successfully installed"
    else
        PREFIX="" echo "already done!"
    fi

}
|||
|
|||
# install_mandatory_commands
# Ensure the base tooling the recipes rely on is present: apt packages
# (dns/build/db clients), plus pinned releases of <tera> (template renderer)
# and <yq> (YAML processor) fetched from GitHub when missing.
function install_mandatory_commands() {
    local PREFIX="mandatory:commands"

    sudo /opt/debian-bash/tools/idem_apt_install dnsutils build-essential curl mariadb-client postgresql-client

    if ! exist_command tera; then
        echo "installing <tera> ..."

        # pinned release of chevdor/tera-cli
        local version=v0.2.4
        wget -q "https://github.com/chevdor/tera-cli/releases/download/${version}/tera-cli_linux_amd64.deb" -O /tmp/tera-cli_linux_amd64.deb
        sudo dpkg -i /tmp/tera-cli_linux_amd64.deb
    else
        echo "command <tera> already installed!"
    fi

    if ! exist_command yq; then
        # pinned release of mikefarah/yq
        local version binary
        version='v4.35.2'
        binary='yq_linux_amd64'

        sudo sh -c "wget https://github.com/mikefarah/yq/releases/download/${version}/${binary}.tar.gz -O - |\
            tar -xz ./${binary} && sudo mv ${binary} /usr/bin/yq"
    else
        echo "command <yq> already installed!"
    fi

}
|||
|
|||
# flatten_array: print the given elements deduplicated and sorted, one per
# line (elements are additionally split on embedded spaces).
# usage: mapfile -t OUTPUT_ARRAY < <(flatten_array "${INPUT_ARRAY[@]}")
# (original usage comment referenced a wrong name, <sort_array>)
function flatten_array {
    local joined
    joined="$(tr ' ' '\n' <<<"$*" | sort -u | tr '\n' ' ')"
    declare -a unique
    IFS=" " read -r -a unique <<<"$joined"
    printf '%s\n' "${unique[@]}"
}
|||
|
|||
# Install and enable nftables with the miaou firewall rules; idempotent,
# keyed on the presence of /etc/nftables.rules.d/firewall.table.
# NOTE(review): duplicate of an earlier definition in this file (which used
# PREFIX "miaou:nftables"); this later definition is the effective one —
# confirm the duplication is intentional and remove one copy.
function prepare_nftables() {
    local PREFIX="miaou:firewall"

    if [[ ! -f /etc/nftables.rules.d/firewall.table ]]; then
        echo "installing nftables ..."
        sudo apt install -y nftables
        sudo cp -f "$MIAOU_BASEDIR/templates/hardened/nftables.conf" /etc/
        sudo mkdir -p /etc/nftables.rules.d
        sudo cp -f "$MIAOU_BASEDIR/templates/hardened/firewall.table" /etc/nftables.rules.d/
        sudo systemctl restart nftables
        sudo systemctl enable nftables
        echo "OK"
    else
        echo "nftables already installed!"
    fi
}
@ -0,0 +1,294 @@ |
|||
#!/bin/bash |
|||
|
|||
### FUNCTIONS |
|||
### --------- |
|||
|
|||
# Ensure the hardening configuration directory exists (no-op when present).
function prepare_config_hardened() {
    mkdir --parents "$HARDEN_CONFIGDIR"
}
|
|||
# Authorize the configured SSH public key for the current user: create a
# correctly-permissioned ~/.ssh, then append authorized.pubkey (from the
# hardening config) to authorized_keys unless already present.
# NOTE(review): paths mix $HOME and /home/$CURRENT_USER — equivalent only
# when the script runs as that user; confirm.
function pubkey_authorize() {
    local PREFIX="harden:pubkey:authorize"

    if [[ ! -d $HOME/.ssh ]]; then
        echo -n "create .ssh folder for the first time ..."
        mkdir -m 700 ~/.ssh
        PREFIX="" echo "OK"
    else
        # any entry readable by group/other counts as a security issue
        local security_issue_in_ssh_folder
        security_issue_in_ssh_folder=$(find "$HOME/.ssh" -perm -go=r | wc -l)
        if [[ $security_issue_in_ssh_folder -gt 0 ]]; then
            echo -n "force security in .ssh folder for <$CURRENT_USER> ..."
            chmod -R u+rwX,go-rwx "/home/$CURRENT_USER/.ssh"
            PREFIX="" echo "OK"
        else
            echo "security in .ssh folder for <$CURRENT_USER> approved!"
        fi
    fi

    pubkey_value=$(yq ".authorized.pubkey" "$HARDEN_CONFIGFILE")
    if [[ ! -f /home/$CURRENT_USER/.ssh/authorized_keys ]]; then
        echo -n "authorized_keys first time ..."
        PREFIX="" echo "$pubkey_value" >"$HOME/.ssh/authorized_keys"
        chmod u+rw,go-rwx "/home/$CURRENT_USER/.ssh/authorized_keys"
        PREFIX="" echo "OK"
    else
        if ! grep -q "^$pubkey_value" "/home/$CURRENT_USER/.ssh/authorized_keys"; then
            echo -n "pubkey <$CURRENT_USER> appended to <.ssh/authorized_keys> ..."
            echo "$pubkey_value" >>"$HOME/.ssh/authorized_keys"
            PREFIX="" echo "OK"
        else
            echo "pubkey <$CURRENT_USER> already authorized!"
        fi
    fi
}
|
|||
# Restrict sudo to $CURRENT_USER only: render /etc/sudoers from the template,
# drop /etc/sudoers.d, remove the stock <debian> user and <sudo> group, then
# clear root's and the user's passwords. Idempotent, keyed on /etc/sudoers.d.
# NOTE(review): rm/userdel/groupdel/passwd below run WITHOUT sudo while the
# tera call uses sudo — confirm this function is expected to run as root,
# otherwise these steps fail silently under the ERR trap.
function sudoers() {
    local PREFIX="harden:sudoers"
    if [[ -d /etc/sudoers.d ]]; then
        echo -n "add $CURRENT_USER and no more ..."

        sudo env current_user="$CURRENT_USER" tera -e --env-key env --env-only -o /etc/sudoers -t "$MIAOU_BASEDIR/templates/hardened/sudoers.j2" >/dev/null

        rm /etc/sudoers.d -rf
        grep -Eq "^debian" /etc/passwd && userdel -rf debian
        grep -Eq "^sudo" /etc/group && groupdel sudo
        passwd -dq root
        passwd -dq "$CURRENT_USER"

        PREFIX="" echo "OK"
    else
        echo "sudo authorized for <$CURRENT_USER> only!"
    fi
}
|
|||
# Install openssh-server when absent, then enforce the hardened sshd_config
# template (detected via "Port 2222") and restart the daemon.
function sshd() {
    local PREFIX="harden:sshd"
    if [[ -f /etc/ssh/sshd_config ]]; then
        echo "sshd already installed!"
    else
        sudo apt install -y openssh-server
    fi

    if grep -Eq "^Port 2222" /etc/ssh/sshd_config; then
        echo "already done!"
    else
        echo -n "replacing sshd ..."

        sudo env current_user="$CURRENT_USER" tera -e --env-key env --env-only -o /etc/ssh/sshd_config -t "$MIAOU_BASEDIR/templates/hardened/sshd_config.j2" >/dev/null
        sudo systemctl restart sshd

        PREFIX="" echo "OK"
    fi
}
|||
|
|||
# Tune host networking for the proxy role: prefer IPv4 name resolution and
# enable kernel IP forwarding. Both steps are idempotent.
function prepare_proxy() {
    local PREFIX="harden:proxy"

    if grep -Eq "^precedence ::ffff:0:0/96.*" /etc/gai.conf; then
        echo "ipv4 already prefered!"
    else
        echo "prefer ipv4 ..."
        sudo /opt/debian-bash/tools/append_or_replace "^precedence ::ffff:0:0/96.*" "precedence ::ffff:0:0/96 100" /etc/gai.conf
        echo "OK"
    fi

    if grep -Eq "^net.ipv4.ip_forward=1" /etc/sysctl.conf; then
        echo "kernel forwarding already allowed!"
    else
        echo "allow forwarding from kernel ..."
        sudo /opt/debian-bash/tools/append_or_replace "^net.ipv4.ip_forward=1.*" "net.ipv4.ip_forward=1" /etc/sysctl.conf
        sudo sysctl -p
        echo "OK"
    fi
}
|||
|
|||
# Cache the invoking user's login name in the global CURRENT_USER.
function set_current_user {
    local PREFIX="harden:environment"

    CURRENT_USER="$(id -un)"
    echo "current user is <$CURRENT_USER>"
}
|||
|
|||
# Seed the hardening configuration from the bundled sample when missing,
# then always open it in the user's editor for review.
function load_configuration {
    local PREFIX="harden:configuration:load"

    if [[ ! -f "$HARDEN_CONFIGFILE" ]]; then
        echo "configuration requires further details ..."
        cp "$MIAOU_BASEDIR/templates/hardened/hardened.yaml.sample" "$HARDEN_CONFIGFILE"
        echo "OK"
    fi

    editor "$HARDEN_CONFIGFILE"
}
|||
|
|||
function check_configuration {
    # Verify every mandatory key of the harden config file is defined.
    local PREFIX="harden:configuration:check"
    local key
    for key in 'authorized.pubkey' 'alert.to' 'alert.from' 'alert.smtp.server'; do
        check_yaml_defined_value "$HARDEN_CONFIGFILE" "$key"
    done
}
|||
|
|||
function set_timezone_if_defined {
    # Apply the timezone declared in the harden config file, when present.
    # Exits 98 when the configured value does not match any zoneinfo entry.
    local PREFIX="harden:timezone"
    timezone=$(yq ".timezone" "$HARDEN_CONFIGFILE")
    if [[ "$timezone" != null ]]; then
        if ! grep -q "$timezone" /etc/timezone; then
            if [[ -f "/usr/share/zoneinfo/$timezone" ]]; then
                echo "set timezone to $timezone ..."
                # BUGFIX: both commands modify system files and must be run
                # through sudo — this script runs as a regular user and uses
                # sudo for every other privileged operation.
                sudo ln -fs "/usr/share/zoneinfo/$timezone" /etc/localtime
                sudo dpkg-reconfigure -f noninteractive tzdata
                echo OK
            else
                echoerr "unknown timezone: <$timezone>, please edit <$HARDEN_CONFIGFILE> and change to a correct value" && exit 98
            fi
        else
            echo "timezone <$timezone> already set!"
        fi
    fi
}
|||
|
|||
function mailer_alert() {
    # Replace exim with msmtp as the system mailer and render its
    # configuration (/etc/aliases, /etc/mail.rc, /etc/msmtprc) from templates.
    # Idempotent: skipped entirely once /etc/msmtprc exists.
    local PREFIX="harden:mailer"

    if [[ ! -f /etc/msmtprc ]]; then
        # remove the default Debian MTA stack before installing msmtp
        for i in exim4-config libevent-2.1-7 libgnutls-dane0 libunbound8; do
            # BUGFIX: dropped stray `&& echo 'installed'` from the condition
            if dpkg -l "$i" 2>/dev/null | grep -q ^ii; then
                echo "purging package <$i> ..."
                # BUGFIX: apt purge needs root; every other privileged call
                # in this script goes through sudo
                sudo apt purge -y "$i"
                echo "OK"
            fi
        done

        echo "installing <msmtp> ..."
        sudo /opt/debian-bash/tools/idem_apt_install msmtp msmtp-mta mailutils bsd-mailx
        echo "OK"

        echo "configuring </etc/aliases>"
        sudo env current_user="$CURRENT_USER" tera -e --env-key env -o /etc/aliases -t "$MIAOU_BASEDIR/templates/hardened/mailer/aliases.j2" "$HARDEN_CONFIGDIR/hardened.yaml" >/dev/null
        echo "OK"

        # populate environment variable with fqdn
        fqdn=$(hostname -f)

        echo "configuring </etc/mail.rc>"
        sudo env current_user="$CURRENT_USER" fqdn="$fqdn" tera -e --env-key env -o /etc/mail.rc -t "$MIAOU_BASEDIR/templates/hardened/mailer/mail.rc.j2" "$HARDEN_CONFIGDIR/hardened.yaml" >/dev/null
        echo "OK"

        echo "generating </etc/msmtprc> configuration file ..."
        sudo env fqdn="$fqdn" tera -e --env-key env -o /etc/msmtprc -t "$MIAOU_BASEDIR/templates/hardened/mailer/msmtprc.j2" "$HARDEN_CONFIGDIR/hardened.yaml" >/dev/null
        # msmtprc contains SMTP credentials: restrict to root + msmtp group
        sudo chown root:msmtp /etc/msmtprc
        sudo chmod 640 /etc/msmtprc
        echo "OK"
    else
        echo "mailer <msmtp> already configured!"
    fi
}
|||
|
|||
function alert_at_boot() {
    # Install and enable the systemd unit that emits an alert at boot time.
    # Sets the global REBOOT flag when the unit was newly installed.
    local PREFIX="harden:alert:boot"
    if systemctl is-enabled --quiet on_startup.service 2>/dev/null; then
        echo "systemd <on_startup.service> already enabled!"
        return 0
    fi
    echo "installing <on_startup.service> on systemd..."
    sudo cp "$MIAOU_BASEDIR/templates/hardened/systemd/on_startup.service" /etc/systemd/system/on_startup.service
    sudo systemctl daemon-reload
    sudo systemctl enable on_startup.service
    REBOOT=true
    echo "OK"
}
|||
|
|||
function show_reboot_on_purpose() {
    # Closing message: recommend a reboot when any step set REBOOT=true,
    # otherwise report overall success.
    if [[ "$REBOOT" == true ]]; then
        PREFIX="harden:reboot" echowarn "we recommend reboot on purpose, Reboot NOW?"
    else
        PREFIX="harden" echo "success"
    fi
}
|||
|
|||
function disable_systemd_resolved() {
    # Replace the systemd-resolved stub resolver with a static resolv.conf
    # pointing at 1.1.1.1. Detection relies on /etc/resolv.conf being a
    # symlink to the stub file.
    # BUGFIX: PREFIX is now declared local, as in every sibling function;
    # previously it leaked into the global scope and clobbered the caller's
    # PREFIX for the rest of the run.
    local PREFIX="harden:systemd:resolved"
    if file /etc/resolv.conf | grep -q /run/systemd/resolve/stub-resolv.conf; then
        echo "disabling systemd-resolved..."
        sudo systemctl stop systemd-resolved.service
        sudo systemctl disable systemd-resolved.service
        sudo rm /etc/resolv.conf
        cat <<EOF | sudo tee /etc/resolv.conf
nameserver 1.1.1.1
EOF
        echo "OK"
    else
        echo "systemd-resolved already disabled!"
    fi
}
|||
|
|||
function alert_at_ssh_password() {
    # Hook a PAM exec module into sshd so every password login triggers the
    # alert script. Idempotent via the grep guard on /etc/pam.d/sshd.
    local PREFIX="harden:alert:ssh:password"
    if grep -Eq "^session optional pam_exec.so /usr/local/bin/alert_ssh_password.sh" /etc/pam.d/sshd; then
        echo "alert_at_ssh_password already enabled!"
        return 0
    fi
    echo "installing alert_at_ssh_password..."
    sudo cp "$MIAOU_BASEDIR/templates/hardened/pam/alert_ssh_password.sh" /usr/local/bin/
    sudo chmod 700 /usr/local/bin/alert_ssh_password.sh
    sudo /opt/debian-bash/tools/append_or_replace "^session optional pam_exec.so /usr/local/bin/alert_ssh_password.sh" "session optional pam_exec.so /usr/local/bin/alert_ssh_password.sh" /etc/pam.d/sshd
    echo "OK"
}
|||
|
|||
function customize_motd {
    # Install the project's dynamic MOTD scripts, wiping the distribution
    # defaults first. The marker file 80-users makes this idempotent.
    local PREFIX="harden:motd:customize"
    if [[ -f /etc/update-motd.d/80-users ]]; then
        echo "motd already customized!"
        return 0
    fi
    echo "customizing motd..."
    sudo /opt/debian-bash/tools/idem_apt_install figlet lsb-release
    sudo rm -f /etc/motd
    sudo mkdir -p /etc/update-motd.d
    sudo rm -f /etc/update-motd.d/*
    sudo cp "$MIAOU_BASEDIR"/templates/hardened/motd/* /etc/update-motd.d/
    sudo chmod +x /etc/update-motd.d/*
    echo "OK"
}
|||
|
|||
### CONSTANTS
### ---------

# Root of the miaou checkout (parent of this script's directory).
MIAOU_BASEDIR=$(readlink -f "$(dirname "$0")/..")
readonly HARDEN_CONFIGDIR="$HOME/.config/hardened"
readonly HARDEN_CONFIGFILE="$HARDEN_CONFIGDIR/hardened.yaml"

### MAIN
### ----

# shellcheck source=/dev/null
. "$MIAOU_BASEDIR/lib/functions.sh"
miaou_init

# Set by individual steps when a reboot is advisable at the end.
REBOOT=false
PREFIX="harden"
: $PREFIX

# Hardening pipeline — order matters: configuration must exist and be valid
# before any step that reads it (timezone, mailer, alerts).
sudo_required
install_debian_bash
install_mandatory_commands
prepare_config_hardened
set_current_user
# open the editor only when the config is missing/incomplete, then re-check
check_configuration 2>/dev/null || load_configuration
check_configuration
pubkey_authorize
sshd
prepare_proxy
prepare_nftables
disable_systemd_resolved
set_timezone_if_defined
mailer_alert
alert_at_boot
alert_at_ssh_password
customize_motd

show_reboot_on_purpose
@ -0,0 +1,18 @@ |
|||
#!/bin/bash

# Build the miaou LXC base image for Debian 11 (bullseye).

MIAOU_DIR="$(dirname "$0")/../.."
readonly MIAOU_DIR

function init_strict() {
    # Enable strict mode, load shared helpers, and report failures with a
    # function-call stack via the ERR trap.
    set -Eeuo pipefail
    # shellcheck source=/dev/null
    source "$MIAOU_DIR/lib/functions.sh"
    # shellcheck source=/dev/null
    source "/opt/debian-bash/lib/functions.sh"
    trap 'trap_error $? $LINENO $BASH_LINENO "$BASH_COMMAND" $(printf "::%s" ${FUNCNAME[@]})' ERR
}

## main
init_strict
sudo_required
build_miaou_image "bullseye"
@ -0,0 +1,19 @@ |
|||
#!/bin/bash

# Build the miaou LXC base image for Debian 10 (buster).

MIAOU_DIR="$(dirname "$0")/../.."
readonly MIAOU_DIR

function init_strict() {
    # Enable strict mode, load shared helpers, and report failures with a
    # function-call stack via the ERR trap.
    set -Eeuo pipefail
    # shellcheck source=/dev/null
    source "$MIAOU_DIR/lib/functions.sh"
    # shellcheck source=/dev/null
    source "/opt/debian-bash/lib/functions.sh"

    trap 'trap_error $? $LINENO $BASH_LINENO "$BASH_COMMAND" $(printf "::%s" ${FUNCNAME[@]})' ERR
}

## main
init_strict
sudo_required
build_miaou_image "buster"
@ -0,0 +1,5 @@ |
|||
#!/bin/bash

# Common bootstrap sourced by recipes and scripts: loads the shared helper
# library then runs miaou_init. Expects MIAOU_BASEDIR to be set by the caller.

# shellcheck source=/dev/null
. "$MIAOU_BASEDIR/lib/functions.sh"
miaou_init
@ -0,0 +1,409 @@ |
|||
#!/bin/bash

# miaou installer entry point: resolves the checkout root, loads helpers
# and defines installer-wide constants.

MIAOU_BASEDIR=$(readlink -f "$(dirname "$0")/..")
# shellcheck source=/dev/null
. "$MIAOU_BASEDIR/lib/functions.sh"
readonly MIAOU_BASEDIR

miaou_init

# Expanded (target-specific) configuration file and the group granting
# unprivileged access to the LXD socket.
EXPANDED_CONF="$MIAOU_CONFIGDIR/miaou.expanded.yaml"
NEW_GROUP=lxd
readonly NEW_GROUP EXPANDED_CONF
|||
|
|||
on_exit() {
    # EXIT/signal handler: remind about the pending session reload, then
    # report either the signal name ($1, when invoked from a signal trap)
    # or a non-zero exit status captured by the EXIT trap in $status.
    if [[ "$SESSION_RELOAD_REQUIRED" == true ]]; then
        echo "======================================================"
        echo "Session Reload is required (due to new group <$NEW_GROUP>)"
        echo "======================================================"
    fi
    if [ -n "${1:-}" ]; then
        echo "Aborted by $1"
    # BUGFIX: default to 0 — with "${status:-}" an unset/empty status made
    # `[ "" -ne 0 ]` fail with "integer expression expected" inside the trap.
    elif [ "${status:-0}" -ne 0 ]; then
        echo "Failure (status $status)"
    fi
}
|||
|
|||
function prepare_lxd {
    # Prepare the host for LXD usage: group membership, packages, bridge
    # initialization, client aliases, profile environment, uid/gid mapping
    # and working folders. Idempotent — every step checks before acting.
    local PREFIX="lxd:prepare"

    # Ensure the lxd group exists and the current user belongs to it; when
    # membership was just granted, re-exec this script under the new group
    # (passing a sentinel so the child knows a session reload is pending).
    if ! groups | grep -q lxd; then
        echo "define lxd and assign to user <$USER>"
        sudo groupadd --force "$NEW_GROUP"
        sudo usermod --append --groups "$NEW_GROUP" "$(whoami)"
        exec sg "$NEW_GROUP" "exec '$0' $(printf "'%s' " SESSION_RELOAD_REQUIRED "$@")"
        # no further processing because exec has been called!
    else
        echo "user <$USER> already belongs to group <lxd>!"
    fi

    sudo /opt/debian-bash/tools/idem_apt_install lxd btrfs-progs

    # Missing default bridge means LXD was never initialized on this host.
    if ! lxc network info lxdbr0 &>/dev/null; then
        echo "bridge <lxdbr0> down, so initialization will use default preseed..."
        sudo lxd init
        echo OK
    else
        echo "bridge <lxdbr0> found implies it has been already initialized!"
    fi

    set_alias 'sameuser' "exec @ARG1@ -- su --whitelist-environment container_hostname - $(whoami)"
    set_alias 'login' 'exec @ARGS@ --mode interactive -- /bin/bash -c $@${user:-root} - exec su --whitelist-environment container_hostname - '
    set_alias 'll' 'list -c ns4mDN'

    # Propagate the host name chain into containers through the default
    # profile's environment (appending the short hostname to any value
    # inherited from an outer nesting level).
    local env_container_hostname
    env_container_hostname=$(lxc profile get default environment.container_hostname)
    if [[ -z "$env_container_hostname" ]]; then
        env_container_hostname=$(hostname -s)
        if env | grep -q container_hostname; then
            local previous_container_hostname
            previous_container_hostname=$(env | grep container_hostname | cut -d '=' -f2)
            env_container_hostname="$previous_container_hostname $env_container_hostname"
        fi

        echo -n "set environment container_hostname to <$env_container_hostname> ... "
        lxc profile set default environment.container_hostname "$env_container_hostname"
        PREFIX="" echoinfo OK
    else
        echo "environment container_hostname <$env_container_hostname> already defined!"
    fi

    # Map container root onto the invoking user's uid/gid (one-line entries
    # appended to /etc/subuid and /etc/subgid).
    if ! grep -q "root:$(id -u):1" /etc/subuid; then
        echo -n "subuid, subgid allowing <$(whoami)> ..."
        printf "root:%s:1\n" "$(id -u)" | sudo tee -a /etc/subuid /etc/subgid
        PREFIX="" echoinfo DONE
    else
        echo "subuid, subgid allowing <$(whoami)> already done!"
    fi

    if [[ ! -d "$HOME/LXD/SHARED" ]]; then
        echo -n "$HOME/LXD/SHARED creating ... "
        mkdir "$HOME/LXD/SHARED" -p
        PREFIX="" echoinfo DONE
    else
        echo "folder <$HOME/LXD/SHARED> already created!"
    fi

    # BUGFIX: this branch previously echoed and created SHARED again
    # (copy-paste error), so BACKUP was never actually created.
    if [[ ! -d "$HOME/LXD/BACKUP" ]]; then
        echo -n "$HOME/LXD/BACKUP creating ... "
        mkdir "$HOME/LXD/BACKUP" -p
        PREFIX="" echoinfo DONE
    else
        echo "folder <$HOME/LXD/BACKUP> already created!"
    fi
}
|||
|
|||
function set_alias {
    # Define an lxc client alias once (idempotent).
    #   $1: alias name
    #   $2: aliased command string
    local name="$1"
    local command="$2"
    if ! lxc alias list -f csv | grep -q "^$name,"; then
        echo -n "define lxc alias $name ..."
        lxc alias add "$name" "$command"
        PREFIX="" echoinfo OK
    else
        # BUGFIX: $name sat outside the double quotes ("..."$name"..."),
        # exposing it to word splitting and pathname expansion.
        echo "lxc alias $name already defined!"
    fi
}
|||
|
|||
function miaou_evalfrombashrc() {
    # Register (once) an eval line in ~/.bashrc so miaou's scripts directory
    # lands in PATH; flags a session reload when the line was just appended.
    local PREFIX="miaou:bashrc"
    output=$(
        /opt/debian-bash/tools/append_or_replace \
            "^eval \"\\$\($MIAOU_BASEDIR/lib/install.sh shellenv\)\"$" \
            "eval \"\$($MIAOU_BASEDIR/lib/install.sh shellenv)\"" \
            "$HOME/.bashrc"
    )

    case "$output" in
    appended)
        echo "new path <$MIAOU_BASEDIR> created!"
        SESSION_RELOAD_REQUIRED=true
        ;;
    *)
        echo "path <$MIAOU_BASEDIR> already loaded!"
        ;;
    esac
}
|||
|
|||
function ask_target() {
    # Interactively ask which purpose this miaou host serves and print the
    # chosen value upper-cased on stdout.
    PS3='Choose miaou target purpose: '
    local choices=("Dev" "Beta" "Prod")
    select ans in "${choices[@]}"; do
        builtin echo "${ans^^}"
        break
    done
}
|||
|
|||
function check_credential {
    # Succeed only when every credential key is defined in defaults.yaml.
    local PREFIX="check:credential"
    local key
    for key in username shadow email password; do
        check_yaml_defined_value /etc/miaou/defaults.yaml "credential.$key" || return
    done
}
|||
|
|||
function check_target() {
    # Normalize/resolve the global TARGET: accept dev|beta|prod (any case);
    # anything else falls back to the value persisted in defaults.yaml, or
    # to an interactive prompt when no configuration exists yet.
    case "${TARGET^^}" in
    DEV | BETA | PROD) ;;
    *)
        if [[ -f /etc/miaou/defaults.yaml ]]; then
            # load already defined target in expanded conf
            TARGET=$(grep -Es "^target:" /etc/miaou/defaults.yaml | cut -d ' ' -f2)
        else
            TARGET=$(ask_target)
        fi
        ;;
    esac
    TARGET=${TARGET,,} # downcase
    return 0
}
|||
|
|||
function miaou_configfiles() {
    # Create /etc/miaou (defaults.yaml, miaou.yaml) on first run and keep the
    # persisted target in sync with $TARGET; a target change wipes the
    # expanded configuration after operator confirmation.
    local PREFIX="miaou:config"

    if [[ ! -d /etc/miaou ]]; then
        echo -n "configuration initializing ..."
        sudo mkdir -p /etc/miaou
        # hand the directory to the invoking user so later writes need no sudo
        sudo chown "$USER" /etc/miaou
        PREFIX="" echoinfo OK
    fi

    if [[ ! -f /etc/miaou/defaults.yaml ]]; then
        echo -n "building /etc/miaou/defaults.yaml for the first time..."
        # reuse the user's hashed password from /etc/shadow in the template
        shadow_passwd=$(sudo grep "$CURRENT_USER" /etc/shadow | cut -d ':' -f2)
        env current_user="$CURRENT_USER" shadow_passwd="$shadow_passwd" tera -e --env-key env --env-only -t "$MIAOU_BASEDIR/templates/etc/defaults.yaml.j2" -o /etc/miaou/defaults.yaml >/dev/null
        yq ".target=\"$TARGET\"" /etc/miaou/defaults.yaml -i
        PREFIX="" echoinfo OK
    fi

    if [[ ! -f /etc/miaou/miaou.yaml ]]; then
        echo -n "building /etc/miaou/miaou.yaml for the first time..."
        cp "$MIAOU_BASEDIR/templates/etc/miaou.yaml.j2" /etc/miaou/miaou.yaml
        PREFIX="" echoinfo OK
    fi

    # Compare the previously expanded target (if any) with the current one.
    PREVIOUS_TARGET=""
    echo "expanded configuration stored in <$MIAOU_CONFIGDIR>!"
    [[ -f "$EXPANDED_CONF" ]] && PREVIOUS_TARGET=$(grep -Es "^target:" "$EXPANDED_CONF" | cut -d ' ' -f2)

    if [[ "$PREVIOUS_TARGET" != "$TARGET" ]]; then
        if [[ -z "$PREVIOUS_TARGET" ]]; then
            echo "new target defined <$TARGET>"
        else
            # switching target invalidates all expanded settings — confirm,
            # then wipe and require a rerun
            echowarnn "TARGET has changed from <$PREVIOUS_TARGET> to <$TARGET>, do you agree?"
            if askConfirmation N; then
                echowarn "removing previous settings, please restart <miaou> to apply changes"
                rm "$MIAOU_CONFIGDIR" -rf
            else
                echoerr "TARGET not accepted, exit"
                exit 102
            fi
        fi
        yq ".target=\"$TARGET\"" /etc/miaou/defaults.yaml -i
    else
        echo "target <$TARGET> already defined!"
    fi
}
|||
|
|||
function opt_link() {
    # Ensure /opt/miaou resolves to the current checkout: either the checkout
    # already lives there, or a symlink pointing at it is (re)created.
    if [[ $MIAOU_BASEDIR == '/opt/miaou' ]]; then
        echo "real path /opt/miaou already set up!"
        return 0
    fi
    if [[ -L '/opt/miaou' && -d '/opt/miaou' && $(readlink /opt/miaou) == "$MIAOU_BASEDIR" ]]; then
        echo "symbolic link /opt/miaou already set up!"
    else
        sudo rm -f /opt/miaou
        sudo ln -s "$MIAOU_BASEDIR" /opt/miaou
        echo "symbolic link /opt/miaou successfully defined!"
    fi
}
|||
|
|||
function miaou_resolver() {
    # Make the host resolve container names through the LXD bridge: via a
    # NetworkManager dispatcher script when nmcli exists (desktop case),
    # otherwise by rewriting /etc/resolv.conf directly (server case).
    local PREFIX="miaou:resolver"
    # bridge = lxdbr0's IPv4 address; gateway = default route next hop
    bridge=$(ip addr show lxdbr0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
    gateway=$(ip route | grep default | cut -d' ' -f3)

    if command -v nmcli &>/dev/null; then
        if [[ ! -f /etc/NetworkManager/dispatcher.d/50-miaou-resolver ]]; then
            echo -n "use NetworkManager dispatcher to deal with LXD bridge automatically..."
            sudo cp "$MIAOU_BASEDIR/templates/network-manager/50-miaou-resolver" /etc/NetworkManager/dispatcher.d/
            sudo chmod +x /etc/NetworkManager/dispatcher.d/50-miaou-resolver
            # bounce the first active connection so the dispatcher fires now
            ACTIVE_CONNECTION=$(nmcli -g NAME connection show --active | head -n1)
            nmcli connection up "$ACTIVE_CONNECTION" &>/dev/null
            PREFIX="" echoinfo OK
        else
            echo "miaou-resolver in NetworkManager dispatcher already initialized!"
        fi
    else
        if ! grep -q "nameserver $bridge" /etc/resolv.conf; then
            echo "customize resolv.conf from scratch (SERVER)..."
            sudo tee /etc/resolv.conf &>/dev/null <<EOF
nameserver $bridge
nameserver $gateway
EOF
            PREFIX="" echoinfo OK
        else
            echo "customize resolv.conf already already defined!"
        fi
    fi
}
|||
|
|||
function extra_dev_desktop {
    # Placeholder: intended to apply extra tweaks when the target is DEV and
    # the host is a desktop machine. Not implemented yet (no-op).
    # detect if DEV
    # detect if DESKTOP
    :
}
|||
|
|||
function override_lxd_service_to_reload_nftables {
    # Add a systemd drop-in so that every (re)start of lxd.service also
    # reloads nftables — LXD startup rewrites firewall rules that the miaou
    # nftables configuration must re-apply.
    local PREFIX="lxd:override"

    if [[ ! -d /etc/systemd/system/lxd.service.d ]]; then
        echo -n "override lxd service..."
        sudo mkdir -p /etc/systemd/system/lxd.service.d
        cat <<EOF | sudo tee /etc/systemd/system/lxd.service.d/override.conf
[Service]
ExecStartPost=systemctl reload nftables.service
EOF
        sudo systemctl daemon-reload
        PREFIX="" echo "OK"
    else
        echo "lxd service already overridden!"
    fi
}
|||
|
|||
function ask_for_credential {
    # Open the editor when credential fields are missing from defaults.yaml,
    # then verify again — this second check aborts the run (ERR trap) if the
    # operator left placeholders in place.
    local PREFIX="ask:credential"
    check_credential 2>/dev/null || {
        echo "further details required, please replace any <TO BE DEFINED> by a proper value ...press any key to open editor"
        read -rn1
        editor /etc/miaou/defaults.yaml
    }
    check_credential
    echo "successfully checked!"
}
|||
|
|||
### MAIN

# The `sg`-based re-exec in prepare_lxd passes this sentinel as $1 so the
# child process knows a session reload will be needed at the end.
if [[ "${1:-}" == "SESSION_RELOAD_REQUIRED" ]]; then
    SESSION_RELOAD_REQUIRED=true
    shift
else
    SESSION_RELOAD_REQUIRED=false
fi

if [[ "${1:-}" == "shellenv" ]]; then
    # Print shell exports for eval'ing from ~/.bashrc — no installation done.
    unset PREFIX
    echo "export MIAOU_BASEDIR=$MIAOU_BASEDIR"
    echo "export PATH=\"\$MIAOU_BASEDIR/scripts\":\$PATH"
else

    . "$MIAOU_BASEDIR/lib/init.sh"

    # Report interruption/failure context on every exit path; the signal
    # traps re-raise so the parent observes the real signal.
    trap 'status=$?; on_exit; exit $status' EXIT
    trap 'trap - HUP; on_exit SIGHUP; kill -HUP $$' HUP
    trap 'trap - INT; on_exit SIGINT; kill -INT $$' INT
    trap 'trap - TERM; on_exit SIGTERM; kill -TERM $$' TERM

    PREFIX="miaou"
    : $PREFIX
    # Optional first argument selects the target (dev/beta/prod).
    TARGET=${1:-}
    CURRENT_USER=$(id -un)

    # Installation pipeline — order matters (e.g. nftables before/after lxd,
    # configuration before credential prompt).
    check_target
    sudo_required
    install_debian_bash
    install_mandatory_commands
    prepare_toolbox
    add_toolbox_sudoers
    prepare_nftables
    prepare_lxd "$@"
    override_lxd_service_to_reload_nftables
    miaou_resolver
    miaou_evalfrombashrc
    miaou_configfiles
    ask_for_credential
    prepare_nftables
    opt_link
    extra_dev_desktop

    if [[ "$SESSION_RELOAD_REQUIRED" == false ]]; then
        echoinfo "successful installation"
    else
        echowarn "please reload your session, .bashrc needs to be reloaded!"
    fi
fi
@ -0,0 +1,218 @@ |
|||
#!/bin/bash |
|||
|
|||
function check_database_exists() {
    # True when a MariaDB database named after this instance already exists.
    db-maria list | grep --quiet "$longname"
}
|||
|
|||
function check_port_used() {
    # Compare the Listen port of the deployed Apache vhost with the expected
    # $port. usedport is intentionally global (referenced by callers).
    # shellcheck disable=SC2034
    usedport=$(lxc exec "$container" -- bash -c "grep Listen /etc/apache2/sites-enabled/$longname.conf | cut -d' ' -f2")
    [[ "$usedport" == "$port" ]]
}
|||
|
|||
function check_service_running() {
    # Apache must be active inside the instance's container.
    lxc exec "$container" -- systemctl is-active --quiet apache2.service
}
|||
|
|||
function check_config_defined() {
    # The instance's config.xml must exist inside the container.
    lxc exec "$container" -- test -f "/var/www/cagettepei/$shortname/config.xml"
}
|||
|
|||
function _read() {
    # Verify an existing instance end-to-end (database, container, port,
    # config, service). Tracing is suspended so the probes stay quiet; any
    # failing check aborts through the ERR trap.
    disable_trace
    check_database_exists
    check_container "$container"
    check_port_used
    check_config_defined
    check_service_running
    enable_trace
    return 0
}
|||
|
|||
function _create() {
    # Provision a CagettePéi instance end-to-end: render the Apache vhost,
    # push it into the container, create the database, clone/compile the
    # application, write config.xml and seed the admin account.
    echo "creating CagettePéi instance for <$shortname> ... "

    mkdir -p "$MIAOU_CONFIGDIR/apps/cagettepei"
    APP_PORT=$port APP_NAME=$shortname tera -e --env-key env -t "$MIAOU_DIR/templates/apps/cagettepei/cagettepei-host.j2" -o "$MIAOU_CONFIGDIR/apps/cagettepei/$longname.conf" "$MIAOU_CONFIGDIR/miaou.expanded.yaml"
    echo "creating templates ... OK"

    echo "copying files over container <$container> ... "
    lxc file push --uid 0 --gid 0 "$MIAOU_CONFIGDIR/apps/cagettepei/$longname.conf" "$container/etc/apache2/sites-available/$longname.conf"
    echo "copying files over container <$container> ... OK"

    # DB_INIT drives the first-boot seeding step at the end of the heredoc.
    if ! (db-maria list | grep -q "$longname"); then
        echo "create empty database <$longname> ... "
        db-maria create "$longname"
        echo "create empty database <$longname> ... OK"
        DB_INIT=true
    else
        echo "database already exists!"
        DB_INIT=false
    fi

    credential_username=$(load_yaml_from_expanded credential.username)
    credential_email=$(load_yaml_from_expanded credential.email)

    echo "initialize cagettepei $shortname $longname ..."
    # NOTE: the outer heredoc delimiter is unquoted, so $shortname, $longname,
    # $fqdn, $port, $DB_INIT and the credentials are expanded locally BEFORE
    # the script runs inside the container (including inside the quoted EOT2
    # heredoc below).
    lxc exec "$container" -- bash <<EOF
set -Eeuo pipefail
if [[ ! -d /var/www/cagettepei/$shortname ]]; then
    echo "installing new instance of cagettepei into /var/www/cagettepei/$shortname"
    cd /tmp
    if [[ -d cagettepei ]]; then
        echo "refreshing previous clone"
        cd cagettepei
        git pull
        cd ..
    else
        echo "cloning"
        git clone https://git.artcode.re/cagetters/cagettepei.git
    fi
    cp -r cagettepei /var/www/cagettepei/$shortname
    cd /var/www/cagettepei/$shortname
    echo COMPILING...
    export HAXE_STD_PATH=/opt/haxe_20180221160843_bb7b827/std
    haxelib setup .haxelib
    make install ENV=dev
    echo OK
else
    echo "instance of cagettepei /var/www/cagettepei/$shortname already defined!"
fi

if diff -q /var/www/cagettepei/$shortname/config.xml /var/www/cagettepei/$shortname/config.xml.dist; then
    echo "create config.xml"
    cat << 'EOT2' > /var/www/cagettepei/$shortname/config.xml
<config
    database="mysql://cagettepei-$shortname:cagettepei-$shortname@ct1.lxd/cagettepei-$shortname"

    host="$fqdn"
    name = "$shortname"
    default_email = "postmaster@artcode.re"
    webmaster_email = "postmaster@artcode.re"

    key="carotteMagique"

    lang="fr"
    langs="fr"
    langnames="Français"
    sqllog="0"
    debug="0"
    cache="0"
    maintain="0"
    cachetpl="0"
/>
EOT2
else
    echo "config.xml already defined!"
fi

a2ensite $longname.conf
mkdir -p /var/log/apache2/cagettepei/$shortname
systemctl restart apache2

if [[ $DB_INIT == true ]]; then
    echo "Force TABLES initialization"
    curl localhost:$port
    echo "Set administrator password..."
    echo "insert into User values (1,'fr','c3513c793b13471f3a49bdb22acb66de',1,'$credential_username','Admin', '$credential_email', null, null, null, null, null, null, null, null, null, now(), now(),6,null, null);" | mariadb cagettepei-$shortname -u cagettepei-$shortname -pcagettepei-$shortname -h ct1.lxd
    echo "TODO: password \`cagette\` should be changed soon!!!"
fi
EOF
    echo "initialize cagettepei $shortname $longname ... OK"
}
|||
|
|||
function _update() {
    # Placeholder: in-place upgrade of an instance is not implemented yet.
    echo "update"
}
|||
|
|||
function _delete() {
    # Placeholder: instance removal is not implemented yet.
    echo "delete"
}
|||
|
|||
function usage() {
    # Print invocation help and exit with status 2 (bad usage).
    echo "Usage: $COMMAND_NAME -c|r|u|d --port PORT --container CONTAINER --name NAME"
    exit 2
}
|||
|
|||
### MAIN

# init_strict

COMMAND_NAME=$(basename "$0")

# read the options

# -c/-r/-u/-d pick exactly one CRUD action; the long options carry the
# instance parameters. getopt failure falls through to usage().
TEMP=$(getopt -n "$COMMAND_NAME" -o crud --long port:,container:,name:,fqdn: -- "$@")
# shellcheck disable=SC2181
[[ "$?" -eq 0 ]] || usage
eval set -- "$TEMP"

# "unset" sentinels let the final validation detect missing options.
action="unset"
port="unset"
container="unset"
shortname="unset"
longname="unset"
fqdn="unset"

# extract options and their arguments into variables.
while true; do
    case "$1" in
    --port)
        port=$2
        shift 2
        ;;
    --fqdn)
        fqdn=$2
        shift 2
        ;;
    --container)
        container=$2
        shift 2
        ;;
    --name)
        # longname is the namespaced variant used for vhost + database
        shortname=$2
        longname="cagettepei-$shortname"
        shift 2
        ;;
    -c)
        # only one action flag may be given
        [[ "$action" == "unset" ]] || usage
        action="_create"
        shift 1
        ;;
    -r)
        [[ "$action" == "unset" ]] || usage
        action="_read"
        shift 1
        ;;
    -u)
        [[ "$action" == "unset" ]] || usage
        action="_update"
        shift 1
        ;;
    -d)
        [[ "$action" == "unset" ]] || usage
        action="_delete"
        shift 1
        ;;
    --)
        shift
        break
        ;;
    *)
        echo "Internal error!"
        exit 1
        ;;
    esac
done

# All mandatory options must have been supplied.
[[
    "$action" != unset &&
    "$port" != unset &&
    "$container" != unset &&
    "$fqdn" != unset &&
    "$shortname" != unset ]] || usage

. "$MIAOU_BASEDIR/lib/init.sh"

# Dispatch to the selected _create/_read/_update/_delete function.
$action
@ -0,0 +1,187 @@ |
|||
#!/bin/bash

# Packages that must NOT be present in a CagettePéi container (custom
# versions are installed instead) and the packages that must be present.
readonly UNWANTED_PACKAGES_STRING="nginx node python haxe"
readonly MANDATORY_PACKAGES_STRING="wget apache2 make git imagemagick gettext libapache2-mod-neko mariadb-client sendemail libio-socket-ssl-perl libnet-ssleay-perl"
|||
|
|||
### CHECK |
|||
|
|||
function check() {
    # Validate that the container satisfies every CagettePéi prerequisite;
    # each distinct return code identifies the failing check for the caller.
    PREFIX="recipe:cagettepei:check"
    check_unwanted_packages || return 21
    check_mandatory_packages || return 22
    check_apache_modules || return 23
    check_node8 || return 24
    check_python2 || return 25
    check_haxe3 || return 26
    check_cagettepei_batch || return 35
    check_cagettepei_timers || return 36
    echo "container <$CONTAINER> approved successfully!"
}
|||
|
|||
function check_apache_modules() {
    # The neko and rewrite Apache modules must be enabled (symlinks present).
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
test -L /etc/apache2/mods-enabled/neko.load
test -L /etc/apache2/mods-enabled/rewrite.load
true
EOF
}
|||
|
|||
function check_cagettepei_batch() {
    # The batch runner script must have been pushed into the container.
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
test -f /var/www/cagettepei/cagettepei-batch
true
EOF
}
|||
|
|||
function check_cagettepei_timers() {
    # All four systemd units (minute/day service + timer pairs) must exist.
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
test -f /etc/systemd/system/cagettepei-batch-minute.service
test -f /etc/systemd/system/cagettepei-batch-minute.timer
test -f /etc/systemd/system/cagettepei-batch-day.service
test -f /etc/systemd/system/cagettepei-batch-day.timer
true
EOF
}
|||
|
|||
function check_node8() {
    # CagettePéi requires the pinned Node.js 8.17.0 installed by install().
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
node --version | grep -q 'v8.17.0'
EOF
}
|||
|
|||
function check_python2() {
    # The pypy-provided Python 2.7.18 must answer as `python`.
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
python --version 2>&1 | grep -q 'Python 2.7.18'
EOF
}
|||
|
|||
function check_haxe3() {
    # The pinned Haxe 3.4.7 toolchain must be on PATH.
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
haxe -version 2>&1 | grep -q '3.4.7'
EOF
}
|||
|
|||
function check_unwanted_packages() {
    # Succeed only when none of the distro packages that would conflict with
    # the pinned toolchain is installed. $UNWANTED_PACKAGES_STRING expands
    # locally; the \$-escaped variables run inside the container.
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
mapfile -t PACKAGES <<< "$UNWANTED_PACKAGES_STRING"
for package in \${PACKAGES[@]}; do
    ! (dpkg -l "\$package" 2>/dev/null | grep -q ^ii)
done
true # useful because for might return last inexistant package
EOF
}
|||
|
|||
function check_mandatory_packages() {
    # Every package of $MANDATORY_PACKAGES_STRING must be installed (state ii).
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
mapfile -t PACKAGES <<< "$MANDATORY_PACKAGES_STRING"
for package in \${PACKAGES[@]}; do
    dpkg-query -l "\$package" 2>/dev/null | grep -q ^ii
done
EOF
}
|||
|
|||
### INSTALL |
|||
|
|||
function install() {
    # One-shot provisioning of a CagettePéi-capable container: distro
    # packages, pinned Node 8 / Python 2 (pypy) / Haxe 3 toolchains, Apache
    # layout, batch runner and its systemd timers.
    PREFIX="recipe:cagettepei:install"
    : $PREFIX

    launch_container "$CONTAINER"
    echo "initializing CagettePéi ... "

    echo -n "check unwanted packages..."
    check_unwanted_packages
    PREFIX="" echo "OK"

    # NOTE: the unquoted heredoc expands $MANDATORY_PACKAGES_STRING locally
    # before the script runs inside the container.
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail

echo installing CagettePéi...
apt-get update
apt-get install -y $MANDATORY_PACKAGES_STRING

echo installing custom Node8...
wget https://nodejs.org/download/release/v8.17.0/node-v8.17.0-linux-x64.tar.gz -O /tmp/node-v8.17.0-linux-x64.tar.gz
tar -xzvf /tmp/node-v8.17.0-linux-x64.tar.gz -C /opt
chown root:root -R /opt/node-v8.17.0-linux-x64
ln -sf /opt/node-v8.17.0-linux-x64/bin/node /usr/local/bin/
ln -sf /opt/node-v8.17.0-linux-x64/bin/npm /usr/local/bin/
echo -n DONE

echo installing custom Python2 with pypy...
wget https://downloads.python.org/pypy/pypy2.7-v7.3.13-linux64.tar.bz2 -O /tmp/pypy2.7-v7.3.13-linux64.tar.bz2
apt install -y bzip2
bunzip2 -f /tmp/pypy2.7-v7.3.13-linux64.tar.bz2
tar -xvf /tmp/pypy2.7-v7.3.13-linux64.tar -C /opt
ln -sf /opt/pypy2.7-v7.3.13-linux64/bin/python /usr/local/bin/
ln -sf /opt/pypy2.7-v7.3.13-linux64/bin/python2 /usr/local/bin/
echo -n DONE

echo installing custom Haxe3...
wget https://github.com/HaxeFoundation/haxe/releases/download/3.4.7/haxe-3.4.7-linux64.tar.gz -O /tmp/haxe-3.4.7-linux64.tar.gz
tar -xzvf /tmp/haxe-3.4.7-linux64.tar.gz -C /opt
ln -sf /opt/haxe_20180221160843_bb7b827/haxe /usr/local/bin/
ln -sf /opt/haxe_20180221160843_bb7b827/haxelib /usr/local/bin/
echo -n DONE

systemctl stop apache2
rm -f /etc/apache2/sites-available/{000-default,default-ssl}.conf
rm -f /etc/apache2/sites-enabled/000-default.conf
rm -rf /var/www/html
sed -i '/<Directory \/var\/www\/>/,/<\/Directory>/ s/AllowOverride None/AllowOverride All/' /etc/apache2/apache2.conf
sed -i 's/^Listen 80$//' /etc/apache2/ports.conf

echo "prepare folder for cagettepei instances"
mkdir -p /var/www/cagettepei

echo "enable neko and rewrite apache2 modules"
a2enmod neko
a2enmod rewrite
EOF

    echo -n "copy cagettepei-batch..."
    lxc file push --uid 0 --gid 0 "$MIAOU_BASEDIR/templates/apps/cagettepei/cagettepei-batch" "$CONTAINER/var/www/cagettepei/cagettepei-batch"
    lxc exec "$CONTAINER" -- chmod +x /var/www/cagettepei/cagettepei-batch
    PREFIX="" echo "OK"

    echo -n "copy cagettepei timers in systemd..."
    lxc file push --uid 0 --gid 0 "$MIAOU_BASEDIR/templates/apps/cagettepei/systemd/cagettepei-batch-minute.service" "$CONTAINER/etc/systemd/system/cagettepei-batch-minute.service"
    lxc file push --uid 0 --gid 0 "$MIAOU_BASEDIR/templates/apps/cagettepei/systemd/cagettepei-batch-minute.timer" "$CONTAINER/etc/systemd/system/cagettepei-batch-minute.timer"
    lxc file push --uid 0 --gid 0 "$MIAOU_BASEDIR/templates/apps/cagettepei/systemd/cagettepei-batch-day.service" "$CONTAINER/etc/systemd/system/cagettepei-batch-day.service"
    lxc file push --uid 0 --gid 0 "$MIAOU_BASEDIR/templates/apps/cagettepei/systemd/cagettepei-batch-day.timer" "$CONTAINER/etc/systemd/system/cagettepei-batch-day.timer"
    PREFIX="" echo "OK"

    # Tie the timers to apache2's lifecycle so they follow the web app.
    echo -n "override apache2 service to launch cagettepei timers..."
    lxc exec "$CONTAINER" -- bash -c "SYSTEMD_EDITOR=tee systemctl edit apache2 <<EOT
[Unit]
BindsTo = cagettepei-batch-minute.timer cagettepei-batch-day.timer
EOT"
    PREFIX="" echo "OK"

    echo "enable and start cagettepei timers in systemd..."
    lxc exec "$CONTAINER" -- bash <<EOF
systemctl enable cagettepei-batch-minute.timer cagettepei-batch-day.timer
systemctl start cagettepei-batch-minute.timer cagettepei-batch-day.timer
EOF
    PREFIX="" echo "OK"
}
|||
|
|||
### MAIN

. "$MIAOU_BASEDIR/lib/init.sh"

# First argument is the target container name.
arg1_required "$@"
readonly CONTAINER="$1"

# Install only when the container fails validation, then re-check; the
# subshell keeps a failing install from aborting before the final check.
check || (
    install
    check
)
@ -0,0 +1,124 @@ |
|||
#!/bin/bash
# recipe:dmz — provision the DMZ container (nginx reverse-proxy + certbot)

# expanded miaou configuration produced earlier in the pipeline
readonly EXPANDED_CONF="$MIAOU_CONFIGDIR/miaou.expanded.yaml"

# deployment target (dev|beta|prod) read from the expanded configuration
TARGET=$(yq '.target' "$EXPANDED_CONF")
readonly TARGET
|||
|
|||
# Validate the DMZ container end-to-end; each failure maps to a distinct
# return code so the caller can tell what is missing.
function check() {
    if ! container_exists "$CONTAINER"; then return 1; fi
    if ! container_running "$CONTAINER"; then return 2; fi
    if ! check_reverseproxy; then return 4; fi
    if ! check_banner; then return 5; fi
    if ! check_certbot; then return 6; fi

    PREFIX="recipe:dmz:check" echo "container <$CONTAINER> approved successfully!"
    return 0
}
|||
|
|||
# Each helper below runs a probe script inside the container; the function's
# exit status is the status of the heredoc script relayed by `lxc exec`.

# nginx installed (dpkg status "ii"), active, and configuration valid?
function check_reverseproxy() {
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
dpkg -l nginx | grep -q ^ii
systemctl is-active --quiet nginx
nginx -tq
EOF
}

# certbot and its nginx plugin installed?
function check_certbot() {
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
dpkg -l certbot | grep -q ^ii
dpkg -l python3-certbot-nginx | grep -q ^ii
EOF
}

# On non-prod targets a banner snippet must have been pushed.
# On prod the guard fails and the function returns 0 (no banner expected).
function check_banner() {
    if [[ $TARGET != "prod" ]]; then
        lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
test -f /etc/nginx/snippets/banner_$TARGET.conf
EOF
    fi
}
|||
|
|||
# Create/start the DMZ container if needed, install nginx + certbot inside
# it, push the non-prod banner snippet, then record WAN interface and DMZ IP
# into the expanded config and (re)generate host nftables NAT rules.
# Fixes: typo in the certbot fallback message ("resgistered"), and the
# credential email is now quoted inside the probe script (word-splitting
# safety; SC2086).
function install() {
    PREFIX="recipe:dmz:install"
    : $PREFIX

    echowarn "about to deploy new container <$CONTAINER> ..."

    # idempotent creation
    if ! container_exists "$CONTAINER"; then
        echowarn "about to create new container <$CONTAINER> ..."
        lxc-miaou-create "$CONTAINER"
        echo OK
    fi

    # idempotent start
    if ! container_running "$CONTAINER"; then
        echowarn "about to start asleep container <$CONTAINER> ..."
        lxc start "$CONTAINER"
        echo OK
    fi

    credential_email=$(load_yaml_from_expanded credential.email)
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
apt-get update && apt-get dist-upgrade -y
apt-get install -y nginx ssl-cert libnginx-mod-http-subs-filter certbot python3-certbot-nginx

echo "registering with your default credential email <$credential_email>"
certbot register --agree-tos --email "$credential_email" --no-eff-email || echo "already registered!"

rm /etc/nginx/sites-{enabled,available}/default -f
systemctl enable nginx

nginx -tq || rm /etc/nginx/sites-enabled/hosts
systemctl start nginx
EOF

    # banner snippet marks non-production environments visually
    if [[ "$TARGET" != "prod" ]]; then
        echo "copying Nginx banner to container <$CONTAINER> ... "
        lxc file push --uid 0 --gid 0 "$MIAOU_BASEDIR/templates/nginx/snippets/banner_$TARGET.conf" "$CONTAINER/etc/nginx/snippets/banner_$TARGET.conf"
        echo "copying files over container <$CONTAINER> ... OK"
    else
        echo "no Nginx banner on PROD!"
    fi

    echo "populate nftables entries into yaml"
    local wan_interface dmz_ip
    wan_interface=$(ip route show default | cut -d ' ' -f5)
    dmz_ip=$(host "$CONTAINER.lxd" | cut -d ' ' -f4)
    yq ".nftables.wan_interface=\"$wan_interface\"" "$EXPANDED_CONF" -i
    yq ".nftables.dmz_ip=\"$dmz_ip\"" "$EXPANDED_CONF" -i

    # on non-dev targets, generate and deploy the NAT table; on dev, remove
    # any previously deployed table — reload nftables only when changed
    local nftables_reloading=false
    if [[ "$TARGET" != "dev" ]]; then
        mkdir -p "$MIAOU_CONFIGDIR/nftables.rules.d"
        echo "nat http/s port to dmz"
        tera -t "$MIAOU_BASEDIR/templates/nftables/nat.table.j2" "$EXPANDED_CONF" -o "$MIAOU_CONFIGDIR/nftables.rules.d/nat.table" &>/dev/null
        sudo cp "$MIAOU_CONFIGDIR/nftables.rules.d/nat.table" /etc/nftables.rules.d/nat.table
        nftables_reloading=true
    else
        if [[ -f /etc/nftables.rules.d/nat.table ]]; then
            sudo_required "remove previous nat.table"
            sudo rm -f /etc/nftables.rules.d/nat.table
            nftables_reloading=true
        fi
    fi
    if [[ "$nftables_reloading" == true ]]; then
        sudo_required "reload nftables"
        sudo systemctl reload nftables.service
    fi
}
|||
|
|||
# MAIN
. "$MIAOU_BASEDIR/lib/init.sh"

# first positional argument = target container name (mandatory)
arg1_required "$@"
readonly CONTAINER="$1"

# converge: when check fails, install then re-check
check || (
    install
    check
)
@ -0,0 +1,21 @@ |
|||
#!/bin/bash |
|||
|
|||
# Placeholder: dokuwiki provisioning is never verified; always succeeds.
check() {
    echowarn "recipe:dokuwiki not yet checked!"
    return 0
}
|||
|
|||
# Placeholder: dokuwiki installation is not implemented yet.
install() {
    echowarn "recipe:dokuwiki not yet initialized!"
}
|||
|
|||
. "$MIAOU_BASEDIR/lib/init.sh"

# first positional argument = target container name (mandatory)
arg1_required "$@"
readonly CONTAINER="$1"
# make sure the container exists and is running before any probe
launch_container "$CONTAINER"

# converge: when check fails, install then re-check
check || (
    install
    check
)
@ -0,0 +1,180 @@ |
|||
#!/bin/bash |
|||
|
|||
# Probe helpers used by _read; each returns non-zero when its check fails.

# database named after the instance exists
# NOTE(review): grep matches substrings, so a longer database name that
# contains $longname would also match — confirm db-psql list output format
function check_database_exists() {
    db-psql list | grep -q "$longname"
}

# nginx vhost inside the container listens on the expected port
function check_port_used() {
    # shellcheck disable=SC2034
    usedport=$(lxc exec "$container" -- cat /etc/nginx/sites-enabled/"$longname".conf | grep listen | cut -d ' ' -f2)
    [[ "$usedport" == "$port" ]]
}

# application directory deployed under /var/www
function check_directory_exists() {
    lxc exec "$container" -- test -d /var/www/"$longname"
}

# nginx service is active inside the container
function check_service_running() {
    lxc exec "$container" -- bash -c "systemctl is-active --quiet nginx.service"
}
|||
|
|||
function _read() { |
|||
disable_trace |
|||
check_database_exists |
|||
check_container "$container" |
|||
check_port_used |
|||
check_directory_exists |
|||
check_service_running |
|||
enable_trace |
|||
return 0 |
|||
} |
|||
|
|||
# Create one Dolibarr instance <shortname> in <container>:
# download+unpack the latest release, render the nginx vhost from the
# template, create the database and enable the vhost.
# NOTE(review): the tera call uses $MIAOU_DIR while sibling scripts use
# $MIAOU_BASEDIR for templates — confirm which variable is intended.
function _create() {
    PREFIX="recipe:dolibarr:create"
    : $PREFIX

    echo "create a Dolibarr instance for $shortname"

    # download and unpack latest Dolibarr release (idempotent)
    lxc exec "$container" -- bash <<EOF
set -Eeuo pipefail
echo "install latest release ... "
cd /var/www
PATH="\$PATH:/opt/debian-bash/tools"
VERSION="\$(wget_semver github Dolibarr/dolibarr)"
if [[ ! -f "dolibarr-\$VERSION.tgz" ]]; then
    wget_release github Dolibarr/dolibarr
else
    echo "dolibarr version=\$VERSION already downloaded!"
fi

if [[ ! -d "$longname" ]]; then
    tar -xzvf "dolibarr-\$VERSION.tgz"
    mv dolibarr-\$VERSION $longname
    chown www-data:www-data -R $longname
else
    echo "$longname already created!"
fi

EOF

    # render the nginx vhost; PHP_VERSION is parsed from dpkg output
    echo "generating configuration files from templates... "
    mkdir -p "$MIAOU_CONFIGDIR/apps/dolibarr/$shortname"
    PHP_VERSION=$(lxc exec "$container" -- dpkg -l php-fpm | grep "^ii" | cut -d ':' -f2 | cut -d '+' -f1)
    APP_PORT=$port APP_NAME=$longname PHP_VERSION=$PHP_VERSION tera -e -t "$MIAOU_DIR/templates/apps/dolibarr/host.j2" -o "$MIAOU_CONFIGDIR/apps/dolibarr/$shortname/host.conf" "$MIAOU_CONFIGDIR/miaou.expanded.yaml" >/dev/null

    echo "copying configuration files onto container <$container>... "
    lxc file push --uid 0 --gid 0 "$MIAOU_CONFIGDIR/apps/dolibarr/$shortname/host.conf" "$container/etc/nginx/sites-available/$longname.conf"
    echo "copying files over container <$container> ... OK"

    # create the database only once
    if ! (db-psql list | grep -q "$longname"); then
        echo "create empty database <$longname> ... "
        db-psql create "$longname"
        echo "create empty database <$longname> ... OK"

    else
        echo "database already exists!"
    fi

    # symlink the vhost, prepare logs and reload nginx
    echo "enable host config..."
    lxc exec "$container" -- bash <<EOF
set -Eeuo pipefail
cd /etc/nginx/sites-enabled
ln -sf ../sites-available/$longname.conf
mkdir -p /var/log/nginx/$longname
nginx -t
systemctl reload nginx
EOF
    echo "enable host config... OK"
}
|||
|
|||
# Placeholder lifecycle actions (not implemented yet).
_update() {
    echo "TODO: update"
}

_delete() {
    echo "TODO: delete"
}

# Print the command-line synopsis, then abort with status 2.
usage() {
    echo "Usage: $COMMAND_NAME -c|r|u|d --port PORT --container CONTAINER --name NAME"
    exit 2
}
|||
|
|||
### MAIN

# init_strict

COMMAND_NAME=$(basename "$0")

# read the options
# NB: $? right after the assignment is getopt's exit status (SC2181 noted)
TEMP=$(getopt -n "$COMMAND_NAME" -o crud --long port:,container:,name:,fqdn: -- "$@")
# shellcheck disable=SC2181
[[ "$?" -eq 0 ]] || usage
eval set -- "$TEMP"

action="unset"
port="unset"
container="unset"
shortname="unset"
longname="unset"

# extract options and their arguments into variables.
# exactly one of -c/-r/-u/-d may be given; --fqdn is accepted but ignored
while true; do
    case "$1" in
    --port)
        port=$2
        shift 2
        ;;
    --fqdn)
        shift 2
        ;;
    --container)
        container=$2
        shift 2
        ;;
    --name)
        shortname=$2
        longname="dolibarr-$shortname"
        shift 2
        ;;
    -c)
        [[ "$action" == "unset" ]] || usage
        action="_create"
        shift 1
        ;;
    -r)
        [[ "$action" == "unset" ]] || usage
        action="_read"
        shift 1
        ;;
    -u)
        [[ "$action" == "unset" ]] || usage
        action="_update"
        shift 1
        ;;
    -d)
        [[ "$action" == "unset" ]] || usage
        action="_delete"
        shift 1
        ;;
    --)
        shift
        break
        ;;
    *)
        echo "Internal error!"
        exit 1
        ;;
    esac
done

# all of action/port/container/name are mandatory
[[
    "$action" != unset &&
    "$port" != unset &&
    "$container" != unset &&
    "$shortname" != unset ]] || usage

. "$MIAOU_BASEDIR/lib/init.sh"

# dispatch to the selected _create/_read/_update/_delete function
$action
@ -0,0 +1,56 @@ |
|||
#!/bin/bash

# space-separated list of packages required by Dolibarr (nginx + PHP stack)
MANDATORY_PACKAGES_STRING="nginx php-fpm postgresql-client php-pgsql php-intl php-curl php-gd php-zip php-imap php-xml php-mbstring"
|||
|
|||
# Validate the dolibarr base container; distinct return code per failure.
function check() {
    PREFIX="recipe:dolibarr:check"
    : $PREFIX

    check_mandatory_packages || return 11
    check_one_release || return 12

    echo "container <$CONTAINER> approved successfully!"
    return 0
}
|||
|
|||
# Verify every package of MANDATORY_PACKAGES_STRING is installed in the
# container (dpkg status "ii"); the probe aborts on the first missing
# package thanks to `set -e`.
# Fix: the previous `mapfile -t PACKAGES <<< ...` produced a SINGLE array
# element from the space-separated list (mapfile splits on newlines) and
# only worked because of the later unquoted expansion; iterate the
# outer-shell expansion of the list directly instead.
function check_mandatory_packages() {
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
for package in $MANDATORY_PACKAGES_STRING; do
    dpkg -l "\$package" 2>/dev/null | grep -q ^ii
done
EOF
}
|||
|
|||
# Exactly one dolibarr-* file expected under /var/www
# (fd -1q: quiet, succeed only when at least one match exists).
function check_one_release() {
    lxc exec "$CONTAINER" -- /TOOLBOX/fd -1q -tf "dolibarr-" /var/www
}
|||
|
|||
# Install base packages and pre-download the latest Dolibarr release
# tarball into /var/www (per-instance setup happens later in crud.sh).
function install() {
    echo "recipe:dolibarr installing..."
    lxc exec "$CONTAINER" -- bash <<EOF
cloud-init status --wait >/dev/null
apt update
apt install -y $MANDATORY_PACKAGES_STRING
cd /var/www
PATH="\$PATH:/opt/debian-bash/tools"
VERSION="\$(wget_semver github Dolibarr/dolibarr)"
if [[ ! -f "dolibarr-\$VERSION.tgz" ]]; then
    wget_release github Dolibarr/dolibarr
else
    echo "dolibarr version=\$VERSION already downloaded!"
fi
EOF
}
|||
|
|||
. "$MIAOU_BASEDIR/lib/init.sh"

# first positional argument = target container name (mandatory)
arg1_required "$@"
readonly CONTAINER="$1"
# make sure the container exists and is running before probing
launch_container "$CONTAINER"

# converge: when check fails, install then re-check
check || (
    install
    check
)
@ -0,0 +1,65 @@ |
|||
#!/bin/bash |
|||
|
|||
# Validate mariadb setup. First probe runs on the HOST (client package),
# the heredoc probes run inside the container via lxc_exec.
function check() {
    PREFIX="recipe:mariadb:check"
    dpkg -l mariadb-client | grep -q ^ii || return 9
    container_running "$CONTAINER" || return 10
    cat <<EOF | lxc_exec "$CONTAINER" || return 20
set -Eeuo pipefail
systemctl is-active mariadb.service &>/dev/null
ss -tlnp | grep 0.0.0.0:3306 | grep -q maria

test -f /etc/default/automysqlbackup
grep -q BACKUPDIR=\"/mnt/BACKUP/mariadb\" /etc/default/automysqlbackup
EOF
    echo "container <$CONTAINER> approved successfully!"
    return 0
}
|||
|
|||
# Attach a host directory to the container as LXD disk device <BACKUP> so
# database dumps land on the host; idempotent (skips when already attached).
# Fix: quote $backup_dir (SC2086) — $HOME may contain spaces.
function build_device_backup() {
    PREFIX="recipe:mariadb:backup"
    if ! (lxc config device list "$CONTAINER" | grep -q BACKUP); then
        local backup_dir="$HOME/LXD/BACKUP/databases-$CONTAINER"
        mkdir -p "$backup_dir"
        # NOTE(review): path=mnt/BACKUP is relative; LXD normally expects an
        # absolute container path (/mnt/BACKUP, which check() assumes) — confirm
        lxc config device add "$CONTAINER" BACKUP disk source="$backup_dir" path=mnt/BACKUP
    fi
}
|||
|
|||
# Install mariadb-server + automysqlbackup in the container, bind MariaDB
# to all interfaces, disable exim4 if present and point backups at the
# BACKUP device mount. Requires sudo on the host for the client install.
function install() {
    sudo_required
    PREFIX="recipe:mariadb:install"
    : $PREFIX

    sudo /opt/debian-bash/tools/idem_apt_install mariadb-client
    echowarn "initializing ..."
    launch_container "$CONTAINER"
    build_device_backup
    echowarn "executing various commands onto container <$CONTAINER>, please be patient ..."
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
cloud-init status --wait >/dev/null
. /opt/debian-bash/lib/functions.sh
apt update && apt dist-upgrade -y
/opt/debian-bash/tools/idem_apt_install mariadb-server automysqlbackup
echo "change bind-adress"
/opt/debian-bash/tools/append_or_replace "^bind-address.*$" "bind-address = 0.0.0.0" /etc/mysql/mariadb.conf.d/50-server.cnf
systemctl restart mariadb.service

function systemctl-exists() ([ \$(systemctl list-unit-files "\${1}*" | wc -l) -gt 3 ])
systemctl-exists exim4.service && systemctl stop exim4.service && systemctl disable exim4.service
/opt/debian-bash/tools/append_or_replace "^BACKUPDIR=.*$" "BACKUPDIR=\"/mnt/BACKUP/mariadb\"" /etc/default/automysqlbackup
exit 0
EOF
    echo DONE
}
|||
|
|||
# MAIN
. "$MIAOU_BASEDIR/lib/init.sh"

# first positional argument = target container name (mandatory)
arg1_required "$@"
readonly CONTAINER="$1"

# converge: when check fails, install then re-check
check || (
    install
    check
)
@ -0,0 +1,180 @@ |
|||
#!/bin/bash |
|||
|
|||
# Probe helpers used by _read; each returns non-zero when its check fails.

# database named after the instance exists (substring grep — see dolibarr note)
function check_database_exists() {
    db-psql list | grep -q "$longname"
}

# xmlrpc_port configured in /etc/odoo12/<name>.conf matches the expected port
function check_port_used() {
    # shellcheck disable=SC2034
    usedport=$(lxc exec "$container" -- bash -c "grep xmlrpc_port /etc/odoo12/$shortname.conf | cut -d' ' -f3")
    [[ "$usedport" == "$port" ]]
}

# per-instance systemd unit is active inside the container
function check_service_running() {
    lxc exec "$container" -- bash -c "systemctl is-active --quiet ${longname}.service"
}
|||
|
|||
function _read() { |
|||
disable_trace |
|||
check_database_exists |
|||
check_container "$container" |
|||
check_port_used |
|||
check_service_running |
|||
enable_trace |
|||
return 0 |
|||
} |
|||
|
|||
# Create one odoo12 instance <shortname> in <container>: render config and
# systemd unit from templates, push them, create+seed the database (base +
# default modules), set admin credentials, then enable/start the service.
# NOTE(review): uses $MIAOU_DIR for templates while install.sh uses
# $MIAOU_BASEDIR — confirm which variable is intended.
function _create() {

    echo "creating templates ... "
    mkdir -p "$MIAOU_CONFIGDIR/apps/odoo12"

    # longport (= port + 1000) is the odoo longpolling port
    longport=$((port + 1000))
    APP_PORT=$port LONG_PORT=$longport APP_NAME=$shortname tera -e -t "$MIAOU_DIR/templates/apps/odoo12/odoo.conf.j2" -o "$MIAOU_CONFIGDIR/apps/odoo12/$shortname.conf" "$MIAOU_CONFIGDIR/miaou.expanded.yaml" >/dev/null
    APP_NAME=$shortname tera -t "$MIAOU_DIR/templates/apps/odoo12/odoo.service.j2" --env-only -o "$MIAOU_CONFIGDIR/apps/odoo12/$longname.service" >/dev/null
    echo "creating templates ... OK"

    echo "copying files over container <$container> ... "
    lxc file push --uid 0 --gid 0 "$MIAOU_CONFIGDIR/apps/odoo12/$shortname.conf" "$container/etc/odoo12/$shortname.conf"
    lxc file push --uid 0 --gid 0 "$MIAOU_CONFIGDIR/apps/odoo12/$longname.service" "$container/etc/systemd/system/$longname.service"
    echo "copying files over container <$container> ... OK"

    # first-time only: create the database and seed it
    if ! (db-psql list | grep -q "$longname"); then
        echo "create empty database <$longname> ... "
        db-psql create "$longname"
        echo "create empty database <$longname> ... OK"

        credential_username=$(load_yaml_from_expanded credential.username)
        credential_password=$(load_yaml_from_expanded credential.password)
        cat <<EOF | lxc_exec "$container"
set -Eeuo pipefail
echo reloading systemd
systemctl daemon-reload
systemctl stop $longname

echo initialize database...
su odoo -c "/home/odoo/venv/bin/python3 /home/odoo/odoo12/odoo-bin -c /etc/odoo12/$shortname.conf -i base --without-demo=all --stop-after-init"
echo initialize database OK

echo install default modules ...
su odoo -c "/home/odoo/venv/bin/python3 /home/odoo/odoo12/odoo-bin -c /etc/odoo12/$shortname.conf -i account,contacts,l10n_fr,account,sale,point_of_sale -d $longname --worker=0 --stop-after-init"
echo default modules OK

chmod u+w -R "/home/odoo/data-$shortname"

echo "TODO: change administrator password, default credential applied!"
echo "UPDATE res_users SET login='$credential_username', password='$credential_password' WHERE id=2" | PGPASSWORD=$longname psql -U $longname -h ct1.lxd
EOF
    else
        echo "database already exists!"
    fi

    # ensure the per-instance service is started and enabled (idempotent)
    echo "initialize odoo $shortname $longname ..."
    cat <<EOF | lxc_exec "$container"
set -Eeuo pipefail
echo reloading systemd
systemctl daemon-reload

if ! systemctl is-active --quiet $longname; then
    echo start service $longname
    systemctl start $longname
    systemctl is-active --quiet $longname
    systemctl enable $longname
else
    echo service $longname already started!
fi
EOF
    echo "initialize odoo $shortname $longname ... OK"
}
|||
|
|||
# Placeholder lifecycle actions (not implemented yet).
_update() {
    echo "TODO: update"
}

_delete() {
    echo "TODO: delete"
}

# Print the command-line synopsis, then abort with status 2.
usage() {
    echo "Usage: $COMMAND_NAME -c|r|u|d --port PORT --container CONTAINER --name NAME"
    exit 2
}
|||
|
|||
### MAIN

# init_strict

COMMAND_NAME=$(basename "$0")

# read the options
# NB: $? right after the assignment is getopt's exit status (SC2181 noted)
TEMP=$(getopt -n "$COMMAND_NAME" -o crud --long port:,container:,name:,fqdn: -- "$@")
# shellcheck disable=SC2181
[[ "$?" -eq 0 ]] || usage
eval set -- "$TEMP"

action="unset"
port="unset"
container="unset"
shortname="unset"
longname="unset"

# extract options and their arguments into variables.
# exactly one of -c/-r/-u/-d may be given; --fqdn is accepted but ignored
while true; do
    case "$1" in
    --port)
        port=$2
        shift 2
        ;;
    --fqdn)
        shift 2
        ;;
    --container)
        container=$2
        shift 2
        ;;
    --name)
        shortname=$2
        longname="odoo12-$shortname"
        shift 2
        ;;
    -c)
        [[ "$action" == "unset" ]] || usage
        action="_create"
        shift 1
        ;;
    -r)
        [[ "$action" == "unset" ]] || usage
        action="_read"
        shift 1
        ;;
    -u)
        [[ "$action" == "unset" ]] || usage
        action="_update"
        shift 1
        ;;
    -d)
        [[ "$action" == "unset" ]] || usage
        action="_delete"
        shift 1
        ;;
    --)
        shift
        break
        ;;
    *)
        echo "Internal error!"
        exit 1
        ;;
    esac
done

# all of action/port/container/name are mandatory
[[
    "$action" != unset &&
    "$port" != unset &&
    "$container" != unset &&
    "$shortname" != unset ]] || usage

. "$MIAOU_BASEDIR/lib/init.sh"

# dispatch to the selected _create/_read/_update/_delete function
$action
@ -0,0 +1,158 @@ |
|||
#!/bin/bash

# expanded miaou configuration (source of the deployment target)
readonly EXPANDED_CONF="$MIAOU_CONFIGDIR/miaou.expanded.yaml"
# wkhtmltopdf release pinned for Debian buster
readonly WKHTML_VERSION="0.12.6-1"
readonly WKHTML_RELEASE="$WKHTML_VERSION.buster_amd64"
|||
|
|||
# Probe helpers for check(); each returns non-zero when its probe fails.

# user <odoo> exists inside the container
function check_user_odoo() {
    (lxc exec "$CONTAINER" -- id odoo &>/dev/null) || return 12
    return 0
}

# target accent color already applied to the web addon SCSS variables
function check_target_bgcolor() {
    (lxc exec "$CONTAINER" -- grep -Pq "^\\\$o-community-color: $BACKGROUND_COLOR" /home/odoo/odoo12/addons/web/static/src/scss/primary_variables.scss) || return 13
    return 0
}

# pinned wkhtmltox version installed
function check_wkhtmltox() {
    (lxc exec "$CONTAINER" -- dpkg -l | grep -s wkhtmltox | grep -qs $WKHTML_VERSION) || return 1
}

# Python 3.7.13 source tree present under /opt
function check_python() {
    (lxc exec "$CONTAINER" -- test -d /opt/Python-3.7.13) || return 1
}

# odoo virtualenv created in the odoo user home
function check_venv() {
    (lxc exec "$CONTAINER" -- test -d /home/odoo/venv) || return 1
}

# favicon is a symlink (pointing at the per-target icon)
function check_favicon() {
    lxc exec "$CONTAINER" -- test -L /home/odoo/odoo12/addons/web/static/src/img/favicon.ico
}

# addon-install helper script pushed into the odoo12 tree
function check_file_odoo-addon-install() {
    (lxc exec "$CONTAINER" -- test -f /home/odoo/odoo12/odoo12-addon-install) || return 23
}
|||
|
|||
# Aggregate all odoo12 probes; each failure maps to its own exit code.
function check() {
    PREFIX="recipe:odoo12:check"
    if ! check_wkhtmltox; then return 10; fi
    if ! check_python; then return 11; fi
    if ! check_user_odoo; then return 12; fi
    if ! check_target_bgcolor; then return 13; fi
    if ! check_venv; then return 14; fi
    if ! check_favicon; then return 15; fi
    if ! check_file_odoo-addon-install; then return 23; fi

    echo "container <$CONTAINER> approved successfully!"
    return 0
}
|||
|
|||
# Provision odoo12 inside the container: wkhtmltox, Python 3.7.13 (built
# from source), the odoo system user, the odoo12 git checkout + venv, the
# target accent color, the addon-install helper and per-target favicons.
# NOTE(review): `ldconfig /opt/Python3.7.13` below (inside the heredoc)
# looks like a typo for /opt/Python-3.7.13 — confirm intended path.
# NOTE(review): `make -j8` hardcodes parallelism regardless of host cores.
function install() {
    PREFIX="recipe:odoo12:install"
    : $PREFIX

    launch_container "$CONTAINER"
    echo "initializing Odoo12 ... "

    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
echo "installing odoo12..."
apt-get update && apt-get dist-upgrade -y

if dpkg -l | grep -s wkhtmltox | grep -qs $WKHTML_VERSION; then
    echo package=wkhtmltox version=$WKHTML_RELEASE already found!
else
    echo "wkhtmltox version=$WKHTML_RELEASE has to be installed!"
    wget https://github.com/wkhtmltopdf/packaging/releases/download/$WKHTML_VERSION/wkhtmltox_$WKHTML_RELEASE.deb
    dpkg -i wkhtmltox_$WKHTML_RELEASE.deb || (apt -fy install && rm wkhtmltox_$WKHTML_RELEASE.deb)
fi

if [[ ! -d /opt/Python-3.7.13 ]]; then
    echo "installing Python-3.7.13..."
    apt-get install -y zlib1g-dev python3-pip python3-venv xz-utils build-essential libssl-dev python3-dev libxml2-dev libxslt1-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev postgresql-client
    wget -O- https://www.python.org/ftp/python/3.7.13/Python-3.7.13.tar.xz | tar -xJ -C /opt
    chown -R root:root /opt/Python-3.7.13
    cd /opt/Python-3.7.13
    ./configure --enable-optimizations --enable-shared
    make -j8 altinstall
    ldconfig /opt/Python3.7.13
    echo "Python-3.7.13...OK"
else
    echo "Python-3.7.13 already installed!"
fi

if ! id -u odoo &>/dev/null; then
    echo "creating system user <odoo>"
    useradd -rms /bin/bash odoo
else
    echo "user <odoo> already exists!"
fi

cat <<EOT | su - odoo
if [[ ! -d odoo12 ]]; then
    echo "git odoo12 from remote"
    git clone https://github.com/odoo/odoo.git --depth 1 --branch 12.0 odoo12
else
    echo "git odoo12 already downloaded!"
fi
if [[ ! -d venv ]]; then
    echo "installing Python-3.7 virtual env (venv)"
    python3.7 -m venv venv
    source venv/bin/activate
    python -m pip install --upgrade pip
    pip install wheel
    pip install -r odoo12/requirements.txt
else
    echo "venv (Python-3.7) already installed!"
fi

echo "community-color change to $BACKGROUND_COLOR"
/opt/debian-bash/tools/append_or_replace "^.*o-community-color:.*" "\\\\\\\$o-community-color: $BACKGROUND_COLOR;" /home/odoo/odoo12/addons/web/static/src/scss/primary_variables.scss

EOT
mkdir -p /etc/odoo12
EOF

    # push the addon-install helper and make it executable by odoo
    lxc file push "$MIAOU_BASEDIR/templates/apps/odoo12/odoo12-addon-install" "$CONTAINER/home/odoo/odoo12/odoo12-addon-install"
    lxc exec "$CONTAINER" -- bash <<EOF
chown odoo:odoo /home/odoo/odoo12/odoo12-addon-install
chmod 740 /home/odoo/odoo12/odoo12-addon-install
echo "new script <odoo12-addon-install> added!"
EOF

    echo "push various target-related favicons..."
    for favicon in "$MIAOU_BASEDIR"/templates/apps/odoo12/favicon/*.ico; do
        lxc file push --uid 0 --gid 0 "$favicon" "$CONTAINER/home/odoo/odoo12/addons/web/static/src/img/"
    done
    echo "OK"

    # favicon.ico is a symlink to the icon matching the deployment target
    echo "adjust symbolic link according to target=<$TARGET>"
    lxc exec "$CONTAINER" -- rm -f /home/odoo/odoo12/addons/web/static/src/img/favicon.ico
    lxc exec "$CONTAINER" -- ln -s /home/odoo/odoo12/addons/web/static/src/img/favicon-"$TARGET".ico /home/odoo/odoo12/addons/web/static/src/img/favicon.ico
    echo "OK"
}
|||
|
|||
# Map the deployment target to its Odoo community accent color; abort with
# status 10 on an unknown target.
function compute_bgcolor_target() {
    local color
    case "$TARGET" in
    dev) color="#17a2b8" ;;
    beta) color="#79A70A" ;;
    prod) color="#7C7BAD" ;;
    *)
        echoerr "unknown target <$TARGET>"
        exit 10
        ;;
    esac
    echo "$color"
}
|||
|
|||
### MAIN

. "$MIAOU_BASEDIR/lib/init.sh"
# first positional argument = target container name (mandatory)
arg1_required "$@"
readonly CONTAINER="$1"
# target and its accent color drive the branding checks/installation
TARGET=$(yq '.target' "$EXPANDED_CONF")
readonly TARGET
BACKGROUND_COLOR=$(compute_bgcolor_target)
readonly BACKGROUND_COLOR

# converge: when check fails, install then re-check
check || (
    install
    check
)
@ -0,0 +1,196 @@ |
|||
#!/bin/bash |
|||
|
|||
# Probe helpers used by _read; each returns non-zero when its check fails.

# database named after the instance exists (substring grep — see dolibarr note)
function check_database_exists() {
    db-psql list | grep -q "$longname"
}

# xmlrpc_port configured in /etc/odoo15/<name>.conf matches the expected port
function check_port_used() {
    # shellcheck disable=SC2034
    usedport=$(lxc exec "$container" -- bash -c "grep xmlrpc_port /etc/odoo15/$shortname.conf | cut -d' ' -f3")
    [[ "$usedport" == "$port" ]]
}

# per-instance systemd unit is active inside the container
function check_service_running() {
    lxc exec "$container" -- bash -c "systemctl is-active --quiet ${longname}.service"
}
|||
|
|||
function _read() { |
|||
disable_trace |
|||
check_database_exists |
|||
check_container "$container" |
|||
check_port_used |
|||
check_service_running |
|||
enable_trace |
|||
return 0 |
|||
} |
|||
|
|||
# Create one odoo15 instance <shortname> in <container>: render config and
# systemd unit, push them, prepare the writable addons data folder, create
# and seed the database (base + default modules + fr_FR), set admin
# credentials, then enable/start the per-instance service.
# NOTE(review): uses $MIAOU_DIR for templates while install.sh uses
# $MIAOU_BASEDIR — confirm which variable is intended.
function _create() {

    echo "creating templates ... "
    mkdir -p "$MIAOU_CONFIGDIR/apps/odoo15"

    # longport (= port + 1000) is the odoo longpolling port
    longport=$((port + 1000))
    APP_PORT=$port LONG_PORT=$longport APP_NAME=$shortname tera -e -t "$MIAOU_DIR/templates/apps/odoo15/odoo.conf.j2" -o "$MIAOU_CONFIGDIR/apps/odoo15/$shortname.conf" "$MIAOU_CONFIGDIR/miaou.expanded.yaml" >/dev/null
    APP_NAME=$shortname tera -t "$MIAOU_DIR/templates/apps/odoo15/odoo.service.j2" --env-only -o "$MIAOU_CONFIGDIR/apps/odoo15/$longname.service" >/dev/null
    echo "creating templates ... OK"

    echo "copying files over container <$container> ... "
    lxc file push --uid 0 --gid 0 "$MIAOU_CONFIGDIR/apps/odoo15/$shortname.conf" "$container/etc/odoo15/$shortname.conf"
    lxc file push --uid 0 --gid 0 "$MIAOU_CONFIGDIR/apps/odoo15/$longname.service" "$container/etc/systemd/system/$longname.service"
    echo "copying files over container <$container> ... OK"

    echo "create data folder for $shortname"
    cat <<EOF | lxc_exec "$container"
set -Eeuo pipefail

echo allow addons writable for user odoo
mkdir -p /home/odoo/data-$shortname/addons/15.0
chmod u+w /home/odoo/data-$shortname/addons/15.0
chown -R odoo:odoo /home/odoo/data-$shortname
EOF

    # first-time only: create the database and seed it
    if ! (db-psql list | grep -q "$longname"); then
        echo "create empty database <$longname> ... "
        db-psql create "$longname"
        echo "create empty database <$longname> ... OK"

        credential_username=$(load_yaml_from_expanded credential.username)
        credential_password=$(load_yaml_from_expanded credential.password)

        cat <<EOF | lxc_exec "$container"
set -Eeuo pipefail

echo reloading systemd
systemctl daemon-reload
systemctl stop $longname

echo initialize database...
su odoo -c "python3.9 /home/odoo/odoo15/odoo-bin -c /etc/odoo15/$shortname.conf -i base --without-demo=all --stop-after-init"
echo initialize database OK

echo install default modules ...
su odoo -c "python3.9 /home/odoo/odoo15/odoo-bin -c /etc/odoo15/$shortname.conf -i account,contacts,l10n_fr,account,sale,point_of_sale -d $longname --worker=0 --stop-after-init"
echo default modules OK

echo activate french language
su odoo -c "python3.9 /home/odoo/odoo15/odoo-bin -c /etc/odoo15/$shortname.conf --load-language fr_FR -d $longname --worker=0 --stop-after-init"

echo change administrator password, default credential applied!
echo "UPDATE res_users SET login='$credential_username', password='$credential_password' WHERE id=2" | PGPASSWORD=$longname psql -U $longname -h ct1.lxd

EOF
    else
        echo "database already exists!"
    fi

    # ensure the per-instance service is enabled and started (idempotent)
    echo "initialize odoo $shortname $longname ..."
    cat <<EOF | lxc_exec "$container"
set -Eeuo pipefail

echo reloading systemd
systemctl daemon-reload

if ! systemctl is-active --quiet $longname; then
    echo start service $longname
    systemctl enable $longname
    systemctl start $longname
    systemctl is-active --quiet $longname
else
    echo service $longname already started!
fi

EOF
    echo "initialize odoo $shortname $longname ... OK"
}
|||
|
|||
# Placeholder lifecycle actions (not implemented yet).
_update() {
    echo "update"
}

_delete() {
    echo "delete"
}

# Print the command-line synopsis, then abort with status 2.
usage() {
    echo "Usage: $COMMAND_NAME -c|r|u|d --port PORT --container CONTAINER --name NAME"
    exit 2
}
|||
|
|||
### MAIN

# init_strict

COMMAND_NAME=$(basename "$0")

# read the options
# NB: $? right after the assignment is getopt's exit status (SC2181 noted)
TEMP=$(getopt -n "$COMMAND_NAME" -o crud --long port:,container:,name:,fqdn: -- "$@")
# shellcheck disable=SC2181
[[ "$?" -eq 0 ]] || usage
eval set -- "$TEMP"

action="unset"
port="unset"
container="unset"
shortname="unset"
longname="unset"

# extract options and their arguments into variables.
# exactly one of -c/-r/-u/-d may be given; --fqdn is accepted but ignored
while true; do
    case "$1" in
    --port)
        port=$2
        shift 2
        ;;
    --fqdn)
        shift 2
        ;;
    --container)
        container=$2
        shift 2
        ;;
    --name)
        shortname=$2
        longname="odoo15-$shortname"
        shift 2
        ;;
    -c)
        [[ "$action" == "unset" ]] || usage
        action="_create"
        shift 1
        ;;
    -r)
        [[ "$action" == "unset" ]] || usage
        action="_read"
        shift 1
        ;;
    -u)
        [[ "$action" == "unset" ]] || usage
        action="_update"
        shift 1
        ;;
    -d)
        [[ "$action" == "unset" ]] || usage
        action="_delete"
        shift 1
        ;;
    --)
        shift
        break
        ;;
    *)
        echo "Internal error!"
        exit 1
        ;;
    esac
done

# all of action/port/container/name are mandatory
[[
    "$action" != unset &&
    "$port" != unset &&
    "$container" != unset &&
    "$shortname" != unset ]] || usage

. "$MIAOU_BASEDIR/lib/init.sh"

# dispatch to the selected _create/_read/_update/_delete function
$action
@ -0,0 +1,129 @@ |
|||
#!/bin/bash

# expanded miaou configuration (source of the deployment target)
readonly EXPANDED_CONF="$MIAOU_CONFIGDIR/miaou.expanded.yaml"
# odoo 15 checkout location inside the container
readonly ODOO15_DIR="/home/odoo/odoo15"

# wkhtmltopdf release pinned for Debian bookworm
readonly WKHTML_VERSION="0.12.6.1-3"
readonly WKHTML_RELEASE="$WKHTML_VERSION.bookworm_amd64"
|||
|
|||
# Probe helpers for check(); each returns non-zero when its probe fails.

# user <odoo> exists inside the container
function check_user_odoo() {
    (lxc exec "$CONTAINER" -- id odoo &>/dev/null) || return 12
    return 0
}

# target accent color already applied to the legacy SCSS variables file
function check_target_bgcolor() {
    (lxc exec "$CONTAINER" -- grep -Pq "^\\\$o-community-color: $BACKGROUND_COLOR" "$ODOO15_DIR/addons/web/static/src/legacy/scss/primary_variables.scss") || return 13
    return 0
}

# addon-install helper script pushed into the odoo15 tree
function check_file_odoo-addon-install() {
    (lxc exec "$CONTAINER" -- test -f /home/odoo/odoo15/odoo-addon-install) || return 23
    return 0
}

# Aggregate all odoo15 probes; distinct return code per failure.
function check() {
    PREFIX="recipe:odoo15:check"
    check_user_odoo || return 21
    check_target_bgcolor || return 22
    check_file_odoo-addon-install || return 23

    echo "container <$CONTAINER> approved successfully!"
    return 0
}
|||
|
|||
# Provision Odoo 15 inside the container: base packages, a source-built
# Python 3.9, wkhtmltopdf, the <odoo> user, a shallow odoo 15.0 git clone
# with its pip requirements, and the target-specific community color.
# Idempotent: each step checks for prior completion before acting.
function install() {
    PREFIX="recipe:odoo15:install"
    : $PREFIX # no-op reference so shellcheck does not flag PREFIX as unused
    launch_container "$CONTAINER"
    echo "initializing Odoo15 ... "

    # Remote provisioning script. Escaped \$ expansions run inside the
    # container; unescaped $WKHTML_* / $BACKGROUND_COLOR expand here first.
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
cloud-init status --wait > /dev/null

echo "installing odoo15..."
apt update && apt dist-upgrade -y

echo "required packages"
apt install -y postgresql-client build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev

if [[ ! -d /usr/local/share/python3.9 ]]; then
    echo "install python-3.9.18"
    cd /tmp
    wget https://www.python.org/ftp/python/3.9.18/Python-3.9.18.tgz
    tar -xf Python-3.9.18.tgz
    mv Python-3.9.18 /usr/local/share/python3.9
    cd /usr/local/share/python3.9
    ./configure --enable-optimizations --enable-shared
    make -j \$(nproc)
    make altinstall
    ldconfig /usr/local/share/python3.9
else
    echo "python-3.9.18 already installed!"
fi

if dpkg -l | grep -s wkhtmltox | grep -qs $WKHTML_VERSION; then
    echo package=wkhtmltox version=$WKHTML_RELEASE already found!
else
    echo "wkhtmltox version=$WKHTML_RELEASE has to be installed!"
    wget https://github.com/wkhtmltopdf/packaging/releases/download/$WKHTML_VERSION/wkhtmltox_$WKHTML_RELEASE.deb
    dpkg -i wkhtmltox_$WKHTML_RELEASE.deb || (apt -fy install && rm wkhtmltox_$WKHTML_RELEASE.deb)
fi

if ! grep -q odoo /etc/passwd; then
    echo "add user <odoo>"
    useradd -ms /bin/bash odoo
else
    echo "user <odoo> already set!"
fi

echo "install odoo15 in odoo userspace"
cat <<EOT | su - odoo
set -Eeuo pipefail

if [[ ! -d odoo15 ]]; then
    echo "git odoo15 from remote"
    git clone https://github.com/odoo/odoo.git --depth 1 --branch 15.0 odoo15
    export MAKEFLAGS="-j\$(nproc)"
    pip3.9 install --upgrade pip
    pip3.9 install wheel pypdf2 slugify
    pip3.9 install -r odoo15/requirements.txt
else
    echo "git odoo15 already downloaded!"
fi
echo "community-color change to $BACKGROUND_COLOR"
/opt/debian-bash/tools/append_or_replace "^.*o-community-color:.*" "\\\\\\\$o-community-color: $BACKGROUND_COLOR !default;" /home/odoo/odoo15/addons/web/static/src/legacy/scss/primary_variables.scss

EOT
mkdir -p /etc/odoo15
EOF
    # Push the addon-install helper, then fix ownership/permissions remotely.
    lxc file push "$MIAOU_BASEDIR/templates/apps/odoo15/odoo-addon-install" "$CONTAINER/home/odoo/odoo15/odoo-addon-install"
    lxc exec "$CONTAINER" -- bash <<EOF
chown odoo:odoo /home/odoo/odoo15/odoo-addon-install
chmod 740 /home/odoo/odoo15/odoo-addon-install
echo "new script <odoo-addon-install> added!"
EOF
}
|||
|
|||
# Echo the Odoo community color matching the deployment target
# (dev/beta/prod) read from the expanded config; abort (10) otherwise.
function compute_bgcolor_target() {
    local target
    target=$(yq '.target' "$EXPANDED_CONF")
    case "$target" in
    dev) builtin echo "#17a2b8" ;;
    beta) builtin echo "#79A70A" ;;
    prod) builtin echo "#7C7BAD" ;;
    *) echoerr "unknown target <$target>" && exit 10 ;;
    esac
}
|||
|
|||
### MAIN |
|||
|
|||
# Entry point: a single required argument names the target container.
. "$MIAOU_BASEDIR/lib/init.sh"
arg1_required "$@"
readonly CONTAINER="$1"
# Assign first, then freeze: `readonly X=$(...)` on one line would mask the
# command-substitution exit status.
BACKGROUND_COLOR=$(compute_bgcolor_target)
readonly BACKGROUND_COLOR

# Install only when the container fails validation, then re-validate.
# NOTE(review): the (...) runs in a subshell; the script's exit status is
# that of the final check.
check || (
    install
    check
)
@ -0,0 +1,68 @@ |
|||
#!/bin/bash |
|||
|
|||
# Validate the postgresql container: service active, listening on
# 0.0.0.0:5432, pg_hba allows md5 auth from the LXD bridge subnet, and the
# autopostgresqlbackup config is in place. Returns 10 if not running,
# otherwise the remote script's status.
function check() {
    PREFIX="recipe:postgresql:check"
    container_running "$CONTAINER" || return 10
    echo "checking postgresql regarding access to the bridge subnet <$BRIDGE_SUBNET>..."

    # Escaped \$PG_VERSION expands remotely; $BRIDGE_SUBNET expands here.
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
systemctl is-active postgresql.service &>/dev/null
ss -tlnp | grep postgres | grep -q 0.0.0.0:5432
PG_VERSION=\$(pg_lsclusters -h | cut -d' ' -f1)
grep -Eq "^host.*all.*all.*$BRIDGE_SUBNET.*md5" /etc/postgresql/\$PG_VERSION/main/pg_hba.conf
test -f /etc/default/autopostgresqlbackup
EOF
    # NOTE(review): capturing $? assumes errexit is not active at this
    # scope, otherwise a failing lxc exec would abort before this line.
    status="$?"
    [[ $status -eq 0 ]] && echo "container <$CONTAINER> approved!"
    return $status
}
|||
|
|||
# Install and configure PostgreSQL in the container: open listen_addresses
# to the bridge, allow md5 auth from the bridge subnet, and deploy the
# autopostgresqlbackup tooling (daily cron + config).
function install() {
    PREFIX="recipe:postgresql:install"
    : "$PREFIX" # no-op reference so shellcheck does not flag PREFIX as unused

    echowarn "initializing postgresql regarding access to the bridge subnet <$BRIDGE_SUBNET>..."

    launch_container "$CONTAINER"
    # Escaped \$ expansions run remotely; $BRIDGE_SUBNET expands here.
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail

apt update
. /opt/debian-bash/lib/functions.sh
/opt/debian-bash/tools/idem_apt_install postgresql

echo -n "start postgresql now..."
PG_VERSION=\$(pg_lsclusters -h | cut -d' ' -f1)
pg_ctlcluster \$PG_VERSION main start
echo "OK"

function systemctl-exists() ([ \$(systemctl list-unit-files "\${1}*" | wc -l) -gt 3 ])
systemctl-exists exim4.service && systemctl disable exim4.service

/opt/debian-bash/tools/append_or_replace "^listen_addresses = .*$" "listen_addresses = '0.0.0.0'" /etc/postgresql/\$PG_VERSION/main/postgresql.conf
/opt/debian-bash/tools/append_or_replace "^host.*all.*all.*$BRIDGE_SUBNET.*md5" "host\tall\t\tall\t\t$BRIDGE_SUBNET\t\tmd5" /etc/postgresql/\$PG_VERSION/main/pg_hba.conf
systemctl restart postgresql.service
EOF

    # Deploy backup script, daily cron hook and default configuration.
    echo -n "copying <autopostgresqlbackup> files over container <$CONTAINER> ... "
    lxc file push --uid 0 --gid 0 "$MIAOU_BASEDIR/templates/autopostgresqlbackup/script" "$CONTAINER/usr/sbin/autopostgresqlbackup"
    lxc file push --uid 0 --gid 0 "$MIAOU_BASEDIR/templates/autopostgresqlbackup/cron.daily" "$CONTAINER/etc/cron.daily/autopostgresqlbackup"
    lxc file push --uid 0 --gid 0 "$MIAOU_BASEDIR/templates/autopostgresqlbackup/default.conf" "$CONTAINER/etc/default/autopostgresqlbackup"
    PREFIX="" echo OK

}
|||
|
|||
# MAIN |
|||
# Entry point: <container> is the sole required argument; the bridge subnet
# is read from the lxdbr0 network so pg_hba can allow guest connections.
. "$MIAOU_BASEDIR/lib/init.sh"

arg1_required "$@"

CONTAINER="$1"
BRIDGE_SUBNET=$(lxc network get lxdbr0 ipv4.address)
readonly CONTAINER BRIDGE_SUBNET

# Install only when validation fails, then re-validate (subshell: exit
# status is that of the final check).
check || (
    install
    check
)
@ -0,0 +1,202 @@ |
|||
#!/bin/bash |
|||
|
|||
# True when a MariaDB database named after this instance already exists.
function check_database_exists() {
    db-maria list | grep --quiet "$longname"
}
|||
|
|||
# True when the nginx vhost enabled in the container listens on the
# expected $port.
function check_port_used() {
    # shellcheck disable=SC2034
    # NOTE(review): field 6 of the `listen` line is assumed to hold the
    # port — depends on the wp-host.j2 template layout; confirm there.
    usedport=$(lxc exec "$container" -- bash -c "grep listen /etc/nginx/sites-enabled/$longname.conf | cut -d' ' -f6")
    [[ "$usedport" == "$port" ]]
}
|||
|
|||
# True when nginx is active inside the target container.
function check_service_running() {
    local probe="systemctl is-active --quiet nginx.service"
    lxc exec "$container" -- bash -c "$probe"
}
|||
|
|||
# Read/verify the instance: database present, container ok, nginx bound to
# the expected port, nginx running. Tracing is muted around the probes to
# keep output clean (helpers presumably from lib/init.sh).
function _read() {
    disable_trace
    check_database_exists
    check_container "$container"
    check_port_used
    check_service_running
    enable_trace
    return 0
}
|||
|
|||
# Create a WordPress instance <shortname>: render the nginx vhost from the
# tera template, push it into the container, create the MariaDB database
# (user/password conventionally equal to $longname), unpack the cached
# WordPress tarball, generate wp-config.php and enable + reload nginx.
# Idempotent: existing database/instance/config are left untouched.
function _create() {

    echo "creating wordpress instance for <$shortname> ... "

    mkdir -p "$MIAOU_CONFIGDIR/apps/wordpress"
    APP_PORT=$port APP_NAME=$shortname tera -e --env-key env -t "$MIAOU_BASEDIR/templates/apps/wordpress/wp-host.j2" -o "$MIAOU_CONFIGDIR/apps/wordpress/$longname.conf" "$MIAOU_CONFIGDIR/miaou.expanded.yaml"
    echo "creating templates ... OK"

    echo "copying files over container <$container> ... "
    lxc file push --uid 0 --gid 0 "$MIAOU_CONFIGDIR/apps/wordpress/$longname.conf" "$container/etc/nginx/sites-available/$longname.conf"
    echo "copying files over container <$container> ... OK"

    if ! (db-maria list | grep -q "$longname"); then
        echo "create empty database <$longname> ... "
        db-maria create "$longname"
        echo "create empty database <$longname> ... OK"
    else
        echo "database already exists!"
    fi

    echo "initialize wordpress $shortname $longname ..."
    # Outer heredoc is unquoted: $shortname/$longname and the $(genpasswd 20)
    # salts expand on the HOST before the script is sent; the inner quoted
    # 'EOT2' heredoc keeps \$table_prefix / \$_SERVER literal for PHP.
    lxc exec "$container" -- bash <<EOF
set -Eeuo pipefail
if [[ ! -d /var/www/wordpress/$shortname ]]; then
    echo "installing new instance of wordpress into /var/www/wordpress/$shortname"
    mkdir -p /tmp/$shortname
    tar -xzf /var/www/wordpress-latest.tgz -C /tmp/$shortname
    mv /tmp/$shortname/wordpress /var/www/wordpress/$shortname
else
    echo "instance of wordpress /var/www/wordpress/$shortname already defined!"
fi

if [[ ! -f /var/www/wordpress/$shortname/wp-config.php ]]; then
    echo "create wp-config.php"
    cat << 'EOT2' > /var/www/wordpress/$shortname/wp-config.php
<?php

define( 'DB_NAME', '$longname' );
define( 'DB_USER', '$longname' );
define( 'DB_PASSWORD', '$longname' );
define( 'DB_HOST', 'ct1.lxd' );

define( 'DB_CHARSET', 'utf8' );
define( 'DB_COLLATE', '' );

define( 'AUTH_KEY', '$(genpasswd 20)' );
define( 'SECURE_AUTH_KEY', '$(genpasswd 20)' );
define( 'LOGGED_IN_KEY', '$(genpasswd 20)' );
define( 'NONCE_KEY', '$(genpasswd 20)' );
define( 'AUTH_SALT', '$(genpasswd 20)' );
define( 'SECURE_AUTH_SALT', '$(genpasswd 20)' );
define( 'LOGGED_IN_SALT', '$(genpasswd 20)' );
define( 'NONCE_SALT', '$(genpasswd 20)' );

\$table_prefix = 'wp_';
define( 'WP_DEBUG', false );

/* Add any custom values between this line and the "stop editing" line. */
\$_SERVER['HTTPS'] = 'on';

/* That's all, stop editing! Happy publishing. */

/** Absolute path to the WordPress directory. */
if ( ! defined( 'ABSPATH' ) ) {
    define( 'ABSPATH', __DIR__ . '/' );
}

/** Sets up WordPress vars and included files. */
require_once ABSPATH . 'wp-settings.php';
EOT2

else
    echo "wp-config.php already defined!"
fi

chown -R www-data:www-data /var/www/wordpress/$shortname

echo "enabling wordpress host into nginx"
mkdir -p /var/log/nginx/$shortname
ln -sf "/etc/nginx/sites-available/$longname.conf" "/etc/nginx/sites-enabled/$longname.conf"

echo "reloading nginx"
nginx -tq && systemctl reload nginx
EOF
    echo "initialize wordpress $shortname $longname ... OK"
}
|||
|
|||
# Placeholder: update is not implemented yet.
function _update() {
    printf '%s\n' "update"
}
|||
|
|||
# Placeholder: delete is not implemented yet.
function _delete() {
    printf '%s\n' "delete"
}
|||
|
|||
# Print invocation syntax and exit 2 (invalid command line).
function usage() {
    printf 'Usage: %s -c|r|u|d --port PORT --container CONTAINER --name NAME\n' "$COMMAND_NAME"
    exit 2
}
|||
|
|||
### MAIN |
|||
|
|||
# init_strict |
|||
|
|||
COMMAND_NAME=$(basename "$0")

# Parse the command line with getopt: exactly one action flag (-c/-r/-u/-d)
# plus the long options; --fqdn is accepted but currently unused.
TEMP=$(getopt -n "$COMMAND_NAME" -o crud --long port:,container:,name:,fqdn: -- "$@")
# shellcheck disable=SC2181
[[ "$?" -eq 0 ]] || usage
eval set -- "$TEMP"

# "unset" sentinels let the post-loop validation detect missing options.
action="unset"
port="unset"
container="unset"
shortname="unset"
longname="unset"

# extract options and their arguments into variables.
while true; do
    case "$1" in
    --port)
        port=$2
        shift 2
        ;;
    --fqdn)
        # accepted for interface compatibility; value ignored
        shift 2
        ;;
    --container)
        container=$2
        shift 2
        ;;
    --name)
        shortname=$2
        # instance resources (db, vhost) are namespaced with the wp- prefix
        longname="wp-$shortname"
        shift 2
        ;;
    -c)
        # reject a second action flag
        [[ "$action" == "unset" ]] || usage
        action="_create"
        shift 1
        ;;
    -r)
        [[ "$action" == "unset" ]] || usage
        action="_read"
        shift 1
        ;;
    -u)
        [[ "$action" == "unset" ]] || usage
        action="_update"
        shift 1
        ;;
    -d)
        [[ "$action" == "unset" ]] || usage
        action="_delete"
        shift 1
        ;;
    --)
        shift
        break
        ;;
    *)
        # getopt guarantees well-formed input; anything else is a bug
        echo "Internal error!"
        exit 1
        ;;
    esac
done

# Load shared helpers, require all mandatory options, then dispatch.
. "$MIAOU_BASEDIR/lib/init.sh"
[[
    "$action" != unset &&
    "$port" != unset &&
    "$container" != unset &&
    "$shortname" != unset ]] || usage
$action
@ -0,0 +1,83 @@ |
|||
#!/bin/bash |
|||
|
|||
MANDATORY_PACKAGES_STRING="nginx php-cli php-fpm php-mysql php-curl php-xml php-imagick php-zip php-gd php-intl composer mariadb-client" |
|||
|
|||
### CHECK |
|||
|
|||
# Run all WordPress container validations; each failure maps to a distinct
# return code (21 packages, 22 tarball, 23 wp-tool, 24 wp-backup).
function check() {
    PREFIX="recipe:wordpress:check"
    if ! check_mandatory_packages; then
        return 21
    fi
    if ! check_wordpress_tgz; then
        return 22
    fi
    if ! check_wp-tool; then
        return 23
    fi
    if ! check_wp-backup; then
        return 24
    fi

    echo "container <$CONTAINER> approved successfully!"
    return 0
}
|||
|
|||
# Verify that every mandatory package is installed (dpkg status "ii") in
# the container; the remote script exits non-zero on the first missing one.
function check_mandatory_packages() {
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
# split the space-separated list into one array element per package;
# the previous mapfile split on newlines, yielding a single element that
# only worked through accidental word-splitting of an unquoted expansion
read -ra PACKAGES <<< "$MANDATORY_PACKAGES_STRING"
for package in "\${PACKAGES[@]}"; do
    dpkg -l "\$package" 2>/dev/null | grep -q ^ii
done
EOF
}
|||
|
|||
# True when the wp-tool helper is installed in the container.
function check_wp-tool() {
    lxc exec "$CONTAINER" -- [ -f /usr/local/sbin/wp-tool ]
}
|||
|
|||
# True when the wp-backup helper is installed in the container.
function check_wp-backup() {
    lxc exec "$CONTAINER" -- [ -f /usr/local/sbin/wp-backup ]
}
|||
|
|||
# True when the cached WordPress release tarball is present in the container.
function check_wordpress_tgz() {
    lxc exec "$CONTAINER" -- [ -f /var/www/wordpress-latest.tgz ]
}
|||
|
|||
### INSTALL |
|||
|
|||
# Prepare the container for WordPress hosting: install nginx/PHP/MariaDB
# client packages, drop the default nginx site, cache the latest WordPress
# tarball, and deploy the wp-tool / wp-backup helper scripts.
function install() {
    PREFIX="recipe:wordpress:install"
    : $PREFIX # no-op reference so shellcheck does not flag PREFIX as unused
    launch_container "$CONTAINER"
    echo "initializing Wordpress ... "

    # $MANDATORY_PACKAGES_STRING expands on the host before transmission.
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
echo installing wordpress...
apt-get install -y $MANDATORY_PACKAGES_STRING
rm -f /etc/nginx/sites-enabled/default
rm -f /etc/nginx/sites-available/default
systemctl reload nginx

/TOOLBOX/wget https://wordpress.org/latest.tar.gz -O /var/www/wordpress-latest.tgz
mkdir -p /var/www/wordpress
chown www-data:www-data /var/www/wordpress
EOF
    echo "OK"

    echo -n "copying wp-tool to /usr/local/sbin/..."
    lxc file push --uid 0 --gid 0 "$MIAOU_BASEDIR/templates/apps/wordpress/wp-tool" "$CONTAINER/usr/local/sbin/wp-tool"
    lxc exec "$CONTAINER" -- chmod +x /usr/local/sbin/wp-tool
    PREFIX="" echo "OK"

    echo -n "copying wp-backup to /usr/local/sbin/..."
    lxc file push --uid 0 --gid 0 "$MIAOU_BASEDIR/templates/apps/wordpress/wp-backup" "$CONTAINER/usr/local/sbin/wp-backup"
    lxc exec "$CONTAINER" -- chmod +x /usr/local/sbin/wp-backup
    PREFIX="" echo "OK"

}
|||
|
|||
### MAIN |
|||
|
|||
# Entry point: <container> is the sole required argument.
. "$MIAOU_BASEDIR/lib/init.sh"
arg1_required "$@"
readonly CONTAINER="$1"

# Install only when validation fails, then re-validate (subshell: exit
# status is that of the final check).
check || (
    install
    check
)
@ -0,0 +1,221 @@ |
|||
#!/bin/bash |
|||
|
|||
DEFAULT_BACKUP_FOLDER="$HOME/RECOVERY_MARIADB" |
|||
|
|||
# Prompt with $1 and echo "yes" only on an explicit y/yes answer
# (case-insensitive); anything else echoes "no".
confirm() {
    local answer
    read -rp "$1 ([y]es or [N]o): "
    answer=$(echo "$REPLY" | tr '[:upper:]' '[:lower:]')
    if [[ "$answer" == "y" || "$answer" == "yes" ]]; then
        echo "yes"
    else
        echo "no"
    fi
}
|||
|
|||
# Print the db-maria command summary.
synopsis() {
    echo "usage: "
    local line
    for line in \
        ' list | console | connections' \
        ' ---------------------------' \
        ' use <DB_NAME>' \
        ' create <DB_NAME> [PASSWORD]' \
        ' ---------------------------' \
        ' backup <DB_NAME> [FOLDER]' \
        ' restore <DB_NAME> <FILE>' \
        ' ---------------------------' \
        ' rename <DB_NAME> <NEW_NAME>'; do
        printf '\t%s\n' "$line"
    done
}
|||
|
|||
# Print one user database name per line (system schemas filtered out),
# querying the MariaDB server in container ct1 as root.
list() {
    lxc exec ct1 -- sh -c "echo \"SELECT schema_name FROM information_schema.schemata where schema_name not in ('information_schema','mariadb','mysql','performance_schema')\" | mariadb -u root --skip-column-names -r "
}
|||
|
|||
# Without an argument: open an interactive mariadb root console in ct1.
# With an argument: run it as a single SQL statement and return.
console() {
    if [[ -z $1 ]]; then
        lxc exec ct1 -- mariadb -u root
    else
        lxc exec ct1 -- sh -c "echo \"$1\" | mariadb -u root"
    fi
}
|||
|
|||
# Show the current MariaDB process list (one row per client connection).
connections() {
    lxc exec ct1 -- sh -c "echo \"select id, user, host, db, command, time, state, info, progress from information_schema.processlist\" | mariadb -u root "
}
|||
|
|||
# Open an interactive mariadb console attached to $DB_NAME.
use() {
    lxc exec ct1 -- mariadb -u root "$DB_NAME"
}
|||
|
|||
# Create database NEW_DB (arg1, default $DB_NAME) plus an identically
# named user granted full rights on it. Password resolution order:
# arg2, then the caller-supplied $DB_PASSWORD, then the database name.
# No-op (with a message) when the database already exists.
create() {

    # shellcheck disable=SC1091
    source /opt/debian-bash/lib/functions.sh

    # shellcheck disable=SC2034
    mapfile -t DBs < <(list)

    local NEW_DB="${1:-$DB_NAME}"
    # honor DB_PASSWORD set by the CLI parser: previously the PASSWORD
    # argument was parsed in main but never reached this function, so a
    # user-supplied password was silently replaced by the database name
    local NEW_PASSWORD="${2:-${DB_PASSWORD:-$NEW_DB}}"

    if ! containsElement DBs "$NEW_DB"; then
        # backticks are escaped for the remote shell + SQL quoting layers
        lxc exec ct1 -- sh -c "echo \"\
        CREATE DATABASE \\\`$NEW_DB\\\`; \
        GRANT ALL ON \\\`$NEW_DB\\\`.* TO \\\`$NEW_DB\\\`@'%' IDENTIFIED BY '$NEW_PASSWORD'; \
        FLUSH PRIVILEGES; \
        \" | mariadb -u root"
    else
        echo "$NEW_DB already exists!"
    fi
}
|||
|
|||
# Dump $DB_NAME (from ct1.lxd, conventional user=password=dbname) into
# $FOLDER as a gzipped SQL archive, rotating any same-day archive to a
# numbered "-daily" backup first.
backup() {

    # Create the destination first so the default $HOME/RECOVERY_MARIADB
    # works on first use; the original checked before the (dead) mkdir,
    # so a missing default folder aborted the backup.
    mkdir -p "$FOLDER" 2>/dev/null || true
    if [[ ! -d "$FOLDER" ]]; then
        echo "error: Folder required!"
        file "$FOLDER"
        exit 2
    fi

    DATE=$(date '+%F')
    ARCHIVE="$FOLDER"/$DB_NAME-$DATE.mariadb.gz

    # keep the earlier dump of the same day as a numbered -daily backup
    if [[ -f $ARCHIVE ]]; then
        VERSION_CONTROL=numbered mv -b "$ARCHIVE" "$FOLDER"/"$DB_NAME"-"$DATE"-daily.mariadb.gz
    fi

    echo "backup $DB_NAME into $FOLDER"
    mariadb-dump -h ct1.lxd -u "$DB_NAME" -p"$DB_NAME" "$DB_NAME" | gzip >"$ARCHIVE"
    echo "archive file created: $ARCHIVE"
}
|||
|
|||
# Restore $DB_NAME from the gzipped dump $FILE. Refuses to run while any
# client is connected; asks for interactive confirmation; takes a safety
# backup before dropping the database; recreates it empty and replays the
# dump with CREATE DATABASE / USE statements stripped.
restore() {

    echo "restore $DB_NAME $FILE"
    if [[ ! -f "$FILE" ]]; then
        echo "error: Backup file (*.mariadb.gz) required!"
        file "$FILE"
        exit 2
    fi

    PROCESSES=$(lxc exec ct1 -- sh -c "echo \"select id, user, host, db, command, time, state, info, progress from information_schema.processlist\" | mariadb -u root")

    # grep -c returns non-zero when no line matches; suspend errexit so a
    # zero count is not treated as a failure
    set +e
    PROCESS_COUNT=$(echo "$PROCESSES" | grep -c "$DB_NAME")
    if [[ $PROCESS_COUNT -gt 0 ]]; then
        echo "FAILURE: There are some connections to database, please consider stopping bound services"
        echo
        echo "$PROCESSES"
        exit 2
    fi
    set -e

    if [[ "yes" == $(confirm "RESTORATION will drop DATABASE, please acknowledge with care!!!") ]]; then
        if list | grep -q "$DB_NAME"; then
            # safety net: archive current content before destroying it
            echo "backup <$DB_NAME> for safety reason"
            backup
            echo "drop database <$DB_NAME>"
            lxc exec ct1 -- sh -c "echo \"DROP DATABASE \\\`$DB_NAME\\\`\" | mariadb -u root"
        fi

        echo "create <$DB_NAME>"
        create
        # grep -a: treat the (binary-looking) dump stream as text
        gunzip -c "$FILE" | grep -av "^CREATE DATABASE" | grep -av "^USE" | mariadb -h ct1.lxd -u "$DB_NAME" -p"$DB_NAME" "$DB_NAME"
        echo RESTORATION completed successfully

    else
        exit 1
    fi
}
|||
|
|||
# Rename database $DB_NAME to $NEW_NAME: create the target database (and
# matching user), move every table across with RENAME TABLE, then drop the
# old user and database. Aborts when the source is missing (20) or the
# destination already exists (20).
rename() {
    echo "rename $DB_NAME to $NEW_NAME"
    local DB_NAME_FOUND=false
    for database in $(list); do
        if [[ $database == "$DB_NAME" ]]; then
            DB_NAME_FOUND=true
        fi
        if [[ $database == "$NEW_NAME" ]]; then
            echoerr "$NEW_NAME already exists! please provide another name instead of <$NEW_NAME> or run list command"
            exit 20
        fi
    done
    # BUGFIX: `[[ ! $DB_NAME_FOUND ]]` tested string emptiness, and the
    # variable is always the non-empty "true"/"false", so the guard could
    # never fire; compare against the literal instead.
    if [[ $DB_NAME_FOUND != true ]]; then
        echoerr "source <$DB_NAME> does not exist!"
        exit 20
    fi

    if [[ "$DB_NAME" == "$NEW_NAME" ]]; then
        echowarn "no need to rename; no change required <$DB_NAME>"
        exit 0
    fi

    echo "create new database <$NEW_NAME>"
    create "$NEW_NAME"

    # move tables one by one; skip the header line of `show tables`
    for table in $(console "use '$DB_NAME'; show tables"); do
        if [[ $table != "Tables_in_$DB_NAME" ]]; then
            echo "renaming table \`$DB_NAME\`.$table to \`$NEW_NAME\`.$table"
            console "use '$DB_NAME'; rename table \\\`$DB_NAME\\\`.$table to \\\`$NEW_NAME\\\`.$table;"
        fi
    done

    echo "every table has been renamed, so remove old database <$DB_NAME>"
    console "drop user \\\`$DB_NAME\\\`"
    console "drop database \\\`$DB_NAME\\\`"
}
|||
|
|||
# MAIN |
|||
# Strict mode: fail on errors, unset variables and pipe failures.
set -Eeuo pipefail
# shellcheck source=/dev/null
. "$MIAOU_BASEDIR/lib/functions.sh"

[[ $# -lt 1 ]] && synopsis && exit 1
ACTION=$1

# Dispatch: each action validates its own arity, binds the globals the
# handler functions read (DB_NAME, DB_PASSWORD, FOLDER, FILE, NEW_NAME)
# and delegates.
case $ACTION in
console)
    shift
    # remaining words are joined into one SQL string
    TAIL=$*
    console "$TAIL"
    ;;
list)
    list
    ;;
connections)
    connections
    ;;
use)
    [[ $# -lt 2 ]] && synopsis && exit 1
    DB_NAME=$2
    use
    ;;
create)
    [[ $# -lt 2 ]] && synopsis && exit 1
    DB_NAME=$2
    DB_PASSWORD=${3:-$DB_NAME}
    create
    ;;
backup)
    [[ $# -lt 2 ]] && synopsis && exit 1
    DB_NAME=$2
    FOLDER=${3:-$DEFAULT_BACKUP_FOLDER}
    backup
    ;;
restore)
    [[ $# -lt 3 ]] && synopsis && exit 1
    DB_NAME=$2
    FILE=$3
    FOLDER=${4:-$DEFAULT_BACKUP_FOLDER}
    # convention: the per-database account uses the db name as password
    DB_PASSWORD="$DB_NAME"
    restore
    ;;
rename)
    [[ $# -lt 3 ]] && synopsis && exit 1
    DB_NAME=$2
    NEW_NAME=$3
    rename
    ;;
*)
    synopsis
    exit 1
    ;;
esac
@ -0,0 +1,200 @@ |
|||
#!/bin/bash |
|||
|
|||
# Prompt with $1 and echo "yes" only on an explicit y/yes answer
# (case-insensitive); anything else echoes "no".
# Fixed: read -r (no backslash mangling), quoted $REPLY, and POSIX
# character classes instead of locale-dependent [A-Z] ranges — now
# consistent with db-maria's confirm.
confirm() {
    read -rp "$1 ([y]es or [N]o): "
    case $(echo "$REPLY" | tr '[:upper:]' '[:lower:]') in
    y | yes) echo "yes" ;;
    *) echo "no" ;;
    esac
}
|||
|
|||
# Print the db-psql command summary.
synopsis() {
    echo "usage: "
    local line
    for line in \
        ' list | console | connections' \
        ' ---------------------------' \
        ' use <DB_NAME>' \
        ' lookup <DB_NAME> <TERM>' \
        ' create <DB_NAME> [PASSWORD]' \
        ' ---------------------------' \
        ' backup <DB_NAME> [FOLDER]' \
        ' restore <DB_NAME> <FILE> [--yes]' \
        ' ---------------------------' \
        ' rename <DB_NAME> <NEW_NAME>'; do
        printf '\t%s\n' "$line"
    done
}
|||
|
|||
# Print one user database name per line (templates and the postgres
# maintenance db excluded), queried as the postgres system user in ct1.
list() {
    lxc exec ct1 -- su - postgres -c "psql -Atc \"SELECT datname FROM pg_database WHERE datistemplate=false AND datname<>'postgres';\""
}
|||
|
|||
# Without an argument: open an interactive shell as the postgres user in
# ct1. With an argument: run it as a one-shot command (typically psql -c).
console() {
    if [[ -z $1 ]]; then
        lxc exec ct1 -- su - postgres
    else
        lxc exec ct1 -- su - postgres -c "$1"
    fi
}
|||
|
|||
# Show client connections to non-system databases (pg_stat_activity).
connections() {
    PROCESSES=$(console "psql -c \"select pid as process_id, usename as username, datname as database_name, client_addr as client_address, application_name, backend_start, state, state_change from pg_stat_activity WHERE datname<>'postgres' ORDER BY datname, usename;\"")
    # print the data verbatim: the previous `printf "$PROCESSES\n"` used
    # query output as the printf FORMAT string, so any '%' in it (e.g. in
    # an application_name) was interpreted as a conversion specifier
    printf '%s\n' "$PROCESSES"
}
|||
|
|||
# Without an argument: open an interactive psql session on $DB_NAME.
# With an argument: run it as a single SQL statement (unaligned,
# tuples-only output). Status goes to stderr to keep stdout machine-usable.
use() {
    echo >&2 "about to connect to <${DB_NAME}> ..."
    if [[ -z $1 ]]; then
        lxc exec ct1 -- su - postgres -c "psql $DB_NAME"
    else
        # two quoting layers: sh -c on the host side, su -c inside ct1
        local sql="psql -A -t $DB_NAME -c \\\"$1;\\\""
        local command="su - postgres -c \"$sql\""
        lxc exec ct1 -- sh -c "$command"
    fi
}
|||
|
|||
# Create role $DB_NAME (password $DB_PASSWORD) and a database it owns on
# ct1, unless the database already exists.
create() {
    echo >&2 "about to create to <${DB_NAME}> ..."
    # shellcheck disable=SC1091
    source /opt/debian-bash/lib/functions.sh
    # one array element per line of `list` (the previous unquoted
    # $(list) split relied on word-splitting)
    # shellcheck disable=SC2034
    mapfile -t DBs < <(list)
    # call containsElement directly: the original `if ! $(containsElement …)`
    # executed the function's (empty) output as a command and only worked
    # through a bash quirk
    if ! containsElement DBs "$DB_NAME"; then
        # heavy escaping: the string crosses sh -c (host), su -c and psql -c
        local SQL="CREATE USER \\\\\\\"$DB_NAME\\\\\\\" WITH PASSWORD '$DB_PASSWORD'"
        local command="su - postgres sh -c \"psql -c \\\"$SQL\\\"\" && su - postgres sh -c \"createdb -O $DB_NAME $DB_NAME\" && echo CREATE DB"
        lxc exec ct1 -- sh -c "$command"
    else
        echo "$DB_NAME already exists!"
    fi

}
|||
|
|||
# Grep for $TERM across the full data of database $DB_NAME (via a
# data-only pg_dump); requires at least 4 characters to avoid noise.
# NOTE(review): the global is named TERM, which shadows the terminal's
# TERM environment variable inside this script — consider renaming.
lookup() {
    if [[ ${#TERM} -ge 4 ]]; then
        echo >&2 "about to lookup term <${TERM}> over all tables of database <$DB_NAME> ..."
        local command="pg_dump --data-only --inserts $DB_NAME 2>/dev/null | grep --color \"$TERM\""
        lxc exec ct1 -- su - postgres -c "$command"
    else
        echo "term <$TERM> should contain 4 chars minimum!" && exit 2
    fi
}
|||
|
|||
# Dump $DB_NAME (from ct1.lxd, conventional user=password=dbname) into
# $FOLDER as a gzipped SQL archive, rotating any same-day archive to a
# numbered "-daily" backup first. Requires $FOLDER to exist.
backup() {
    if [[ ! -d "$FOLDER" ]]; then
        echo "error: Folder required!"
        file "$FOLDER"
        exit 2
    fi
    DATE=$(date '+%F')
    ARCHIVE="$FOLDER"/$DB_NAME-$DATE.postgres.gz

    # keep the earlier dump of the same day as a numbered -daily backup;
    # operands are quoted (unquoted paths broke on whitespace)
    if [[ -f $ARCHIVE ]]; then
        VERSION_CONTROL=numbered mv -b "$ARCHIVE" "$FOLDER/$DB_NAME-$DATE-daily.postgres.gz"
    fi

    echo "backup $DB_NAME $FOLDER"
    PGPASSWORD=$DB_NAME pg_dump -U "$DB_NAME" "$DB_NAME" -h ct1.lxd | gzip >"$ARCHIVE"
    echo "archive file created: $ARCHIVE"
}
|||
|
|||
# Restore $DB_NAME from the gzipped dump $FILE. Refuses to run while
# clients are connected; confirms interactively unless $YES == true; takes
# a safety backup into $HOME/RECOVERY_POSTGRES, then drops, recreates and
# replays the dump in a single transaction, stopping at the first error.
restore() {
    echo "restore $DB_NAME $FILE"
    if [[ ! -f "$FILE" ]]; then
        echo "error: Backup file (*.postgres.gz) required!"
        file $FILE
        exit 2
    fi
    PROCESSES=$(console "psql -c \"select pid as process_id, usename as username, datname as database_name, client_addr as client_address, application_name, backend_start, state, state_change from pg_stat_activity WHERE datname='$DB_NAME';\"")
    # psql table output has a 2-line header + footer; more than 3 lines
    # means at least one live connection row
    PROCESS_COUNT=$(echo "$PROCESSES" | wc -l)
    if [[ $PROCESS_COUNT -gt 3 ]]; then
        echo "FAILURE: There are some connections to database, please consider stopping bound services"
        echo
        # NOTE(review): data used as printf format string — a '%' in the
        # process list would be misinterpreted (see connections())
        printf "$PROCESSES\n"
        exit 2
    fi

    if [[ $YES == "true" || "yes" == $(confirm "RESTORATION will drop DATABASE, please acknowledge with care!!!") ]]; then
        FOLDER="$HOME/RECOVERY_POSTGRES"
        mkdir -p "$FOLDER"
        backup
        echo "backup successful, now drop and restore"
        lxc exec ct1 -- su - postgres -c "dropdb $DB_NAME && createdb -O $DB_NAME $DB_NAME"
        # -1: single transaction; ON_ERROR_STOP aborts on first error;
        # 2>&1 >/dev/null keeps warnings visible while discarding stdout
        gunzip -c "$FILE" | grep -v "^CREATE DATABASE" | PGPASSWORD=$DB_NAME PGOPTIONS='--client-min-messages=warning' psql -X -q -1 -v ON_ERROR_STOP=1 --pset pager=off -U $DB_NAME -h ct1.lxd $DB_NAME 2>&1 >/dev/null
    else
        exit 1
    fi
}
|||
|
|||
# Rename database $DB_NAME to $DB_NEW_NAME: ALTER the database, its role,
# and reset the role password to the new name (user=password=dbname
# convention). Aborts when the destination exists (11) or source is
# missing (12).
rename() {
    echo "rename <$DB_NAME> to <$DB_NEW_NAME>"
    mapfile -t LIST <<<"$(list)"
    found=false
    for db in "${LIST[@]}"; do
        [[ "$db" == "$DB_NEW_NAME" ]] && echoerr "destination database <$DB_NEW_NAME> already exists! Please provide another name." && exit 11
        [[ "$db" == "$DB_NAME" ]] && found=true
    done
    # NOTE(review): the exit 12 runs in a (...) subshell; the script only
    # stops here because errexit (from lib/init.sh) propagates the failure.
    $found || (echoerr "source database <$DB_NAME> not found!" && exit 12)

    console "psql -c \"ALTER DATABASE \\\"$DB_NAME\\\" RENAME TO \\\"$DB_NEW_NAME\\\" \""
    console "psql -c \"ALTER USER \\\"$DB_NAME\\\" RENAME TO \\\"$DB_NEW_NAME\\\" \""
    console "psql -c \"ALTER USER \\\"$DB_NEW_NAME\\\" PASSWORD '$DB_NEW_NAME' \""

}
|||
|
|||
# MAIN |
|||
# Load shared miaou helpers (strict mode, echoerr, ...).
. "$MIAOU_BASEDIR/lib/init.sh"

[[ $# -lt 1 ]] && synopsis && exit 1
ACTION=$1

# Dispatch: each action validates its own arity, binds the globals the
# handler functions read (DB_NAME, DB_PASSWORD, FOLDER, FILE, TERM,
# DB_NEW_NAME, YES) and delegates.
case $ACTION in
console)
    shift
    # remaining words are joined into one remote command string
    TAIL="$@"
    console "$TAIL"
    ;;
list)
    list
    ;;
connections)
    connections
    ;;
use)
    [[ $# -lt 2 ]] && synopsis && exit 1
    DB_NAME=$2
    shift 2
    TAIL="$@"
    use "$TAIL"
    ;;
create)
    [[ $# -lt 2 ]] && synopsis && exit 1
    DB_NAME=$2
    DB_PASSWORD=${3:-$DB_NAME}
    create
    ;;
lookup)
    [[ $# -lt 3 ]] && synopsis && exit 1
    DB_NAME=$2
    TERM=$3
    lookup
    ;;
backup)
    [[ $# -lt 2 ]] && synopsis && exit 1
    DB_NAME=$2
    # NOTE(review): defaults to "." here while db-maria defaults to its
    # RECOVERY folder — confirm whether this asymmetry is intended
    FOLDER=${3:-.}
    backup
    ;;
restore)
    [[ $# -lt 3 ]] && synopsis && exit 1
    DB_NAME=$2
    FILE=$3
    # NOTE(review): YES is forced true regardless of a --yes flag, so the
    # confirmation advertised by synopsis is always skipped — verify intent
    YES=true
    restore
    ;;
rename)
    [[ $# -lt 3 ]] && synopsis && exit 1
    DB_NAME=$2
    DB_NEW_NAME=$3
    rename
    ;;
*)
    synopsis
    exit 1
    ;;
esac
@ -0,0 +1,180 @@ |
|||
#!/bin/bash |
|||
|
|||
# Guard: abort the whole script (exit 1) when the requested container
# name is already taken; succeed silently otherwise.
function check_container_missing() {
    container_exists "$CONTAINER" || return 0
    echoerr "$CONTAINER already created!"
    exit 1
}
|||
|
|||
# Show invocation syntax including the supported -o option flags.
function usage() {
    printf '%s\n' 'USAGE with options:'
    printf '\t\tlxc-miaou-create <CONTAINER_NAME> -o sameuser[,nesting,ssh]\n'
}
|||
|
|||
# Pre-flight validation before creating the container.
function check() {
    if ! check_container_missing; then
        return 1
    fi
    return 0
}
|||
|
|||
# Parse the optional trailing arguments "-o opt1[,opt2...]" and switch the
# corresponding OPTION_SAMEUSER / OPTION_NESTING / OPTION_SSH globals on.
# Exit codes: 30 wrong arity, 31 unknown prefix, 32 unknown option name.
function set_options() {
    declare -a options=("$@")
    length=${#options[@]}
    if [[ "$length" -ne 0 ]]; then
        if [[ "$length" -ne 2 ]]; then
            # "$*" joins the args for display ("$@" inside a string is SC2145)
            echoerr "unrecognized options: $*" && usage && exit 30
        else
            prefix="${options[0]}"
            option="${options[1]}"
            if [[ "$prefix" == '-o' ]]; then
                # split the comma-separated option list
                IFS=',' read -r -a options <<<"$option"
                for i in "${options[@]}"; do
                    case "$i" in
                    sameuser) OPTION_SAMEUSER=true ;;
                    nesting) OPTION_NESTING=true ;;
                    ssh) OPTION_SSH=true ;;
                    *) echoerr "unrecognized options: $*" && usage && exit 32 ;;
                    esac
                done
            else
                echoerr "unrecognized options prefix: $prefix" && usage && exit 31
            fi
        fi
        # the original trailing `shift` was dropped: it only touched this
        # function's own positional parameters and had no effect
    fi
}
|||
|
|||
# Create and bootstrap a Debian LXD container named $CONTAINER:
# cloud-init user-data (mirrors, base packages, resolver pointing at the
# bridge gateway), shared/toolbox disk devices, debian-bash init, bottom
# config, deferred cloud-init purge, and the optional sameuser / nesting /
# ssh features selected via -o.
function create() {
    local PREFIX="miaou:create"

    if [[ "$OPTION_SAMEUSER" == true ]]; then
        # replicate the invoking host user inside the container
        miaou_user=$(whoami)
    fi

    echo -n "creating new container <$CONTAINER> based on image <$CONTAINER_RELEASE>... "
    # bridge gateway IP (strip the /CIDR suffix) becomes the guest's DNS
    bridge_gw=$(lxc network get lxdbr0 ipv4.address | cut -d'/' -f1)
    # cloud-config document; \$PRIMARY/\$RELEASE/... stay literal for
    # cloud-init, $bridge_gw expands here
    user_data="$(
        cat <<EOF
#cloud-config
timezone: 'Indian/Reunion'
apt:
  preserve_sources_list: false
  conf: |
    Acquire::Retries "60";
    DPkg::Lock::Timeout "60";
  primary:
    - arches: [default]
      uri: http://debian.mithril.re/debian
  security:
    - arches: [default]
      uri: http://debian.mithril.re/debian-security
  sources_list: |
    # generated by miaou-cloud
    deb \$PRIMARY \$RELEASE main
    deb \$PRIMARY \$RELEASE-updates main
    deb \$SECURITY \$RELEASE-security main
package_update: true
package_upgrade: true
package_reboot_if_required: true
packages:
  - git
  - file
  - bc
  - bash-completion
write_files:
  - path: /etc/sudoers.d/10-add_TOOLBOX_to_secure_path
    content: >
      Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/TOOLBOX"
runcmd:
  - [ systemctl, mask, systemd-hostnamed.service ]
  - [ systemctl, disable, e2scrub_reap.service ]
  - [ systemctl, disable, systemd-resolved.service, --now ]
  - [ systemctl, reset-failed ]
  - [ rm, /etc/resolv.conf]
  - [ rm, /etc/sudoers.d/90-cloud-init-users]
  - "echo nameserver $bridge_gw > /etc/resolv.conf"
final_message: "Container from datasource \$datasource is finally up, after \$UPTIME seconds"
EOF
    )"
    lxc init images:debian/$CONTAINER_RELEASE/cloud "$CONTAINER" --config user.user-data="$user_data" -q

    # allow directory `SHARED` to be read-write mounted
    lxc config set "$CONTAINER" raw.idmap "both $(id -u) 0" -q
    mkdir -p "$HOME/LXD/SHARED/$CONTAINER"

    lxc config device add "$CONTAINER" SHARED disk source="$HOME/LXD/SHARED/$CONTAINER" path=/mnt/SHARED -q
    lxc config device add "$CONTAINER" TOOLBOX disk source=/TOOLBOX path=/TOOLBOX -q
    lxc config device add "$CONTAINER" DEBIAN_BASH disk source=$(realpath /opt/debian-bash) path=/opt/debian-bash -q
    lxc config set "$CONTAINER" environment.PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/debian-bash/tools:/TOOLBOX -q

    if [[ "$OPTION_NESTING" == true ]]; then
        # nested containers also get the miaou tree mounted
        lxc config set $CONTAINER security.nesting true -q
        lxc config device add "$CONTAINER" miaou disk source=/opt/miaou path=/opt/miaou -q
    fi

    lxc start "$CONTAINER" -q

    # initializing debian-bash
    lxc exec "$CONTAINER" -- /opt/debian-bash/init.sh

    # default configuration files (btm,)
    lxc exec "$CONTAINER" -- mkdir -p /root/.config/bottom
    lxc file push "$MIAOU_BASEDIR/templates/bottom/bottom.toml" "$CONTAINER/root/.config/bottom/bottom.toml" -q

    # purge cloud-init after success: runs asynchronously once
    # cloud-final.service completes, keeping a copy of status.json
    lxc exec "$CONTAINER" -- systemd-run -q -p After=cloud-final.service -p Type=oneshot --no-block bash -c '\
cloud-init status --wait &&\
cp /var/lib/cloud/data/status.json /root/cloud-status.json &&\
systemctl stop cloud-{config,final,init-local,init}.service &&\
systemctl disable cloud-{config,final,init-local,init}.service &&\
systemctl stop cloud-config.target cloud-init.target &&\
apt-get purge -y cloud-init &&\
rm -rf /var/lib/cloud && \
userdel -rf debian \
'

    if [[ "$OPTION_SAMEUSER" == true ]]; then
        # create the mirrored user, then graft the host's shadow password
        # hash (from expanded config) unless a password is already set
        if ! lxc exec "$CONTAINER" -- grep "$miaou_user" /etc/passwd; then
            lxc exec "$CONTAINER" -- useradd -ms /bin/bash -G sudo "$miaou_user"
        fi
        if ! lxc exec "$CONTAINER" -- passwd -S "$miaou_user" | cut -d ' ' -f2 | grep -q ^P; then
            shadow_passwd=$(load_yaml_from_expanded credential.shadow)
            shadow_remainder=$(lxc exec "$CONTAINER" -- bash -c "grep $miaou_user /etc/shadow | cut -d':' -f3-")
            lxc exec "$CONTAINER" -- /opt/debian-bash/tools/append_or_replace "^$miaou_user:.*:" "$miaou_user:$shadow_passwd:$shadow_remainder" /etc/shadow >/dev/null
        fi
    fi

    if [[ "$OPTION_SSH" == true ]]; then
        lxc exec "$CONTAINER" -- /opt/debian-bash/tools/idem_apt_install openssh-server
    fi

    if [[ "$OPTION_SSH" == true && "$OPTION_SAMEUSER" == true ]]; then
        lxc-miaou-enable-ssh "$CONTAINER"
    fi

    PREFIX="" echoinfo OK

    echo "hint: \`lxc login $CONTAINER [--env user=<USER>]\`"
    [[ "$OPTION_SAMEUSER" == true ]] && echo "hint: \`lxc sameuser $CONTAINER\`"

    # ensure the function's status is success even when the last hint test
    # above was false
    true
}
|||
|
|||
## MAIN |
|||
# Entry point: <container_name> [-o sameuser[,nesting,ssh]]
. "$MIAOU_BASEDIR/lib/init.sh"
OPTION_SAMEUSER=false
OPTION_NESTING=false
OPTION_SSH=false
PREFIX="miaou"

# NOTE(review): `exit 1` runs in a (...) subshell; the script only stops
# here if errexit from init.sh propagates the failing compound command.
arg1_required "$@" || (usage && exit 1)
readonly CONTAINER=$1
readonly CONTAINER_RELEASE="bookworm"

shift
set_options "$@"
# shellcheck disable=SC2124
# NOTE(review): "$@" assigned to a scalar joins with spaces (SC2124);
# FULL_OPTIONS is kept for display/debugging only
readonly FULL_OPTIONS="$@"

check
create
@ -0,0 +1,88 @@ |
|||
#!/bin/bash |
|||
|
|||
# check_container_exists() — abort the script when $CONTAINER is unknown to lxc
function check_container_exists() {
    container_exists "$CONTAINER" && return 0
    echoerr "container <$CONTAINER> does not exist!"
    exit 1
}
|||
|
|||
# check() — run all preflight checks; returns non-zero when any of them fails
function check() {
    if ! check_container_exists; then
        return 1
    fi
    return 0
}
|||
|
|||
# enable_ssh()
# Install and configure sshd inside $CONTAINER and authorize $SSH_USER:
#   1. start the container if it is stopped
#   2. create the user (member of sudo) when missing
#   3. inject the configured shadow password hash for that user
#   4. install openssh-server and add the user to sshd's AllowUsers
#   5. push the invoking user's public key as authorized_keys
function enable_ssh() {
    echo "lxc: enable ssh in container <$CONTAINER> for user <$SSH_USER>"

    if ! container_running "$CONTAINER"; then
        echowarn "container <$CONTAINER> seems to be asleep, starting ..."
        lxc start "$CONTAINER"
        echowarn DONE
    fi

    # create the user inside the container when it does not exist yet
    lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
if ! id "$SSH_USER" &>/dev/null; then
echo "adding new user <$SSH_USER>"
useradd -ms /bin/bash -G sudo "$SSH_USER"
else
echo "bash: $SSH_USER exists already!"
fi
EOF

    # replace the user's password hash in the container's /etc/shadow with the
    # hash from the expanded configuration, keeping the trailing aging fields
    # (field 3 onward) that already exist in the container
    miaou_user=$(whoami)
    shadow_passwd=$(load_yaml_from_expanded credential.shadow)
    shadow_remainder=$(lxc exec "$CONTAINER" -- bash -c "grep $SSH_USER /etc/shadow | cut -d':' -f3-")
    lxc exec "$CONTAINER" -- /opt/debian-bash/tools/append_or_replace "^$SSH_USER:.*:" "$SSH_USER:$shadow_passwd:$shadow_remainder" /etc/shadow >/dev/null

    lxc exec "$CONTAINER" -- /opt/debian-bash/tools/idem_apt_install openssh-server
    # collect users already present on the AllowUsers line of sshd_config, if any
    previous_users=($(
        lxc exec "$CONTAINER" -- bash <<EOF
set -Eeuo pipefail
if [[ -f /etc/ssh/sshd_config ]] && grep -q AllowUsers /etc/ssh/sshd_config ; then
cat /etc/ssh/sshd_config | grep AllowUsers | cut -d' ' -f 2-
fi
EOF
    ))

    if containsElement previous_users "$SSH_USER"; then
        echo "sshd_config: AllowUsers $SSH_USER already done!"
    else
        # render a fresh sshd_config with the extended user list and push it
        echo "previous_users ${previous_users[*]}"
        previous_users+=("$SSH_USER")
        echo -n "building template for sshd_config..."
        USERS=${previous_users[*]} tera -e --env-key env -t "$MIAOU_BASEDIR/templates/dev-container-ssh/sshd_config.j2" -o "/tmp/sshd_config" "$MIAOU_CONFIGDIR/miaou.expanded.yaml" >/dev/null
        echo 'OK'
        echo -n "copying sshd_config over container <$CONTAINER> ... "
        lxc file push --uid 0 --gid 0 "/tmp/sshd_config" "$CONTAINER/etc/ssh/sshd_config" &>/dev/null
        echo 'OK'
        lxc exec "$CONTAINER" -- systemctl reload sshd.service
    fi

    # prepare ~/.ssh and install the host user's public key as authorized_keys
    lxc exec "$CONTAINER" -- mkdir -p "/home/$SSH_USER/.ssh"
    lxc exec "$CONTAINER" -- chown "$SSH_USER:$SSH_USER" "/home/$SSH_USER/.ssh"
    lxc exec "$CONTAINER" -- chmod 760 "/home/$SSH_USER/.ssh"
    lxc file push --uid 0 --gid 0 "/home/$miaou_user/.ssh/id_rsa.pub" "$CONTAINER/home/$SSH_USER/.ssh/authorized_keys" &>/dev/null
    lxc exec "$CONTAINER" -- chown "$SSH_USER:$SSH_USER" "/home/$SSH_USER/.ssh/authorized_keys"
    lxc exec "$CONTAINER" -- chmod 600 "/home/$SSH_USER/.ssh/authorized_keys"

    # Codium remote-ssh expects a curl binary reachable on PATH
    echo "create symbolic link for curl from TOOLBOX as required for Codium remote-ssh"
    lxc exec "$CONTAINER" -- ln -sf /TOOLBOX/curl /usr/bin/

    echo "SUCCESS: container $CONTAINER listening on port 22"
}
|||
|
|||
## MAIN
. "$MIAOU_BASEDIR/lib/init.sh"

arg1_required "$@"
readonly CONTAINER=$1

# optional second argument overrides the ssh user; default to the invoking user
readonly SSH_USER="${2:-$(id -un)}"

check
enable_ssh
@ -0,0 +1,3 @@ |
|||
#!/bin/bash

# List running LXC containers sorted by disk usage, largest first.
# Columns: name, Disk usage, memory; `sort -h` understands the
# human-readable sizes printed by lxc (KiB/MiB/GiB).
lxc list -c nDm -f compact status=running | tail -n+2 | sort -k2 -h -r
@ -0,0 +1,3 @@ |
|||
#!/bin/bash

# List running LXC containers sorted by memory usage, largest first.
# Columns: name, memory, Disk usage; `sort -h` understands the
# human-readable sizes printed by lxc (KiB/MiB/GiB).
lxc list -c nmD -f compact status=running | tail -n+2 | sort -k2 -h -r
@ -0,0 +1,12 @@ |
|||
#!/bin/bash

# restart_dnsmasq()
# Force lxd's embedded dnsmasq to reload by re-applying the bridge's
# raw.dnsmasq value (setting the key to its current content restarts it).
function restart_dnsmasq() {
    echo -n "lxd: restart dnsmasq... "
    local tmpfile
    # use mktemp instead of a fixed, predictable /tmp name (also fixes the
    # "dnsmaq" typo); clean it up afterwards
    tmpfile=$(mktemp /tmp/dnsmasq.conf.XXXXXX)
    lxc network get lxdbr0 raw.dnsmasq >"$tmpfile"
    lxc network set lxdbr0 raw.dnsmasq - <"$tmpfile"
    rm -f "$tmpfile"
    echo "OK"
}

## MAIN
. "$MIAOU_BASEDIR/lib/init.sh"
restart_dnsmasq
@ -0,0 +1,479 @@ |
|||
#!/bin/bash |
|||
|
|||
# usage() — print a short usage hint (PREFIX is consumed by the project's
# echo wrapper) and exit successfully
usage() {
    PREFIX="miaou:usage" echo '<init>'
    exit 0
}
|||
|
|||
# yqm(EXPR) — read-only yq query against the expanded configuration file
yqm() {
    #read only
    yq "$1" "$EXPANDED_CONF"
}

# yqmi(EXPR) — in-place (-i) yq update of the expanded configuration file
yqmi() {
    # for update
    yq "$1" "$EXPANDED_CONF" -i
}

# yqmt(EXPR) — yq query with tabular output (-o t), one row per match;
# callers split each row into fields with `read -r -a`
yqmt() {
    # tabular
    yq "$1" "$EXPANDED_CONF" -o t
}
|||
|
|||
# compute_fqdn_middlepart(TARGET)
# Map a deployment target (prod|beta|dev) to the middle part used when
# building service FQDNs: "." for prod, ".beta." / ".dev." otherwise.
# Exits 1 on any other value.
compute_fqdn_middlepart() {
    local fqdn_middlepart
    case "$1" in
    prod)
        fqdn_middlepart="."
        ;;
    beta)
        fqdn_middlepart=".beta."
        ;;
    dev)
        fqdn_middlepart=".dev."
        ;;
    *)
        # BUG fix: the message referenced the global ${target}, which is not
        # necessarily set inside this function — report the actual argument
        echowarn "unknown target <$1>, please fix with correct value from {prod, beta, dev} and try again..."
        exit 1
        ;;
    esac
    builtin echo "$fqdn_middlepart"
}
|||
|
|||
# archive_conf(FILE)
# Keep a history of FILE under $MIAOU_CONFIGDIR/archived/<basename>/:
# the first call stores a full copy as "previous"; later calls, when FILE is
# newer than the latest archive entry, store a timestamped diff and refresh
# "previous". Empty diffs are discarded.
function archive_conf() {
    PREFIX="miaou:conf:archive"

    local archive_dir
    file="$1"
    filename=$(basename "$file")
    archive_dir="$MIAOU_CONFIGDIR/archived/$filename"
    mkdir -p "$archive_dir"
    previous="$archive_dir/previous"

    # most recent entry in the archive folder (bare filename)
    # shellcheck disable=SC2012
    latest_patch=$(ls -1tr "$archive_dir/" | tail -n1)

    if [[ -z "$latest_patch" ]]; then
        echo -n "archiving first file <$file> ..."
        cp "$file" "$previous"
        PREFIX="" echoinfo OK
    elif [[ "$file" -nt "$archive_dir/$latest_patch" ]]; then
        # BUG fix: -nt used to compare against the bare filename (resolved
        # relative to the current directory), so the freshness test was
        # meaningless — compare against the real archived path instead
        patchname="$archive_dir/$(date +%F_%T)"
        if ! diff "$previous" "$file" >"$patchname"; then
            # files differ: keep the diff and refresh the full copy
            echo -n "archiving patch <$patchname> ..."
            cp "$file" "$previous"
            PREFIX="" echoinfo OK
        else
            # no actual change: drop the empty diff
            rm "$patchname"
        fi
    fi
}
|||
|
|||
# archive_allconf() — snapshot both configuration files into the archive
function archive_allconf() {
    mkdir -p "$MIAOU_CONFIGDIR"
    for conf_file in "$CONF" "$DEFAULTS"; do
        archive_conf "$conf_file"
    done
}
|||
|
|||
# check_expand_conf() — decide whether the expanded conf must be rebuilt;
# returns 1 (and says so) when it is already up-to-date and no --force given
function check_expand_conf() {
    PREFIX="miaou:conf:check"
    if "$FORCE"; then
        return 0
    fi
    if [[ -f "$EXPANDED_CONF" && "$EXPANDED_CONF" -nt "$CONF" && "$EXPANDED_CONF" -nt "$DEFAULTS" ]]; then
        echo "already expanded!"
        return 1
    fi
}
|||
|
|||
# expand_conf()
# Build $EXPANDED_CONF from $CONF merged over $DEFAULTS, then derive the
# computed sections (.expanded.*, .firewall.*) used by the rest of the script.
# Aborts (exit 101) when the configured target changed since the last run.
function expand_conf() {
    PREFIX="miaou:conf"

    # refuse to silently switch targets between runs
    if [[ -f "$EXPANDED_CONF" ]]; then
        current_target=$(grep -Es "^target:" /etc/miaou/defaults.yaml | cut -d ' ' -f2)
        previous_target=$(grep -Es "^target:" "$EXPANDED_CONF" | cut -d ' ' -f2)
        [[ "$current_target" != "$previous_target" ]] && echoerr "TARGET <$previous_target> mismatched <$current_target>" && exit 101
    fi

    # initialize expanded conf by merging default
    # shellcheck disable=SC2016
    yq eval-all '. as $item ireduce ({}; . * $item )' "$CONF" "$DEFAULTS" >"$EXPANDED_CONF"

    # append unique container unless overridden
    # (each row is "domain subdomain" for services missing a .container key)
    mapfile -t services_app_only < <(yqmt '.services.[].[] | has("container") | select ( . == false) | [(parent|key)+" " +key]')

    for i in "${services_app_only[@]}"; do
        read -r -a item <<<"$i"
        domain=${item[0]}
        subdomain=${item[1]}
        app=$(yqm ".services.\"$domain\".\"$subdomain\".app")
        container=$(get_container_for_domain_subdomain_app "$domain" "$subdomain" "$app")
        yqmi ".services.\"$domain\".\"$subdomain\".container=\"$container\""
    done

    # append enabled=true unless overridden
    mapfile -t services_app_only < <(yqmt '.services.[].[] | has("enabled") | select ( . == false) | [(parent|key)+" " +key] | unique ')
    # echo "found <${#services_app_only[@]}> enabled services"
    for i in "${services_app_only[@]}"; do
        read -r -a item <<<"$i"
        domain=${item[0]}
        subdomain=${item[1]}
        yqmi ".services.\"$domain\".\"$subdomain\".enabled=true"
    done

    # compute fqdn
    # NOTE: `target` deliberately stays global — build_monit reads it later
    target=$(yqm '.target')
    fqdn_middlepart=$(compute_fqdn_middlepart "$target")

    # write fqdn_middlepart
    yqmi ".expanded.fqdn_middlepart = \"$fqdn_middlepart\""

    # add monitored.containers section
    yqmi '.expanded.monitored.containers = ([ .services[] | to_entries | .[] | .value | select (.enabled == true ) | .container ] | unique)'

    # add monitored.hosts section
    yqmi '.expanded.monitored.hosts = [( .services[][] | select (.enabled == true ) | {"domain": ( parent | key ), "subdomain": key, "fqdn": key + (parent | parent | parent | .expanded.fqdn_middlepart) + ( parent | key ), "container":.container, "port":.port, "app":.app })]'

    # add services section
    if [[ ${#services_app_only[@]} -gt 0 ]]; then
        yqmi '.expanded.services = [( .services[][] | select (.enabled == true ) | {"domain": ( parent | key ), "subdomain": key, "fqdn": key + (parent | parent | parent | .expanded.fqdn_middlepart) + ( parent | key ), "container":.container, "port":.port, "app":.app, "name": .name // ""})]'
    else
        yqmi '.expanded.services = []'
    fi

    # add firewall section, bridge_subnet + mail_passthrough if any
    bridge_subnet=$(lxc network get lxdbr0 ipv4.address)
    yqmi ".firewall.bridge_subnet = \"$bridge_subnet\""

    # NOTE(review): this value is read but never used here; build_nftables
    # re-reads it itself — possibly left over, confirm before removing
    container_mail_passthrough=$(yqm ".firewall.container_mail_passthrough")
}
|||
|
|||
# build_routes()
# Point every service FQDN at the DMZ container's IP by generating dnsmasq
# "address=/fqdn/ip" lines and loading them into the lxd bridge network.
function build_routes() {
    PREFIX="miaou:routes"

    mapfile -t fqdns < <(yqm '.expanded.services[].fqdn')
    echo "found <${#fqdns[@]}> fqdn"
    raw_dnsmasq=''
    for i in "${fqdns[@]}"; do
        # append domains to conf
        raw_dnsmasq+="address=/$i/$DMZ_IP\\n"
        echo "re-routing any connection from <$i> to internal container <$DMZ_CONTAINER.lxd>"
    done

    # fix: quote "$BRIDGE" to keep the argument word-splitting safe
    builtin echo -e "$raw_dnsmasq" | lxc network set "$BRIDGE" raw.dnsmasq -
}
|||
|
|||
# build_dmz_reverseproxy()
# Render the nginx configuration (default vhost + per-service hosts) from
# templates, push it into the DMZ container, enable every site and restart
# nginx there.
function build_dmz_reverseproxy() {
    PREFIX="miaou:build:dmz"
    echo -n "building configuration for nginx ... "
    mkdir -p "$MIAOU_CONFIGDIR/nginx"
    tera -t "$MIAOU_BASEDIR/templates/nginx/_default.j2" "$EXPANDED_CONF" -o "$MIAOU_CONFIGDIR/nginx/_default" &>/dev/null

    tera -t "$MIAOU_BASEDIR/templates/nginx/hosts.j2" "$EXPANDED_CONF" -o "$MIAOU_CONFIGDIR/nginx/hosts" &>/dev/null
    PREFIX="" echo OK

    echo -n "pushing configuration to <$DMZ_CONTAINER> ... "
    for f in "$MIAOU_CONFIGDIR"/nginx/*; do
        lxc file push --uid=0 --gid=0 "$f" "$DMZ_CONTAINER/etc/nginx/sites-available/" &>/dev/null
    done
    PREFIX="" echo OK

    # enable every available site, validate the config (nginx -tq) and restart;
    # \$i is escaped so it expands inside the container, not here
    cat <<EOF | PREFIX="miaou:build:dmz" lxc_exec "$DMZ_CONTAINER"
cd /etc/nginx/sites-enabled/
for i in ../sites-available/*; do
# echo dmz: enabling... \$i
ln -sf \$i
done
nginx -tq
systemctl restart nginx
EOF
    echo "nginx reloaded successfully!"
}
|||
|
|||
# monit_show()
# Print a one-line status for every monitored host: a green check when the
# backend answers over HTTP within $MAX_WAIT seconds, a red cross otherwise.
function monit_show() {
    PREFIX="monit:show"
    : $PREFIX

    readarray -t hosts < <(yqmt '.expanded.monitored.hosts[] | [ .container, .port, .fqdn, .app ]')
    echo "================="
    echo "${#hosts[@]} available hosts"
    echo "================="

    for host in "${hosts[@]}"; do
        # tabular row: container port fqdn app
        read -r -a item <<<"$host"
        container=${item[0]}
        port=${item[1]}
        fqdn=${item[2]}
        app=${item[3]}

        # mimic the project's prefixed-echo layout for aligned output
        [[ -n ${PREFIX:-} ]] && printf "${DARK}%25.25s${NC} " "${PREFIX}"

        # probe the backend directly (container:port), IPv4, headers only
        if curl -m $MAX_WAIT -I -4so /dev/null "http://$container:$port"; then
            builtin echo -ne "${GREEN}✔${NC}"
        else
            builtin echo -ne "${RED}✘${NC}"
        fi
        printf "\t%10.10s\thttps://%-40s\thttp://%s\n" "$app" "$fqdn" "$container:$port"
    done

}
|||
|
|||
# build_monit()
# Verify every monitored host end-to-end (backend listening, DMZ dispatching,
# certificate valid outside dev), then render the monit templates.
# Exits 2/3 on hard failures; a missing certificate is only a warning.
function build_monit() {

    # test whether monitored items actually run safely
    PREFIX="monit:build"
    echo -n "testing monitored hosts ..."
    readarray -t hosts < <(yqmt '.expanded.monitored.hosts[] | [ .container, .port, .fqdn ]')
    for host in "${hosts[@]}"; do
        read -r -a item <<<"$host"
        container=${item[0]}
        port=${item[1]}
        fqdn=${item[2]}

        # the backend must listen on the expected port inside its container
        if ! (lxc exec "$container" -- ss -tln | grep -q "\(0.0.0.0\|*\):$port"); then
            echoerr
            echoerr "no HTTP server responds on <$container.lxd:$port>"
            echoerr "please review configuration <miaou.yaml> for fqdn: $fqdn"
            exit 2
        fi

        # the DMZ reverse-proxy must answer (certificate ignored), and in
        # non-dev targets the certificate itself must also be valid;
        # NOTE: relies on the global `target` set earlier by expand_conf
        if ! curl_check_unsecure "https://$fqdn"; then
            echoerr
            echoerr "DMZ does not seem to dispatch <https://$fqdn> please review DMZ Nginx proxy"
            exit 3
        elif [[ "$target" != 'dev' ]] && ! curl_check "https://$fqdn"; then
            PREFIX="" echo
            echowarn "T=$target missing valid certificate for fqdn <https://$fqdn> please review DMZ certbot"
        fi

    done
    PREFIX="" echo OK

    # templates for monit
    echo -n "copying templates for monit ..."
    mkdir -p "$MIAOU_CONFIGDIR/monit"
    tera -t "$MIAOU_BASEDIR/templates/monit/containers.j2" "$EXPANDED_CONF" -o "$MIAOU_CONFIGDIR/monit/containers" >/dev/null
    tera -t "$MIAOU_BASEDIR/templates/monit/hosts.j2" "$EXPANDED_CONF" -o "$MIAOU_CONFIGDIR/monit/hosts" >/dev/null
    PREFIX="" echo OK
}
|||
|
|||
# count_service_for_container(container: string)
# Print how many enabled services run inside the given container according
# to the expanded configuration.
function count_service_for_container() {
    # fix: the parameter is any container name — it was confusingly stored in
    # a variable called container_mail_passthrough; also scope both vars local
    local container="$1"
    local count
    count=$(yqm ".expanded.services.[] | select(.container == \"$container\") | .fqdn" | wc -l)
    builtin echo "$count"
}
|||
|
|||
# build_nftables()
# Render the lxd nftables table from its template — optionally injecting the
# IP of the single container allowed to send mail (passthrough) — and reload
# nftables when the generated rules differ from the installed ones.
function build_nftables() {
    PREFIX="miaou:nftables:build"
    mkdir -p "$MIAOU_CONFIGDIR/nftables.rules.d"

    container_mail_passthrough=$(yqm '.firewall.container_mail_passthrough')
    if [[ "$container_mail_passthrough" != null ]]; then
        # resolve the passthrough container's eth0 IPv4 from lxc's csv output
        ip_mail_passthrough=$(lxc list "$container_mail_passthrough" -c4 -f csv | grep eth0 | cut -d ' ' -f1)
        [[ -z "$ip_mail_passthrough" ]] && echoerr "container <$container_mail_passthrough> passthrough unknown ip!" && exit 55
        echo "passthrough=$container_mail_passthrough/$ip_mail_passthrough"

        # safety: exactly one service may live on the passthrough container
        count=$(count_service_for_container "$container_mail_passthrough")
        [[ $count == 0 ]] && echowarn "no service detected => no passthrough, no change!"
        [[ $count -gt 1 ]] && echoerr "count <$count> services detected on container <$container_mail_passthrough>, please disable some and leave only one service for safety!!!" && exit 56

        # the template reads ip_mail_passthrough from the environment (-e)
        ip_mail_passthrough=$ip_mail_passthrough tera -e --env-key env -t "$MIAOU_BASEDIR/templates/nftables/lxd.table.j2" "$EXPANDED_CONF" -o "$MIAOU_CONFIGDIR/nftables.rules.d/lxd.table" &>/dev/null
    else
        echo "no container passthrough"
        tera -t "$MIAOU_BASEDIR/templates/nftables/lxd.table.j2" "$EXPANDED_CONF" -o "$MIAOU_CONFIGDIR/nftables.rules.d/lxd.table" &>/dev/null
    fi

    # install + reload only when the rules actually changed
    if ! diff -q "$MIAOU_CONFIGDIR/nftables.rules.d/lxd.table" /etc/nftables.rules.d/lxd.table; then
        sudo_required "reloading nftables"
        echo -n "reloading nftables..."
        sudo cp "$MIAOU_CONFIGDIR/nftables.rules.d/lxd.table" /etc/nftables.rules.d/lxd.table
        sudo systemctl reload nftables
        PREFIX="" echo OK
    fi
}
|||
|
|||
# check whether http server responds 200 OK, required <url>, ie: http://example.com:8001, https://example.com
function curl_check() {
    arg1_required "$@"
    local headers
    headers=$(curl -m "$MAX_WAIT" -sLI4 "$1")
    grep -q "^HTTP.* 200" <<<"$headers"
}
|||
|
|||
# check whether https server responds 200 OK, even unsecured certificate (auto-signed in mode DEV)
function curl_check_unsecure() {
    arg1_required "$@"
    local headers
    headers=$(curl -m "$MAX_WAIT" -skLI4 "$1")
    grep -q "^HTTP.* 200" <<<"$headers"
}
|||
|
|||
# get_dmz_ip()
# Resolve and print the IPv4 address of the DMZ container via its .lxd DNS
# name. Exits 1 when the container is down or the lookup yields no valid ip.
function get_dmz_ip() {
    if ! container_running "$DMZ_CONTAINER"; then
        echowarn "Container running dmz <$DMZ_CONTAINER> seems down"
        echoerr "please \`lxc start $DMZ_CONTAINER\` or initialize first!"
        exit 1
    fi

    # parse `host` output, taking the 4th space-separated field as the address
    # NOTE(review): fragile if the resolver prints aliases or an IPv6 record
    # first — valid_ipv4 below catches the mismatch, but confirm the format
    dmz_ip=$(host "$DMZ_CONTAINER.lxd" | cut -d ' ' -f4)
    if ! valid_ipv4 "$dmz_ip"; then
        echowarn "dmz seems up but no valid ip <$dmz_ip> found!"
        echoerr "please fix this networking issue, then retry..."
        exit 1
    else
        builtin echo "$dmz_ip"
    fi
}
|||
|
|||
# fetch_container_of_type(TYPE)
# Print every container name (one per line) whose recipe list contains TYPE;
# prints nothing when no container matches.
function fetch_container_of_type() {
    local type="$1"
    readarray -t dmzs < <(yqm ".containers.[].[] | select(.==\"$type\") | parent | key")
    # a plain loop covers all three cardinalities (0, 1, many) identically
    local entry
    for entry in "${dmzs[@]}"; do
        builtin echo "$entry"
    done
}
|||
|
|||
# get_container_for_domain_subdomain_app(DOMAIN, SUBDOMAIN, APP)
# Print the single container providing recipe APP. Exits 1 when none exists,
# exits 2 when several do (the service must then name its container itself).
function get_container_for_domain_subdomain_app() {
    local domain="$1"
    local subdomain="$2"
    local app="$3"
    readarray -t containers < <(fetch_container_of_type "$app")
    case ${#containers[@]} in
    # NOTE(review): the \n sequences below render literally unless echoerr
    # interprets escapes — confirm against lib/functions.sh
    0) echoerr "no container of type <$app> found amongst containers for $subdomain.$domain\nHINT : Please, either :\n1. define at least one container for recipe <$app>\n2. remove all services related to recipe <$app>" && exit 1 ;;
    1) builtin echo "${containers[0]}" ;;
    *)
        for d in "${containers[@]}"; do
            echowarn "container of type $app found in <$d>"
        done
        echoerr "multiple containers (${#containers[@]}) provided same app <$app>, therefore container is mandatory alongside $subdomain.$domain" && exit 2
        ;;
    esac
}
|||
|
|||
# get_unique_container_dmz()
# Print the name of the one container providing the dmz recipe.
# Exits 1 when none is defined, exits 2 when more than one is.
function get_unique_container_dmz() {
    readarray -t containers < <(fetch_container_of_type "dmz")
    if [[ ${#containers[@]} -eq 0 ]]; then
        echoerr "no container of type <dmz> found amongst containers" && exit 1
    elif [[ ${#containers[@]} -eq 1 ]]; then
        builtin echo "${containers[0]}"
    else
        for d in "${containers[@]}"; do
            echowarn "container of type dmz found in <$d>"
        done
        echoerr "multiple dmz (${#containers[@]}) are not allowed, please select only one " && exit 2
    fi
}
|||
|
|||
# prepare_dmz_container() — run the dmz install recipe against the selected
# DMZ container
function prepare_dmz_container() {
    "$MIAOU_BASEDIR"/recipes/dmz/install.sh "$DMZ_CONTAINER"
}
|||
|
|||
# check_resolv_conf()
# Compare the first nameserver of /etc/resolv.conf with the lxd bridge
# gateway; returns 21 when they differ, 0 when they match.
function check_resolv_conf() {
    local bridge_gw resolver
    bridge_gw=$(lxc network get lxdbr0 ipv4.address | cut -d'/' -f1)
    resolver=$(grep nameserver /etc/resolv.conf | head -n1 | cut -d ' ' -f2)

    PREFIX="resolver:check" echo "container resolver is <$resolver>"
    PREFIX="resolver:check" echo "container bridge is <$bridge_gw>"
    if [[ "$bridge_gw" == "$resolver" ]]; then
        return 0
    fi
    return 21
}
|||
|
|||
# prepare_containers()
# For every container declared in the configuration, run the install recipe
# of each of its services. Exits 50 when a declared recipe has no install.sh.
function prepare_containers() {
    PREFIX="miaou:prepare"
    # tabular rows: container followed by its service names
    readarray -t containers < <(yqmt ".containers.[] | [ key, .[] ] ")
    for i in "${containers[@]}"; do
        read -r -a item <<<"$i"
        container=${item[0]}
        # remaining fields are the services to install on this container
        for ((j = 1; j < ${#item[@]}; j++)); do
            service="${item[$j]}"
            recipe_install="$MIAOU_BASEDIR/recipes/$service/install.sh"
            if [[ -f "$recipe_install" ]]; then
                echo "install [$service] onto container <$container>"
                "$recipe_install" "$container"
            else
                echoerr "FAILURE, for container <$container>, install recipe [$service] not found!"
                echoerr "please review configuration, mismatch recipe name maybe?"
                exit 50
            fi
        done
    done
}
|||
|
|||
# build_services()
# For each enabled service, invoke its crud recipe: first a read (-r) to test
# existence, then a create (-c) when the read fails. A missing crud recipe is
# only a warning (install-only recipes are legitimate).
function build_services() {
    PREFIX="miaou:build:services"
    echo "building services..."
    readarray -t services < <(yqmt '.expanded.services[] | [ .[] ]')
    for i in "${services[@]}"; do

        # positional fields follow the key order used when .expanded.services
        # is built in expand_conf: domain subdomain fqdn container port app name
        read -r -a item <<<"$i"
        fqdn=${item[2]}
        container=${item[3]}
        port=${item[4]}
        app=${item[5]}
        name=${item[6]:-} # name is optional (defaults to "")

        recipe="$MIAOU_BASEDIR/recipes/$app/crud.sh"
        if [[ -f "$recipe" ]]; then
            echo "read [$app:$name] onto container <$container>"
            if ! "$recipe" -r --port "$port" --container "$container" --name "$name" --fqdn "$fqdn"; then
                echoinfo "CREATE RECIPE"
                "$recipe" -c --port "$port" --container "$container" --name "$name" --fqdn "$fqdn"
                echoinfo "CREATE RECIPE: OK"
            fi
        else
            echowarn "for container <$container>, crud recipe [$app] not found!"
        fi
    done
}
|||
|
|||
### MAIN

. "$MIAOU_BASEDIR/lib/init.sh"

readonly CONF="/etc/miaou/miaou.yaml"
readonly DEFAULTS="/etc/miaou/defaults.yaml"
readonly EXPANDED_CONF="$MIAOU_CONFIGDIR/miaou.expanded.yaml"
readonly BRIDGE="lxdbr0"
readonly MAX_WAIT=3 # timeout in seconds

# shellcheck disable=SC2034
declare -a options=("$@")

# -f/--force: rebuild even when the expanded conf looks up-to-date
FORCE=false
if containsElement options "-f" || containsElement options "--force"; then
    FORCE=true
fi

if containsElement options "history"; then
    echo "TODO: HISTORY"
    exit 0
fi

# "config": open the main configuration in the editor; exit early when the
# file is unchanged compared to the archived copy
if containsElement options "config"; then
    editor /etc/miaou/miaou.yaml
    # fix: quote "$HOME" (was unquoted)
    # NOTE(review): this path hardcodes ~/.config/miaou while the rest of the
    # script uses $MIAOU_CONFIGDIR — confirm both resolve to the same place
    if diff -q /etc/miaou/miaou.yaml "$HOME/.config/miaou/archived/miaou.yaml/previous"; then
        exit 0
    fi
fi

# full pipeline only when the expanded conf must be (re)built
if check_expand_conf; then
    archive_allconf
    expand_conf
    check_resolv_conf
    build_nftables
    prepare_containers

    DMZ_CONTAINER=$(get_unique_container_dmz)
    readonly DMZ_CONTAINER
    build_services

    DMZ_IP=$(get_dmz_ip)
    readonly DMZ_IP
    build_dmz_reverseproxy

    build_routes
    build_monit

fi
monit_show
@ -0,0 +1,80 @@ |
|||
#!/bin/bash
readonly DOMAIN=$1
readonly PROTOCOL=${2:-https}
readonly TIMEOUT=10 # max seconds to wait

# incremented by check_ssl for every failing/expiring certificate;
# used as the script's exit code
result=0

function usage {
    # BUG fix: the port pairs were swapped — MAIN dispatches pop3->995 and
    # imap->993 (the standard POP3S/IMAPS ports)
    echo 'usage: <DOMAIN> [ https | 443 | smtps | 587 | pop3 | 995 | imap | 993 | ALL ]'
    # fix: 'exit -1' is not portable (wraps to 255); use a plain non-zero code
    exit 1
}
|||
|
|||
# check_ssl(PROTOCOL_LABEL, PORT)
# Connect to $DOMAIN:PORT with openssl s_client, extract the certificate's
# expiry date and print ok / WARNING (<=20 days left) / FAILURE (expired or
# unreachable) with ANSI colors. Increments the global $result when not ok.
function check_ssl {
    local protocol=$1
    case $protocol in
    SMTPS )
        # port 587 is submission with STARTTLS, so openssl must upgrade first
        local extra="-starttls smtp -showcerts"
        ;;
    esac

    echo -n "$protocol "


    # $extra intentionally unquoted: it must word-split into several options
    certificate_info=$(echo | timeout $TIMEOUT openssl s_client $extra -connect $DOMAIN:$2 2>/dev/null)

    issuer=$(echo "$certificate_info" | openssl x509 -noout -text 2>/dev/null | grep Issuer: | cut -d: -f2)
    date=$( echo "$certificate_info" | openssl x509 -noout -enddate 2>/dev/null | cut -d'=' -f2)
    # days until expiry; negative when already expired
    date_s=$(date -d "${date}" +%s)
    now_s=$(date -d now +%s)
    date_diff=$(( (date_s - now_s) / 86400 ))

    if [[ -z $date ]]; then
        # no certificate retrieved at all (host down / port closed)
        echo -n "does not respond "
        echo -ne "\033[31;1m"
        echo FAILURE
        (( result += 1 ))
    elif [[ $date_diff -gt 20 ]]; then
        echo -n "issuer:$issuer "
        echo -n "will expire in $date_diff days "
        echo -ne "\033[32;1m"
        echo ok
    elif [[ $date_diff -gt 0 ]];then
        # close to expiry: warn so renewal can happen in time
        echo -n "issuer:$issuer "
        echo -n "will expire in $date_diff days "
        echo -ne "\033[31;1m"
        echo WARNING
        (( result += 1 ))
    else
        echo -n "issuer:$issuer "
        echo -n "has already expired $date_diff ago "
        echo -ne "\033[31;1m"
        echo FAILURE
        (( result += 1 ))
    fi
    echo -ne "\033[0m" # reset color
}
|||
|
|||
#MAIN
# a domain is mandatory; protocol defaults to https (see header)
[[ -z "$DOMAIN" ]] && usage

# dispatch on protocol name or port number; ALL probes every known endpoint
case $PROTOCOL in
https | 443)
    check_ssl HTTPS 443
    ;;
smtps | 587)
    check_ssl SMTPS 587
    ;;
pop3 | 995)
    check_ssl POP3 995
    ;;
imap | 993)
    check_ssl IMAP 993
    ;;
all | ALL)
    check_ssl HTTPS 443
    check_ssl SMTPS 587
    check_ssl POP3 995
    check_ssl IMAP 993
    ;;
*)
    usage
    ;;
esac

# non-zero when at least one certificate failed or is about to expire
exit "$result"
@ -0,0 +1,18 @@ |
|||
#!/bin/bash

# Run the CagettePei neko cron entry point (minute|daily) in every
# installed instance found under /var/www/cagettepei.

case $1 in
minute) ;;
daily) ;;
*) echo "expected [minute|daily]" && exit 1 ;;
esac

SELECTOR=$1

for i in /var/www/cagettepei/*; do
    if [[ -d $i ]]; then
        # BUG fix: a failed cd previously only echoed, then ran neko in
        # whatever the current directory happened to be — skip instead
        if ! cd "$i/www"; then
            echo "Folder not found: $i/www"
            continue
        fi
        echo "cron-$SELECTOR in: $i"
        neko index.n cron/$SELECTOR
        echo
    fi
done
@ -0,0 +1,8 @@ |
|||
# CagettePei virtual host (rendered by tera), one vhost per app instance
Listen {{ env.APP_PORT }}
<VirtualHost *:{{ env.APP_PORT }}>
	DirectoryIndex index.n
	DocumentRoot /var/www/cagettepei/{{env.APP_NAME}}/www/

	# per-app error log — the cagettepei/{{env.APP_NAME}} log directory must
	# exist before apache starts (presumably created by the install recipe —
	# TODO confirm)
	ErrorLog ${APACHE_LOG_DIR}/cagettepei/{{env.APP_NAME}}/debug.log
	ErrorLogFormat "[%{uc}t] %M"
</VirtualHost>
@ -0,0 +1,10 @@ |
|||
[Unit]
Description=Run batch cagettepei every day

[Service]
# oneshot: this is a batch that runs to completion on each activation by the
# matching timer unit, not a long-running daemon
Type=oneshot
User=www-data
SyslogIdentifier=cagettepei
ExecStart=/var/www/cagettepei/cagettepei-batch daily

[Install]
WantedBy=multi-user.target
@ -0,0 +1,10 @@ |
|||
[Unit]
Description=Timer for batch cagettepei every day
# NOTE(review): Requires= pulls apache2 in but does not order against it;
# confirm whether After=apache2.service is also wanted
Requires=apache2.service

[Timer]
# fires once a day (midnight by default for OnCalendar=daily)
OnCalendar=daily
Unit=cagettepei-batch-day.service

[Install]
WantedBy=timers.target
@ -0,0 +1,10 @@ |
|||
[Unit]
Description=Run batch cagettepei every minute

[Service]
# oneshot: this is a batch that runs to completion on each activation by the
# matching timer unit, not a long-running daemon
Type=oneshot
User=www-data
SyslogIdentifier=cagettepei
ExecStart=/var/www/cagettepei/cagettepei-batch minute

[Install]
WantedBy=multi-user.target
@ -0,0 +1,10 @@ |
|||
[Unit]
Description=Timer for batch cagettepei every minute
# NOTE(review): Requires= pulls apache2 in but does not order against it;
# confirm whether After=apache2.service is also wanted
Requires=apache2.service

[Timer]
# fires at the start of every minute
OnCalendar=minutely
Unit=cagettepei-batch-minute.service

[Install]
WantedBy=timers.target
@ -0,0 +1,23 @@ |
|||
# Dolibarr vhost (rendered by tera): static files served directly, PHP
# handed to the php-fpm unix socket of the templated PHP version
server {
    listen {{ APP_PORT }} default_server;

    root /var/www/{{APP_NAME}}/htdocs; # Check this
    error_log /var/log/nginx/{{APP_NAME}}/error.log;

    index index.php index.html index.htm;
    charset utf-8;

    location / {
        # fall back to the front controller when no file/dir matches
        try_files $uri $uri/ /index.php;
    }

    # any *.php path segment goes through php-fpm
    location ~ [^/]\.php(/|$) {
        client_max_body_size 50M;
        try_files $uri =404;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        fastcgi_read_timeout 600;
        include fastcgi_params;
        fastcgi_pass unix:/var/run/php/php{{PHP_VERSION}}-fpm.sock;
    }

}
@ -0,0 +1,17 @@ |
|||
# Odoo 12 instance configuration for {{ APP_NAME }} (rendered by tera)
[options]
data_dir = /home/odoo/data-{{ APP_NAME }}

xmlrpc_port = {{ APP_PORT }}
longpolling_port = {{ LONG_PORT }}

# database credentials follow the odoo12-<app> naming convention
db_host = ct1.lxd
db_name = odoo12-{{ APP_NAME }}
db_user = odoo12-{{ APP_NAME }}
db_password = odoo12-{{ APP_NAME }}
# expose the database manager only outside production
list_db = {{ target != 'prod'}}

workers = 2
db_maxconn = 10
db_filter = .*
syslog = True
# behind the miaou nginx reverse-proxy
proxy_mode = True
@ -0,0 +1,14 @@ |
|||
[Unit]
Description=Odoo12 {{ APP_NAME }}
After=network.target

[Service]
Type=simple
SyslogIdentifier=odoo12-{{ APP_NAME }}
PermissionsStartOnly=true
User=odoo
Group=odoo
# runs odoo from its dedicated virtualenv with the per-app configuration
ExecStart=/home/odoo/venv/bin/python3 /home/odoo/odoo12/odoo-bin -c /etc/odoo12/{{ APP_NAME }}.conf

[Install]
WantedBy=multi-user.target
@ -0,0 +1,31 @@ |
|||
#!/bin/bash

# Download an Odoo 12 addon (odoo12-addon-<ADDON>) from PyPI and unpack it
# into the addons folder of a given CLIENT instance.

CLIENT=$1
ADDON=$2

function usage() {
    echo 'usage: <CLIENT> <ADDON>'
    exit 1
}

# VERIFICATION

[[ -z "$CLIENT" || -z "$ADDON" ]] && usage
[[ ! -d "/home/odoo/data-${CLIENT}" ]] && echo "unknown CLIENT <${CLIENT}>, should exist in folder /home/odoo/data-..." && exit 2

URL="https://pypi.org/project/odoo12-addon-${ADDON}/"
# fix: test curl's status directly instead of the `[[ $? -ne 0 ]]` pattern
if ! curl --output /dev/null --silent --head --fail "${URL}"; then
    echo "unknown ADDON <${ADDON}>, should be downloadable from: ${URL}"
    exit 3
fi

[[ -d "/home/odoo/data-${CLIENT}/addons/12.0/${ADDON}" ]] && echo "ADDON <${ADDON}> already exists, consider removing manually!" && exit 4

# ACTION

# scrape the package download url from the PyPI project page
package=$(curl -Ls "${URL}" | rg '<a href="(https://files.pythonhosted.org/.*)">' -r '$1')
# fix: quote "$package" so the url cannot be word-split or glob-expanded
wget "$package" -O /tmp/package.zip
rm /tmp/ADDON -rf && mkdir /tmp/ADDON
unzip /tmp/package.zip 'odoo/addons/*' -d /tmp/ADDON/
chown -R odoo:odoo /tmp/ADDON/
mv /tmp/ADDON/odoo/addons/* "/home/odoo/data-${CLIENT}/addons/12.0/"

echo "FORCE RELOADING ADDONS with: ./web?debug#menu_id=48&action=36"
@ -0,0 +1,33 @@ |
|||
#!/bin/bash

# Download an Odoo 15 addon (odoo-addon-<ADDON>) from PyPI, unpack it into
# the CLIENT instance's 15.0 addons folder, then install it into the database.

CLIENT=$1
ADDON=$2

function usage() {
    echo 'usage: <CLIENT> <ADDON>'
    exit 100
}

# VERIFICATION

[[ -z "$CLIENT" || -z "$ADDON" ]] && usage
[[ ! -d "/home/odoo/data-${CLIENT}" ]] && echo "unknown CLIENT <${CLIENT}>, should exist in folder /home/odoo/data-..." && exit 2

URL="https://pypi.org/project/odoo-addon-${ADDON}/"
# fix: test curl's status directly instead of the `[[ $? -ne 0 ]]` pattern
if ! curl --output /dev/null --silent --head --fail "${URL}"; then
    echo "unknown ADDON <${ADDON}>, should be downloadable from: ${URL}"
    exit 3
fi

[[ -d "/home/odoo/data-${CLIENT}/addons/15.0/${ADDON}" ]] && echo "ADDON <${ADDON}> already exists, consider removing manually!" && exit 4

# ACTION

# scrape the package download url from the PyPI project page
package=$(curl -Ls "$URL" | rg '<a href="(https://files.pythonhosted.org/.*)">' -r '$1')
# fix: quote "$package" so the url cannot be word-split or glob-expanded
wget "$package" -O /tmp/package.zip
rm /tmp/ADDON -rf && mkdir /tmp/ADDON
unzip /tmp/package.zip 'odoo/addons/*' -d /tmp/ADDON/
# technical addon name as it appears inside the archive listing
real_name=$(unzip -l /tmp/package.zip | head -n4 | tail -n1 | cut -d'/' -f3)
chown -R odoo:odoo /tmp/ADDON/
mv /tmp/ADDON/odoo/addons/* "/home/odoo/data-$CLIENT/addons/15.0/"

# ADD
# install the module into the instance database, then stop
su odoo -c "python3.9 /home/odoo/odoo15/odoo-bin -c /etc/odoo15/$CLIENT.conf -i $real_name -d odoo15-$CLIENT --worker=0 --stop-after-init"
@ -0,0 +1,17 @@ |
|||
# Odoo 15 instance configuration for {{ APP_NAME }} (rendered by tera)
[options]
data_dir = /home/odoo/data-{{ APP_NAME }}

xmlrpc_port = {{ APP_PORT }}
longpolling_port = {{ LONG_PORT }}

# database credentials follow the odoo15-<app> naming convention
db_host = ct1.lxd
db_name = odoo15-{{ APP_NAME }}
db_user = odoo15-{{ APP_NAME }}
db_password = odoo15-{{ APP_NAME }}
# expose the database manager only outside production
list_db = {{ target != 'prod'}}

workers = 2
db_maxconn = 10
db_filter = .*
syslog = True
# behind the miaou nginx reverse-proxy
proxy_mode = True
@ -0,0 +1,14 @@ |
|||
[Unit]
Description=Odoo15 {{ APP_NAME }}
After=network.target

[Service]
Type=simple
SyslogIdentifier=odoo15-{{ APP_NAME }}
PermissionsStartOnly=true
User=odoo
Group=odoo
# NOTE(review): ExecStart uses a non-absolute interpreter path (python3.9);
# older systemd versions require an absolute path — compare with the odoo12
# unit which uses /home/odoo/venv/bin/python3. Confirm on the target release.
ExecStart=python3.9 /home/odoo/odoo15/odoo-bin -c /etc/odoo15/{{ APP_NAME }}.conf

[Install]
WantedBy=multi-user.target
@ -0,0 +1,44 @@ |
|||
#!/bin/bash |
|||
|
|||
# detectWordpress()
# Walk up from the current directory until a wp-config.php is found and
# print that directory; exits 100 when reaching / without finding one.
function detectWordpress() {
    local dir
    dir=$(pwd)
    until [[ "$dir" == / || -f "$dir/wp-config.php" ]]; do
        dir=$(dirname "$dir")
    done

    if [[ "$dir" == / ]]; then
        echo >&2 "no WORDPRESS detected from current folder <$(pwd)>!"
        exit 100
    fi

    echo "$dir"
}
|||
|
|||
## MAIN
## ----

set -Eeuo pipefail
WP_BASE=$(detectWordpress)
WP_CONFIG="$WP_BASE/wp-config.php"
# credentials scraped from the single-quoted define() values in wp-config.php
DB_HOST=$(grep DB_HOST "$WP_CONFIG" | cut -d"'" -f4)
DB_NAME=$(grep DB_NAME "$WP_CONFIG" | cut -d"'" -f4)
DB_USER=$(grep DB_USER "$WP_CONFIG" | cut -d"'" -f4)
DB_PASSWORD=$(grep DB_PASSWORD "$WP_CONFIG" | cut -d"'" -f4)
TODAY=$(date +%F)
BACKUP_DIR="/mnt/SHARED/wordpress-backup/$DB_NAME-$TODAY"

# empty a pre-existing backup dir, otherwise create it
# (explicit if/else avoids the `A && B || C` pitfall under set -e)
if [[ -d "$BACKUP_DIR" ]]; then
    find "$BACKUP_DIR" -mindepth 1 -delete
else
    mkdir -p "$BACKUP_DIR"
fi

echo -n "backing up database..."
# BUG fix: connect as DB_USER — the script parsed DB_USER but then passed
# DB_NAME to -u, which only worked when user and database shared a name
mariadb-dump -h "$DB_HOST" -u "$DB_USER" -p"$DB_PASSWORD" "$DB_NAME" | gzip >"$BACKUP_DIR/$DB_NAME".mariadb.gz
echo OK

echo -n "compressing as tar.gz the wp-content folder ..."
tar -czvf "$BACKUP_DIR/wp-content.tgz" -C "$WP_BASE" wp-content
echo OK

echo -n "copying wp-config.php file ..."
cp "$WP_BASE/wp-config.php" "$BACKUP_DIR"
echo OK

echo "successful backup in $BACKUP_DIR, db + wp-content + wp-config"
@ -0,0 +1,37 @@ |
|||
# WordPress vhost template ({{ env.APP_NAME }}), rendered per application.
server {
    listen {{ env.APP_PORT }} default_server;

    access_log /var/log/nginx/{{ env.APP_NAME }}/wp-access.log;
    error_log /var/log/nginx/{{ env.APP_NAME }}/wp-error.log;

    # Allow large media uploads through the WordPress admin.
    client_max_body_size 50M;
    root /var/www/wordpress/{{ env.APP_NAME }};
    index index.php index.html index.htm;
    charset UTF-8;

    # Pretty permalinks: fall back to index.php keeping the query string.
    location / {
        try_files $uri/ /index.php?$args;
    }

    # Hand PHP scripts to PHP-FPM over its unix socket; "=404" avoids
    # forwarding non-existent paths to the interpreter.
    location ~ \.php$ {
        try_files $uri =404;
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_pass unix:/run/php/php-fpm.sock;
        fastcgi_index index.php;
        include fastcgi.conf;
    }

    # Static assets: permissive CORS, no access logging, 30-day client cache.
    location ~* \.(js|css|png|jpg|jpeg|svg|gif|ico|eot|otf|ttf|woff|woff2|mp3|wav|ogg)$ {
        add_header Access-Control-Allow-Origin *;
        access_log off; log_not_found off; expires 30d;
    }

    # Mailpoet - tinyMCE quick fix
    location ~ /wp-content/plugins/wysija-newsletters/js/tinymce/.*\.(htm|html)$ {
        add_header Access-Control-Allow-Origin *;
        access_log off; log_not_found off; expires 30d;
    }

    location = /robots.txt { access_log off; log_not_found off; }
    # Deny access to hidden files (.htaccess, .git, ...).
    location ~ /\. { deny all; access_log off; log_not_found off; }
}
@ -0,0 +1,176 @@ |
|||
#!/bin/bash |
|||
|
|||
### error_handling |
|||
|
|||
# ERR-trap handler: report the failing source line and exit with the
# command's status. Exit codes >= 100 are deliberate application errors
# and pass through silently.
function trap_error() {
    error_code=$1
    error_line=$2

    if [[ ${error_code} -lt 100 ]]; then
        # Use printf placeholders instead of interpolating variables into
        # the format string (avoids surprises if a value contains '%').
        printf '\nEXIT #%s due to error at line %s : \n-----------------------------------------\n' "$error_code" "$error_line"
        # Print the offending line of this script ($0 quoted for safety).
        sed "${error_line}q;d" "$0"
        echo
    fi
    exit $error_code
}
set -e
trap 'trap_error $? $LINENO' ERR
|||
|
|||
### ------------------ |
|||
|
|||
# Locate the WordPress root by climbing parent directories until a
# wp-config.php shows up; abort with code 100 upon reaching "/".
function detectWordpress() {
    local dir
    dir=$(pwd)
    until [[ "$dir" == / || -f "$dir/wp-config.php" ]]; do
        dir=$(dirname "$dir")
    done

    if [[ "$dir" == / ]]; then
        echo >&2 "no WORDPRESS detected!"
        exit 100
    fi

    echo "$dir"
}
|||
|
|||
# Extract a value from a commented-out define (lines starting with '#')
# in wp-config.php, e.g. WP_HOME. Exits 2 when the entry is missing.
# NOTE: 'local result=$(...)' intentionally masks grep's exit status so
# a miss is handled here instead of tripping the ERR trap.
function getConfigComment() {
    local result=$(grep -e "^#" "$WP_CONFIG" | grep "$1" | head -n1 | cut -d ',' -f2 | cut -d \' -f2)
    if [[ -z "$result" ]]; then
        # BUGFIX: report on stderr so callers using $(...) don't capture
        # the error message as the value.
        echo >&2 "config comment: $1 not found!"
        exit 2
    fi
    echo "$result"
}
|||
# Extract a define()'d value (DB_HOST, DB_NAME, ...) from wp-config.php.
# Exits 2 when the entry is missing.
# NOTE: 'local result=$(...)' intentionally masks grep's exit status so
# a miss is handled here instead of tripping the ERR trap.
function getConfigEntry() {
    local result=$(grep "$1" "$WP_CONFIG" | head -n1 | cut -d ',' -f2 | cut -d \' -f2)
    if [[ -z "$result" ]]; then
        # BUGFIX: report on stderr so callers using $(...) don't capture
        # the error message as the value.
        echo >&2 "config entry: $1 not found!"
        exit 2
    fi
    echo "$result"
}
|||
|
|||
# Run the SQL statement $1 against the WordPress database and print the
# raw (tab-separated, headerless) result. Exits 3 on any mysql ERROR.
# stderr is merged so mysql's "ERROR ..." text can be detected below.
function sql() {
    # Quote connection parameters: credentials may contain spaces/globs.
    local result=$(echo "$1" | mysql -srN -u "$DB_USER" -h "$DB_HOST" "$DB_NAME" -p"$DB_PASS" 2>&1)
    if [[ $result =~ ^ERROR ]]; then
        echo >&2 "sql failure: $result"
        exit 3
    else
        echo "$result"
    fi
}
|||
|
|||
# Run the SQL statements contained in file $1; same contract as sql().
function sqlFile() {
    # Quote connection parameters and feed the file directly (no cat needed).
    local result=$(mysql -srN -u "$DB_USER" -h "$DB_HOST" "$DB_NAME" -p"$DB_PASS" <"$1" 2>&1)
    if [[ $result =~ ^ERROR ]]; then
        echo >&2 "sql failure: $result"
        exit 3
    else
        echo "$result"
    fi
}
|||
# Replace the old base URL ($1) with the new one ($2) everywhere WordPress
# stores absolute URLs: the home/siteurl options, post GUIDs, post content
# and post meta.
function changeHome() {
    local FROM=$1
    local TO=$2
    sql "UPDATE wp_options SET option_value = replace(option_value, '$FROM', '$TO') WHERE option_name = 'home' OR option_name = 'siteurl'"
    sql "UPDATE wp_posts SET guid = replace(guid, '$FROM','$TO')"
    sql "UPDATE wp_posts SET post_content = replace(post_content, '$FROM', '$TO')"
    sql "UPDATE wp_postmeta SET meta_value = replace(meta_value,'$FROM','$TO')"
}
|||
|
|||
# Print the file name of the most recently applied migration
# (empty output when the migrations table has no rows yet).
function lastMigration() {
    sql "SELECT migration_file FROM migrations ORDER BY last_run DESC LIMIT 1"
}
|||
|
|||
# Apply, in lexical order, every migration file that comes after
# LAST_MIGRATION ($1), then record the last applied file in the
# migrations table. An empty LAST_MIGRATION means "apply everything".
function upgradeMigration() {
    local LAST_MIGRATION=$1
    local UPGRADE=false
    if [[ "$LAST_MIGRATION" == '' ]]; then
        UPGRADE=true
    fi

    local MIG_BASE="$WP_BASE/wp-content/migrations"
    # Plain files only: 'ls -p' marks directories with a trailing slash.
    local MIGRATIONS=$(ls -p1 $MIG_BASE | grep -v /)
    local MIG_FILE
    for mig in $MIGRATIONS; do
        if [[ "$UPGRADE" == true ]]; then
            printf "applying %50s ... " $mig
            printf "%d %d" $(sqlFile $MIG_BASE/$mig)
            echo " DONE"
            MIG_FILE=$mig
        else
            printf "useless %50s \n" $mig
            # Start applying with the file FOLLOWING the last recorded one.
            if [[ "$LAST_MIGRATION" == "$mig" ]]; then
                UPGRADE=true
            fi
        fi
    done

    if [[ $UPGRADE == true && $MIG_FILE != '' ]]; then
        # NOTE(review): inserts $mig (the last loop value) rather than
        # $MIG_FILE -- identical when the loop ended on an applied file,
        # but $MIG_FILE looks like the intended variable; confirm.
        local done=$(sql "INSERT INTO migrations(migration_file, last_run) VALUES ('$mig', NOW())")
        echo "all migrations succeeded, wrote: $mig"
    else
        echo "already up-to-date"
    fi
}
|||
|
|||
# Ensure the migration infrastructure exists: the wp-content/migrations
# folder on disk and the bookkeeping table in the database.
function buildMigrations() {
    local folder="$WP_BASE/wp-content/migrations"

    if [[ ! -d "$folder" ]]; then
        mkdir -p "$folder"
        echo "migrations folder created!"
    fi

    sql "CREATE TABLE IF NOT EXISTS migrations (id int(11) NOT NULL AUTO_INCREMENT, migration_file varchar(255) COLLATE utf8_unicode_ci NOT NULL, last_run varchar(45) COLLATE utf8_unicode_ci NOT NULL, PRIMARY KEY (id) )"
}
|||
|
|||
# Apply every SQL file under wp-content/migrations/<PLATFORM> (e.g. BETA,
# DEV, PROD) in lexical order. Unlike numbered migrations these are not
# recorded in the migrations table, so they replay on every run.
# The platform folder is optional -- silently skipped when absent.
function playEnvironment() {

    buildMigrations

    local PLATFORM=$1
    local PLATFORM_BASE="$WP_BASE/wp-content/migrations/$PLATFORM"
    if [[ -d "$PLATFORM_BASE" ]]; then
        echo play platform $PLATFORM

        # Plain files only: 'ls -p' marks directories with a trailing slash.
        local MIGRATIONS=$(ls -p1 $PLATFORM_BASE | grep -v /)
        for mig in $MIGRATIONS; do
            printf "applying %50s ... " $mig
            printf "%d %d" $(sqlFile $PLATFORM_BASE/$mig)
            echo " DONE"
        done
    fi
}
|||
|
|||
## MAIN
## ----

WP_BASE=$(detectWordpress)
WP_CONFIG="$WP_BASE/wp-config.php"
echo "WP_BASE = $WP_BASE"

# Target home URL comes from the commented-out WP_HOME define.
WP_HOME=$(getConfigComment WP_HOME)
echo "WP_HOME = $WP_HOME"

DB_HOST=$(getConfigEntry DB_HOST)
DB_NAME=$(getConfigEntry DB_NAME)
DB_USER=$(getConfigEntry DB_USER)
DB_PASS=$(getConfigEntry DB_PASSWORD)

CURRENT_HOME=$(sql "SELECT option_value FROM wp_options WHERE option_name = 'home'")
if [[ "$CURRENT_HOME" != "$WP_HOME" ]]; then
    echo "HOME detected = $CURRENT_HOME , needs to apply changes"
    # BUGFIX: call the function directly -- the original `$(changeHome ...)`
    # re-executed the function's stdout as a command.
    changeHome "$CURRENT_HOME" "$WP_HOME"
fi

# Pick the platform-specific migration set from the hostname:
# a beta*/dev* prefix or subdomain selects BETA/DEV, anything else PROD.
if [[ "$WP_HOME" =~ https?:\/\/beta[0-9]*\..*|https?:\/\/.*\.beta[0-9]*\..* ]]; then
    playEnvironment BETA
elif [[ "$WP_HOME" =~ https?:\/\/dev[0-9]*\..*|https?:\/\/.*\.dev[0-9]*\..* ]]; then
    playEnvironment DEV
else
    playEnvironment PROD
fi

CURRENT_MIGRATION=$(lastMigration)
upgradeMigration "$CURRENT_MIGRATION"
@ -0,0 +1,5 @@ |
|||
#!/bin/sh
# Daily cron hook: run autopostgresqlbackup when it is installed and executable.

if [ -x /usr/sbin/autopostgresqlbackup ]; then
    /usr/sbin/autopostgresqlbackup
fi
@ -0,0 +1,122 @@ |
|||
# =============================== |
|||
# === Debian specific options === |
|||
#================================ |
|||
|
|||
# By default, on Debian systems, only 'postgres' user |
|||
# is allowed to access PostgreSQL databases without password. |
|||
# In order to dump databases we need to run pg_dump/psql |
|||
# commands as 'postgres' with su. |
|||
# |
|||
# The following setting has been added to work around this issue.
|||
# (if it is set to empty, 'su' usage will be disabled) |
|||
SU_USERNAME=postgres |
|||
|
|||
#===================================================================== |
|||
# Set the following variables to your system needs |
|||
# (Detailed instructions below variables) |
|||
#===================================================================== |
|||
|
|||
# Username to access the PostgreSQL server e.g. dbuser |
|||
USERNAME=postgres |
|||
|
|||
# Password |
|||
# create a file $HOME/.pgpass containing a line like this |
|||
# hostname:*:*:dbuser:dbpass |
|||
# replace hostname with the value of DBHOST and postgres with |
|||
# the value of USERNAME |
|||
|
|||
# Host name (or IP address) of PostgreSQL server e.g localhost |
|||
DBHOST=localhost |
|||
|
|||
# List of DBNAMES for Daily/Weekly Backup e.g. "DB1 DB2 DB3" |
|||
DBNAMES="all" |
|||
|
|||
# pseudo database name used to dump global objects (users, roles, tablespaces) |
|||
GLOBALS_OBJECTS="postgres_globals" |
|||
|
|||
# Backup directory location e.g /backups |
|||
BACKUPDIR="/mnt/BACKUP/postgresql" |
|||
|
|||
# Mail setup |
|||
# What would you like to be mailed to you? |
|||
# - log : send only log file |
|||
# - files : send log file and sql files as attachments (see docs) |
|||
# - stdout : will simply output the log to the screen if run manually. |
|||
# - quiet : Only send logs if an error occurs to the MAILADDR. |
|||
MAILCONTENT="quiet" |
|||
|
|||
# Set the maximum allowed email size in k. (4000 = approx 5MB email [see docs]) |
|||
MAXATTSIZE="4000" |
|||
|
|||
# Email Address to send mail to? (user@domain.com) |
|||
MAILADDR="root" |
|||
|
|||
# ============================================================ |
|||
# === ADVANCED OPTIONS ( Read the doc's below for details )=== |
|||
#============================================================= |
|||
|
|||
# List of DBBNAMES for Monthly Backups. |
|||
MDBNAMES="$DBNAMES" |
|||
GLOBALS_OBJECTS_INCLUDE="no" |
|||
# List of DBNAMES to EXCLUDE if DBNAMES is set to all (must be in " quotes)
|||
DBEXCLUDE="postgres template1" |
|||
|
|||
# Include CREATE DATABASE in backup? |
|||
CREATE_DATABASE=yes |
|||
|
|||
# Separate backup directory and file for each DB? (yes or no) |
|||
SEPDIR=yes |
|||
|
|||
# Which day do you want weekly backups? (1 to 7 where 1 is Monday) |
|||
DOWEEKLY=6 |
|||
|
|||
# Choose Compression type. (gzip, bzip2 or xz) |
|||
COMP=gzip |
|||
|
|||
# Compress communications between backup server and PostgreSQL server? |
|||
# set compression level from 0 to 9 (0 means no compression) |
|||
COMMCOMP=0 |
|||
|
|||
# Additionally keep a copy of the most recent backup in a seperate directory. |
|||
LATEST=no |
|||
|
|||
# OPT string for use with pg_dump ( see man pg_dump ) |
|||
OPT="" |
|||
|
|||
# Backup files extension |
|||
EXT="sql" |
|||
|
|||
# Backup files permissions |
|||
PERM=600 |
|||
|
|||
# Encryption settings
|||
# (inspired by http://blog.altudov.com/2010/09/27/using-openssl-for-asymmetric-encryption-of-backups/) |
|||
# |
|||
# Once the backup done, each SQL dump will be encrypted and the original file |
|||
# will be deleted (if encryption was successful). |
|||
# It is recommended to backup into a staging directory, and then use the |
|||
# POSTBACKUP script to sync the encrypted files to the desired location. |
|||
# |
|||
# Encryption uses private/public keys. You can generate the key pairs like the following: |
|||
# openssl req -x509 -nodes -days 100000 -newkey rsa:2048 -keyout backup.key -out backup.crt -subj '/' |
|||
# |
|||
# Decryption: |
|||
# openssl smime -decrypt -in backup.sql.gz.enc -binary -inform DEM -inkey backup.key -out backup.sql.gz |
|||
|
|||
# Enable encryption |
|||
ENCRYPTION=no |
|||
|
|||
# Encryption public key |
|||
ENCRYPTION_PUBLIC_KEY="/etc/ssl/certs/autopostgresqlbackup.crt" |
|||
|
|||
# Encryption Cipher (see enc manpage) |
|||
ENCRYPTION_CIPHER="aes256" |
|||
|
|||
# Suffix for encrypted files
|||
ENCRYPTION_SUFFIX=".enc" |
|||
|
|||
# Command to run before backups (uncomment to use) |
|||
#PREBACKUP="/etc/postgresql-backup-pre" |
|||
|
|||
# Command run after backups (uncomment to use) |
|||
#POSTBACKUP="/etc/postgresql-backup-post" |
@ -0,0 +1,666 @@ |
|||
#!/bin/bash |
|||
# |
|||
# PostgreSQL Backup Script Ver 1.0 |
|||
# http://autopgsqlbackup.frozenpc.net |
|||
# Copyright (c) 2005 Aaron Axelsen <axelseaa@amadmax.com> |
|||
# 2005 Friedrich Lobenstock <fl@fl.priv.at> |
|||
# 2013-2019 Emmanuel Bouthenot <kolter@openics.org> |
|||
# |
|||
# This program is free software; you can redistribute it and/or modify |
|||
# it under the terms of the GNU General Public License as published by |
|||
# the Free Software Foundation; either version 2 of the License, or |
|||
# (at your option) any later version. |
|||
# |
|||
# This program is distributed in the hope that it will be useful, |
|||
# but WITHOUT ANY WARRANTY; without even the implied warranty of |
|||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|||
# GNU General Public License for more details. |
|||
# |
|||
# You should have received a copy of the GNU General Public License |
|||
# along with this program; if not, write to the Free Software |
|||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
|||
# |
|||
#===================================================================== |
|||
# Set the following variables to your system needs |
|||
# (Detailed instructions below variables) |
|||
#===================================================================== |
|||
|
|||
# Username to access the PostgreSQL server e.g. dbuser |
|||
USERNAME=postgres |
|||
|
|||
# Password |
|||
# create a file $HOME/.pgpass containing a line like this |
|||
# hostname:*:*:dbuser:dbpass |
|||
# replace hostname with the value of DBHOST and postgres with |
|||
# the value of USERNAME |
|||
|
|||
# Host name (or IP address) of PostgreSQL server e.g localhost |
|||
DBHOST=localhost |
|||
|
|||
# List of DBNAMES for Daily/Weekly Backup e.g. "DB1 DB2 DB3" |
|||
DBNAMES="all" |
|||
|
|||
# pseudo database name used to dump global objects (users, roles, tablespaces) |
|||
GLOBALS_OBJECTS="postgres_globals" |
|||
|
|||
# Backup directory location e.g /backups |
|||
BACKUPDIR="/backups" |
|||
GLOBALS_OBJECTS_INCLUDE="yes" |
|||
|
|||
# Mail setup |
|||
# What would you like to be mailed to you? |
|||
# - log : send only log file |
|||
# - files : send log file and sql files as attachments (see docs) |
|||
# - stdout : will simply output the log to the screen if run manually. |
|||
# - quiet : Only send logs if an error occurs to the MAILADDR. |
|||
MAILCONTENT="stdout" |
|||
|
|||
# Set the maximum allowed email size in k. (4000 = approx 5MB email [see docs]) |
|||
MAXATTSIZE="4000" |
|||
|
|||
# Email Address to send mail to? (user@domain.com) |
|||
MAILADDR="user@domain.com" |
|||
|
|||
# ============================================================ |
|||
# === ADVANCED OPTIONS ( Read the doc's below for details )=== |
|||
#============================================================= |
|||
|
|||
# List of DBBNAMES for Monthly Backups. |
|||
MDBNAMES="template1 $DBNAMES" |
|||
|
|||
# List of DBNAMES to EXCLUDE if DBNAMES is set to all (must be in " quotes)
|||
DBEXCLUDE="" |
|||
|
|||
# Include CREATE DATABASE in backup? |
|||
CREATE_DATABASE=yes |
|||
|
|||
# Separate backup directory and file for each DB? (yes or no) |
|||
SEPDIR=yes |
|||
|
|||
# Which day do you want weekly backups? (1 to 7 where 1 is Monday) |
|||
DOWEEKLY=6 |
|||
|
|||
# Choose Compression type. (gzip, bzip2 or xz) |
|||
COMP=gzip |
|||
|
|||
# Compress communications between backup server and PostgreSQL server? |
|||
# set compression level from 0 to 9 (0 means no compression) |
|||
COMMCOMP=0 |
|||
|
|||
# Additionally keep a copy of the most recent backup in a seperate directory. |
|||
LATEST=no |
|||
|
|||
# OPT string for use with pg_dump ( see man pg_dump ) |
|||
OPT="" |
|||
|
|||
# Backup files extension |
|||
EXT="sql" |
|||
|
|||
# Backup files permissions |
|||
PERM=600 |
|||
|
|||
# Encryption settings
|||
# (inspired by http://blog.altudov.com/2010/09/27/using-openssl-for-asymmetric-encryption-of-backups/) |
|||
# |
|||
# Once the backup done, each SQL dump will be encrypted and the original file |
|||
# will be deleted (if encryption was successful). |
|||
# It is recommended to backup into a staging directory, and then use the |
|||
# POSTBACKUP script to sync the encrypted files to the desired location. |
|||
# |
|||
# Encryption uses private/public keys. You can generate the key pairs like the following: |
|||
# openssl req -x509 -nodes -days 100000 -newkey rsa:2048 -keyout backup.key -out backup.crt -subj '/' |
|||
# |
|||
# Decryption: |
|||
# openssl smime -decrypt -in backup.sql.gz.enc -binary -inform DEM -inkey backup.key -out backup.sql.gz |
|||
|
|||
# Enable encryption |
|||
ENCRYPTION=no |
|||
|
|||
# Encryption public key |
|||
ENCRYPTION_PUBLIC_KEY="" |
|||
|
|||
# Encryption Cipher (see enc manpage) |
|||
ENCRYPTION_CIPHER="aes256" |
|||
|
|||
# Suffix for encrypted files
|||
ENCRYPTION_SUFFIX=".enc" |
|||
|
|||
# Command to run before backups (uncomment to use) |
|||
#PREBACKUP="/etc/postgresql-backup-pre" |
|||
|
|||
# Command run after backups (uncomment to use) |
|||
#POSTBACKUP="/etc/postgresql-backup-post" |
|||
|
|||
#===================================================================== |
|||
# Debian specific options === |
|||
#===================================================================== |
|||
|
|||
if [ -f /etc/default/autopostgresqlbackup ]; then |
|||
. /etc/default/autopostgresqlbackup |
|||
fi |
|||
|
|||
#===================================================================== |
|||
# Options documentation |
|||
#===================================================================== |
|||
# Set USERNAME and PASSWORD of a user that has at least SELECT permission |
|||
# to ALL databases. |
|||
# |
|||
# Set the DBHOST option to the server you wish to backup, leave the |
|||
# default to backup "this server".(to backup multiple servers make |
|||
# copies of this file and set the options for that server) |
|||
# |
|||
# Put in the list of DBNAMES(Databases)to be backed up. If you would like |
|||
# to backup ALL DBs on the server set DBNAMES="all".(if set to "all" then |
|||
# any new DBs will automatically be backed up without needing to modify |
|||
# this backup script when a new DB is created). |
|||
# |
|||
# If the DB you want to backup has a space in the name replace the space |
|||
# with a % e.g. "data base" will become "data%base" |
|||
# NOTE: Spaces in DB names may not work correctly when SEPDIR=no. |
|||
# |
|||
# You can change the backup storage location from /backups to anything |
|||
# you like by using the BACKUPDIR setting.. |
|||
# |
|||
# The MAILCONTENT and MAILADDR options are pretty self explanatory, use
|||
# these to have the backup log mailed to you at any email address or multiple |
|||
# email addresses in a space seperated list. |
|||
# (If you set mail content to "log" you will require access to the "mail" program |
|||
# on your server. If you set this to "files" you will have to have mutt installed |
|||
# on your server. If you set it to "stdout" it will log to the screen if run from |
|||
# the console or to the cron job owner if run through cron. If you set it to "quiet" |
|||
# logs will only be mailed if there are errors reported. ) |
|||
# |
|||
# MAXATTSIZE sets the largest allowed email attachments total (all backup files) you |
|||
# want the script to send. This is the size before it is encoded to be sent as an email |
|||
# so if your mail server will allow a maximum mail size of 5MB I would suggest setting |
|||
# MAXATTSIZE to be 25% smaller than that so a setting of 4000 would probably be fine. |
|||
# |
|||
# Finally copy autopostgresqlbackup.sh to anywhere on your server and make sure |
|||
# to set executable permission. You can also copy the script to |
|||
# /etc/cron.daily to have it execute automatically every night or simply |
|||
# place a symlink in /etc/cron.daily to the file if you wish to keep it |
|||
# somwhere else. |
|||
# NOTE:On Debian copy the file with no extention for it to be run |
|||
# by cron e.g just name the file "autopostgresqlbackup" |
|||
# |
|||
# Thats it.. |
|||
# |
|||
# |
|||
# === Advanced options doc's === |
|||
# |
|||
# The list of MDBNAMES is the DB's to be backed up only monthly. You should |
|||
# always include "template1" in this list to backup the default database |
|||
# template used to create new databases. |
|||
# NOTE: If DBNAMES="all" then MDBNAMES has no effect as all DBs will be backed |
|||
# up anyway. |
|||
# |
|||
# If you set DBNAMES="all" you can configure the option DBEXCLUDE. Other |
|||
# wise this option will not be used. |
|||
# This option can be used if you want to backup all dbs, but you want |
|||
# exclude some of them. (eg. a db is to big). |
|||
# |
|||
# Set CREATE_DATABASE to "yes" (the default) if you want your SQL-Dump to create |
|||
# a database with the same name as the original database when restoring. |
|||
# Saying "no" here will allow your to specify the database name you want to |
|||
# restore your dump into, making a copy of the database by using the dump |
|||
# created with autopostgresqlbackup. |
|||
# NOTE: Not used if SEPDIR=no |
|||
# |
|||
# The SEPDIR option allows you to choose to have all DBs backed up to |
|||
# a single file (fast restore of entire server in case of crash) or to |
|||
# seperate directories for each DB (each DB can be restored seperately |
|||
# in case of single DB corruption or loss). |
|||
# |
|||
# To set the day of the week that you would like the weekly backup to happen |
|||
# set the DOWEEKLY setting, this can be a value from 1 to 7 where 1 is Monday, |
|||
# The default is 6 which means that weekly backups are done on a Saturday. |
|||
# |
|||
# COMP is used to choose the compression used, options are gzip, bzip2 or xz.
|||
# bzip2 will produce slightly smaller files but is more processor intensive so |
|||
# may take longer to complete. |
|||
# |
|||
# COMMCOMP is used to set the compression level (from 0 to 9, 0 means no compression) |
|||
# between the client and the server, so it is useful to save bandwidth when backing up |
|||
# a remote PostgresSQL server over the network. |
|||
# |
|||
# LATEST is to store an additional copy of the latest backup to a standard |
|||
# location so it can be downloaded by third party scripts.
|||
# |
|||
# Use PREBACKUP and POSTBACKUP to specify Per and Post backup commands |
|||
# or scripts to perform tasks either before or after the backup process. |
|||
# |
|||
# |
|||
#===================================================================== |
|||
# Backup Rotation.. |
|||
#===================================================================== |
|||
# |
|||
# Daily Backups are rotated weekly.. |
|||
# Weekly Backups are run by default on Saturday Morning when |
|||
# cron.daily scripts are run...Can be changed with DOWEEKLY setting.. |
|||
# Weekly Backups are rotated on a 5 week cycle.. |
|||
# Monthly Backups are run on the 1st of the month.. |
|||
# Monthly Backups are NOT rotated automatically... |
|||
# It may be a good idea to copy Monthly backups offline or to another |
|||
# server.. |
|||
# |
|||
#===================================================================== |
|||
# Please Note!! |
|||
#===================================================================== |
|||
# |
|||
# I take no responsibility for any data loss or corruption when using
|||
# this script.. |
|||
# This script will not help in the event of a hard drive crash. If a |
|||
# copy of the backup has not be stored offline or on another PC.. |
|||
# You should copy your backups offline regularly for best protection. |
|||
# |
|||
# Happy backing up... |
|||
# |
|||
#===================================================================== |
|||
# Restoring |
|||
#===================================================================== |
|||
# Firstly you will need to uncompress the backup file. |
|||
# eg. |
|||
# gunzip file.gz (or bunzip2 file.bz2) |
|||
# |
|||
# Next you will need to use the postgresql client to restore the DB from the |
|||
# sql file. |
|||
# eg. |
|||
# psql --host dbserver --dbname database < /path/file.sql |
|||
# |
|||
# NOTE: Make sure you use "<" and not ">" in the above command because |
|||
# you are piping the file.sql to psql and not the other way around. |
|||
# |
|||
# Lets hope you never have to use this.. :) |
|||
# |
|||
#===================================================================== |
|||
# Change Log |
|||
#===================================================================== |
|||
# |
|||
# VER 1.0 - (2005-03-25) |
|||
# Initial Release - based on AutoMySQLBackup 2.2 |
|||
# |
|||
#===================================================================== |
|||
#===================================================================== |
|||
#===================================================================== |
|||
# |
|||
# Should not need to be modified from here down!! |
|||
# |
|||
#===================================================================== |
|||
#===================================================================== |
|||
#===================================================================== |
|||
PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/postgres/bin:/usr/local/pgsql/bin |
|||
DATE=$(date +%Y-%m-%d_%Hh%Mm) # Datestamp e.g 2002-09-21 |
|||
DOW=$(date +%A) # Day of the week e.g. Monday |
|||
DNOW=$(date +%u) # Day number of the week 1 to 7 where 1 represents Monday |
|||
DOM=$(date +%d) # Date of the Month e.g. 27 |
|||
M=$(date +%B) # Month e.g January |
|||
W=$(date +%V) # Week Number e.g 37 |
|||
VER=1.0 # Version Number |
|||
LOGFILE=$BACKUPDIR/${DBHOST//\//_}-$(date +%N).log # Logfile Name |
|||
LOGERR=$BACKUPDIR/ERRORS_${DBHOST//\//_}-$(date +%N).log # Logfile Name |
|||
BACKUPFILES="" |
|||
|
|||
# Add --compress pg_dump option to $OPT |
|||
if [ "$COMMCOMP" -gt 0 ]; then |
|||
OPT="$OPT --compress=$COMMCOMP" |
|||
fi |
|||
|
|||
# Create required directories |
|||
if [ ! -e "$BACKUPDIR" ]; then # Check Backup Directory exists. |
|||
mkdir -p "$BACKUPDIR" |
|||
fi |
|||
|
|||
if [ ! -e "$BACKUPDIR/daily" ]; then # Check Daily Directory exists. |
|||
mkdir -p "$BACKUPDIR/daily" |
|||
fi |
|||
|
|||
if [ ! -e "$BACKUPDIR/weekly" ]; then # Check Weekly Directory exists. |
|||
mkdir -p "$BACKUPDIR/weekly" |
|||
fi |
|||
|
|||
if [ ! -e "$BACKUPDIR/monthly" ]; then # Check Monthly Directory exists. |
|||
mkdir -p "$BACKUPDIR/monthly" |
|||
fi |
|||
|
|||
if [ "$LATEST" = "yes" ]; then |
|||
if [ ! -e "$BACKUPDIR/latest" ]; then # Check Latest Directory exists. |
|||
mkdir -p "$BACKUPDIR/latest" |
|||
fi |
|||
rm -f "$BACKUPDIR"/latest/* |
|||
fi |
|||
|
|||
# IO redirection for logging. |
|||
touch $LOGFILE |
|||
exec 6>&1 # Link file descriptor #6 with stdout. |
|||
# Saves stdout. |
|||
exec >$LOGFILE # stdout replaced with file $LOGFILE. |
|||
touch $LOGERR |
|||
exec 7>&2 # Link file descriptor #7 with stderr. |
|||
# Saves stderr. |
|||
exec 2>$LOGERR # stderr replaced with file $LOGERR. |
|||
|
|||
# Functions |
|||
|
|||
# Database dump function
# Dump each database listed in $1 (space separated) into file $2.
# The pseudo database $GLOBALS_OBJECTS is dumped via pg_dumpall
# --globals-only (roles, tablespaces); everything else via pg_dump.
# When SU_USERNAME is set (Debian peer auth), commands run through su.
# $PGHOST stays unquoted on purpose: it is either empty or "-h host".
dbdump() {
    # Quote the output path: DB names may contain spaces (see docs above).
    rm -f "$2"
    touch "$2"
    chmod $PERM "$2"
    for db in $1; do
        if [ -n "$SU_USERNAME" ]; then
            if [ "$db" = "$GLOBALS_OBJECTS" ]; then
                su $SU_USERNAME -l -c "pg_dumpall $PGHOST --globals-only" >>"$2"
            else
                su $SU_USERNAME -l -c "pg_dump $PGHOST $OPT $db" >>"$2"
            fi
        else
            if [ "$db" = "$GLOBALS_OBJECTS" ]; then
                pg_dumpall --username=$USERNAME $PGHOST --globals-only >>"$2"
            else
                pg_dump --username=$USERNAME $PGHOST $OPT $db >>"$2"
            fi
        fi
    done
    return 0
}
|||
|
|||
# Encryption function
# When ENCRYPTION=yes, encrypt file $1 with openssl smime using the
# configured cipher and public certificate, producing $1$ENCRYPTION_SUFFIX;
# the plaintext is removed only if openssl succeeded. No-op otherwise.
encryption() {
    ENCRYPTED_FILE="$1$ENCRYPTION_SUFFIX"
    # Encrypt as needed
    if [ "$ENCRYPTION" = "yes" ]; then
        echo
        echo "Encrypting $1"
        echo " to $ENCRYPTED_FILE"
        echo " using cypher $ENCRYPTION_CIPHER and public key $ENCRYPTION_PUBLIC_KEY"
        if openssl smime -encrypt -$ENCRYPTION_CIPHER -binary -outform DEM \
            -out "$ENCRYPTED_FILE" \
            -in "$1" "$ENCRYPTION_PUBLIC_KEY"; then
            echo " and remove $1"
            # Encrypted copy inherits the configured backup file permissions.
            chmod $PERM "$ENCRYPTED_FILE"
            rm -f "$1"
        fi
    fi
    return 0
}
|||
|
|||
# Compression (and encrypt) function plus latest copy
# Compress file $1 in place according to $COMP (gzip/bzip2/xz), setting
# the global SUFFIX to the produced extension, then hand the result to
# encryption() and optionally copy it into the "latest" directory.
SUFFIX=""
compression() {
    if [ "$COMP" = "gzip" ]; then
        gzip -f "$1"
        echo
        echo Backup Information for "$1"
        gzip -l "$1.gz"
        SUFFIX=".gz"
    elif [ "$COMP" = "bzip2" ]; then
        echo Compression information for "$1.bz2"
        # Quote the target: DB names may contain spaces (see docs above).
        bzip2 -f -v "$1" 2>&1
        SUFFIX=".bz2"
    elif [ "$COMP" = "xz" ]; then
        echo Compression information for "$1.xz"
        xz -9 -v "$1" 2>&1
        SUFFIX=".xz"
    else
        echo "No compression option set, check advanced settings"
    fi
    encryption "$1$SUFFIX"
    if [ "$LATEST" = "yes" ]; then
        # Keep the trailing * unquoted so the encrypted variant matches too.
        cp "$1$SUFFIX"* "$BACKUPDIR/latest/"
    fi
    return 0
}
|||
|
|||
# Run command before we begin |
|||
if [ "$PREBACKUP" ]; then |
|||
echo ====================================================================== |
|||
echo "Prebackup command output." |
|||
echo |
|||
$PREBACKUP |
|||
echo |
|||
echo ====================================================================== |
|||
echo |
|||
fi |
|||
|
|||
if [ "$SEPDIR" = "yes" ]; then # Check if CREATE DATABSE should be included in Dump |
|||
if [ "$CREATE_DATABASE" = "no" ]; then |
|||
OPT="$OPT" |
|||
else |
|||
OPT="$OPT --create" |
|||
fi |
|||
else |
|||
OPT="$OPT" |
|||
fi |
|||
|
|||
# Hostname for LOG information |
|||
if [ "$DBHOST" = "localhost" ]; then |
|||
HOST=$(hostname) |
|||
PGHOST="" |
|||
else |
|||
HOST=$DBHOST |
|||
PGHOST="-h $DBHOST" |
|||
fi |
|||
|
|||
# If backing up all DBs on the server |
|||
if [ "$DBNAMES" = "all" ]; then |
|||
if [ -n "$SU_USERNAME" ]; then |
|||
DBNAMES="$(su $SU_USERNAME -l -c "LANG=C psql -U $USERNAME $PGHOST -l -A -F: | sed -ne '/:/ { /Name:Owner/d; /template0/d; s/:.*$//; p }'")" |
|||
else |
|||
DBNAMES="$(LANG=C psql -U $USERNAME $PGHOST -l -A -F: | sed -ne "/:/ { /Name:Owner/d; /template0/d; s/:.*$//; p }")" |
|||
fi |
|||
|
|||
# If DBs are excluded |
|||
for exclude in $DBEXCLUDE; do |
|||
DBNAMES=$(echo $DBNAMES | sed "s/\b$exclude\b//g") |
|||
done |
|||
DBNAMES="$(echo $DBNAMES | tr '\n' ' ')" |
|||
MDBNAMES=$DBNAMES |
|||
fi |
|||
|
|||
# Include global objects (users, tablespaces) |
|||
if [ "$GLOBALS_OBJECTS_INCLUDE" = "yes" ]; then |
|||
DBNAMES="$GLOBALS_OBJECTS $DBNAMES" |
|||
MDBNAMES="$GLOBALS_OBJECTS $MDBNAMES" |
|||
fi |
|||
|
|||
echo ====================================================================== |
|||
echo AutoPostgreSQLBackup VER $VER |
|||
echo http://autopgsqlbackup.frozenpc.net/ |
|||
echo |
|||
echo Backup of Database Server - $HOST |
|||
echo ====================================================================== |
|||
|
|||
# Test is seperate DB backups are required |
|||
if [ "$SEPDIR" = "yes" ]; then |
|||
echo Backup Start Time $(date) |
|||
echo ====================================================================== |
|||
# Monthly Full Backup of all Databases |
|||
if [ "$DOM" = "01" ]; then |
|||
for MDB in $MDBNAMES; do |
|||
|
|||
# Prepare $DB for using |
|||
MDB="$(echo $MDB | sed 's/%/ /g')" |
|||
|
|||
if [ ! -e "$BACKUPDIR/monthly/$MDB" ]; then # Check Monthly DB Directory exists. |
|||
mkdir -p "$BACKUPDIR/monthly/$MDB" |
|||
fi |
|||
echo Monthly Backup of $MDB... |
|||
dbdump "$MDB" "$BACKUPDIR/monthly/$MDB/${MDB}_$DATE.$M.$MDB.$EXT" |
|||
compression "$BACKUPDIR/monthly/$MDB/${MDB}_$DATE.$M.$MDB.$EXT" |
|||
BACKUPFILES="$BACKUPFILES $BACKUPDIR/monthly/$MDB/${MDB}_$DATE.$M.$MDB.$EXT$SUFFIX*" |
|||
echo ---------------------------------------------------------------------- |
|||
done |
|||
fi |
|||
|
|||
for DB in $DBNAMES; do |
|||
# Prepare $DB for using |
|||
DB="$(echo $DB | sed 's/%/ /g')" |
|||
|
|||
# Create Seperate directory for each DB |
|||
if [ ! -e "$BACKUPDIR/daily/$DB" ]; then # Check Daily DB Directory exists. |
|||
mkdir -p "$BACKUPDIR/daily/$DB" |
|||
fi |
|||
|
|||
if [ ! -e "$BACKUPDIR/weekly/$DB" ]; then # Check Weekly DB Directory exists. |
|||
mkdir -p "$BACKUPDIR/weekly/$DB" |
|||
fi |
|||
|
|||
# Weekly Backup |
|||
if [ "$DNOW" = "$DOWEEKLY" ]; then |
|||
echo Weekly Backup of Database \( $DB \) |
|||
echo Rotating 5 weeks Backups... |
|||
if [ "$W" -le 05 ]; then |
|||
REMW=$(expr 48 + $W) |
|||
elif [ "$W" -lt 15 ]; then |
|||
REMW=0$(expr $W - 5) |
|||
else |
|||
REMW=$(expr $W - 5) |
|||
fi |
|||
rm -fv "$BACKUPDIR/weekly/$DB/${DB}_week.$REMW".* |
|||
echo |
|||
dbdump "$DB" "$BACKUPDIR/weekly/$DB/${DB}_week.$W.$DATE.$EXT" |
|||
compression "$BACKUPDIR/weekly/$DB/${DB}_week.$W.$DATE.$EXT" |
|||
BACKUPFILES="$BACKUPFILES $BACKUPDIR/weekly/$DB/${DB}_week.$W.$DATE.$EXT$SUFFIX*" |
|||
echo ---------------------------------------------------------------------- |
|||
|
|||
# Daily Backup |
|||
else |
|||
echo Daily Backup of Database \( $DB \) |
|||
echo Rotating last weeks Backup... |
|||
rm -fv "$BACKUPDIR/daily/$DB"/*."$DOW".$EXT* |
|||
echo |
|||
dbdump "$DB" "$BACKUPDIR/daily/$DB/${DB}_$DATE.$DOW.$EXT" |
|||
compression "$BACKUPDIR/daily/$DB/${DB}_$DATE.$DOW.$EXT" |
|||
BACKUPFILES="$BACKUPFILES $BACKUPDIR/daily/$DB/${DB}_$DATE.$DOW.$EXT$SUFFIX*" |
|||
echo ---------------------------------------------------------------------- |
|||
fi |
|||
done |
|||
echo Backup End $(date) |
|||
echo ====================================================================== |
|||
|
|||
else |
|||
# One backup file for all DBs |
|||
echo Backup Start $(date) |
|||
echo ====================================================================== |
|||
# Monthly Full Backup of all Databases |
|||
if [ "$DOM" = "01" ]; then |
|||
echo Monthly full Backup of \( $MDBNAMES \)... |
|||
dbdump "$MDBNAMES" "$BACKUPDIR/monthly/$DATE.$M.all-databases.$EXT" |
|||
compression "$BACKUPDIR/monthly/$DATE.$M.all-databases.$EXT" |
|||
BACKUPFILES="$BACKUPFILES $BACKUPDIR/monthly/$DATE.$M.all-databases.$EXT$SUFFIX*" |
|||
echo ---------------------------------------------------------------------- |
|||
fi |
|||
|
|||
# Weekly Backup |
|||
if [ "$DNOW" = "$DOWEEKLY" ]; then |
|||
echo Weekly Backup of Databases \( $DBNAMES \) |
|||
echo |
|||
echo Rotating 5 weeks Backups... |
|||
if [ "$W" -le 05 ]; then |
|||
REMW=$(expr 48 + $W) |
|||
elif [ "$W" -lt 15 ]; then |
|||
REMW=0$(expr $W - 5) |
|||
else |
|||
REMW=$(expr $W - 5) |
|||
fi |
|||
rm -fv "$BACKUPDIR/weekly/week.$REMW".* |
|||
echo |
|||
dbdump "$DBNAMES" "$BACKUPDIR/weekly/week.$W.$DATE.$EXT" |
|||
compression "$BACKUPDIR/weekly/week.$W.$DATE.$EXT" |
|||
BACKUPFILES="$BACKUPFILES $BACKUPDIR/weekly/week.$W.$DATE.$EXT$SUFFIX*" |
|||
echo ---------------------------------------------------------------------- |
|||
# Daily Backup |
|||
else |
|||
echo Daily Backup of Databases \( $DBNAMES \) |
|||
echo |
|||
echo Rotating last weeks Backup... |
|||
rm -fv "$BACKUPDIR"/daily/*."$DOW".$EXT* |
|||
echo |
|||
dbdump "$DBNAMES" "$BACKUPDIR/daily/$DATE.$DOW.$EXT" |
|||
compression "$BACKUPDIR/daily/$DATE.$DOW.$EXT" |
|||
BACKUPFILES="$BACKUPFILES $BACKUPDIR/daily/$DATE.$DOW.$EXT$SUFFIX*" |
|||
echo ---------------------------------------------------------------------- |
|||
fi |
|||
echo Backup End Time $(date) |
|||
echo ====================================================================== |
|||
fi |
|||
echo Total disk space used for backup storage.. |
|||
echo Size - Location |
|||
echo $(du -hs "$BACKUPDIR") |
|||
echo |
|||
|
|||
# Run command when we're done |
|||
if [ "$POSTBACKUP" ]; then |
|||
echo ====================================================================== |
|||
echo "Postbackup command output." |
|||
echo |
|||
$POSTBACKUP |
|||
echo |
|||
echo ====================================================================== |
|||
fi |
|||
|
|||
#Clean up IO redirection |
|||
exec 1>&6 6>&- # Restore stdout and close file descriptor #6. |
|||
exec 2>&7 7>&- # Restore stdout and close file descriptor #7. |
|||
|
|||
if [ "$MAILCONTENT" = "files" ]; then |
|||
if [ -s "$LOGERR" ]; then |
|||
# Include error log if is larger than zero. |
|||
BACKUPFILES="$BACKUPFILES $LOGERR" |
|||
ERRORNOTE="WARNING: Error Reported - " |
|||
fi |
|||
#Get backup size |
|||
ATTSIZE=$(du -c $BACKUPFILES | grep "[[:digit:][:space:]]total$" | sed s/\s*total//) |
|||
if [ $MAXATTSIZE -ge $ATTSIZE ]; then |
|||
if which biabam >/dev/null 2>&1; then |
|||
BACKUPFILES=$(echo $BACKUPFILES | sed -r -e 's#\s+#,#g') |
|||
biabam -s "PostgreSQL Backup Log and SQL Files for $HOST - $DATE" $BACKUPFILES $MAILADDR <$LOGFILE |
|||
elif which heirloom-mailx >/dev/null 2>&1; then |
|||
BACKUPFILES=$(echo $BACKUPFILES | sed -e 's# # -a #g') |
|||
heirloom-mailx -s "PostgreSQL Backup Log and SQL Files for $HOST - $DATE" $BACKUPFILES $MAILADDR <$LOGFILE |
|||
elif which neomutt >/dev/null 2>&1; then |
|||
BACKUPFILES=$(echo $BACKUPFILES | sed -e 's# # -a #g') |
|||
neomutt -s "PostgreSQL Backup Log and SQL Files for $HOST - $DATE" -a $BACKUPFILES -- $MAILADDR <$LOGFILE |
|||
elif which mutt >/dev/null 2>&1; then |
|||
BACKUPFILES=$(echo $BACKUPFILES | sed -e 's# # -a #g') |
|||
mutt -s "PostgreSQL Backup Log and SQL Files for $HOST - $DATE" -a $BACKUPFILES -- $MAILADDR <$LOGFILE |
|||
else |
|||
cat "$LOGFILE" | mail -s "WARNING! - Enable to send PostgreSQL Backup dumps, no suitable mail client found on $HOST - $DATE" $MAILADDR |
|||
fi |
|||
else |
|||
cat "$LOGFILE" | mail -s "WARNING! - PostgreSQL Backup exceeds set maximum attachment size on $HOST - $DATE" $MAILADDR |
|||
fi |
|||
elif [ "$MAILCONTENT" = "log" ]; then |
|||
cat "$LOGFILE" | mail -s "PostgreSQL Backup Log for $HOST - $DATE" $MAILADDR |
|||
if [ -s "$LOGERR" ]; then |
|||
cat "$LOGERR" | mail -s "ERRORS REPORTED: PostgreSQL Backup error Log for $HOST - $DATE" $MAILADDR |
|||
fi |
|||
elif [ "$MAILCONTENT" = "quiet" ]; then |
|||
if [ -s "$LOGERR" ]; then |
|||
cat "$LOGERR" | mail -s "ERRORS REPORTED: PostgreSQL Backup error Log for $HOST - $DATE" $MAILADDR |
|||
cat "$LOGFILE" | mail -s "PostgreSQL Backup Log for $HOST - $DATE" $MAILADDR |
|||
fi |
|||
else |
|||
if [ -s "$LOGERR" ]; then |
|||
cat "$LOGFILE" |
|||
echo |
|||
echo "###### WARNING ######" |
|||
echo "Errors reported during AutoPostgreSQLBackup execution.. Backup failed" |
|||
echo "Error log below.." |
|||
cat "$LOGERR" |
|||
else |
|||
cat "$LOGFILE" |
|||
fi |
|||
fi |
|||
|
|||
if [ -s "$LOGERR" ]; then |
|||
STATUS=1 |
|||
else |
|||
STATUS=0 |
|||
fi |
|||
|
|||
# Clean up Logfile |
|||
rm -f "$LOGFILE" |
|||
rm -f "$LOGERR" |
|||
|
|||
exit $STATUS |
@ -0,0 +1,162 @@ |
|||
[flags] |
|||
# Whether to hide the average cpu entry. |
|||
#hide_avg_cpu = false |
|||
# Whether to use dot markers rather than braille. |
|||
#dot_marker = false |
|||
# The update rate of the application. |
|||
#rate = 1000 |
|||
# Whether to put the CPU legend to the left. |
|||
#left_legend = false |
|||
# Whether to set CPU% on a process to be based on the total CPU or just current usage. |
|||
#current_usage = false |
|||
# Whether to group processes with the same name together by default. |
|||
#group_processes = false |
|||
# Whether to make process searching case sensitive by default. |
|||
#case_sensitive = false |
|||
# Whether to make process searching look for matching the entire word by default. |
|||
#whole_word = false |
|||
# Whether to make process searching use regex by default. |
|||
#regex = false |
|||
# Defaults to Celsius. Temperature is one of: |
|||
#temperature_type = "k" |
|||
#temperature_type = "f" |
|||
#temperature_type = "c" |
|||
#temperature_type = "kelvin" |
|||
#temperature_type = "fahrenheit" |
|||
#temperature_type = "celsius" |
|||
# The default time interval (in milliseconds). |
|||
#default_time_value = 60000 |
|||
# The time delta on each zoom in/out action (in milliseconds). |
|||
#time_delta = 15000 |
|||
# Hides the time scale. |
|||
#hide_time = false |
|||
# Override layout default widget |
|||
#default_widget_type = "proc" |
|||
#default_widget_count = 1 |
|||
# Use basic mode |
|||
#basic = false |
|||
# Use the old network legend style |
|||
#use_old_network_legend = false |
|||
# Remove space in tables |
|||
#hide_table_gap = false |
|||
# Show the battery widgets |
|||
#battery = false |
|||
# Disable mouse clicks |
|||
#disable_click = false |
|||
# Built-in themes. Valid values are "default", "default-light", "gruvbox", "gruvbox-light", "nord", "nord-light" |
|||
#color = "default" |
|||
# Show memory values in the processes widget as values by default |
|||
#mem_as_value = false |
|||
# Show tree mode by default in the processes widget. |
|||
#tree = false |
|||
# Shows an indicator in table widgets tracking where in the list you are. |
|||
#show_table_scroll_position = false |
|||
# Show processes as their commands by default in the process widget. |
|||
#process_command = false |
|||
# Displays the network widget with binary prefixes. |
|||
#network_use_binary_prefix = false |
|||
# Displays the network widget using bytes. |
|||
network_use_bytes = true |
|||
# Displays the network widget with a log scale. |
|||
#network_use_log = false |
|||
# Hides advanced options to stop a process on Unix-like systems. |
|||
#disable_advanced_kill = false |
|||
|
|||
# These are all the components that support custom theming. Note that colour support |
|||
# will depend on terminal support. |
|||
|
|||
#[colors] # Uncomment if you want to use custom colors |
|||
# Represents the colour of table headers (processes, CPU, disks, temperature). |
|||
#table_header_color="LightBlue" |
|||
# Represents the colour of the label each widget has. |
|||
#widget_title_color="Gray" |
|||
# Represents the average CPU color. |
|||
#avg_cpu_color="Red" |
|||
# Represents the colour the core will use in the CPU legend and graph. |
|||
#cpu_core_colors=["LightMagenta", "LightYellow", "LightCyan", "LightGreen", "LightBlue", "LightRed", "Cyan", "Green", "Blue", "Red"] |
|||
# Represents the colour RAM will use in the memory legend and graph. |
|||
#ram_color="LightMagenta" |
|||
# Represents the colour SWAP will use in the memory legend and graph. |
|||
#swap_color="LightYellow" |
|||
# Represents the colour rx will use in the network legend and graph. |
|||
#rx_color="LightCyan" |
|||
# Represents the colour tx will use in the network legend and graph. |
|||
#tx_color="LightGreen" |
|||
# Represents the colour of the border of unselected widgets. |
|||
#border_color="Gray" |
|||
# Represents the colour of the border of selected widgets. |
|||
#highlighted_border_color="LightBlue" |
|||
# Represents the colour of most text. |
|||
#text_color="Gray" |
|||
# Represents the colour of text that is selected. |
|||
#selected_text_color="Black" |
|||
# Represents the background colour of text that is selected. |
|||
#selected_bg_color="LightBlue" |
|||
# Represents the colour of the lines and text of the graph. |
|||
#graph_color="Gray" |
|||
# Represents the colours of the battery based on charge |
|||
#high_battery_color="green" |
|||
#medium_battery_color="yellow" |
|||
#low_battery_color="red" |
|||
|
|||
# Layout - layouts follow a pattern like this: |
|||
# [[row]] represents a row in the application. |
|||
# [[row.child]] represents either a widget or a column. |
|||
# [[row.child.child]] represents a widget. |
|||
# |
|||
# All widgets must have the type value set to one of ["cpu", "mem", "proc", "net", "temp", "disk", "empty"]. |
|||
# All layout components have a ratio value - if this is not set, then it defaults to 1. |
|||
# The default widget layout: |
|||
[[row]] |
|||
ratio=30 |
|||
[[row.child]] |
|||
type="cpu" |
|||
|
|||
[[row]] |
|||
ratio=40 |
|||
[[row.child]] |
|||
ratio=4 |
|||
type="mem" |
|||
[[row.child]] |
|||
ratio=3 |
|||
[[row.child.child]] |
|||
type="disk" |
|||
|
|||
[[row]] |
|||
ratio=30 |
|||
[[row.child]] |
|||
type="net" |
|||
[[row.child]] |
|||
type="proc" |
|||
default=true |
|||
|
|||
|
|||
# Filters - you can hide specific temperature sensors, network interfaces, and disks using filters. This is admittedly |
|||
# a bit hard to use as of now, and there is a planned in-app interface for managing this in the future: |
|||
[disk_filter] |
|||
is_list_ignored = true |
|||
list = ["/dev/loop\\d+"] |
|||
regex = true |
|||
case_sensitive = false |
|||
whole_word = false |
|||
|
|||
#[mount_filter] |
|||
#is_list_ignored = true |
|||
#list = ["/mnt/.*", "/boot"] |
|||
#regex = true |
|||
#case_sensitive = false |
|||
#whole_word = false |
|||
|
|||
#[temp_filter] |
|||
#is_list_ignored = true |
|||
#list = ["cpu", "wifi"] |
|||
#regex = false |
|||
#case_sensitive = false |
|||
#whole_word = false |
|||
|
|||
#[net_filter] |
|||
#is_list_ignored = true |
|||
#list = ["virbr0.*"] |
|||
#regex = true |
|||
#case_sensitive = false |
|||
#whole_word = false |
@ -0,0 +1,14 @@ |
|||
Port 22 |
|||
AddressFamily inet |
|||
|
|||
AllowUsers {{ env.USERS }} |
|||
|
|||
AcceptEnv LANG LC_* |
|||
Subsystem sftp internal-sftp |
|||
UsePAM yes |
|||
PasswordAuthentication no |
|||
PermitRootLogin no |
|||
PrintLastLog no |
|||
PrintMotd no |
|||
ChallengeResponseAuthentication no |
|||
X11Forwarding no |
@ -0,0 +1,10 @@ |
|||
--- |
|||
containers: |
|||
dmz: [dmz] |
|||
ct1: [mariadb, postgresql] |
|||
credential: |
|||
username: {{env.current_user}} |
|||
shadow: {{env.shadow_passwd}} |
|||
email: TO BE DEFINED # example user@domain.tld |
|||
password: TO BE DEFINED |
|||
|
@ -0,0 +1,3 @@ |
|||
--- |
|||
services: [] |
|||
containers: [] |
@ -0,0 +1,26 @@ |
|||
--- |
|||
containers: |
|||
ct2: [ dokuwiki, dolibarr13 ] |
|||
services: |
|||
artcode.re: |
|||
compta: |
|||
app: dolibarr13 |
|||
port: 9001 |
|||
name: doli-artcode |
|||
enabled: false |
|||
pnrun.re: |
|||
wiki: |
|||
app: dokuwiki |
|||
port: 80 |
|||
name: doku-pnrun |
|||
enabled: false |
|||
original: |
|||
app: dokuwiki |
|||
port: 8001 |
|||
name: doku-first |
|||
enabled: false |
|||
comptoirduvrac.re: |
|||
gestion: |
|||
app: odoo12 |
|||
port: 6003 |
|||
name: cdv |
@ -0,0 +1,23 @@ |
|||
table inet firewall { |
|||
|
|||
chain input { |
|||
type filter hook input priority 0; policy drop; |
|||
|
|||
# established/related connections |
|||
ct state established,related accept |
|||
|
|||
# loopback + lxdbr0 interface |
|||
iifname lo accept |
|||
iifname lxdbr0 accept |
|||
|
|||
# icmp |
|||
icmp type echo-request accept |
|||
|
|||
# allow mDNS |
|||
udp dport mdns accept |
|||
|
|||
# allow SSH + GITEA + NGINX |
|||
tcp dport {22, 2222, 80, 443} accept |
|||
} |
|||
|
|||
} |
@ -0,0 +1,14 @@ |
|||
--- |
|||
|
|||
authorized: |
|||
pubkey: TO_BE_DEFINED |
|||
|
|||
alert: |
|||
to: TO_BE_DEFINED # example: mine@domain.tld |
|||
from: TO_BE_DEFINED # example: no-reply@domain.tld |
|||
smtp: |
|||
server: TO_BE_DEFINED # example: mail.domain.tld |
|||
username: TO_BE_DEFINED # example: postmaster@domain.tld |
|||
password: TO_BE_DEFINED |
|||
|
|||
timezone: # optional, example: UTC, Indian/Reunion, ... |
@ -0,0 +1,2 @@ |
|||
{{ env.current_user }}: root |
|||
root: {{ alert.to }} |
@ -0,0 +1,6 @@ |
|||
set ask askcc append dot save crt |
|||
#ignore Received Message-Id Resent-Message-Id Status Mail-From Return-Path Via Delivered-To |
|||
set mta=/usr/bin/msmtp |
|||
|
|||
alias {{ env.current_user }} root |
|||
alias root {{ alert.to }} |
@ -0,0 +1,26 @@ |
|||
# Set default values for all following accounts. |
|||
defaults |
|||
|
|||
# Use the mail submission port 587 instead of the SMTP port 25. |
|||
port 587 |
|||
|
|||
# Always use TLS. |
|||
tls on |
|||
|
|||
# Set a list of trusted CAs for TLS. The default is to use system settings, but |
|||
# you can select your own file. |
|||
tls_trust_file /etc/ssl/certs/ca-certificates.crt |
|||
|
|||
# The SMTP server of your ISP |
|||
account alert |
|||
host {{ alert.smtp.server }} |
|||
from {{ env.fqdn }} <{{ alert.from }}> |
|||
auth on |
|||
user {{ alert.smtp.username }} |
|||
password {{ alert.smtp.password }} |
|||
|
|||
# Set default account to alert
|||
account default: alert |
|||
|
|||
# Map local users to mail addresses |
|||
aliases /etc/aliases |
@ -0,0 +1,31 @@ |
|||
#!/bin/bash
# Print a large colored figlet banner for the current host.
# Color encodes the environment: prod = red, beta = orange, dev/other = green.

hostname=$(hostname -s)
# split trailing digits off the short hostname (e.g. "prod2" -> "prod" + "2")
number=$(echo $hostname | grep -oP '[0-9]*$')
hostname=${hostname%"$number"}
rows=9

case $hostname in
'prod')
	#print in RED
	echo -ne "\033[31;1m"
	;;
'beta')
	rows=7
	#print in ORANGE
	echo -ne "\033[33;1m"
	;;
'dev')
	rows=7
	#print in GREEN
	# FIX: was "\033[33;1m" (orange, same as beta), contradicting the
	# comment above and the green used by the default case.
	echo -ne "\033[32;1m"
	;;
*)
	#print in GREEN
	echo -ne "\033[32;1m"
	;;
esac

fullname="$hostname $number"
# truncate the banner so shorter environments fit their reduced row budget
figlet -f big "$fullname" | head -n$rows
echo -ne "\033[0m"
@ -0,0 +1,15 @@ |
|||
#!/bin/bash
# One-screen system summary: identity, uptime, network address, distro,
# kernel and current load average.

fqdn=$(hostname --fqdn)
ip_addr=$(hostname -I | cut -d ' ' -f1)
distro=$(lsb_release -d | cut -f2)
kernel=$(uname -srm)
# parse `uptime` output into days (handles "min", "day(s)" and "h:m" forms)
up_days=$(uptime | awk -F'( |,|:)+' '{if ($7=="min") m=$6; else {if ($7~/^day/) {d=$6;h=$8;m=$9} else {h=$6;m=$7}}} {print d+0,"days"}')
load=$(cat /proc/loadavg)

printf 'FQDN : %s\n' "$fqdn"
printf 'UPTIME: %s\n' "$up_days"
printf 'IPADDR: %s\n' "$ip_addr"
printf 'DISTRO: %s\n' "$distro"
printf 'KERNEL: %s\n' "$kernel"
printf 'LOAD : %s\n' "$load"
@ -0,0 +1,15 @@ |
|||
#!/bin/bash
# Warn the current user when other interactive sessions are open on this host.

red='\033[0;31m'
reset='\033[0m' # No Color

# `w -uh` lists active sessions without a header; empty output means we are alone
sessions=$(w -uh)

# nothing to report — exit quietly
[ -z "$sessions" ] && exit 0

echo '-----------------------------------------------'
echo -e "${red}Beware,${reset} there is another connected user${red}"
echo "$sessions"
echo -e "${reset}-----------------------------------------------"
@ -0,0 +1,5 @@ |
|||
#!/usr/sbin/nft -f |
|||
|
|||
flush ruleset |
|||
|
|||
include "/etc/nftables.rules.d/*" |
@ -0,0 +1,17 @@ |
|||
#!/bin/bash
# PAM session hook: e-mail root whenever an SSH login succeeds using a
# *password* (key-based logins are not reported). Intended to be wired in
# via pam_exec in the sshd PAM stack.

# React only when a session is being opened, never on close.
[[ "$PAM_TYPE" != "open_session" ]] && exit 0

# Scan the last minute of sshd journal entries, newest first, keep the most
# recent "Accepted ..." line and check it mentions password auth. Note the
# final grep is NOT -q, so the matched log line is also echoed to stdout.
if journalctl --since "1 minute ago" -u ssh | tac | grep Accepted -m1 | grep password; then
	# Compose the alert body from the PAM_* environment pam_exec provides.
	{
		echo "User: $PAM_USER"
		echo "Remote Host: $PAM_RHOST"
		echo "Service: $PAM_SERVICE"
		echo "TTY: $PAM_TTY"
		echo "Date: $(date)"
		echo "Server: $(uname -a)"
		echo
		echo "Somebody has successfully logged in your machine, please be aware and acknowledge this event."
	} | mail -s "$PAM_SERVICE login on $(hostname -f) for account $PAM_USER" root
fi
# Always succeed so PAM never blocks the login because of this hook.
exit 0
@ -0,0 +1,16 @@ |
|||
Port 2222 |
|||
AllowUsers {{env.current_user}} |
|||
|
|||
AcceptEnv LANG LC_* |
|||
Subsystem sftp /usr/lib/openssh/sftp-server |
|||
ClientAliveInterval 120 |
|||
UsePAM yes |
|||
MaxAuthTries 3 |
|||
|
|||
PasswordAuthentication no |
|||
PermitRootLogin no |
|||
PermitEmptyPasswords no |
|||
PrintLastLog no |
|||
PrintMotd no |
|||
ChallengeResponseAuthentication no |
|||
X11Forwarding no |
@ -0,0 +1,6 @@ |
|||
Defaults env_reset |
|||
Defaults mail_badpass |
|||
Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/TOOLBOX" |
|||
|
|||
User_Alias ROOT = root, {{env.current_user}} |
|||
ROOT ALL=(ALL:ALL) NOPASSWD: ALL |
@ -0,0 +1,12 @@ |
|||
[Unit] |
|||
Description=Startup Script |
|||
After=network-online.target |
|||
Wants=network-online.target |
|||
|
|||
[Service] |
|||
ExecStartPre=/bin/sleep 10 |
|||
ExecStart=/bin/bash -c "last -wad | mail -s 'server has been rebooted' root" |
|||
RemainAfterExit=yes |
|||
|
|||
[Install] |
|||
WantedBy=multi-user.target |
@ -0,0 +1,7 @@ |
|||
{% for container in expanded.monitored.containers -%} |
|||
check program {{ container }}.running |
|||
with path "/root/lxc-is-running {{ container }}" |
|||
depends on bridge |
|||
if status != 0 then alert |
|||
|
|||
{% endfor -%} |
@ -0,0 +1,6 @@ |
|||
{% for host in expanded.monitored.hosts -%} |
|||
check host {{ host.container }}.{{ host.port }} with address {{ host.container }}.lxd |
|||
depends on {{ host.container }}.running |
|||
if failed port {{ host.port }} protocol http for 2 cycles then alert |
|||
|
|||
{% endfor -%} |
@ -0,0 +1,27 @@ |
|||
#!/bin/bash
# NetworkManager dispatcher hook: when a connection comes up, point the host
# resolver at the LXD bridge (so container *.lxd names resolve) with the real
# default gateway as fallback. Arguments: $1 = interface, $2 = event.

if [[ "$2" == "up" ]]; then

	ACTIVE_CONNECTION=$(nmcli -g NAME connection show --active | head -n1)
	ACTIVE_DEVICE=$(nmcli -g DEVICE connection show --active | head -n1)
	BRIDGE=$(ip addr show lxdbr0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
	# FIX: filter for the default route BEFORE taking the first line; the
	# previous `head -n1 | grep default` produced an empty GATEWAY whenever
	# the default route was not the first line of `ip route` output.
	GATEWAY=$(ip route | grep default | head -n1 | cut -d' ' -f3)
	logger -t NetworkManager:Dispatcher -p info "on $ACTIVE_DEVICE:$ACTIVE_CONNECTION up , change resolver to $BRIDGE,$GATEWAY"
	nmcli device modify "$ACTIVE_DEVICE" ipv4.dns "$BRIDGE,$GATEWAY"

	# Verify the bridge really ended up as the first nameserver.
	if ! grep nameserver /etc/resolv.conf | head -n1 | grep -q "$BRIDGE"; then
		# sometimes, nmcli generates wrong order for nameserver in resolv.conf, therefore forcing connection settings must be applied!
		logger -t NetworkManager:Dispatcher -p info "on $ACTIVE_DEVICE:$ACTIVE_CONNECTION nameservers wrong order detected, therefore forcing connection settings must be applied"
		nmcli connection modify "$ACTIVE_CONNECTION" ipv4.ignore-auto-dns yes
		nmcli connection modify "$ACTIVE_CONNECTION" ipv4.dns "$BRIDGE,$GATEWAY"
		logger -t NetworkManager:Dispatcher -p info "on $ACTIVE_DEVICE:$ACTIVE_CONNECTION nameservers wrong order detected, connection reloaded now!"
		nmcli connection up "$ACTIVE_CONNECTION"
	else
		logger -t NetworkManager:Dispatcher -p info "on $ACTIVE_DEVICE:$ACTIVE_CONNECTION nameservers look fine"
	fi
elif [[ "$2" == "connectivity-change" ]]; then
	ACTIVE_DEVICE=$(nmcli -g DEVICE connection show --active | head -n1)
	logger -t NetworkManager:Dispatcher -p info "on $ACTIVE_DEVICE connectivity-change detected"
fi
@ -0,0 +1,35 @@ |
|||
table inet lxd { |
|||
chain pstrt.lxdbr0 { |
|||
type nat hook postrouting priority srcnat; policy accept; |
|||
|
|||
{%- if target != 'prod' %} |
|||
# BLOCK SMTP PORTS |
|||
tcp dport { 25, 465, 587 } ip saddr {{ firewall.bridge_subnet }} {%- if firewall.container_mail_passthrough %} ip saddr |
|||
!= {{ env.ip_mail_passthrough }} {% endif %} log prefix "Drop SMTP away from container: " drop |
|||
{% endif -%} |
|||
|
|||
ip saddr {{ firewall.bridge_subnet }} ip daddr != {{ firewall.bridge_subnet }} masquerade |
|||
} |
|||
|
|||
chain fwd.lxdbr0 { |
|||
type filter hook forward priority filter; policy accept; |
|||
ip version 4 oifname "lxdbr0" accept |
|||
ip version 4 iifname "lxdbr0" accept |
|||
} |
|||
|
|||
chain in.lxdbr0 { |
|||
type filter hook input priority filter; policy accept; |
|||
iifname "lxdbr0" tcp dport 53 accept |
|||
iifname "lxdbr0" udp dport 53 accept |
|||
iifname "lxdbr0" icmp type { destination-unreachable, time-exceeded, parameter-problem } accept |
|||
iifname "lxdbr0" udp dport 67 accept |
|||
} |
|||
|
|||
chain out.lxdbr0 { |
|||
type filter hook output priority filter; policy accept; |
|||
oifname "lxdbr0" tcp sport 53 accept |
|||
oifname "lxdbr0" udp sport 53 accept |
|||
oifname "lxdbr0" icmp type { destination-unreachable, time-exceeded, parameter-problem } accept |
|||
oifname "lxdbr0" udp sport 67 accept |
|||
} |
|||
} |
@ -0,0 +1,6 @@ |
|||
table ip nat { |
|||
chain prerouting { |
|||
type nat hook prerouting priority dstnat; policy accept; |
|||
iif "{{ nftables.wan_interface }}" tcp dport { 80, 443 } dnat to {{ nftables.dmz_ip }} |
|||
} |
|||
} |
@ -0,0 +1,16 @@ |
|||
# DEFAULT SERVER |
|||
|
|||
# redirect any http request to https |
|||
server { |
|||
listen 80; |
|||
server_name _; |
|||
return 301 https://$host$request_uri; |
|||
} |
|||
|
|||
# respond dummy nginx page |
|||
server { |
|||
listen 443 default_server ssl; |
|||
include snippets/snakeoil.conf; |
|||
root /var/www/html; |
|||
index index.html index.htm index.nginx-debian.html; |
|||
} |
@ -0,0 +1,37 @@ |
|||
{% for service in expanded.services %} |
|||
server { |
|||
listen 443 http2 ssl; |
|||
server_name {{ service.fqdn }}; |
|||
|
|||
{%- if target == 'dev' %} |
|||
include snippets/snakeoil.conf; |
|||
{%- else %} |
|||
ssl_certificate /etc/letsencrypt/live/{{ service.domain }}/fullchain.pem; |
|||
ssl_certificate_key /etc/letsencrypt/live/{{ service.domain }}/privkey.pem; |
|||
{%- endif %} |
|||
|
|||
location / { |
|||
proxy_pass http://{{ service.container }}:{{ service.port }}; |
|||
{%- if service.app == 'odoo15' %} |
|||
client_max_body_size 5M; |
|||
{%- endif %} |
|||
|
|||
proxy_http_version 1.1; |
|||
proxy_set_header X-Real-IP $remote_addr; |
|||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; |
|||
proxy_set_header Host $http_host; |
|||
proxy_set_header Upgrade $http_upgrade; |
|||
proxy_set_header Connection "upgrade"; |
|||
|
|||
{%- if target != 'prod' %} |
|||
include snippets/banner_{{ target }}.conf; |
|||
{%- endif %} |
|||
} |
|||
|
|||
{%- if service.app == 'odoo15' or service.app == 'odoo12' %} |
|||
location /longpolling { |
|||
proxy_pass http://{{ service.container }}:{{ service.port + 1000 }}; |
|||
} |
|||
{%- endif %} |
|||
} |
|||
{% endfor %} |
@ -0,0 +1,67 @@ |
|||
proxy_set_header Accept-Encoding ""; |
|||
|
|||
subs_filter '</body>' ' |
|||
<div class="betabanner_box"> |
|||
<div class="betabanner_ribbon"><span>BETA</span></div> |
|||
</div> |
|||
|
|||
<style> |
|||
|
|||
.betabanner_box { |
|||
height: 100%; |
|||
position: absolute; |
|||
bottom: 0; |
|||
pointer-events: none; |
|||
opacity: 0.7; |
|||
} |
|||
|
|||
.betabanner_ribbon { |
|||
position: fixed; |
|||
left: -5px; |
|||
bottom : 0; |
|||
z-index: 9999; |
|||
overflow: hidden; |
|||
width: 75px; height: 75px; |
|||
text-align: right; |
|||
} |
|||
.betabanner_ribbon span { |
|||
font-size: 10px; |
|||
font-weight: bold; |
|||
color: #FFF; |
|||
text-transform: uppercase; |
|||
text-align: center; |
|||
line-height: 20px; |
|||
transform: rotate(45deg); |
|||
-webkit-transform: rotate(45deg); |
|||
width: 100px; |
|||
display: block; |
|||
background: #79A70A; |
|||
|
|||
background: linear-gradient(#ABC900 30%, #79A70A 61%); |
|||
box-shadow: 5px 9px 27px -4px rgba(0, 0, 0, 1); |
|||
|
|||
position: absolute; |
|||
bottom: 16px; |
|||
left: -21px; |
|||
} |
|||
.betabanner_ribbon span::before { |
|||
content: ""; |
|||
position: absolute; left: 0px; top: 100%; |
|||
z-index: -1; |
|||
border-left: 3px solid #79A70A; |
|||
border-right: 3px solid transparent; |
|||
border-bottom: 3px solid transparent; |
|||
border-top: 3px solid #79A70A; |
|||
} |
|||
.betabanner_ribbon span::after { |
|||
content: ""; |
|||
position: absolute; right: 0px; top: 100%; |
|||
z-index: -1; |
|||
border-left: 3px solid transparent; |
|||
border-right: 3px solid #79A70A; |
|||
border-bottom: 3px solid transparent; |
|||
border-top: 3px solid #79A70A; |
|||
} |
|||
</style> |
|||
</body> |
|||
'; |
@ -0,0 +1,65 @@ |
|||
proxy_set_header Accept-Encoding ""; |
|||
|
|||
subs_filter '</body>' ' |
|||
<div class="betabanner_box"> |
|||
<div class="betabanner_ribbon"><span>DEV</span></div> |
|||
</div> |
|||
|
|||
<style> |
|||
|
|||
.betabanner_box { |
|||
height: 100%; |
|||
position: absolute; |
|||
bottom: 0; |
|||
pointer-events: none; |
|||
opacity: 0.7; |
|||
} |
|||
|
|||
.betabanner_ribbon { |
|||
position: fixed; |
|||
left: -5px; |
|||
bottom : 0; |
|||
z-index: 9999; |
|||
overflow: hidden; |
|||
width: 75px; height: 75px; |
|||
text-align: right; |
|||
} |
|||
.betabanner_ribbon span { |
|||
font-size: 10px; |
|||
font-weight: bold; |
|||
color: #FFF; |
|||
text-transform: uppercase; |
|||
text-align: center; |
|||
line-height: 20px; |
|||
transform: rotate(45deg); |
|||
-webkit-transform: rotate(45deg); |
|||
width: 100px; |
|||
display: block; |
|||
background: linear-gradient(lightblue 30%, blue 81%); |
|||
box-shadow: 5px 9px 27px -4px rgba(0, 0, 0, 1); |
|||
|
|||
position: absolute; |
|||
bottom: 16px; |
|||
left: -21px; |
|||
} |
|||
.betabanner_ribbon span::before { |
|||
content: ""; |
|||
position: absolute; left: 0px; top: 100%; |
|||
z-index: -1; |
|||
border-left: 3px solid #79A70A; |
|||
border-right: 3px solid transparent; |
|||
border-bottom: 3px solid transparent; |
|||
border-top: 3px solid #79A70A; |
|||
} |
|||
.betabanner_ribbon span::after { |
|||
content: ""; |
|||
position: absolute; right: 0px; top: 100%; |
|||
z-index: -1; |
|||
border-left: 3px solid transparent; |
|||
border-right: 3px solid #79A70A; |
|||
border-bottom: 3px solid transparent; |
|||
border-top: 3px solid #79A70A; |
|||
} |
|||
</style> |
|||
</body> |
|||
'; |
Write
Preview
Loading…
Cancel
Save
Reference in new issue